/// <summary>
/// Creates an AAC frame that wraps a raw AAC payload together with its stream
/// metadata (timestamp, frame id, AAC profile, sample rate and channel count).
/// </summary>
/// <param name="data">Raw AAC payload bytes; stored as-is, not copied.</param>
/// <param name="timeStamp">Presentation timestamp of the frame.</param>
/// <param name="frameId">Sequential identifier of the frame.</param>
/// <param name="profile">AAC profile the payload was encoded with.</param>
/// <param name="sampleRate">Sampling rate in Hz.</param>
/// <param name="channels">Channel count/configuration.</param>
public AACFrame(byte[] data, long timeStamp, uint frameId, AACProfile profile, int sampleRate, byte channels)
{
    // Straight pass-through initialisation; no validation is performed here.
    RawData    = data;
    FrameId    = frameId;
    TimeStamp  = timeStamp;
    Profile    = profile;
    SampleRate = sampleRate;
    Channels   = channels;
}
/// <summary>
/// Builds a 7-byte ADTS header describing the given raw AAC payload.
/// Note: only the header is returned; the caller is expected to prepend it
/// to <paramref name="data"/> itself (frame_length already counts both).
/// </summary>
/// <param name="data">Raw AAC payload the header will describe.</param>
/// <param name="profile">AAC profile written into the header's 2-bit profile field.</param>
/// <param name="samplingFreq">Sampling frequency in Hz, mapped to a 4-bit index.</param>
/// <param name="channels">Channel configuration (3-bit field, split across bytes 2 and 3).</param>
/// <returns>The assembled ADTS header bytes.</returns>
public static byte[] AssembleAudioFrame(byte[] data, AACProfile profile, int samplingFreq, byte channels)
{
    byte[] adtsHeader = new byte[ADTS_HEADER_LENGTH];
    // ADTS frame_length is defined as header + payload.
    int frameSize = data.Length + ADTS_HEADER_LENGTH;
    byte sampling_index = GetSamplingFrequencyIndex(samplingFreq);

    // Bytes 0-1: 12-bit syncword 0xFFF, MPEG version (ID), layer 00, protection_absent = 1 (no CRC).
    adtsHeader[0] = 0xFF;
    adtsHeader[1] = 0xF0 | (ADTS_HEADER_ID << 3) | 0x1;
    // Byte 2: profile (bits 7-6), sampling_frequency_index (bits 5-2),
    // private bit (bit 1), channel_configuration MSB (bit 0).
    // BUG FIX: (channels & 0x4) must be shifted down to bit 0. Previously it was
    // OR-ed in unshifted, landing on bit 2 and corrupting the low bit of the
    // sampling index for channel configurations >= 5.
    // NOTE(review): the 0x2 here sets the private bit, which the spec says should
    // be 0 — kept as-is to avoid changing existing output; confirm it is intended.
    adtsHeader[2] = (byte)(((byte)profile << 6) | (sampling_index << 2) | 0x2 | ((channels & 0x4) >> 2));
    // Byte 3: channel_configuration low 2 bits (bits 7-6), misc flag bits (0x30,
    // original/copy + home — nonstandard but preserved), frame_length bits 12-11.
    // Masked to 2 bits so an oversized frame cannot bleed into the flag bits.
    adtsHeader[3] = (byte)(((channels & 0x3) << 6) | 0x30 | ((frameSize >> 11) & 0x3));
    // Bytes 4-5: frame_length bits 10-3, then bits 2-0; buffer fullness = 0x7FF (VBR).
    adtsHeader[4] = (byte)((frameSize >> 3) & 0x00FF);
    adtsHeader[5] = (byte)(((frameSize & 0x0007) << 5) + 0x1F);
    // Byte 6: buffer fullness low bits + number_of_raw_data_blocks = 0.
    adtsHeader[6] = 0xFC;

    return adtsHeader;
}
/// <summary>
/// Wraps <paramref name="data"/> in an <see cref="AACFrame"/>, skipping any frame
/// whose timestamp is not strictly newer than the last one processed.
/// </summary>
/// <param name="data">Incoming audio data carrying payload, timestamp, id and flags.</param>
/// <param name="profile">AAC profile to stamp on the frame.</param>
/// <param name="samplingFreq">Sampling frequency in Hz.</param>
/// <param name="channels">Channel count/configuration.</param>
/// <returns>The new frame, or <c>null</c> when the data is stale or a duplicate.</returns>
public AACFrame AssembleAudioFrame(AudioData data, AACProfile profile, int samplingFreq, byte channels)
{
    ulong currentTimestamp = data.Timestamp;
    if (currentTimestamp <= _lastProcessedTimestamp)
    {
        // Not newer than what we already emitted - drop it.
        return null;
    }

    // Construct first, then record the timestamp, so a constructor failure
    // leaves _lastProcessedTimestamp untouched (same as the original ordering).
    AACFrame frame = new AACFrame(data.Data, data.Timestamp, data.FrameId, data.Flags, profile, samplingFreq, channels);
    _lastProcessedTimestamp = currentTimestamp;
    return frame;
}
/// <summary>
/// Builds an <see cref="AACFrame"/> directly from the supplied audio data,
/// with no timestamp de-duplication.
/// </summary>
/// <param name="data">Incoming audio data carrying payload, timestamp and id.</param>
/// <param name="profile">AAC profile to stamp on the frame.</param>
/// <param name="samplingFreq">Sampling frequency in Hz.</param>
/// <param name="channels">Channel count/configuration.</param>
/// <returns>A frame wrapping the given data.</returns>
public static AACFrame AssembleAudioFrame(AudioData data, AACProfile profile, int samplingFreq, byte channels)
{
    AACFrame frame = new AACFrame(data.Data, data.Timestamp, data.FrameId, profile, samplingFreq, channels);
    return frame;
}