/// <summary>
/// Reads one MP3 frame from the socket (or wraps previously-read partial bytes) and packages it
/// as a <see cref="MediaStreamSample"/> stamped with the processor's running time offset.
/// Frame layout reference: http://www.mpgedit.org/mpgedit/mpeg_format/MP3Format.html
/// </summary>
/// <param name="processor">Stream processor whose <c>timeOffSet</c> is read for the sample timestamp and then advanced by one frame duration.</param>
/// <param name="socket">Socket to read frame bytes from; not touched when <paramref name="partial"/> is true.</param>
/// <param name="partial">When true, build the sample from <paramref name="partialBytes"/> instead of reading the socket.</param>
/// <param name="partialBytes">Bytes already pulled off the stream; required when <paramref name="partial"/> is true.</param>
/// <returns>The sample plus the byte count accounted to it.</returns>
/// <exception cref="ArgumentNullException"><paramref name="partial"/> is true but <paramref name="partialBytes"/> is null.</exception>
/// <exception cref="ShoutcastDisconnectionException">The socket yielded fewer bytes than a full MP3 frame (server dropped the connection).</exception>
public async Task<Tuple<MediaStreamSample, uint>> ParseSampleAsync(ShoutcastStreamProcessor processor, SocketWrapper socket, bool partial = false, byte[] partialBytes = null)
{
    if (partial && partialBytes == null)
    {
        // Fail fast with a clear exception instead of a NullReferenceException below.
        throw new ArgumentNullException(nameof(partialBytes));
    }

    IBuffer buffer = null;
    MediaStreamSample sample = null;
    uint sampleLength = 0;

    if (partial)
    {
        buffer = partialBytes.AsBuffer();

        // NOTE(review): this reports the REMAINING bytes of the frame, not partialBytes.Length —
        // presumably the caller uses it to know how much of the frame is still unread; confirm.
        sampleLength = MP3Parser.mp3_sampleSize - (uint)partialBytes.Length;

        //processor.byteOffset += sampleLength;
    }
    else
    {
        var read = await socket.LoadAsync(MP3Parser.mp3_sampleSize);

        // A short read means the server dropped the connection mid-frame.
        // (The original also tested read == 0, which is subsumed by this comparison.)
        if (read < MP3Parser.mp3_sampleSize)
        {
            throw new ShoutcastDisconnectionException();
        }

        buffer = await socket.ReadBufferAsync(MP3Parser.mp3_sampleSize);

        //processor.byteOffset += MP3Parser.mp3_sampleSize;

        sampleLength = MP3Parser.mp3_sampleSize;
    }

    sample = MediaStreamSample.CreateFromBuffer(buffer, processor.timeOffSet);
    sample.Duration = MP3Parser.mp3_sampleDuration;
    sample.KeyFrame = true;

    // Advance the running presentation timestamp for the next sample.
    processor.timeOffSet = processor.timeOffSet.Add(MP3Parser.mp3_sampleDuration);

    return new Tuple<MediaStreamSample, uint>(sample, sampleLength);
}
/// <summary>
/// Determines the stream's audio encoding by reading frames until two consecutive frames
/// report identical bitrate and sample rate, then builds the matching
/// <see cref="AudioEncodingProperties"/> and skips the remainder of the current frame so
/// subsequent reads are frame-aligned.
/// </summary>
/// <returns>Encoding properties derived from the stream's own frame headers.</returns>
/// <exception cref="NotSupportedException">Raw AAC (non-ADTS) or an unrecognized format was detected.</exception>
/// <exception cref="OperationCanceledException">Cancellation was requested via <c>cancelTokenSource</c>.</exception>
private async Task<AudioEncodingProperties> ParseEncodingFromMediaAsync()
{
    //grab the first frame and strip it for information
    AudioEncodingProperties obtainedProperties = null;

    IBuffer buffer = null;

    if (AudioInfo.AudioFormat == StreamAudioFormat.AAC)
    {
        //obtainedProperties = AudioEncodingProperties.CreateAac(0, 2, 0);
        // Raw AAC (no ADTS framing) cannot be frame-parsed here.
        // Was `new Exception(...)`; NotSupportedException is the specific type and is
        // still caught by any existing `catch (Exception)` in callers.
        throw new NotSupportedException("Not supported.");
    }

    var provider = AudioProviderFactory.GetAudioProvider(AudioInfo.AudioFormat);

    ServerAudioInfo firstFrame = await provider.GrabFrameInfoAsync(streamProcessor, AudioInfo).ConfigureAwait(false);

    //loop until we receive a few "frames" with identical information.
    while (true)
    {
        cancelTokenSource.Token.ThrowIfCancellationRequested();

        ServerAudioInfo secondFrame = await provider.GrabFrameInfoAsync(streamProcessor, AudioInfo).ConfigureAwait(false);

        if (firstFrame.BitRate == secondFrame.BitRate && firstFrame.SampleRate == secondFrame.SampleRate)
        {
            //both frames are identical, use one of them and escape the loop.
            AudioInfo = firstFrame;
            break;
        }
        else
        {
            //frames aren't identical, get rid of the first one using the second frame and loop back.
            firstFrame = secondFrame;
            continue;
        }
    }

    cancelTokenSource.Token.ThrowIfCancellationRequested();

    if (AudioInfo.AudioFormat == StreamAudioFormat.MP3)
    {
        //skip the entire first frame/sample to get back on track
        await socket.LoadAsync(MP3Parser.mp3_sampleSize - MP3Parser.HeaderLength);
        buffer = await socket.ReadBufferAsync(MP3Parser.mp3_sampleSize - MP3Parser.HeaderLength);
        //streamProcessor.byteOffset += MP3Parser.mp3_sampleSize - MP3Parser.HeaderLength;

        obtainedProperties = AudioEncodingProperties.CreateMp3((uint)AudioInfo.SampleRate, (uint)AudioInfo.ChannelCount, AudioInfo.BitRate);
    }
    else if (AudioInfo.AudioFormat == StreamAudioFormat.AAC_ADTS)
    {
        //skip the entire first frame/sample to get back on track
        await socket.LoadAsync(AAC_ADTSParser.aac_adts_sampleSize - AAC_ADTSParser.HeaderLength);
        buffer = await socket.ReadBufferAsync(AAC_ADTSParser.aac_adts_sampleSize - AAC_ADTSParser.HeaderLength);
        //streamProcessor.byteOffset += AAC_ADTSParser.aac_adts_sampleSize - AAC_ADTSParser.HeaderLength;

        obtainedProperties = AudioEncodingProperties.CreateAacAdts((uint)AudioInfo.SampleRate, (uint)AudioInfo.ChannelCount, AudioInfo.BitRate);
    }
    else
    {
        // Was `new Exception(...)`; NotSupportedException is the specific type (see above).
        throw new NotSupportedException("Unsupported format.");
    }

    if (serverSettings.RequestSongMetdata)
    {
        // Account the skipped frame bytes toward the metadata-interval counter.
        streamProcessor.metadataPos += buffer.Length; //very important or it will throw everything off!
    }

    return obtainedProperties;
}