Example #1
0
        /// <summary>
        /// Pulls a sample of the requested duration from the queue buffer.
        /// If a minimum-sample threshold is configured (MinimumSamples != -1)
        /// and the queue holds fewer samples than that, the sample is returned
        /// with no data attached.
        /// </summary>
        /// <param name="format">Requested format — NOTE(review): currently unused; the queue's own AudioFormat is used instead. Confirm callers expect this.</param>
        /// <param name="tsDuration">Duration of audio to pull.</param>
        /// <returns>A MediaSample sized for the duration, with data filled in when enough audio is queued.</returns>
        public MediaSample PullSample(AudioFormat format, TimeSpan tsDuration)
        {
            int sampleCount = this.AudioFormat.CalculateNumberOfSamplesForDuration(tsDuration);
            MediaSample result = new MediaSample(sampleCount, this.AudioFormat);

            // Honor the minimum-queued-samples threshold: hand back the empty
            // sample rather than draining a too-small queue.
            bool belowMinimum = (MinimumSamples != -1) &&
                                ((QueueBuffer.Size / AudioFormat.BytesPerSample) < MinimumSamples);
            if (belowMinimum)
                return result;

            result.Data = QueueBuffer.GetNSamples(result.ByteLength);
            return result;
        }
Example #2
0
        /// <summary>
        /// Copy constructor. Builds a new sample with the same media type and a
        /// fresh AudioFormat copied from the reference sample.
        /// </summary>
        /// <param name="refsam">Sample to copy from.</param>
        /// <param name="bShareData">
        /// When true the new sample references the same underlying data buffer;
        /// when false the bytes are deep-copied (if the reference has data).
        /// </param>
        public MediaSample(MediaSample refsam, bool bShareData)
        {
            MediaType = refsam.MediaType;
            m_objAudioFormt = new AudioFormat(refsam.AudioFormat.AudioSamplingRate,
                                              refsam.AudioFormat.AudioBitsPerSample,
                                              refsam.AudioFormat.AudioChannels);

            if (bShareData)
            {
                // Alias the reference sample's buffer directly.
                m_bData = refsam.Data;
            }
            else if (refsam.Data != null)
            {
                // Deep-copy so the two samples can be mutated independently.
                byte[] copy = new byte[refsam.Data.Length];
                Array.Copy(refsam.Data, copy, copy.Length);
                m_bData = copy;
            }
        }
Example #3
0
        /// <summary>
        /// Resamples a sample's audio data to the requested output format's
        /// sampling rate. Only 16 kHz &lt;-&gt; 8 kHz conversions are handled; any
        /// other combination returns the input sample unchanged.
        /// </summary>
        /// <param name="sample">Source sample.</param>
        /// <param name="outformat">Desired output format.</param>
        /// <returns>A new resampled MediaSample, or the original sample when no conversion applies.</returns>
        public virtual MediaSample Resample(MediaSample sample, AudioFormat outformat)
        {
            if ((sample.AudioFormat.AudioSamplingRate == AudioSamplingRate.sr16000) && (outformat.AudioSamplingRate == AudioSamplingRate.sr8000))
            {
                /// Downsample the data
                ///
                short[] sData = Utils.Resample16000To8000(sample.GetShortData());

                return new MediaSample(sData, outformat);
            }
            // BUG FIX: this branch previously repeated the 16000->8000 condition,
            // making the upsample path unreachable. It must test 8000->16000.
            else if ((sample.AudioFormat.AudioSamplingRate == AudioSamplingRate.sr8000) && (outformat.AudioSamplingRate == AudioSamplingRate.sr16000))
            {
                /// Upsample the data.  This shouldn't happen because our incoming data should always be higher or equal quality
                ///
                short[] sData = Utils.Resample8000To16000(sample.GetShortData());
                return new MediaSample(sData, outformat);
            }
            return sample;
        }
Example #4
0
        /// <summary>
        /// Appends a sample's audio data to the queue buffer, applying the
        /// amplitude multiplier when it is not unity, then trims the queue so
        /// it never holds more than MaxSamples samples.
        /// </summary>
        /// <param name="sample">Sample whose data is queued.</param>
        /// <param name="objSource">Originating object (unused here).</param>
        public void PushSample(MediaSample sample, object objSource)
        {
            // no conversion here, sample must be in 16x16 form

            if (AmplitudeMutliplier == 1.0f)
            {
                // Unity gain: queue the bytes untouched.
                QueueBuffer.AppendData(sample.Data);
            }
            else
            {
                if (AudioFormat.AudioBitsPerSample == AudioBitsPerSample.Sixteen)
                {
                    short[] scaled = sample.GetShortData();
                    for (int n = 0; n < scaled.Length; n++)
                    {
                        scaled[n] = (short)(AmplitudeMutliplier * scaled[n]);
                    }
                    QueueBuffer.AppendData(Utils.ConvertShortArrayToByteArray(scaled));
                }
                if (AudioFormat.AudioBitsPerSample == AudioBitsPerSample.Eight)
                {
                    // NOTE(review): this scales the sample's OWN byte buffer in
                    // place — confirm callers do not reuse the sample afterwards.
                    byte[] raw = sample.Data;
                    for (int n = 0; n < raw.Length; n++)
                    {
                        raw[n] = (byte)(AmplitudeMutliplier * raw[n]);
                    }
                    QueueBuffer.AppendData(raw);
                }
            }

            // Discard the oldest audio when the queue exceeds MaxSamples.
            int queuedSamples = QueueBuffer.Size / AudioFormat.BytesPerSample;
            if ((MaxSamples > 0) && (queuedSamples > MaxSamples))
            {
                int bytesToDrop = (queuedSamples - MaxSamples) * AudioFormat.BytesPerSample;
                QueueBuffer.GetNSamples(bytesToDrop);
            }
        }
Example #5
0
        /// <summary>
        /// Appends a sample's audio data to the queue buffer, applying the
        /// amplitude multiplier when it is not unity, then trims the queue so
        /// it never holds more than MaxSamples samples (oldest data dropped).
        /// </summary>
        /// <param name="sample">Sample whose data is queued.</param>
        /// <param name="objSource">Originating object (unused here).</param>
        public void PushSample(MediaSample sample, object objSource)
        {
            // no conversion here, sample must be in 16x16 form

            if (AmplitudeMutliplier != 1.0f)
            {
                if (AudioFormat.AudioBitsPerSample == AudioBitsPerSample.Sixteen)
                {
                    short[] sSamples = sample.GetShortData();
                    for (int i = 0; i < sSamples.Length; i++)
                    {
                        sSamples[i] = (short)(AmplitudeMutliplier * sSamples[i]);
                    }
                    QueueBuffer.AppendData(Utils.ConvertShortArrayToByteArray(sSamples));
                }
                if (AudioFormat.AudioBitsPerSample == AudioBitsPerSample.Eight)
                {
                    // NOTE(review): scales the sample's own byte buffer in place —
                    // confirm callers do not reuse the sample afterwards.
                    byte[] bSamples = sample.Data;
                    for (int i = 0; i < bSamples.Length; i++)
                    {
                        bSamples[i] = (byte)(AmplitudeMutliplier * bSamples[i]);
                    }
                    QueueBuffer.AppendData(bSamples);
                }
            }
            else
            {
                // Unity gain: queue the bytes untouched.
                QueueBuffer.AppendData(sample.Data);
            }

            // Discard the oldest audio when the queue exceeds MaxSamples.
            int nSamplesInQueueBuffer = QueueBuffer.Size / AudioFormat.BytesPerSample;
            if ((MaxSamples > 0) && (nSamplesInQueueBuffer > MaxSamples))
            {
                int nBytesRemove = (nSamplesInQueueBuffer - MaxSamples) * AudioFormat.BytesPerSample;
                QueueBuffer.GetNSamples(nBytesRemove);
            }
        }
Example #6
0
        /// <summary>
        /// Pulls a sample of the requested duration from the queue buffer.
        /// If a minimum-sample threshold is configured (MinimumSamples != -1)
        /// and the queue holds fewer samples than that, the sample is returned
        /// with no data attached.
        /// </summary>
        /// <param name="format">Requested format — NOTE(review): currently unused; the queue's own AudioFormat is used instead. Confirm callers expect this.</param>
        /// <param name="tsDuration">Duration of audio to pull.</param>
        /// <returns>A MediaSample sized for the duration, with data filled in when enough audio is queued.</returns>
        public MediaSample PullSample(AudioFormat format, TimeSpan tsDuration)
        {
            int nSamples = this.AudioFormat.CalculateNumberOfSamplesForDuration(tsDuration);
            MediaSample RetSample = new MediaSample(nSamples, this.AudioFormat);

            if (MinimumSamples != -1)  // See if we have enough min samples to send
            {
                if ((QueueBuffer.Size / AudioFormat.BytesPerSample) < MinimumSamples)
                    return RetSample;
            }

            byte[] bData = QueueBuffer.GetNSamples(RetSample.ByteLength);
            RetSample.Data = bData;
            return RetSample;
        }
Example #7
0
        /// <summary>
        /// Copy constructor. Builds a new sample with the same media type and a
        /// fresh AudioFormat copied from the reference sample.
        /// </summary>
        /// <param name="refsam">Sample to copy from.</param>
        /// <param name="bShareData">
        /// When true the new sample references the same underlying data buffer;
        /// when false the bytes are deep-copied (if the reference has data).
        /// </param>
        public MediaSample(MediaSample refsam, bool bShareData)
        {
            MediaType = refsam.MediaType;
            m_objAudioFormt = new AudioFormat(refsam.AudioFormat.AudioSamplingRate, refsam.AudioFormat.AudioBitsPerSample, refsam.AudioFormat.AudioChannels);

            if (bShareData == true)
                m_bData = refsam.Data;
            else
            {
                if (refsam.Data != null)
                {
                    // Deep-copy so the two samples can be mutated independently.
                    m_bData = new byte[refsam.Data.Length];
                    Array.Copy(refsam.Data, m_bData, m_bData.Length);

                }
            }
        }
Example #8
0
        /// <summary>
        /// Pull from all our input pins, then combine, then subtract
        /// </summary>
        /// <param name="tsElapsed"></param>
        void DoPushPull(TimeSpan tsElapsed)
        {
            lock (PushPullLock)
            {
                PushPullObject[] members = null;
                lock (MemberLock)
                {
                    members = Members.ToArray();
                }

                if (members.Length <= 0)
                    return;

                Dictionary<IAudioSource, short[]> InputSamples = new Dictionary<IAudioSource, short[]>();

                /// Convert our short data to int so we don't during addition
                // int[] combinedint = Utils.MakeIntArrayFromShortArray(sInitialData);
                int[] combinedint = new int[AudioFormat.CalculateNumberOfSamplesForDuration(tsElapsed)];

                ///Sum the input data from all our input sources, storing the data for each source so we can subtract it when sending
                foreach (PushPullObject nextobj in members)
                {
                    if (nextobj.AudioSource == null)
                        continue;

                    // Always pull data from a source even if it's not active, because some just queue their buffers
                    MediaSample sample = nextobj.AudioSource.PullSample(AudioFormat, tsElapsed);
                    if (sample == null)
                        continue;

                    if (nextobj.AudioSource.IsSourceActive == false)
                        continue;

                    short[] sData = sample.GetShortData();

                    /// Amplify our data if told to
                    if (nextobj.AudioSource.SourceAmplitudeMultiplier != 1.0f)
                    {
                        for (int i = 0; i < sData.Length; i++)
                        {
                            sData[i] = (short)(nextobj.AudioSource.SourceAmplitudeMultiplier * sData[i]);
                        }
                    }

                    InputSamples.Add(nextobj.AudioSource, sData);

                    Utils.SumArrays(combinedint, sData);
                }

                /// Push data to all our output filters, subtracting the data this member supplied
                foreach (PushPullObject nextobj in members)
                {
                    if (nextobj.AudioSink == null)
                        continue;

                    if (nextobj.AudioSink.IsSinkActive == false)
                        continue;

                    /// copy the summed data so we don't mangle it for the next client
                    int[] nCopy = new int[combinedint.Length];
                    Array.Copy(combinedint, nCopy, nCopy.Length);

                    foreach (IAudioSource excludesource in nextobj.SourceExcludeList)
                    {
                        if (InputSamples.ContainsKey(excludesource) == true)  // If we are in the dictionary, we are not muted, so no need to subtract
                        {
                            short[] sData = InputSamples[excludesource];
                            Utils.SubtractArray(nCopy, sData);
                        }
                    }

                    /// Amplify our data if told to
                    if (nextobj.AudioSink.SinkAmplitudeMultiplier != 1.0f)
                    {
                        for (int i = 0; i < nCopy.Length; i++)
                        {
                            nCopy[i] = (int)(nextobj.AudioSink.SinkAmplitudeMultiplier * nCopy[i]);
                        }
                    }

                    //short[] sOutput = Utils.MakeShortArrayFromIntArray(nCopy);
                    short[] sOutput = Utils.AGCAndShortArray(nCopy, short.MaxValue);

                    MediaSample outputsample = new MediaSample(sOutput, AudioFormat);
                    nextobj.AudioSink.PushSample(outputsample, this);
                }
            }
        }
Example #9
0
 /// <summary>
 ///  Push to our no where
 /// </summary>
 /// <param name="sample"></param>
 /// <summary>
 ///  Pushes to nowhere — intentional no-op sink that discards all samples.
 /// </summary>
 /// <param name="sample">Sample to discard.</param>
 /// <param name="objSource">Originating object (ignored).</param>
 public void PushSample(MediaSample sample, object objSource)
 {
 }
Example #10
0
        /// <summary>
        /// Takes the next RTP packet from the incoming buffer, decodes it with
        /// the audio codec, queues the raw audio (capping the queue size), and
        /// forwards the decoded sample to the render sink when one is attached.
        /// </summary>
        protected override void PushNextPacket()
        {
            if (AudioCodec == null)
                return;

            RTPPacket packet = IncomingRTPPacketBuffer.GetPacket();
            if (packet == null)
                return;

            byte[] decoded = AudioCodec.DecodeToBytes(packet);
            if (decoded == null)
                return;

            ReceiveAudioQueue.AppendData(decoded);

            // someone isn't taking our packets (either directly or through
            // IAudioSource), so let's not get too big — cap the queue.
            int maxQueueBytes = m_nPacketBytes * MaxAudioPacketsQueue;
            if (ReceiveAudioQueue.Size > maxQueueBytes)
            {
                ReceiveAudioQueue.GetNSamples(ReceiveAudioQueue.Size - maxQueueBytes);
            }

            if (RenderSink != null)
            {
                MediaSample decodedSample = new MediaSample(decoded, AudioCodec.AudioFormat);
                RenderSink.PushSample(decodedSample, this);
            }
        }
Example #11
0
        /// <summary>
        /// Push a sample to this filter's outgoing queue.
        /// </summary>
        /// <param name="sample"></param>
        public void PushSample(MediaSample sample, object objSource)
        {
            if (AudioCodec == null)
                return;

            MediaSample newsample = SendResampler.Resample(sample, AudioCodec.AudioFormat);
            SendAudioQueue.AppendData(newsample.Data);

            if (SendAudioQueue.Size > MaxSendBufferSize)
            {
                SendAudioQueue.GetNSamples(SendAudioQueue.Size - MaxSendBufferSize);
            }
        }
Example #12
0
        /// <summary>
        /// Pulls one duration's worth of decoded audio from the receive queue,
        /// resampled to the requested format. Returns null when no codec is set
        /// or not enough audio is queued. If the queue has grown past four
        /// requests' worth of data, it is trimmed back to two requests' worth.
        /// </summary>
        /// <param name="format">Format the caller wants the audio in.</param>
        /// <param name="tsDuration">Duration of audio to pull.</param>
        /// <returns>A resampled MediaSample, or null when unavailable.</returns>
        public MediaSample PullSample(AudioFormat format, TimeSpan tsDuration)
        {
            if (AudioCodec == null)
                return null;

            int nSamples = AudioCodec.AudioFormat.CalculateNumberOfSamplesForDuration(tsDuration);
            int nBytesNeeded = nSamples * AudioCodec.AudioFormat.BytesPerSample;

            /// Greater than 4 requests' worth in our buffer, remove some.
            /// BUG FIX: the threshold previously multiplied nBytesNeeded by
            /// BytesPerSample again (double-counting it); per the comment the
            /// intended cap is four requests' worth of bytes.
            if (ReceiveAudioQueue.Size > nBytesNeeded * 4)
                ReceiveAudioQueue.GetNSamples(ReceiveAudioQueue.Size - nBytesNeeded*2);

            if (ReceiveAudioQueue.Size >= nBytesNeeded)
            {
                byte [] bAudioData = ReceiveAudioQueue.GetNSamples(nBytesNeeded);

                /// Incoming RTP packets' audio data is in the codecs native format, we may need to resample for our host (Our windows muxer always expects 16x16, so ulaw must be resampled)
                MediaSample currentsample = new MediaSample(bAudioData, AudioCodec.AudioFormat);

                MediaSample newsample = RecvResampler.Resample(currentsample, format);

                return newsample;
            }

            return null;
        }
Example #13
0
 /// <summary>
 ///  Pushes to nowhere — intentional no-op sink that discards all samples.
 /// </summary>
 /// <param name="sample">Sample to discard.</param>
 /// <param name="objSource">Originating object (ignored).</param>
 public void PushSample(MediaSample sample, object objSource)
 {
 }