Example #1
        /// <summary>
        /// Allocate storage for all buffers.  This should be called after CreateBufferIndex
        /// and SetMediaTypes, and before Start.  The audio mixer is also created here.
        /// </summary>
        /// <returns>True if every buffer was allocated successfully; otherwise false.</returns>
        public bool CreateBuffers()
        {
            // Both media types must be set (via SetMediaTypes) before buffers can be allocated.
            if ((audioMediaType == null) || (videoMediaType == null))
            {
                return false;
            }

            // Create the audio mixer using the negotiated audio format.
            UW.CSE.MDShow.MediaTypeWaveFormatEx wf = audioMediaType.ToMediaTypeWaveFormatEx();
            audioMixer = new AudioMixer(wf.WaveFormatEx.BitsPerSample, wf.WaveFormatEx.AvgBytesPerSec, wf.WaveFormatEx.Channels);

            // The video buffer must already exist and must allocate successfully.
            if (videoBuffer != null)
            {
                if (!videoBuffer.Create(videoMediaType))
                {
                    return false;
                }
            }
            else
            {
                return false;
            }

            // Allocate every audio buffer; hold the lock so the collection is not
            // modified while we iterate.
            lock (this)
            {
                foreach (AudioBuffer ab in audioBuffers.Values)
                {
                    if (!ab.Create())
                    {
                        return false;
                    }
                }
            }
            return true;
        }
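
A minimal usage sketch for the call order described in the summary.  CreateBufferIndex, SetMediaTypes and Start are only named in that comment; their exact signatures, and the recorder instance used here, are assumptions for illustration.

        // Hypothetical call sequence; only CreateBuffers comes from the example above.
        recorder.CreateBufferIndex();                              // build the buffer index first (assumed signature)
        recorder.SetMediaTypes(audioMediaType, videoMediaType);    // negotiate formats (assumed signature)
        if (!recorder.CreateBuffers())                             // allocate storage and the audio mixer
        {
            throw new InvalidOperationException("Failed to allocate capture buffers.");
        }
        recorder.Start();                                          // only start once buffers exist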
Example #2
        /// <summary>
        /// Allocates the storage used by the buffer.  After this method returns,
        /// the buffer is ready to be started.
        /// </summary>
        /// <returns>True if the storage was allocated; false if the audio format
        /// reports a zero byte rate or the configured buffer length is zero.</returns>
        public bool Create()
        {
            Debug.Assert(myMediaType != null);

            UW.CSE.MDShow.MediaTypeWaveFormatEx wf = myMediaType.ToMediaTypeWaveFormatEx();
            uint bytesPerSec = wf.WaveFormatEx.AvgBytesPerSec;

            currentChannels = wf.WaveFormatEx.Channels;

            // A zero byte rate or a zero buffer length would make the allocation meaningless.
            if ((bytesPerSec == 0) || (SIZE_IN_SECONDS == 0))
            {
                return false;
            }

            // Use roughly a 1/4 second dummy (silent) audio sample to correct for lost audio
            // data or to resync.  The size must be evenly divisible by the BlockAlign value,
            // which we assume to be 2 or 4; making it divisible by 4 covers both cases.
            SilenceSize = (uint)(bytesPerSec / 16) * 4;

            // The ring buffer holds SIZE_IN_SECONDS one-second samples.
            Buffer        = new BufferChunk((int)bytesPerSec * SIZE_IN_SECONDS);
            Buffer.Length = (int)bytesPerSec * SIZE_IN_SECONDS;

            // One presentation time per one-second sample, plus a reusable block of silence.
            PresTime    = new ulong[SIZE_IN_SECONDS];
            QuietBuffer = new byte[SilenceSize];

            // Reset all read/write state so the buffer starts out empty.
            AddAudio       = 0;
            SkipAudio      = 0;
            SampleSize     = bytesPerSec;
            SampleCount    = SIZE_IN_SECONDS;
            WriteOffset    = 0;
            ReadOffset     = 0;
            SamplesReady   = 0;
            BufferSize     = (uint)bytesPerSec * SIZE_IN_SECONDS;
            BufferPos      = 0;
            LeftoverBytes  = 0;
            samplesWritten = 0;
            streamStopTime = 0;
            bytesBuffered  = 0;
            started        = false;
            sampleReceived = false;
            return true;
        }
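
A worked example of the SilenceSize arithmetic, using illustrative numbers (16-bit stereo PCM at 44.1 kHz) that are not taken from the example above:

        // 44.1 kHz * 2 bytes/sample * 2 channels = 176,400 bytes per second (illustrative only).
        uint bytesPerSec = 44100 * 2 * 2;
        // Integer-dividing by 16 and multiplying by 4 gives roughly 1/4 second of audio
        // (exactly 44,100 bytes here) that is always a multiple of 4, and therefore also
        // a multiple of a BlockAlign of 2.
        uint silenceSize = (bytesPerSec / 16) * 4;
        System.Diagnostics.Debug.Assert(silenceSize % 4 == 0 && silenceSize % 2 == 0);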