Example #1
0
 /// <summary>
 /// Converts one buffer of encoded audio into the output format. The input is
 /// decoded to 16-bit PCM, routed through one of four channel-mapping paths
 /// (stereo->mono, mono->stereo, stereo->stereo, mono->mono) with resampling
 /// where required, and re-encoded into the output codec.
 /// </summary>
 internal byte[] ConvertSamples(byte[] pvInSamples)
 {
     // Decode the incoming buffer to 16-bit PCM before any channel/rate work.
     short[] pcmSamples = AudioFormatConverter.Convert(pvInSamples, _iInFormatType, AudioCodec.PCM16);
     short[] converted = null;

     // The four channel combinations are mutually exclusive, so a single
     // else-if chain selects exactly the same branch as the original checks.
     if (_inWavFormat.nChannels == 2 && _outWavFormat.nChannels == 1)
     {
         // Stereo -> mono: collapse to one channel via Stereo2Mono, then resample.
         converted = Resample(_inWavFormat, _outWavFormat, Stereo2Mono(pcmSamples), _leftMemory);
     }
     else if (_inWavFormat.nChannels == 1 && _outWavFormat.nChannels == 2)
     {
         // Mono -> stereo: resample first, then expand to two channels.
         converted = Mono2Stereo(Resample(_inWavFormat, _outWavFormat, pcmSamples, _leftMemory));
     }
     else if (_inWavFormat.nChannels == 2 && _outWavFormat.nChannels == 2)
     {
         if (_inWavFormat.nSamplesPerSec == _outWavFormat.nSamplesPerSec)
         {
             // Same rate and same channel count: pass the PCM data through unchanged.
             converted = pcmSamples;
         }
         else
         {
             // Stereo -> stereo with a rate change: resample each channel
             // independently, then re-interleave.
             short[] left;
             short[] right;
             SplitStereo(pcmSamples, out left, out right);
             converted = MergeStereo(Resample(_inWavFormat, _outWavFormat, left, _leftMemory), Resample(_inWavFormat, _outWavFormat, right, _rightMemory));
         }
     }
     else if (_inWavFormat.nChannels == 1 && _outWavFormat.nChannels == 1)
     {
         // Mono -> mono: only the sample rate can differ.
         converted = Resample(_inWavFormat, _outWavFormat, pcmSamples, _leftMemory);
     }

     _eChunkStatus = Block.Middle;

     // Encode the converted PCM samples into the requested output codec.
     return (AudioFormatConverter.Convert(converted, AudioCodec.PCM16, _iOutFormatType));
 }
Example #2
0
        /// <summary>
        /// Validates the input and output wave formats and prepares the converter
        /// for subsequent conversion calls.
        /// </summary>
        /// <returns>
        /// true when a conversion pass is needed; false when the input and output
        /// formats already match (same sample rate, codec type and channel count).
        /// </returns>
        internal bool PrepareConverter(ref WAVEFORMATEX inWavFormat, ref WAVEFORMATEX outWavFormat)
        {
            // Only 1- or 2-channel audio with a positive sample rate is supported.
            bool inFormatOk = inWavFormat.nSamplesPerSec > 0 && inWavFormat.nChannels > 0 && inWavFormat.nChannels <= 2;
            bool outFormatOk = outWavFormat.nSamplesPerSec > 0 && outWavFormat.nChannels > 0 && outWavFormat.nChannels <= 2;
            if (!inFormatOk || !outFormatOk)
            {
                throw new FormatException();
            }

            _iInFormatType = AudioFormatConverter.TypeOf(inWavFormat);
            _iOutFormatType = AudioFormatConverter.TypeOf(outWavFormat);

            // Reject codec types below the supported range.
            if (_iInFormatType < AudioCodec.G711U || _iOutFormatType < AudioCodec.G711U)
            {
                throw new FormatException();
            }

            // Identical rate, codec and channel count: nothing to convert.
            if (inWavFormat.nSamplesPerSec == outWavFormat.nSamplesPerSec && _iInFormatType == _iOutFormatType && inWavFormat.nChannels == outWavFormat.nChannels)
            {
                return false;
            }

            // A sample-rate change requires the resampling filter to be rebuilt.
            if (inWavFormat.nSamplesPerSec != outWavFormat.nSamplesPerSec)
            {
                CreateResamplingFilter(inWavFormat.nSamplesPerSec, outWavFormat.nSamplesPerSec);
            }

            // Capture the formats for use during the conversion calls.
            _inWavFormat = inWavFormat;
            _outWavFormat = outWavFormat;
            return true;
        }
Example #3
0
        /// <summary>
        ///   Converts one chunk of audio from the input format to the output format.
        ///   The data is first decoded to 16-bit PCM, then routed through one of
        ///   four channel-mapping paths:
        ///   1. stereo -> mono   : Stereo2Mono, then resample
        ///   2. mono   -> stereo : resample, then Mono2Stereo
        ///   3. stereo -> stereo : split channels, resample each, re-merge
        ///   4. mono   -> mono   : resample only
        ///   and finally re-encoded into the output codec.
        /// </summary>
        internal byte[] ConvertSamples(byte[] pvInSamples)
        {
            //--- Decode the incoming buffer to 16-bit PCM working samples
            short[] pcm16 = AudioFormatConverter.Convert(pvInSamples, _iInFormatType, AudioCodec.PCM16);
            short[] result = null;

            //--- Name the channel predicates once; the four combinations below
            //--- are mutually exclusive.
            bool stereoIn  = _inWavFormat.nChannels == 2;
            bool monoIn    = _inWavFormat.nChannels == 1;
            bool stereoOut = _outWavFormat.nChannels == 2;
            bool monoOut   = _outWavFormat.nChannels == 1;

            //--- case 1: stereo -> mono
            if (stereoIn && monoOut)
            {
                result = Resample(_inWavFormat, _outWavFormat, Stereo2Mono(pcm16), _leftMemory);
            }
            //--- case 2: mono -> stereo
            else if (monoIn && stereoOut)
            {
                result = Mono2Stereo(Resample(_inWavFormat, _outWavFormat, pcm16, _leftMemory));
            }

            //--- case 3: stereo -> stereo
            if (stereoIn && stereoOut)
            {
                if (_inWavFormat.nSamplesPerSec != _outWavFormat.nSamplesPerSec)
                {
                    //--- Rate change: resample each channel separately, then re-interleave
                    short[] left;
                    short[] right;
                    SplitStereo(pcm16, out left, out right);
                    result = MergeStereo(Resample(_inWavFormat, _outWavFormat, left, _leftMemory), Resample(_inWavFormat, _outWavFormat, right, _rightMemory));
                }
                else
                {
                    //--- Same rate and channel count: pass the PCM through unchanged
                    result = pcm16;
                }
            }

            //--- case 4: mono -> mono
            if (monoIn && monoOut)
            {
                result = Resample(_inWavFormat, _outWavFormat, pcm16, _leftMemory);
            }

            _eChunkStatus = Block.Middle;

            //--- Encode the converted samples into the output format
            return (AudioFormatConverter.Convert(result, AudioCodec.PCM16, _iOutFormatType));
        }
Example #4
0
        /// <summary>
        ///   Validates the requested input and output wave formats and prepares the
        ///   converter for subsequent conversion calls.
        ///   Only 1- or 2-channel audio with a positive sample rate and a supported
        ///   codec type is accepted; anything else throws FormatException.
        ///   Returns true when a conversion pass is actually needed, false when the
        ///   input and output formats already match exactly (same sample rate,
        ///   codec type and channel count).
        /// </summary>
        internal bool PrepareConverter(ref WAVEFORMATEX inWavFormat, ref WAVEFORMATEX outWavFormat)
        {
            bool convert = true;

            // Check if we can deal with the format: 1 or 2 channels, positive sample rate.
            if (!(inWavFormat.nSamplesPerSec > 0 && inWavFormat.nChannels <= 2 && inWavFormat.nChannels > 0 && outWavFormat.nChannels > 0 && outWavFormat.nSamplesPerSec > 0 && outWavFormat.nChannels <= 2))
            {
                throw new FormatException();
            }

            _iInFormatType  = AudioFormatConverter.TypeOf(inWavFormat);
            _iOutFormatType = AudioFormatConverter.TypeOf(outWavFormat);
            // A negative codec type marks a format the converter does not support.
            if (_iInFormatType < 0 || _iOutFormatType < 0)
            {
                throw new FormatException();
            }

            // Check if Format in == Format out; if so, no conversion pass is required.
            if (outWavFormat.nSamplesPerSec == inWavFormat.nSamplesPerSec && _iOutFormatType == _iInFormatType && outWavFormat.nChannels == inWavFormat.nChannels)
            {
                convert = false;
            }
            else
            {
                //--- need reset filter when the sample rates differ
                if (inWavFormat.nSamplesPerSec != outWavFormat.nSamplesPerSec)
                {
                    CreateResamplingFilter(inWavFormat.nSamplesPerSec, outWavFormat.nSamplesPerSec);
                }

                // Keep a reference to the WaveHeaderformat for the conversion calls.
                _inWavFormat  = inWavFormat;
                _outWavFormat = outWavFormat;
            }
            return(convert);
        }