Example #1
        public MixedAudioProvider(params NAudioProvider[] AudioProviders)
        {
            foreach (var provider in AudioProviders)
            {
                var bufferedProvider = new BufferedWaveProvider(provider.NAudioWaveFormat)
                {
                    DiscardOnBufferOverflow = true
                };

                provider.DataAvailable += (S, E) =>
                {
                    bufferedProvider.AddSamples(E.Buffer, 0, E.Length);
                };

                var sampleProvider = bufferedProvider.ToSampleProvider();

                var providerWf = provider.WaveFormat;

                // Mono to Stereo
                if (providerWf.Channels == 1)
                {
                    sampleProvider = sampleProvider.ToStereo();
                }

                // Resample
                if (providerWf.SampleRate != WaveFormat.SampleRate)
                {
                    sampleProvider = new WdlResamplingSampleProvider(sampleProvider, WaveFormat.SampleRate);
                }

                _audioProviders.Add(provider, sampleProvider);
            }

            if (_audioProviders.Count == 1)
            {
                _mixingWaveProvider = _audioProviders
                                      .Values
                                      .First()
                                      .ToWaveProvider16();
            }
            else
            {
                var mixingSampleProvider = new MixingSampleProvider(_audioProviders.Values);

                // Screna expects 44.1 kHz 16-bit Stereo
                _mixingWaveProvider = mixingSampleProvider.ToWaveProvider16();
            }

            var bufferSize = (int)((ReadInterval / 1000.0)
                                   * WaveFormat.SampleRate
                                   * WaveFormat.Channels
                                   * (WaveFormat.BitsPerSample / 8.0));

            _buffer = new byte[bufferSize];

            Task.Factory.StartNew(Loop, TaskCreationOptions.LongRunning);
        }
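The Loop task started on the constructor's last line is not part of this example. A minimal sketch of what such a loop typically looks like is below; it assumes the class raises a DataAvailable-style event and that ReadInterval is expressed in milliseconds (the names _continue, DataAvailable and DataAvailableEventArgs are illustrative assumptions, not the original project's API):

        // Hypothetical Loop body: every ReadInterval ms, read the mixed 16-bit PCM produced by the
        // pipeline built in the constructor and hand it to subscribers. Requires using System.Threading;
        void Loop()
        {
            while (_continue)
            {
                Thread.Sleep(ReadInterval);

                // May return fewer bytes than requested if no input has buffered audio yet.
                int read = _mixingWaveProvider.Read(_buffer, 0, _buffer.Length);

                if (read > 0)
                    DataAvailable?.Invoke(this, new DataAvailableEventArgs(_buffer, read));
            }
        }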
Example #2
        private void waveIn_DataAvailable(object sender, WaveInEventArgs e)
        {
            // Apply the send gain to the captured audio and queue it into the live input buffer.
            byte[] liveBytes = AdjustAudioLevelDB(e.Buffer, sendGain);
            liveInput.AddSamples(liveBytes, 0, e.BytesRecorded);

            // Pull the same number of bytes back out of the mixer as 16-bit PCM.
            byte[] mixed = new byte[e.BytesRecorded];
            MixingSampleProvider.ToWaveProvider16().Read(mixed, 0, e.BytesRecorded);

            // Prepend the bytes left over from the previous callback to the freshly mixed data.
            byte[] soundBuffer = new byte[e.BytesRecorded + notEncodedBuffer.Length];
            Buffer.BlockCopy(notEncodedBuffer, 0, soundBuffer, 0, notEncodedBuffer.Length);
            Buffer.BlockCopy(mixed, 0, soundBuffer, notEncodedBuffer.Length, e.BytesRecorded);

            sendAudiolevel = AudioLevelDB(soundBuffer);

            // Split the buffer into whole encoder segments; whatever does not fill a
            // complete segment is stashed in notEncodedBuffer until the next callback.
            int byteCap         = bytesPerSegment;
            int segmentCount    = soundBuffer.Length / byteCap;
            int segmentsEnd     = segmentCount * byteCap;
            int notEncodedCount = soundBuffer.Length - segmentsEnd;

            notEncodedBuffer = new byte[notEncodedCount];
            Buffer.BlockCopy(soundBuffer, segmentsEnd, notEncodedBuffer, 0, notEncodedCount);
            // Encode each complete segment, prefix it with a one-byte marker and send it as an audio frame.
            for (int i = 0; i < segmentCount; i++)
            {
                byte[] segment = new byte[byteCap];
                Buffer.BlockCopy(soundBuffer, i * byteCap, segment, 0, byteCap);

                int    len;
                byte[] buff    = encoder.Encode(segment, segment.Length, out len);
                byte[] newbuff = new byte[len + 1];
                newbuff[0] = 16;
                Buffer.BlockCopy(buff, 0, newbuff, 1, len);

                session.SendAudioFrame((uint)segmentFrames, 21, newbuff);
                playersForm.IsEncoding = true;
            }
            // Green indicator while at least 10 ms of audio is buffered for playback, red otherwise.
            if (playBuffer.BufferedDuration.TotalMilliseconds > 10)
            {
                button1.ForeColor = Color.Black;
                button1.BackColor = Color.Green;
            }
            else
            {
                button1.ForeColor = Color.White;
                button1.BackColor = Color.Red;
            }
        }
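To make the segmentation above concrete, a worked example with hypothetical numbers: if notEncodedBuffer holds 128 leftover bytes, e.BytesRecorded is 3,200 and bytesPerSegment is 640, then soundBuffer.Length is 3,328, segmentCount is 5, segmentsEnd is 3,200, and the trailing 128 bytes are carried over in notEncodedBuffer until the next DataAvailable callback.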
Example #3
        static void Main(string[] args)
        {
            foreach (var d in CaptureDeviceList.Instance)
            {
                d.OnPacketArrival += OnPacketArrival;
                d.Open();
                d.Filter = string.Format("port {0}", port); // captures all incoming and outgoing on this port
                d.StartCapture();
            }

            Console.WriteLine("Press Enter to stop...");
            Console.ReadLine();

            // Grab any straggler bytes: size the buffer for whichever direction has more data buffered,
            // drain the mixed stream as 16-bit PCM and append it to the recording before closing it.
            byte[] buffer = new byte[new[] { incomingWaveProvider.BufferedBytes, outgoingWaveProvider.BufferedBytes }.Max()];
            streamProvider.ToWaveProvider16().Read(buffer, 0, buffer.Length);
            record.Write(buffer, 0, buffer.Length);
            record.Dispose();
        }
        private void OnVoiceMessage(object sender, MessageWebSocketMessageReceivedEventArgs messageEventArgs)
        {
            using (var reader = messageEventArgs.GetDataStream())
                using (var stream = reader.AsStreamForRead())
                    using (var mStream = new MemoryStream())
                    {
                        // Read the whole message into memory in 32,000-byte chunks.
                        var bufferSize = 32000;
                        var bytes      = new List<byte>();
                        var buf        = new byte[bufferSize];
                        var length     = stream.Read(buf, 0, buf.Length);
                        while (length == bufferSize)
                        {
                            bytes.AddRange(buf);
                            length = stream.Read(buf, 0, buf.Length);
                        }
                        if (length > 0)
                        {
                            bytes.AddRange(buf.Take(length));
                        }

                        var fullData = bytes.ToArray();
                        mStream.Write(fullData, 0, fullData.Length);
                        mStream.Position = 0;
                        // Read the format fields from a canonical 44-byte RIFF/WAVE header:
                        // channels at offset 22, sample rate at offset 24, bits per sample at offset 34.
                        var bitsPerSampleBytes = fullData.Skip(34).Take(2).ToArray();
                        var channelBytes       = fullData.Skip(22).Take(2).ToArray();
                        var samplingBytes      = fullData.Skip(24).Take(4).ToArray();
                        var bitsPerSample      = BitConverter.ToInt16(bitsPerSampleBytes, 0);
                        var channel            = BitConverter.ToInt16(channelBytes, 0);
                        var samplingRate       = BitConverter.ToInt32(samplingBytes, 0);

                        using (var player = new WasapiOutRT(AudioClientShareMode.Shared, 250))
                        {
                            player.Init(() =>
                            {
                                var waveChannel32 =
                                    new WaveChannel32(new RawSourceWaveStream(mStream,
                                                                              new WaveFormat(samplingRate, bitsPerSample, channel)));
                                var mixer = new MixingSampleProvider(new[] { waveChannel32.ToSampleProvider() });

                                return(mixer.ToWaveProvider16());
                            });

                            player.Play();

                            // Poll with a short delay instead of spinning a CPU core while playback finishes.
                            while (player.PlaybackState == PlaybackState.Playing)
                            {
                                Task.Delay(100).Wait();
                            }
                        }
                    }
        }
Example #5
        public MixedAudioProvider(IEnumerable<NAudioProvider> audioProviders)
        {
            foreach (var provider in audioProviders)
            {
                var bufferedProvider = new BufferedWaveProvider(provider.NAudioWaveFormat);

                provider.DataAvailable += (sender, e) =>
                {
                    bufferedProvider.AddSamples(e.Buffer, 0, e.Length);
                };

                var sampleProvider = bufferedProvider.ToSampleProvider();

                var providerWf = provider.WaveFormat;

                // Mono to Stereo
                if (providerWf.Channels == 1)
                {
                    sampleProvider = sampleProvider.ToStereo();
                }

                // Resample
                if (providerWf.SampleRate != WaveFormat.SampleRate)
                {
                    sampleProvider = new WdlResamplingSampleProvider(sampleProvider, WaveFormat.SampleRate);
                }

                _audioProviders.Add(provider, sampleProvider);
            }

            var mixingSampleProvider = new MixingSampleProvider(_audioProviders.Values);

            // Screna expects 44.1 kHz 16-bit Stereo
            _mixingWaveProvider = mixingSampleProvider.ToWaveProvider16();

            var bufferSize = (int)((ReadInterval / 1000.0)
                                   * WaveFormat.SampleRate
                                   * WaveFormat.Channels
                                   * (WaveFormat.BitsPerSample / 8.0));

            _buffer = new byte[bufferSize];

            Task.Factory.StartNew(Loop, TaskCreationOptions.LongRunning);
        }
Example #6
        public PlaybackMixer(WaveFormat waveFormat)
        {
            /*
             * var mmFmt = outputDevice.AudioClient.MixFormat;
             * WaveFormat = new WaveFormat(mmFmt.SampleRate, mmFmt.BitsPerSample <= 16 ? mmFmt.BitsPerSample : 16, mmFmt.Channels);
             */
            WaveFormat = waveFormat;

            mixer           = new MixingSampleProvider(WaveFormat.CreateIeeeFloatWaveFormat(WaveFormat.SampleRate, WaveFormat.Channels));
            mixer.ReadFully = true;

            // clamp the float mix to 16-bit PCM, then convert back to 32-bit floating point samples
            bufferStream32 = new Pcm16BitToSampleProvider(mixer.ToWaveProvider16());
            // pass through the effects
            effectStream = new EffectStream(bufferStream32);
            //effectStream.UpdateEffectChain(effects.ToArray());
        }
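The constructor above only wires up the mixer chain; it does not show how inputs are added or how the chain reaches an output device. A minimal, self-contained sketch of that pattern with NAudio follows (the file name and the 44.1 kHz stereo format are assumptions for illustration, not part of the original class):

        using System;
        using NAudio.Wave;
        using NAudio.Wave.SampleProviders;

        class MixerPlaybackSketch
        {
            static void Main()
            {
                // The mixer works in 32-bit IEEE float; every input must match its sample rate and channel count.
                var mixer = new MixingSampleProvider(WaveFormat.CreateIeeeFloatWaveFormat(44100, 2))
                {
                    ReadFully = true // keep producing silence when no input is playing
                };

                using (var input = new AudioFileReader("music.wav")) // hypothetical 44.1 kHz stereo file
                using (var output = new WaveOutEvent())
                {
                    // AudioFileReader implements ISampleProvider, so it can be added directly.
                    mixer.AddMixerInput(input);

                    // ToWaveProvider16() converts the float mix down to 16-bit PCM, as in the examples above.
                    output.Init(mixer.ToWaveProvider16());
                    output.Play();

                    Console.WriteLine("Playing, press Enter to stop...");
                    Console.ReadLine();
                }
            }
        }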
Example #7
        /// <summary>
        /// Uses the Windows TTS library to get the bytes of a PCM wave file from a text message.
        /// </summary>
        /// <param name="message"></param>
        /// <param name="voiceName"></param>
        /// <returns>The wave file's bytes, or an empty array if something went wrong.</returns>
        public byte[] GenerateRadioMessageWavBytes(string message, string voiceName = null)
        {
            if (string.IsNullOrEmpty(message))
            {
                message = "";                                // Make sure message is not null
            }
            // No voice name provided, use the default voice instead
            if (voiceName == null)
            {
                if (DefaultVoice == null) // Default voice not set/doesn't exist
                {
                    return(new byte[0]);
                }

                voiceName = DefaultVoice;
            }

            try { Reader.SelectVoice(voiceName); }
            catch (Exception) { return(new byte[0]); }

            // Text-to-speech
            MemoryStream ttsStream = new MemoryStream();            // create a new memory stream

            Reader.SetOutputToWaveStream(ttsStream);                // set the stream as output for the TTS reader
            Reader.Volume = 35;
            Reader.Speak(message);                                  // read the text into the stream
            ttsStream.Seek(0, SeekOrigin.Begin);                    // rewind the stream to position 0
            WaveFileReader waveTTS = new WaveFileReader(ttsStream); // read the stream into a WaveFileReader object

            // Mix voice with radio static
            WaveFileReader  waveStatic     = new WaveFileReader($"{HQTools.PATH_MEDIA}RadioMessageGenerator/Loop.wav"); // load the static sound loop
            ISampleProvider providerSpeech = new TTSAMRadioFilter(waveTTS.ToSampleProvider(), FXIntensity * 250);       // get the sample provider for the TTS, apply a radio filter
            ISampleProvider providerStatic = waveStatic.ToSampleProvider();                                             // get the sample provider for the static
            TimeSpan        ttsDuration    = waveTTS.TotalTime;                                                         // get the tts wave duration

            if (ttsDuration < TimeSpan.FromSeconds(MIN_SPEECH_DURATION))
            {
                ttsDuration = TimeSpan.FromSeconds(MIN_SPEECH_DURATION);                                                          // check min value
            }
            if (ttsDuration > TimeSpan.FromSeconds(MAX_SPEECH_DURATION))
            {
                ttsDuration = TimeSpan.FromSeconds(MAX_SPEECH_DURATION);                                                 // check max value
            }
            ISampleProvider[]    sources = new[] { providerSpeech.Take(ttsDuration), providerStatic.Take(ttsDuration) }; // use both providers as source with a duration of ttsDuration
            MixingSampleProvider mixingSampleProvider = new MixingSampleProvider(sources);                               // mix both channels
            IWaveProvider        radioMix             = mixingSampleProvider.ToWaveProvider16();                         // convert the mix output to a PCM 16bit sample provider

            // Concatenate radio in/out sounds
            WaveFileReader waveRadioIn  = new WaveFileReader($"{HQTools.PATH_MEDIA}RadioMessageGenerator/In.wav");  // load the radio in FX
            WaveFileReader waveRadioOut = new WaveFileReader($"{HQTools.PATH_MEDIA}RadioMessageGenerator/Out.wav"); // load the radio out FX

            IWaveProvider[] radioFXParts = new IWaveProvider[] { waveRadioIn, radioMix, waveRadioOut };             // create an array with all 3 parts

            byte[]         buffer         = new byte[1024];                                                         // create a buffer to store wav data to concatenate
            MemoryStream   finalWavStr    = new MemoryStream();                                                     // create a stream for the final concatenated wav
            WaveFileWriter waveFileWriter = null;                                                                   // create a writer to fill the stream

            foreach (IWaveProvider wav in radioFXParts)                                                             // iterate all three parts
            {
                if (waveFileWriter == null)                                                                         // no writer, first part of the array
                {
                    waveFileWriter = new WaveFileWriter(finalWavStr, wav.WaveFormat);                               // create a writer of the proper format
                }
                else if (!wav.WaveFormat.Equals(waveFileWriter.WaveFormat))                                         // else, check the other parts are of the same format
                {
                    continue;                                                                                       // file is not of the proper format
                }
                int read;                                                                                           // bytes read
                while ((read = wav.Read(buffer, 0, buffer.Length)) > 0)                                             // read data from the wave
                {
                    waveFileWriter.Write(buffer, 0, read);                                                          // and append it to the output
                }
            }

            // Copy the stream to a byte array
            waveFileWriter.Flush();
            finalWavStr.Seek(0, SeekOrigin.Begin);
            byte[] waveBytes = new byte[finalWavStr.Length];
            finalWavStr.Read(waveBytes, 0, waveBytes.Length);

            // Close/dispose of everything
            ttsStream.Close(); ttsStream.Dispose();
            waveTTS.Close(); waveTTS.Dispose();
            waveStatic.Close(); waveStatic.Dispose();
            waveRadioIn.Close(); waveRadioIn.Dispose();
            waveRadioOut.Close(); waveRadioOut.Dispose();
            waveFileWriter.Close(); waveFileWriter.Dispose();
            finalWavStr.Close(); finalWavStr.Dispose();

            // Return the bytes
            return(waveBytes);
        }
        private byte[] DoRadioMix(ISampleProvider voiceProvider, TimeSpan duration)
        {
            // Media files are stored in the Release build directory, so if we're running a Debug build, we have to look for them here.
            string debugPathToRelease = "";

#if DEBUG
            debugPathToRelease = "..\\Release\\";
#endif

            // Mix voice with radio static
            WaveFileReader  waveStatic     = new WaveFileReader(debugPathToRelease + "Media/Loop.wav"); // load the static sound loop
            ISampleProvider providerStatic = waveStatic.ToSampleProvider();                             // get the sample provider for the static

            if (duration < TimeSpan.FromSeconds(MIN_SPEECH_DURATION))
            {
                duration = TimeSpan.FromSeconds(MIN_SPEECH_DURATION);                                                       // check min value
            }
            if (duration > TimeSpan.FromSeconds(MAX_SPEECH_DURATION))
            {
                duration = TimeSpan.FromSeconds(MAX_SPEECH_DURATION);                                             // check max value
            }
            ISampleProvider[]    sources = new[] { voiceProvider.Take(duration), providerStatic.Take(duration) }; // use both providers as source with a duration of ttsDuration
            MixingSampleProvider mixingSampleProvider = new MixingSampleProvider(sources);                        // mix both channels
            IWaveProvider        radioMix             = mixingSampleProvider.ToWaveProvider16();                  // convert the mix output to a PCM 16bit sample provider

            // Concatenate radio in/out sounds
            WaveFileReader  waveRadioIn  = new WaveFileReader(debugPathToRelease + "Media/In.wav");     // load the radio in FX
            WaveFileReader  waveRadioOut = new WaveFileReader(debugPathToRelease + "Media/Out.wav");    // load the radio out FX
            IWaveProvider[] radioFXParts = new IWaveProvider[] { waveRadioIn, radioMix, waveRadioOut }; // create an array with all 3 parts

            byte[]         buffer         = new byte[1024];                                             // create a buffer to store wav data to concatenate
            MemoryStream   finalWavStr    = new MemoryStream();                                         // create a stream for the final concatenated wav
            WaveFileWriter waveFileWriter = null;                                                       // create a writer to fill the stream

            foreach (IWaveProvider wav in radioFXParts)                                                 // iterate all three parts
            {
                if (waveFileWriter == null)                                                             // no writer, first part of the array
                {
                    waveFileWriter = new WaveFileWriter(finalWavStr, wav.WaveFormat);                   // create a writer of the proper format
                }
                else if (!wav.WaveFormat.Equals(waveFileWriter.WaveFormat))                             // else, check the other parts are of the same format
                {
                    continue;                                                                           // file is not of the proper format
                }
                int read;                                                                               // bytes read
                while ((read = wav.Read(buffer, 0, buffer.Length)) > 0)                                 // read data from the wave
                {
                    waveFileWriter.Write(buffer, 0, read);                                              // and append it to the output
                }
            }

            // Copy the stream to a byte array
            waveFileWriter.Flush();
            finalWavStr.Seek(0, SeekOrigin.Begin);
            byte[] waveBytes = new byte[finalWavStr.Length];
            finalWavStr.Read(waveBytes, 0, waveBytes.Length);

            // Close/dispose of everything
            waveStatic.Close(); waveStatic.Dispose();
            waveRadioIn.Close(); waveRadioIn.Dispose();
            waveRadioOut.Close(); waveRadioOut.Dispose();
            waveFileWriter.Close(); waveFileWriter.Dispose();
            finalWavStr.Close(); finalWavStr.Dispose();

            // Return the bytes
            return(waveBytes);
        }
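On success, both methods return a complete RIFF/WAVE file as a byte array, since the WaveFileWriter writes its header into finalWavStr. A minimal consumption sketch, assuming a generator instance is at hand (the instance name and message text are hypothetical):

        // Write the generated message to disk, then play it straight from memory.
        // Requires: using System.IO; using System.Threading; using NAudio.Wave;
        byte[] wav = generator.GenerateRadioMessageWavBytes("Message text goes here");
        File.WriteAllBytes("radio-message.wav", wav);

        using (var ms = new MemoryStream(wav))
        using (var reader = new WaveFileReader(ms))
        using (var output = new WaveOutEvent())
        {
            output.Init(reader);
            output.Play();
            while (output.PlaybackState == PlaybackState.Playing)
            {
                Thread.Sleep(100);
            }
        }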