Example #1
 private void CreateStream(int inputChannels, int outputChannels, int lengthInSeconds,
                           out IAudioStream sourceStream, out MonoStream monoStream)
 {
     // NullStream produces silence; its length is given in bytes:
     // sampleRate * channels * bytesPerSample * seconds
     sourceStream = new NullStream(
         new AudioProperties(inputChannels, 44100, 32, AudioFormat.IEEE),
         44100 * inputChannels * 4 /*bytesPerSample*/ * lengthInSeconds);
     monoStream = new MonoStream(sourceStream, outputChannels);
 }
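A minimal usage sketch for the helper above; the channel counts and length are illustrative, and the variable names are hypothetical:

    // Create a 10-second silent stereo source and a single-channel view of it
    IAudioStream sourceStream;
    MonoStream monoStream;
    CreateStream(2, 1, 10, out sourceStream, out monoStream);
    // sourceStream stays stereo; monoStream downmixes it to one output channel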
Example #2
 protected IAudioStream PrepareStream(IAudioStream stream) {
     // Downmix multichannel input to mono
     if (stream.Properties.Channels > 1) {
         stream = new MonoStream(stream);
     }
     // Resample to the rate the frame reader expects
     if (stream.Properties.SampleRate != FrameReader.SAMPLERATE) {
         stream = new ResamplingStream(stream, ResamplingQuality.Medium, FrameReader.SAMPLERATE);
     }
     return stream;
 }
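How a caller might use this preparation step, as a hedged sketch; rawStream stands in for any IAudioStream obtained elsewhere:

    // Conditional wrapping means already-conforming streams pass through untouched
    IAudioStream prepared = PrepareStream(rawStream);
    // prepared is now guaranteed to be mono at FrameReader.SAMPLERATE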
Example #3
        public void Generate()
        {
            // Use the file-backed stream if the track is backed by a file, otherwise its in-memory stream
            IAudioStream audioStream = inputTrack.File ?
                                       AudioStreamFactory.FromFileInfoIeee32(inputTrack.FileInfo) :
                                       inputTrack.Stream;

            // Downmix to mono and resample to the profile's processing rate
            audioStream = new MonoStream(audioStream);
            audioStream = new ResamplingStream(audioStream, ResamplingQuality.Medium, profile.SampleRate);

            STFT stft = new STFT(audioStream, profile.FrameSize, profile.FrameStep, WindowType.Hann, STFT.OutputFormat.Decibel, this.bufferSize);

            index   = 0;
            indices = stft.WindowCount;

            frameBuffer = new float[profile.FrameSize / 2];
            List<SubFingerprint> subFingerprints = new List<SubFingerprint>();

            while (stft.HasNext())
            {
                // Get FFT spectrum
                stft.ReadFrame(frameBuffer);

                // Sum FFT bins into target frequency bands
                profile.MapFrequencies(frameBuffer, bands);

                CalculateSubFingerprint(bandsPrev, bands, subFingerprints);

                CommonUtil.Swap<float[]>(ref bands, ref bandsPrev);
                index++;

                // Output subfingerprints every once in a while
                if (index % this.eventInterval == 0 && SubFingerprintsGenerated != null)
                {
                    SubFingerprintsGenerated(this, new SubFingerprintsGeneratedEventArgs(inputTrack, subFingerprints, index, indices));
                    subFingerprints.Clear();
                }
            }

            // Output remaining subfingerprints
            if (SubFingerprintsGenerated != null)
            {
                SubFingerprintsGenerated(this, new SubFingerprintsGeneratedEventArgs(inputTrack, subFingerprints, index, indices));
            }

            if (Completed != null)
            {
                Completed(this, EventArgs.Empty);
            }

            audioStream.Close();
        }
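A hedged sketch of how the surrounding generator might be driven; the generator instance, the allSubFingerprints list, and the SubFingerprints property on the event args are assumptions based on the constructor call above:

    // Subscribe before Generate() so no batches are missed
    generator.SubFingerprintsGenerated += delegate(object s, SubFingerprintsGeneratedEventArgs e2) {
        allSubFingerprints.AddRange(e2.SubFingerprints); // hypothetical property name
    };
    generator.Completed += delegate(object s, EventArgs e2) {
        Console.WriteLine("fingerprinting finished");
    };
    generator.Generate();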
Example #4
 public static IAudioStream PrepareStream(IAudioStream stream, int sampleRate)
 {
     // Convert to 32-bit IEEE float samples if necessary
     if (stream.Properties.Format != AudioFormat.IEEE)
     {
         stream = new IeeeStream(stream);
     }
     // Downmix multichannel input to mono
     if (stream.Properties.Channels > 1)
     {
         stream = new MonoStream(stream);
     }
     // Resample to the requested target rate
     if (stream.Properties.SampleRate != sampleRate)
     {
         stream = new ResamplingStream(stream, ResamplingQuality.Medium, sampleRate);
     }
     return stream;
 }
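Note the conversion order: sample format first (IeeeStream), then channel layout (MonoStream), then sample rate (ResamplingStream), so the resampler always operates on mono float data. A hypothetical call, targeting the 11025 Hz rate fingerprinting pipelines of this kind typically use:

    IAudioStream prepared = PrepareStream(inputStream, 11025); // inputStream assumed given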
Example #5
        private void Window_Loaded(object sender, RoutedEventArgs e)
        {
            audioOutput = new WasapiOut(global::NAudio.CoreAudioApi.AudioClientShareMode.Shared, true, 10);
            mixer       = new MixerStream(2, 44100);
            MonoStream mono = new MonoStream(mixer);

            Streams.ResamplingStream resampler  = new Streams.ResamplingStream(mono, ResamplingQuality.VariableRate, 44100);
            NAudioSinkStream         naudioSink = new NAudioSinkStream(resampler);

            audioOutput.Init(naudioSink);

            mixingSampleRateLabel.Content   = mixer.Properties.SampleRate;
            playbackSampleRateLabel.Content = audioOutput.OutputWaveFormat.SampleRate;

            sliderSampleRate.ValueChanged += new RoutedPropertyChangedEventHandler<double>(delegate(object s2, RoutedPropertyChangedEventArgs<double> e2) {
                if (resampler.CheckTargetSampleRate(sliderSampleRate.Value))
                {
                    resampler.TargetSampleRate = sliderSampleRate.Value;
                }
            });
        }
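Because the resampler runs with ResamplingQuality.VariableRate, the target rate can also be changed from code using the same guard the slider handler relies on; the 48000 value here is only an example:

    if (resampler.CheckTargetSampleRate(48000))
    {
        resampler.TargetSampleRate = 48000; // mixer keeps running at 44100; output is resampled live
    }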
Example #6
        private void button1_Click(object sender, RoutedEventArgs e)
        {
            Microsoft.Win32.OpenFileDialog dlg = new Microsoft.Win32.OpenFileDialog();
            dlg.DefaultExt = ".wav";
            dlg.Filter     = "Wave files|*.wav";

            if (dlg.ShowDialog() == true)
            {
                // Dispose the reader once the output file has been written
                using (WaveFileReader reader = new WaveFileReader(dlg.FileName))
                {
                    // NAudio -> Aurio: wrap the NAudio reader as an Aurio source stream
                    NAudioSourceStream nAudioSource = new NAudioSourceStream(reader);

                    // Convert to 32-bit IEEE float, downmix to mono, resample to 22.05 kHz
                    IeeeStream ieee = new IeeeStream(nAudioSource);
                    MonoStream mono = new MonoStream(ieee);
                    ResamplingStream res = new ResamplingStream(mono, ResamplingQuality.Medium, 22050);

                    // Aurio -> NAudio: expose the processed chain as an NAudio wave stream
                    NAudioSinkStream nAudioSink = new NAudioSinkStream(res);

                    WaveFileWriter.CreateWaveFile(dlg.FileName + ".processed.wav", nAudioSink);
                }
            }
        }
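The same offline round trip without the WPF dialog, as a hedged console sketch using only the types shown above; the file names are placeholders:

    using (WaveFileReader reader = new WaveFileReader("input.wav"))
    {
        IAudioStream chain = new ResamplingStream(
            new MonoStream(new IeeeStream(new NAudioSourceStream(reader))),
            ResamplingQuality.Medium, 22050);
        WaveFileWriter.CreateWaveFile("output.processed.wav", new NAudioSinkStream(chain));
    }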
Example #7
        private void AddTrack(AudioTrack audioTrack)
        {
            if (audioTrack.SourceProperties.SampleRate > audioMixer.SampleRate)
            {
                // The newly added track has a higher samplerate than the current tracks, so we adjust
                // the processing samplerate to the highest rate
                ChangeMixingSampleRate(audioTrack.SourceProperties.SampleRate);
            }

            IAudioStream input        = audioTrack.CreateAudioStream();
            IAudioStream baseStream   = new TolerantStream(new BufferedStream(input, 1024 * 256 * input.SampleBlockSize, true));
            OffsetStream offsetStream = new OffsetStream(baseStream)
            {
                Offset = TimeUtil.TimeSpanToBytes(audioTrack.Offset, baseStream.Properties)
            };

            audioTrack.OffsetChanged += new EventHandler<ValueEventArgs<TimeSpan>>(
                delegate(object sender, ValueEventArgs<TimeSpan> e) {
                    offsetStream.Offset = TimeUtil.TimeSpanToBytes(e.Value, offsetStream.Properties);
                    audioMixer.UpdateLength();
                });

            // Upmix mono inputs to dual channel stereo or downmix surround to allow channel balancing
            // TODO add better multichannel stream support and allow balancing of surround
            IAudioStream mixToStereoStream = offsetStream;

            if (mixToStereoStream.Properties.Channels == 1)
            {
                mixToStereoStream = new MonoStream(mixToStereoStream, 2);
            }
            else if (mixToStereoStream.Properties.Channels > 2)
            {
                mixToStereoStream = new SurroundDownmixStream(mixToStereoStream);
            }

            // control the track phase
            PhaseInversionStream phaseInversion = new PhaseInversionStream(mixToStereoStream)
            {
                Invert = audioTrack.InvertedPhase
            };

            MonoStream monoStream = new MonoStream(phaseInversion, phaseInversion.Properties.Channels)
            {
                Downmix = audioTrack.MonoDownmix
            };

            // necessary to control each track individually
            VolumeControlStream volumeControl = new VolumeControlStream(monoStream)
            {
                Mute    = audioTrack.Mute,
                Volume  = audioTrack.Volume,
                Balance = audioTrack.Balance
            };

            // when the AudioTrack.Mute property changes, just set it accordingly on the audio stream
            audioTrack.MuteChanged += new EventHandler<ValueEventArgs<bool>>(
                delegate(object vsender, ValueEventArgs<bool> ve) {
                    volumeControl.Mute = ve.Value;
                });

            // when the AudioTrack.Solo property changes, we have to react in different ways:
            audioTrack.SoloChanged += new EventHandler<ValueEventArgs<bool>>(
                delegate(object vsender, ValueEventArgs<bool> ve) {
                    AudioTrack senderTrack = (AudioTrack)vsender;
                    bool isOtherTrackSoloed = false;

                    foreach (AudioTrack vaudioTrack in trackList)
                    {
                        if (vaudioTrack != senderTrack && vaudioTrack.Solo)
                        {
                            isOtherTrackSoloed = true;
                            break;
                        }
                    }

                    /* if there's at least one other track that is soloed, we set the mute property of
                     * the current track to the opposite of the solo property:
                     * - if the track is soloed, we unmute it
                     * - if the track is unsoloed, we mute it
                     */
                    if (isOtherTrackSoloed)
                    {
                        senderTrack.Mute = !ve.Value;
                    }

                    /* if this is the only soloed track, we mute all other tracks
                     * if this track just got unsoloed, we unmute all other tracks
                     */
                    else
                    {
                        foreach (AudioTrack vaudioTrack in trackList)
                        {
                            if (vaudioTrack != senderTrack && !vaudioTrack.Solo)
                            {
                                vaudioTrack.Mute = ve.Value;
                            }
                        }
                    }
                });

            // when the AudioTrack.Volume property changes, just set it accordingly on the audio stream
            audioTrack.VolumeChanged += new EventHandler<ValueEventArgs<float>>(
                delegate(object vsender, ValueEventArgs<float> ve) {
                    volumeControl.Volume = ve.Value;
                });

            audioTrack.BalanceChanged += new EventHandler<ValueEventArgs<float>>(
                delegate(object vsender, ValueEventArgs<float> ve) {
                    volumeControl.Balance = ve.Value;
                });

            audioTrack.InvertedPhaseChanged += new EventHandler<ValueEventArgs<bool>>(
                delegate(object vsender, ValueEventArgs<bool> ve) {
                    phaseInversion.Invert = ve.Value;
                });
            audioTrack.MonoDownmixChanged += new EventHandler<ValueEventArgs<bool>>(
                delegate(object vsender, ValueEventArgs<bool> ve) {
                    monoStream.Downmix = ve.Value;
                });

            // adjust sample rate to mixer output rate
            ResamplingStream resamplingStream = new ResamplingStream(volumeControl,
                                                                     ResamplingQuality.Medium, audioMixer.Properties.SampleRate);

            IAudioStream trackStream = resamplingStream;

            if (trackStream.Properties.Channels == 1 && audioMixer.Properties.Channels > 1)
            {
                trackStream = new MonoStream(trackStream, audioMixer.Properties.Channels);
            }

            audioMixer.Add(trackStream);
            trackListStreams.Add(audioTrack, trackStream);
        }
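The method builds one decorator chain per track, each wrapper adding a single responsibility before the result is handed to the mixer. A condensed, hypothetical version of that chain using only the stream types seen above (offset handling, solo logic, and event wiring omitted):

    IAudioStream chain = audioTrack.CreateAudioStream();
    if (chain.Properties.Channels == 1)
    {
        chain = new MonoStream(chain, 2); // upmix mono input to stereo
    }
    chain = new PhaseInversionStream(chain) { Invert = audioTrack.InvertedPhase };
    chain = new VolumeControlStream(chain) { Volume = audioTrack.Volume };
    chain = new ResamplingStream(chain, ResamplingQuality.Medium, audioMixer.Properties.SampleRate);
    audioMixer.Add(chain);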