/// <summary>
/// Creates a processor around the given input stream, configuring a
/// SoundTouch instance to match the stream's channel count and sample rate.
/// </summary>
/// <param name="input">WaveChannel32 stream used for processor stream input</param>
public WaveStreamProcessor(WaveChannel32 input)
{
    inputStr = input;

    var format = input.WaveFormat;
    st = new SoundTouch();
    st.SetSampleRate((uint)format.SampleRate);
    st.SetChannels((uint)format.Channels);
}
/// <summary>
/// Wraps a sample provider in a SoundTouch pipeline that applies a tempo
/// multiplier and a pitch shift while preserving the source wave format.
/// </summary>
/// <param name="sample">Source of 32-bit float samples to modify.</param>
/// <param name="rateMult">Playback-speed multiplier (1.0 = unchanged); converted to a percent tempo change.</param>
/// <param name="pitchDelta">Pitch offset where each unit is a quarter of a semitone.</param>
public AudioStreamModifier(ISampleProvider sample, double rateMult, int pitchDelta)
{
    _sample = sample;
    WaveFormat = _sample.WaveFormat;

    _soundTouch = new SoundTouch<float, double>();
    channelCount = sample.WaveFormat.Channels;
    _soundTouch.SetSampleRate(sample.WaveFormat.SampleRate);
    _soundTouch.SetChannels(channelCount);

    // SoundTouch expects the tempo delta as a percentage, e.g. 1.5x -> +50 %.
    double tempoChangePercent = (rateMult - 1) * 100;
    _soundTouch.SetTempoChange(tempoChangePercent);
    _soundTouch.SetPitchSemiTones(pitchDelta * 0.25f);
    // NOTE(review): SetRateChange takes a percent delta, so 1.0f is a +1 % rate
    // change rather than "no change" — confirm this is intentional.
    _soundTouch.SetRateChange(1.0f);

    // Short sequence/seek/overlap windows (values match the speech-oriented
    // preset used elsewhere in this codebase).
    _soundTouch.SetSetting(SettingId.UseQuickseek, 1);
    _soundTouch.SetSetting(SettingId.UseAntiAliasFilter, 1);
    _soundTouch.SetSetting(SettingId.SequenceDurationMs, 40);
    _soundTouch.SetSetting(SettingId.SeekwindowDurationMs, 15);
    _soundTouch.SetSetting(SettingId.OverlapDurationMs, 8);

    int samplesPerRead = (WaveFormat.SampleRate * channelCount * readDurationMilliseconds) / 1000;
    sourceReadBuffer = new float[samplesPerRead];
    soundTouchReadBuffer = new float[sourceReadBuffer.Length * 10]; // support down to 0.1 speed
}
/// <summary>
/// Creates a stream modifier that routes <paramref name="sample"/> through a
/// SoundTouch processor configured with the requested tempo multiplier and
/// pitch offset; the output format mirrors the source format.
/// </summary>
/// <param name="sample">Source of 32-bit float samples.</param>
/// <param name="rateMult">Speed multiplier; 1.0 leaves the tempo unchanged.</param>
/// <param name="pitchDelta">Pitch shift in quarter-semitone units.</param>
public AudioStreamModifier(ISampleProvider sample, double rateMult, int pitchDelta)
{
    _sample = sample;
    WaveFormat = _sample.WaveFormat;

    _soundTouch = new SoundTouch<float, double>();
    channelCount = sample.WaveFormat.Channels;
    _soundTouch.SetSampleRate(sample.WaveFormat.SampleRate);
    _soundTouch.SetChannels(channelCount);

    // SoundTouch wants a percent delta: a multiplier of 1.25 becomes +25 %.
    _soundTouch.SetTempoChange((rateMult - 1) * 100);
    // Each pitchDelta unit is a quarter of a semitone.
    _soundTouch.SetPitchSemiTones(pitchDelta * 0.25f);
    // NOTE(review): SetRateChange(1.0f) requests a +1 % rate change, not
    // "no change" — verify against the SoundTouch API docs.
    _soundTouch.SetRateChange(1.0f);

    // Processing-window tuning.
    _soundTouch.SetSetting(SettingId.UseQuickseek, 1);
    _soundTouch.SetSetting(SettingId.UseAntiAliasFilter, 1);
    _soundTouch.SetSetting(SettingId.SequenceDurationMs, 40);
    _soundTouch.SetSetting(SettingId.SeekwindowDurationMs, 15);
    _soundTouch.SetSetting(SettingId.OverlapDurationMs, 8);

    // One read's worth of source samples, and ten times that on the output
    // side so speeds as low as 0.1x still fit.
    int readSampleCount = (WaveFormat.SampleRate * channelCount * readDurationMilliseconds) / 1000;
    sourceReadBuffer = new float[readSampleCount];
    soundTouchReadBuffer = new float[sourceReadBuffer.Length * 10];
}
/// <summary>
/// Constructor.
/// </summary>
/// <param name="inputWC32">WaveChannel32 stream used for processor stream input</param>
/// <param name="options">Processing options; samplesPerFrame sizes the internal buffers.</param>
public JumpCutterStreamProcessor(WaveChannel32 inputWC32, ref Options options)
{
    this.inputWC32 = inputWC32;

    var format = inputWC32.WaveFormat;
    st = new SoundTouch<float, double>();
    st.SetSampleRate(format.SampleRate);
    st.SetChannels(format.Channels);

    // One frame of 32-bit float samples; the byte buffer mirrors it
    // (4 bytes per float).
    int samplesPerFrame = options.samplesPerFrame;
    bytebuffer = new byte[samplesPerFrame * sizeof(float)];
    floatbuffer = new float[samplesPerFrame];
    this.options = options;
}
/// <summary>
/// Constructor taking an AudioFileReader as the processor stream input.
/// </summary>
/// <param name="inputAFR">AudioFileReader supplying the source audio.</param>
/// <param name="options">Processing options; samplesPerFrame sizes the internal buffers.</param>
public JumpCutterStreamProcessor(AudioFileReader inputAFR, ref Options options)
{
    this.inputAFR = inputAFR;

    var format = inputAFR.WaveFormat;
    st = new SoundTouch<float, double>();
    st.SetSampleRate(format.SampleRate);
    st.SetChannels(format.Channels);

    // Per-frame working buffers: 4 bytes per 32-bit float sample.
    int samplesPerFrame = options.samplesPerFrame;
    bytebuffer = new byte[samplesPerFrame * sizeof(float)];
    floatbuffer = new float[samplesPerFrame];
    this.options = options;

    framesToRender = new List<int>();
}
/// <summary>
/// Sets the <c>SoundTouch</c> object up according to the input file's sound
/// format and the command line parameters, and reports the planned processing
/// to stderr.
/// </summary>
/// <param name="pSoundTouch">Processor instance to configure.</param>
/// <param name="inFile">Source WAV file; supplies sample rate and channel count.</param>
/// <param name="parameters">Parsed command-line options (tempo/pitch/rate deltas and flags).</param>
private static void Setup(SoundTouch<TSampleType, TLongSampleType> pSoundTouch, WavInFile inFile, RunParameters parameters)
{
    // Match the processor to the input file's format.
    int sampleRate = inFile.GetSampleRate();
    int channels = inFile.GetNumChannels();
    pSoundTouch.SetSampleRate(sampleRate);
    pSoundTouch.SetChannels(channels);

    // Apply the user-requested deltas.
    pSoundTouch.SetTempoChange(parameters.TempoDelta);
    pSoundTouch.SetPitchSemiTones(parameters.PitchDelta);
    pSoundTouch.SetRateChange(parameters.RateDelta);

    pSoundTouch.SetSetting(SettingId.UseQuickseek, parameters.Quick);
    // Anti-alias filter stays on unless explicitly disabled on the command line.
    pSoundTouch.SetSetting(SettingId.UseAntiAliasFilter, (parameters.NoAntiAlias == 1) ? 0 : 1);

    if (parameters.Speech)
    {
        // use settings for speech processing
        pSoundTouch.SetSetting(SettingId.SequenceDurationMs, 40);
        pSoundTouch.SetSetting(SettingId.SeekwindowDurationMs, 15);
        pSoundTouch.SetSetting(SettingId.OverlapDurationMs, 8);
        Console.Error.WriteLine("Tune processing parameters for speech processing.");
    }

    // print processing information
    if (parameters.OutFileName != null)
    {
#if SOUNDTOUCH_INTEGER_SAMPLES
        Console.Error.WriteLine("Uses 16bit integer sample type in processing.\n");
#else
#if !SOUNDTOUCH_FLOAT_SAMPLES
#error "Sampletype not defined"
#endif
        Console.Error.WriteLine("Uses 32bit floating point sample type in processing.\n");
#endif
        // print processing information only if outFileName given i.e. some processing will happen
        Console.Error.WriteLine("Processing the file with the following changes:");
        Console.Error.WriteLine(" tempo change = {0:0.00} %", parameters.TempoDelta);
        Console.Error.WriteLine(" pitch change = {0} semitones", parameters.PitchDelta);
        Console.Error.WriteLine(" rate change = {0:0.00} %\n", parameters.RateDelta);
        Console.Error.Write("Working...");
    }
    else
    {
        // outFileName not given
        Console.Error.WriteLine("Warning: output file name missing, won't output anything.\n");
    }

    Console.Error.Flush();
}
/// <summary>
/// Runs the complete SoundTouch pipeline over a WAV stream: reads
/// <paramref name="inStream"/>, applies the tempo/pitch/rate changes described
/// by <paramref name="parameters"/>, and writes the processed WAV data to
/// <paramref name="outStream"/>.
/// </summary>
/// <param name="inStream">Stream containing the source WAV data.</param>
/// <param name="outStream">Destination stream for the processed WAV data.</param>
/// <param name="parameters">Processing options (tempo/pitch/rate deltas, BPM detection, speech tuning).</param>
/// <example>
/// var p = new RunParameters { TempoDelta = 50, PitchDelta = 5, Speech = true };
/// using var input = new MemoryStream(File.ReadAllBytes("Test.wav"));
/// using var output = new MemoryStream();
/// Process(input, output, p);
/// File.WriteAllBytes("Test_Pitch_Tempo.wav", output.ToArray());
/// </example>
public static void Process(Stream inStream, Stream outStream, RunParameters parameters)
{
    var soundTouch = new SoundTouch<TSampleType, TLongSampleType>();

    // Open input file.
    var inFile = new WavInFile(inStream);
    try
    {
        int bits = inFile.GetNumBits();
        int sampleRate = inFile.GetSampleRate();
        int channels = inFile.GetNumChannels();

        // Open output file with the same format as the input.
        var outFile = new WavOutFile(outStream, sampleRate, bits, channels);
        try
        {
            if (parameters.DetectBpm)
            {
                // detect sound BPM (and adjust processing parameters
                // accordingly if necessary)
                DetectBpm(inFile, parameters);
            }

            // Setup the 'SoundTouch' object for processing the sound.
            soundTouch.SetSampleRate(sampleRate);
            soundTouch.SetChannels(channels);
            soundTouch.SetTempoChange(parameters.TempoDelta);
            soundTouch.SetPitchSemiTones(parameters.PitchDelta);
            soundTouch.SetRateChange(parameters.RateDelta);
            soundTouch.SetSetting(SettingId.UseQuickseek, parameters.Quick);
            soundTouch.SetSetting(SettingId.UseAntiAliasFilter, (parameters.NoAntiAlias == 1) ? 0 : 1);

            if (parameters.Speech)
            {
                // use settings for speech processing
                soundTouch.SetSetting(SettingId.SequenceDurationMs, 40);
                soundTouch.SetSetting(SettingId.SeekwindowDurationMs, 15);
                soundTouch.SetSetting(SettingId.OverlapDurationMs, 8);
            }

            // Process the sound.
            Process(soundTouch, inFile, outFile);
        }
        finally
        {
            // Fix: the original disposed the files only on the success path,
            // leaking both when processing threw.
            outFile.Dispose();
        }
    }
    finally
    {
        inFile.Dispose();
    }
}
/// <summary>
/// Builds a variable-speed sample provider around <paramref name="sourceProvider"/>,
/// applying the given SoundTouch profile and allocating read buffers sized for
/// <paramref name="readDurationMilliseconds"/> of source audio.
/// </summary>
/// <param name="sourceProvider">Provider supplying the source samples.</param>
/// <param name="readDurationMilliseconds">How much source audio to pull per read.</param>
/// <param name="soundTouchProfile">Profile applied to the SoundTouch processor.</param>
public VarispeedSampleProvider(ISampleProvider sourceProvider, int readDurationMilliseconds, SoundTouchProfile soundTouchProfile)
{
    soundTouch = new SoundTouch();
    SetSoundTouchProfile(soundTouchProfile);

    // Assign the source before touching WaveFormat, which derives from it.
    this.sourceProvider = sourceProvider;
    soundTouch.SetSampleRate(WaveFormat.SampleRate);
    channelCount = WaveFormat.Channels;
    soundTouch.SetChannels(channelCount);

    // Enough floats for one read's worth of source audio...
    long samplesPerRead = (WaveFormat.SampleRate * channelCount * (long)readDurationMilliseconds) / 1000;
    sourceReadBuffer = new float[samplesPerRead];
    // ...and ten times that on the output side (supports down to 0.1 speed).
    soundTouchReadBuffer = new float[sourceReadBuffer.Length * 10];
}
/// <summary>
/// Initializes the audio renderer.
/// Call the Play Method to start reading samples.
/// </summary>
private void Initialize()
{
    // Tear down any previous device/processor state before re-initializing.
    Destroy();

    // Release the audio device always upon exiting
    if (Application.Current is Application app)
    {
        app.Dispatcher?.BeginInvoke(new Action(() => { app.Exit += OnApplicationExit; }));
    }

    // Enumerate devices. The default device is the first one so we check
    // that we have more than 1 device (other than the default stub)
    var hasAudioDevices = MediaElement.RendererOptions.UseLegacyAudioOut ?
        LegacyAudioPlayer.EnumerateDevices().Count > 1 :
        DirectSoundPlayer.EnumerateDevices().Count > 1;

    // Check if we have an audio output device; bail out early if not.
    if (hasAudioDevices == false)
    {
        WaitForReadyEvent.Complete();
        HasFiredAudioDeviceStopped = true;
        this.LogWarning(Aspects.AudioRenderer, "No audio device found for output.");
        return;
    }

    // Initialize the SoundTouch Audio Processor (if available)
    AudioProcessor = (SoundTouch.IsAvailable == false) ? null : new SoundTouch();
    if (AudioProcessor != null)
    {
        AudioProcessor.SetChannels(Convert.ToUInt32(WaveFormat.Channels));
        AudioProcessor.SetSampleRate(Convert.ToUInt32(WaveFormat.SampleRate));
    }

    // Initialize the Audio Device, falling back to the default device id
    // when none is configured in the renderer options.
    AudioDevice = MediaElement.RendererOptions.UseLegacyAudioOut ?
        new LegacyAudioPlayer(this, MediaElement.RendererOptions.LegacyAudioDevice?.DeviceId ?? -1) as IWavePlayer :
        new DirectSoundPlayer(this, MediaElement.RendererOptions.DirectSoundDevice?.DeviceId ?? DirectSoundPlayer.DefaultPlaybackDeviceId);

    // Create the Audio Buffer
    SampleBlockSize = Constants.AudioBytesPerSample * Constants.AudioChannelCount;
    var bufferLength = WaveFormat.ConvertMillisToByteSize(2000); // 2-second buffer
    AudioBuffer = new CircularBuffer(bufferLength);
    AudioDevice.Start();
}
/// <summary>
/// Wraps a 16-bit PCM wave provider in a SoundTouch time-stretcher with an
/// initial tempo of 1.0 (unchanged).
/// </summary>
/// <param name="source">Source provider; must deliver 16-bit samples.</param>
/// <exception cref="FormatException">Thrown when the source is not 16 bits per sample.</exception>
public SoundTouchWaveStream(IWaveProvider source)
{
    if (source.WaveFormat.BitsPerSample != 16)
    {
        throw new FormatException("Can't process bit depth of " + source.WaveFormat.BitsPerSample);
    }

    _source = source;

    // 32k samples of scratch space; the byte buffer mirrors the sample
    // buffer at 2 bytes per 16-bit sample.
    const int scratchSamples = 32768;
    _sourceSamples = new short[scratchSamples];
    _sourceBuffer = new byte[scratchSamples * 2];
    _stretchedSamples = new short[scratchSamples];

    _stretcher = new SoundTouch<short, long>();
    _stretcher.SetSampleRate(_source.WaveFormat.SampleRate);
    _stretcher.SetChannels(_source.WaveFormat.Channels);

    _tempo = 1.0;
}
/// <summary>
/// Stretches audio while keeping the channel count and sample rate.
/// A negative <paramref name="rateFactor"/> slows down (lengthens) the audio;
/// a positive value speeds it up (shortens it).
/// </summary>
/// <param name="inputAudioSamples">Interleaved 32-bit float samples to process.</param>
/// <param name="inputSampleRate">Sample rate of the input data.</param>
/// <param name="inputChannels">Number of interleaved channels.</param>
/// <param name="rateFactor">Rate change in percent passed to SoundTouch's SetRateChange.</param>
/// <returns>The stretched audio, or null when no samples were produced.</returns>
public AudioSamples TimeStretch(float[] inputAudioSamples, int inputSampleRate = 44100, int inputChannels = 2, float rateFactor = 0.0f)
{
    // Effectively "read everything"; the loop exits when the stream runs dry.
    int totalmilliseconds = Int32.MaxValue;

    int stream = Bass.BASS_StreamCreatePush(inputSampleRate, inputChannels,
        BASSFlag.BASS_STREAM_DECODE | BASSFlag.BASS_SAMPLE_FLOAT, IntPtr.Zero);
    ThrowIfStreamIsInvalid(stream);

    try
    {
        BASS_CHANNELINFO channelInfo = Bass.BASS_ChannelGetInfo(stream);
        Bass.BASS_StreamPutData(stream, inputAudioSamples, inputAudioSamples.Length * 4);

        SoundTouch<Single, Double> soundTouch = new SoundTouch<Single, Double>();
        soundTouch.SetSampleRate(channelInfo.freq);
        soundTouch.SetChannels(channelInfo.chans);
        soundTouch.SetTempoChange(0.0f);
        soundTouch.SetPitchSemiTones(0.0f);
        soundTouch.SetRateChange(rateFactor); // -1.4f = Radio 538 setting
        soundTouch.SetSetting(SettingId.UseQuickseek, 0);
        soundTouch.SetSetting(SettingId.UseAntiAliasFilter, 0);

        const int bufferSize = 2048;
        float[] buffer = new float[bufferSize];
        List<float[]> chunks = new List<float[]>();
        int size = 0;

        // Drains every ready sample out of the SoundTouch pipeline into
        // 'chunks'. ReceiveSamples may return nothing on some rounds and
        // more than one buffer's worth on others, hence the loop.
        // (Extracted: the original duplicated this loop verbatim.)
        void DrainProcessor()
        {
            int received;
            do
            {
                received = soundTouch.ReceiveSamples(buffer, bufferSize / channelInfo.chans);
                if (received > 0)
                {
                    float[] chunk = new float[received * channelInfo.chans];
                    Array.Copy(buffer, chunk, chunk.Length);
                    chunks.Add(chunk);
                    size += chunk.Length; // size of the data
                }
            }
            while (received != 0);
        }

        while ((float)size / channelInfo.freq * 1000 < totalmilliseconds)
        {
            // get re-sampled data
            int bytesRead = Bass.BASS_ChannelGetData(stream, buffer, bufferSize);
            if (bytesRead <= 0)
            {
                break;
            }

            // Feed the samples into the SoundTouch processor (4 bytes per float).
            int nSamples = (bytesRead / 4) / channelInfo.chans;
            soundTouch.PutSamples(buffer, nSamples);
            DrainProcessor();
        }

        // Input fully consumed; flush the few last samples hiding in
        // SoundTouch's internal processing pipeline.
        soundTouch.Flush();
        DrainProcessor();

        if (size <= 0)
        {
            // Not enough samples to return the requested data.
            // Fix: the original returned here BEFORE freeing the BASS stream,
            // leaking it; cleanup now always runs in the finally block.
            return null;
        }

        // Concatenate the chunks into one contiguous sample array.
        float[] data = new float[size];
        int index = 0;
        foreach (float[] chunk in chunks)
        {
            Array.Copy(chunk, 0, data, index, chunk.Length);
            index += chunk.Length;
        }

        AudioSamples audioSamples = new AudioSamples();
        audioSamples.Origin = "MEMORY";
        audioSamples.Channels = channelInfo.chans;
        audioSamples.SampleRate = channelInfo.freq;
        audioSamples.StartInMS = 0;
        // NOTE(review): this stores the total sample count, not milliseconds —
        // preserved as-is for compatibility, but the property name suggests
        // a millisecond duration; confirm with consumers.
        audioSamples.DurationInMS = size;
        audioSamples.Samples = data;
        return audioSamples;
    }
    finally
    {
        // Always release the BASS stream, including on the empty-result path
        // and when an exception is thrown mid-processing.
        Bass.BASS_ChannelStop(stream);
        Bass.BASS_StreamFree(stream);
    }
}