/// <summary>
/// Constructor, sets the decoder, the sample window size and the hop size
/// for the spectra returned. Say the sample window size is 1024 samples.
/// To get an overlap of 50% you specify a hop size of 512 samples, for 25%
/// overlap you specify a hop size of 256 and so on. Hop sizes are of
/// course not limited to powers of 2.
/// </summary>
/// <param name="decoder">The decoder to get the samples from.</param>
/// <param name="sampleWindowSize">The sample window size; must be &gt; 0.</param>
/// <param name="hopSize">The hop size; must be &gt; 0 and &lt;= sampleWindowSize.</param>
/// <param name="useHamming">Whether to use Hamming smoothing or not.</param>
public SpectrumProvider(ISampleProvider decoder, int sampleWindowSize, int hopSize, bool useHamming)
{
    // ArgumentNullException derives from ArgumentException, so callers
    // catching ArgumentException continue to work.
    if (decoder == null)
        throw new ArgumentNullException(nameof(decoder), "Decoder must be != null");
    if (sampleWindowSize <= 0)
        throw new ArgumentException("Sample window size must be > 0");
    if (hopSize <= 0)
        throw new ArgumentException("Hop size must be > 0");
    if (sampleWindowSize < hopSize)
        throw new ArgumentException("Hop size must be <= sampleSize");

    this.decoder = decoder;
    this.samples = new float[sampleWindowSize];
    this.nextSamples = new float[sampleWindowSize];
    this.tempSamples = new float[sampleWindowSize];
    this.hopSize = hopSize;
    // NOTE(review): the sample rate is hard-coded to 44100 rather than taken
    // from the decoder's wave format - confirm all inputs are 44.1 kHz.
    fft = new FFT(sampleWindowSize, 44100);

    // calculate averages based on a minimum octave width of 22 Hz
    // split each octave into three bands
    // this should result in 30 averages
    //fft.LogAverages(22, 3);

    if (useHamming)
        fft.Window(FFT.HAMMING);

    // prime the current window and the look-ahead window
    decoder.Read(samples, 0, samples.Length);
    decoder.Read(nextSamples, 0, nextSamples.Length);
}
/// <summary>
/// Reads samples that have been passed through SoundTouch (variable
/// playback rate). Emits silence when the playback rate is zero.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    if (_playbackRate.Equals(0)) // play silence
    {
        for (int n = 0; n < count; n++)
        {
            buffer[offset++] = 0;
        }
        return (count);
    }

    if (_repositionRequested)
    {
        // drop any samples buffered from before the seek
        _soundTouch.Clear();
        _repositionRequested = false;
    }

    int samplesRead = 0;
    bool reachedEndOfSource = false;
    while (samplesRead < count)
    {
        if (_soundTouch.NumberOfSamplesAvailable == 0)
        {
            var readFromSource = _sourceProvider.Read(_sourceReadBuffer, 0, _sourceReadBuffer.Length);
            if (readFromSource > 0)
            {
                // PutSamples expects sample-frames, so divide by the channel count
                _soundTouch.PutSamples(_sourceReadBuffer, readFromSource / _channelCount);
            }
            else
            {
                reachedEndOfSource = true;
                // we've reached the end, tell SoundTouch we're done
                _soundTouch.Flush();
            }
        }
        // ReceiveSamples also works in frames; convert back to samples
        var desiredSampleFrames = (count - samplesRead) / _channelCount;
        var received = _soundTouch.ReceiveSamples(_soundTouchReadBuffer, desiredSampleFrames) * _channelCount;
        // use loop instead of Array.Copy due to WaveBuffer
        for (int n = 0; n < received; n++)
        {
            buffer[offset + samplesRead++] = _soundTouchReadBuffer[n];
        }
        if (received == 0 && reachedEndOfSource)
        {
            break;
        }
    }
    return (samplesRead);
}
/// <summary>
/// Reads from the source pass-through and feeds the first channel's sample
/// of every frame to Add.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int read = source.Read(buffer, offset, count);
    // stepping by the channel count visits only channel 0 of each frame
    int frame = 0;
    while (frame < read)
    {
        Add(buffer[offset + frame]);
        frame += channels;
    }
    return (read);
}
/// <summary>
/// Reads from this sample provider
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    float[] inBuffer;
    int inBufferOffset;
    // the resampler works per-frame; count is in samples
    int framesRequested = count / channels;
    // the resampler tells us how many input frames it needs and hands us
    // its internal input buffer to fill
    int inNeeded = resampler.ResamplePrepare(framesRequested, outFormat.Channels, out inBuffer, out inBufferOffset);
    int inAvailable = source.Read(inBuffer, inBufferOffset, inNeeded * channels) / channels;
    int outAvailable = resampler.ResampleOut(buffer, offset, inAvailable, framesRequested, channels);
    // convert frames back to samples for the caller
    return (outAvailable * channels);
}
/// <summary>
/// Asserts that a single read from the provider yields exactly the
/// expected samples. <paramref name="readSize"/> sizes the scratch buffer.
/// </summary>
public static void AssertReadsExpected(this ISampleProvider sampleProvider, float[] expected, int readSize)
{
    var buffer = new float[readSize];
    int read = sampleProvider.Read(buffer, 0, expected.Length);
    Assert.AreEqual(expected.Length, read, "Number of samples read");
    int index = 0;
    while (index < read)
    {
        Assert.AreEqual(expected[index], buffer[index], String.Format("Buffer at index {0}", index));
        index++;
    }
}
/// <summary>
/// Reads from the wrapped provider and queues a copy of the freshly-read
/// samples for FFT processing.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    // Read FIRST: the incoming buffer contents are undefined until the
    // wrapped provider fills them. The original enqueued the buffer
    // *before* reading, capturing stale data from a previous call.
    int read = sampleProvider.Read(buffer, offset, count);
    lock (FftQueue)
    {
        // copy only the samples actually read, not the full request
        float[] buf = new float[read];
        Array.Copy(buffer, offset, buf, 0, read);
        FftQueue.Enqueue(buf);
    }
    return (read);
}
/// <summary>
/// Reads from the source and scales every sample read by the volume factor.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int samplesRead = fuente.Read(buffer, offset, count);
    int end = offset + samplesRead;
    for (int index = offset; index < end; index++)
    {
        buffer[index] *= volume;
    }
    return (samplesRead);
}
/// <summary>
/// On entering the cross-fade state, repositions the source past the
/// already-played material so the fade picks up at the right spot.
/// </summary>
private void ExecuteWhenStateTransitionsToCrossFade(float[] buffer, int sampleCount)
{
    fadeState = FadeState.FullVolume;
    if (!_nondivisibleFlag)
    {
        // convert bytes read so far into milliseconds of audio
        // NOTE(review): evaluates as (bytesRead*1000/SampleRate)*Channels;
        // a per-millisecond conversion would divide by (rate*channels) -
        // confirm this precedence is intentional
        var time = bytesRead * 1000 / source.WaveFormat.SampleRate * source.WaveFormat.Channels;
        source = source.Skip(TimeSpan.FromMilliseconds(timetoSkip + time));
    }
    else
    {
        var tempBuffer = new float[copyofOffsetSamples];
        source = source.Skip(TimeSpan.FromMilliseconds(timetoSkip));
        try
        {
            // discard the leading offset samples
            source.Read(tempBuffer, 0, copyofOffsetSamples);
        }
        catch
        {
            // NOTE(review): all read failures are swallowed here - confirm
            // this best-effort skip is intentional
        }
        var mod = bytesRead % sampleCount;
        if (mod == 0)
        {
            // bytesRead is an exact multiple of the device buffer:
            // discard whole buffers
            var times = bytesRead / sampleCount;
            for (int count = 0; count < times; count++)
            {
                source.Read(buffer, 0, sampleCount);
            }
        }
        else
        {
            // for wasapi exclusive and non-exclusive mode: discard full
            // buffers, then the partial remainder
            while (bytesRead > sampleCount)
            {
                source.Read(buffer, 0, sampleCount);
                bytesRead -= sampleCount;
            }
            source.Read(buffer, 0, bytesRead);
        }
    }
}
/// <summary>
/// Reads samples from this sample provider
/// </summary>
/// <param name="buffer">Sample buffer</param>
/// <param name="offset">Offset into sample buffer</param>
/// <param name="sampleCount">Number of samples desired</param>
/// <returns>Number of samples read</returns>
public int Read(float[] buffer, int offset, int sampleCount)
{
    int samplesRead = source.Read(buffer, offset, sampleCount);
    if (Volume != 1f)
    {
        // scale only the samples actually read; the original looped over
        // sampleCount, which touched stale data past samplesRead on a
        // short (end-of-stream) read
        for (int n = 0; n < samplesRead; n++)
        {
            buffer[offset + n] *= Volume;
        }
    }
    return samplesRead;
}
/// <summary>
/// Reads from the source and applies the gain effect to each sample read.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int samplesRead = fuente.Read(buffer, offset, count);
    // effect: multiply every sample that was read by the factor
    int index = 0;
    while (index < samplesRead)
    {
        buffer[offset + index] *= Factor;
        index++;
    }
    return (samplesRead);
}
/// <summary>
/// Reads from the source and applies the volume effect to each sample read.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int samplesRead = fuente.Read(buffer, offset, count);
    // apply the effect across the valid region only
    int end = offset + samplesRead;
    for (int position = offset; position < end; position++)
    {
        buffer[position] *= volumen;
    }
    return (samplesRead);
}
/// <summary>
/// Opens a WAV file and pre-loads the first two fixed-size chunks of
/// mono samples for subsequent access.
/// </summary>
public SampleBuffer(string filename)
{
    _reader = new WaveFileReader(filename);
    // total sample frames in the file
    Count = (int)_reader.SampleCount;
    SampleRate = _reader.WaveFormat.SampleRate;
    Length = TimeSpan.FromSeconds((double)Count / SampleRate);
    // mix the file down to a single channel
    _sampleProvider = _reader.ToSampleProvider().ToMono();
    _chunk1 = new Chunk
    {
        Buffer = new float[ChunkSize],
        Offset = 0
    };
    // NOTE(review): Read return values are ignored; a file shorter than two
    // chunks leaves the tail of these buffers zero-filled - confirm intended
    _sampleProvider.Read(_chunk1.Buffer, 0, ChunkSize);
    _chunk2 = new Chunk
    {
        Buffer = new float[ChunkSize],
        Offset = ChunkSize
    };
    _sampleProvider.Read(_chunk2.Buffer, 0, ChunkSize);
}
/// <summary>
/// Reads from the wrapped provider and runs each sample through the filter.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int result = _sampleProvider.Read(buffer, offset, count);
    // Apply the filter. The index must include the caller's offset;
    // the original indexed buffer[i] from zero and transformed the wrong
    // region whenever offset != 0.
    for (int i = 0; i < result; ++i)
    {
        buffer[offset + i] = _filter.Transform(buffer[offset + i]);
    }
    return (result);
}
/// <summary>Reads `count` samples from the input and writes them into `buffer`. Will block as long as it takes for the input to buffer the requested number of samples.</summary>
/// <param name="buffer">The output array to write the read samples to.</param>
/// <param name="count">The number of samples to read.</param>
/// <returns>The number of samples that have been read. Will always equal `count` except when `StopWaiting()` has been called,in which case `Read()` returns `0`.</returns>
public int Read(float[] buffer, int count)
{
    // Spin until the source has buffered `count` samples.
    // NOTE(review): assumes WaitForSamples(count) returns true while still
    // waiting and false once enough samples are available - confirm against
    // the source implementation.
    while (source.WaitForSamples(count))
    {
        // StopWaiting() sets this flag to break the blocking loop
        if (stopRequested)
        {
            return (0);
        }
    }
    return (samples.Read(buffer, 0, count));
}
/// <summary>
/// Passes the source through unchanged while adding delayed copies from a
/// circular reverb buffer on top - one tap list per channel, with a boost
/// that decays per tap.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int sourceSamplesRequired = count;
    int outIndex = offset;
    EnsureSourceBuffer(sourceSamplesRequired);
    int sourceSamplesRead = source.Read(sourceBuffer, 0, sourceSamplesRequired);
    // append the fresh samples to the ring at the write head (revw)
    int sourceSamplesWritten = Math.Min(sourceSamplesRead, (reverbBuffer.Length - revw));
    Array.Copy(sourceBuffer, 0, reverbBuffer, revw, sourceSamplesWritten);
    revw += sourceSamplesWritten;
    if (revw >= reverbBuffer.Length)
    {
        // wrap: write the remainder at the start of the ring
        revw = sourceSamplesRead - sourceSamplesWritten;
        Array.Copy(sourceBuffer, sourceSamplesWritten, reverbBuffer, 0, sourceSamplesRead - sourceSamplesWritten);
    }
    // dry signal: copy the source straight to the output
    for (int n = 0; n < sourceSamplesRead; n += channels)
    {
        for (int c = 0; c < channels; c++)
        {
            buffer[outIndex++] = sourceBuffer[n + c];
        }
    }
    // wet signal: for each channel, mix in every reverb tap
    for (int ch = 0; ch < rev.Length; ch++)
    {
        double boost = (Loopstream.LSSettings.singleton.reverbP / 100.0);
        for (int ri = 0; ri < rev[ch].Length; ri++)
        {
            // rewind the output cursor to the start of this block
            outIndex -= sourceSamplesRead;
            for (int n = 0; n < sourceSamplesRead; n += channels)
            {
                try
                {
                    // wrap this tap's read position inside the ring
                    if (rev[ch][ri] >= reverbBuffer.Length - ch)
                    {
                        rev[ch][ri] -= reverbBuffer.Length;
                    }
                    buffer[outIndex + ch] += (float)(reverbBuffer[rev[ch][ri] + ch] * boost);
                    outIndex += channels;
                    rev[ch][ri] += channels;
                }
                catch (Exception ex)
                {
                    // NOTE(review): out-of-range taps surface as a message
                    // box and leave outIndex un-advanced for this sample;
                    // `ex` is unused - confirm this diagnostic is intentional
                    System.Windows.Forms.MessageBox.Show("buffer " + (outIndex + ch) + " of " + buffer.Length + ", reverb " + (rev[ch][ri] + ch) + " of " + reverbBuffer.Length + " (" + ch + "/" + ri + ")");
                }
            }
            // each successive tap is attenuated further
            boost *= (Loopstream.LSSettings.singleton.reverbS / 100.0);
        }
    }
    return (sourceSamplesRead);
}
/// <summary>
/// Read from this sample provider
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int sampRead = sourceStream.Read(buffer, offset, count);
    // ReSharper disable once CompareOfFloatsByEqualityOperator
    if (pitch == 1f)
    {
        //Nothing to do.
        return (sampRead);
    }
    if (waveFormat.Channels == 1)
    {
        // copy into a scratch array, pitch-shift in place, write back clamped
        var mono = new float[sampRead];
        var index = 0;
        for (var sample = offset; sample <= sampRead + offset - 1; sample++)
        {
            mono[index] = buffer[sample];
            index += 1;
        }
        shifterLeft.PitchShift(pitch, sampRead, fftSize, osamp, waveFormat.SampleRate, mono);
        index = 0;
        for (var sample = offset; sample <= sampRead + offset - 1; sample++)
        {
            // Limiter guards against clipping introduced by the shifter
            buffer[sample] = Limiter(mono[index]);
            index += 1;
        }
        return (sampRead);
    }
    if (waveFormat.Channels == 2)
    {
        // de-interleave into per-channel arrays (half the samples each)
        var left = new float[(sampRead >> 1)];
        var right = new float[(sampRead >> 1)];
        var index = 0;
        for (var sample = offset; sample <= sampRead + offset - 1; sample += 2)
        {
            left[index] = buffer[sample];
            right[index] = buffer[sample + 1];
            index += 1;
        }
        // shift each channel independently, each with its own shifter state
        shifterLeft.PitchShift(pitch, sampRead >> 1, fftSize, osamp, waveFormat.SampleRate, left);
        shifterRight.PitchShift(pitch, sampRead >> 1, fftSize, osamp, waveFormat.SampleRate, right);
        index = 0;
        // re-interleave, clamping via the limiter
        for (var sample = offset; sample <= sampRead + offset - 1; sample += 2)
        {
            buffer[sample] = Limiter(left[index]);
            buffer[sample + 1] = Limiter(right[index]);
            index += 1;
        }
        return (sampRead);
    }
    throw new Exception("Shifting of more than 2 channels is currently not supported.");
}
/// <summary>
/// Reads from the source and applies the volume effect; the modified
/// buffer is the output.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int samplesRead = fuente.Read(buffer, offset, count);
    // apply the effect to each sample that was actually read
    for (int sample = 0; sample < samplesRead; sample++)
    {
        buffer[offset + sample] *= volumen;
    }
    return (samplesRead);
}
/// <summary>
/// Reads samples into the buffer (count = number requested, offset = where
/// to start writing) and scales each sample read by the volume.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int samplesRead = fuente.Read(buffer, offset, count);
    // only the samples actually read are modified; anything past
    // samplesRead is left untouched for the caller
    int position = offset;
    int remaining = samplesRead;
    while (remaining > 0)
    {
        buffer[position] *= volume;
        position++;
        remaining--;
    }
    // report how many samples were read
    return (samplesRead);
}
/// <summary>
/// Returns resampled audio while playing; otherwise emits silence but
/// still reports the full requested count.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    if (IsPlaying)
    {
        return (_resampled.Read(buffer, offset, count));
    }
    // not playing: zero the requested region
    Array.Clear(buffer, offset, count);
    return (count);
}
/// <summary>
/// Reads samples (buffer values go straight to the output, offset applies
/// any displacement, count is the number of samples) and applies the
/// volume effect to every sample read.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int samplesRead = fuente.Read(buffer, offset, count);
    // walk the valid region, adding the offset to each index
    int end = offset + samplesRead;
    for (int index = offset; index < end; index++)
    {
        buffer[index] *= volume;
    }
    return (samplesRead);
}
/// <summary>
/// Drains a sample provider completely into memory and wraps the result
/// in a CachedSound.
/// </summary>
public static CachedSound FromSampleProvider(ISampleProvider sampleProvider)
{
    // one second's worth of interleaved samples per read
    var readBuffer = new float[sampleProvider.WaveFormat.SampleRate * sampleProvider.WaveFormat.Channels];
    var data = new List<float>(readBuffer.Length / 4);
    while (true)
    {
        int samplesRead = sampleProvider.Read(readBuffer, 0, readBuffer.Length);
        if (samplesRead <= 0)
        {
            break;
        }
        data.AddRange(readBuffer.Take(samplesRead));
    }
    return (new CachedSound(data.ToArray(), sampleProvider.WaveFormat));
}
/// <summary>
/// Reads from the source and runs every sample through each filter in the
/// chain, selecting the filter instance by the sample's channel.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int samplesRead = _sourceProvider.Read(buffer, offset, count);
    foreach (var filter in _filters)
    {
        for (int i = 0; i < samplesRead; i++)
        {
            int position = offset + i;
            // i % channels picks the per-channel filter state
            buffer[position] = filter[(i % _sourceProvider.WaveFormat.Channels)].Transform(buffer[position]);
        }
    }
    return (samplesRead);
}
/// <summary>
/// Reads audio from this sample provider, applying the ADSR envelope gain
/// sample by sample. Returns 0 once the envelope has gone idle.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    if (adsr.State == EnvelopeGenerator.EnvelopeState.Idle)
    {
        // envelope finished - report end of stream
        return (0);
    }
    int samplesRead = source.Read(buffer, offset, count);
    for (int index = 0; index < samplesRead; index++)
    {
        buffer[offset + index] *= adsr.Process();
    }
    return (samplesRead);
}
/// <summary>
/// Computes the file's average zero-crossing rate (avZcr) and short-time
/// energy (avSte) over consecutive one-second windows.
/// </summary>
private void CalculateAv()
{
    if (filePath == null)
    {
        return;
    }
    // Calculate average ZCR and STE for 1-second window
    using (WaveFileReader reader = new WaveFileReader(filePath))
    {
        ISampleProvider sampleProvider = reader.ToSampleProvider();
        // One second window
        // NOTE(review): AverageBytesPerSecond describes the *source* format,
        // so this is exactly one second of floats only when the source is
        // 32-bit - confirm against the input files used.
        int windowSizeFloats = sampleProvider.WaveFormat.AverageBytesPerSecond / sizeof(float);
        float[] buffer = new float[windowSizeFloats];
        avZcr = 0;
        avSte = 0;
        int windowsCount = 0;
        while (true)
        {
            int samplesRead = sampleProvider.Read(buffer, 0, windowSizeFloats);
            if (samplesRead <= 0)
            {
                break;
            }
            // Calculate ZCR in window: count sign changes between neighbours
            double sum = 0;
            for (int i = 1; i < samplesRead; i++)
            {
                sum += Math.Abs(Math.Sign(buffer[i]) - Math.Sign(buffer[i - 1]));
            }
            double value = sum * sampleProvider.WaveFormat.SampleRate / (2.0 * samplesRead);
            avZcr += value;
            // Calculate STE in window: mean of squared samples
            sum = 0;
            for (int i = 0; i < samplesRead; i++)
            {
                sum += buffer[i] * buffer[i];
            }
            value = sum / samplesRead;
            avSte += value;
            windowsCount++;
        }
        // Guard against an empty file: the original divided by zero here,
        // leaving NaN in both averages.
        if (windowsCount > 0)
        {
            avZcr /= (double)windowsCount;
            avSte /= (double)windowsCount;
        }
    }
}
/// <summary>
/// Reads from the wrapped provider, disposing this instance once the
/// provider reports end of stream; returns 0 after disposal.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    if (_isDisposed)
    {
        return (0);
    }
    int samplesRead = _provider.Read(buffer, offset, count);
    if (samplesRead == 0)
    {
        // end of stream - release resources eagerly
        Dispose();
    }
    return (samplesRead);
}
/// <summary>
/// Reads from the source and, when Width is positive, runs each sample
/// through Process in place.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    int samplesRead = _source.Read(buffer, offset, count);
    if (Width > 0.0f)
    {
        int end = offset + samplesRead;
        for (int index = offset; index < end; index++)
        {
            buffer[index] = Process(buffer[index]);
        }
    }
    return (samplesRead);
}
/// <summary>
/// Reads one chunk into the window ring and adds a stripe once a full
/// window of samples has accumulated.
/// </summary>
public void ReadChunk(ISampleProvider sampleProvider)
{
    // a short read means the source ended mid-chunk, which this pipeline
    // cannot handle
    if (sampleProvider.Read(WindowRing, WindowRingPos, CHUNK_SIZE) != CHUNK_SIZE)
    {
        // more specific than the original bare `new Exception()`, and still
        // caught by any existing catch (Exception) handler
        throw new InvalidOperationException("Source provider returned fewer samples than a full chunk.");
    }
    ProcessedSamples += CHUNK_SIZE;
    if (ProcessedSamples >= WINDOW_SIZE)
    {
        AddStripe();
    }
}
/// <summary>
/// Reads from the source and, when radio effects are enabled, applies
/// optional clipping plus the configured filter chain to each sample.
/// </summary>
public int Read(float[] buffer, int offset, int sampleCount)
{
    var samplesRead = _source.Read(buffer, offset, sampleCount);
    if (_globalSettings.ProfileSettingsStore.GetClientSettingBool(ProfileSettingsKeys.RadioEffects))
    {
        // process only the samples actually read; the original iterated over
        // sampleCount and could transform stale data past samplesRead on a
        // short read
        for (var n = 0; n < samplesRead; n++)
        {
            var audio = (double)buffer[offset + n];
            if (audio != 0)
                // because we have silence in one channel (if a user picks radio left or right ear) we don't want to transform it or it'll play in both
            {
                if (_globalSettings.ProfileSettingsStore.GetClientSettingBool(ProfileSettingsKeys.RadioEffectsClipping))
                {
                    if (audio > CLIPPING_MAX)
                    {
                        audio = CLIPPING_MAX;
                    }
                    else if (audio < CLIPPING_MIN)
                    {
                        audio = CLIPPING_MIN;
                    }
                }
                for (int i = 0; i < _filters.Length; i++)
                {
                    var filter = _filters[i];
                    audio = filter.ProcessSample(audio);
                    if (double.IsNaN(audio))
                    {
                        // filter blew up - fall back to the unfiltered sample
                        audio = buffer[offset + n];
                    }
                }
                buffer[offset + n] = (float)audio;
            }
        }
    }
    return (samplesRead);
}
/// <summary>
/// Produces `count` mono samples by averaging all channels of the source,
/// one frame at a time.
/// </summary>
public int Read(float[] buffer, int offset, int count)
{
    if ((buffer == null) || (buffer.Length < (offset + count)))
    {
        throw new ArgumentException(I18NString.Lookup("Audio_SoundFX_IncompleteBuffer"));
    }
    // scratch buffer holding one frame (one sample per channel)
    float[] iBuffer = new float[WaveFormat.Channels];
    for (int x = 0; x < count; x++)
    {
        int read = source.Read(iBuffer, 0, WaveFormat.Channels);
        if (read == 0)
        {
            // source exhausted on a frame boundary: report samples produced so far
            return (x);
        }
        int totalRead = read;
        while (totalRead < WaveFormat.Channels)
        {
            // Make sure we fully read a complete set of channel signals (unless the stream ends)
            read = source.Read(iBuffer, totalRead, WaveFormat.Channels - totalRead);
            if (read == 0)
            {
                // NOTE(review): if the stream ends mid-frame the unread
                // channel slots keep their previous values and are still
                // averaged below - confirm this is intended
                totalRead = WaveFormat.Channels;
            }
            else
            {
                totalRead += read;
            }
        }
        // average the channels down to one mono sample
        double mixed = 0.0;
        for (int y = 0; y < totalRead; y++)
        {
            mixed += iBuffer[y];
        }
        buffer[offset + x] = (float)(mixed / WaveFormat.Channels);
    }
    return (count);
}
/// <summary>
/// Renders a 1-bpp waveform bitmap for an MP3 timeline event and caches
/// it (plus the clip duration) keyed by the file path.
/// </summary>
public void PopulateBitmapCache(TimelineEvent x)
{
    if (x.Action == TimelineEventAction.PlayMp3 && !WaveBitmaps.ContainsKey(x.Parameter))
    {
        var reader = new Mp3FileReader(x.Parameter);
        // bitmap width: one pixel per samplesPerPixel units of stream length
        // NOTE(review): reader.Length is in bytes, not samples - confirm
        var width = ((double)reader.Length / samplesPerPixel);
        var widthFloor = (int)Math.Floor(width);
        Bitmap b = new Bitmap(widthFloor, 32, PixelFormat.Format16bppRgb555);
        using (Graphics g = Graphics.FromImage(b))
        {
            g.Clear(Color.White);
            ISampleProvider m = reader.ToSampleProvider();
            // NOTE(review): assumes 2 bytes per source sample - confirm
            float[] buffer = new float[reader.Length / 2];
            m.Read(buffer, 0, buffer.Length);
            var binCt = b.Width;
            var bins = new float[binCt];
            var samplesPerBin = (float)buffer.Length / binCt;
            // peak-detect: keep the maximum sample per horizontal pixel
            for (int i = 0; i < buffer.Length; i++)
            {
                var bin = (int)(Math.Floor(i / (samplesPerBin + 1)));
                if (bins[bin] < buffer[i])
                {
                    bins[bin] = buffer[i];
                }
            }
            // draw one vertical line per pixel, mirrored around the centre
            for (int i = 0; i < binCt; i++)
            {
                var centre = b.Height / 2;
                var scaledBinValue = bins[i] * centre;
                g.DrawLine(Pens.Black, i, centre - scaledBinValue, i, centre + scaledBinValue);
            }
            Bitmap c = new Bitmap(b.Width, b.Height, PixelFormat.Format1bppIndexed);
            // NOTE(review): Bitmap.Palette returns a copy, so this assignment
            // has no effect; Clone below replaces `c` anyway - confirm intent
            c.Palette.Entries[0] = Color.DarkGray;
            c = b.Clone(new Rectangle(0, 0, b.Width, b.Height), PixelFormat.Format1bppIndexed);
            WaveBitmaps.Add(x.Parameter, c);
            b.Dispose();
            // duration in seconds = bytes / (rate * bytes-per-sample * channels)
            WaveBitmapDurations.Add(x.Parameter, (float)reader.Length / (reader.WaveFormat.SampleRate * (reader.WaveFormat.BitsPerSample / 8) * reader.WaveFormat.Channels));
            reader.Close();
            reader.Dispose();
        }
    }
    // NOTE(review): forcing a gen-2 collection on every call is a
    // performance smell - confirm it is actually needed here
    GC.Collect(2);
}
/// <summary>
/// Reads samples from this sample provider
/// </summary>
/// <param name="buffer">Sample buffer</param>
/// <param name="offset">Offset into sample buffer</param>
/// <param name="sampleCount">Number of samples desired</param>
/// <returns>Number of samples read</returns>
public int Read(float[] buffer, int offset, int sampleCount)
{
    int samplesRead = source.Read(buffer, offset, sampleCount);
    if (Volume != 1f)
    {
        // ramp across the samples actually read; the original sized the
        // ramp and loop by sampleCount, which advanced the gain for - and
        // scaled - stale samples past samplesRead on a short read
        rampGain.CalculateGainStepDelta(samplesRead);
        for (int n = 0; n < samplesRead; n++)
        {
            buffer[offset + n] *= (float)rampGain.CurrentGain;
            rampGain.CalculateNextGain();
        }
    }
    return (samplesRead);
}
/// <summary>
/// Feeds samples from the provider into the FFT buffer (channel 0 of each
/// frame, Hamming-windowed) and invokes the callback each time the buffer
/// fills completely.
/// </summary>
public void Read(ISampleProvider waveSampleProvider, FFTCalculated FFTCalculated)
{
    var sampleBuffer = new float[fftBufferLength];
    int samplesRead = waveSampleProvider.Read(sampleBuffer, 0, fftBufferLength);
    for (int sample = 0; sample < samplesRead; sample += channels)
    {
        // window the sample as it enters the current ring position
        fftBuffer[fftBufferCurPossition].X = (float)(sampleBuffer[sample] * FastFourierTransform.HammingWindow(fftBufferCurPossition, fftBufferLength));
        fftBuffer[fftBufferCurPossition].Y = 0;
        fftBufferCurPossition++;
        if (fftBufferCurPossition >= fftBufferLength)
        {
            fftBufferCurPossition = 0;
            // m is log2 of the buffer length, e.g. 1024 = 2^10
            FastFourierTransform.FFT(true, m, fftBuffer);
            FFTCalculated(fftBuffer);
        }
    }
}