public void CopyToInterleavedPackedThrowsExceptionWhenDisposed()
{
    var samples = new SampleBuffer(new float[2], 2);
    var outSamples = new byte[4];
    samples.Dispose();

    Assert.Throws<ObjectDisposedException>(() => samples.CopyToInterleaved(outSamples, 16));
}

public void CopyToFloatMonoThrowsExceptionWhenDisposed()
{
    var samples = new SampleBuffer(new float[1], 1);
    var outSamples = new float[1];
    samples.Dispose();

    Assert.Throws<ObjectDisposedException>(() => samples.CopyTo(outSamples));
}

public void CopyToIntStereoThrowsExceptionWhenDisposed()
{
    var samples = new SampleBuffer(new float[2], 2);
    var leftOutSamples = new int[1];
    var rightOutSamples = new int[1];
    samples.Dispose();

    Assert.Throws<ObjectDisposedException>(() => samples.CopyTo(leftOutSamples, rightOutSamples, 16));
}
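// A hedged companion sketch (not in the original suite): the same copy call should
// succeed while the buffer is still live, making disposal the only variable in the
// tests above. The 16-bit packed call shape is taken from the existing tests; that
// the live call completes without throwing is an assumption.
public void CopyToInterleavedPackedSucceedsBeforeDispose()
{
    var samples = new SampleBuffer(new float[2], 2);
    var outSamples = new byte[4];

    // No ObjectDisposedException expected here - the buffer has not been disposed yet
    samples.CopyToInterleaved(outSamples, 16);

    samples.Dispose();
}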
public SampleBuffer Process(SampleBuffer samples)
{
    // A scale within 0.001 of 1 is effectively a no-op, so pass the buffer through untouched
    if (Math.Abs(_scale - 1) < 0.001)
        return samples;

    Span<float> buffer = stackalloc float[samples.Frames * samples.Channels];
    samples.CopyToInterleaved(buffer);
    samples.Dispose();

    // Optimization - Vectorized implementation is significantly faster with AVX2 (256-bit SIMD)
    if (Vector.IsHardwareAccelerated)
    {
        // Scale as many samples as possible in full SIMD-width chunks...
        var sampleVectors = MemoryMarshal.Cast<float, Vector<float>>(buffer);
        for (var vectorIndex = 0; vectorIndex < sampleVectors.Length; vectorIndex++)
            sampleVectors[vectorIndex] *= _scale;

        // ...then scale the remainder that didn't fill a complete vector
        for (var sampleIndex = sampleVectors.Length * Vector<float>.Count; sampleIndex < buffer.Length; sampleIndex++)
            buffer[sampleIndex] *= _scale;
    }
    else
    {
        for (var sampleIndex = 0; sampleIndex < buffer.Length; sampleIndex++)
            buffer[sampleIndex] *= _scale;
    }

    return new SampleBuffer(buffer, samples.Channels);
}
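// A self-contained sketch (not part of the original source) of the same
// vectorize-then-remainder pattern on a plain float span, handy for checking the loop
// bounds in isolation. It assumes only System.Numerics.Vector<T> and
// MemoryMarshal.Cast, both of which the method above already relies on; the class
// name and buffer length are illustrative.
using System;
using System.Numerics;
using System.Runtime.InteropServices;

static class ScaleSketch
{
    static void Main()
    {
        // 10 is deliberately not a multiple of Vector<float>.Count (4 or 8 on most
        // hardware), so the scalar remainder loop is exercised
        Span<float> buffer = stackalloc float[10];
        for (var i = 0; i < buffer.Length; i++)
            buffer[i] = i;

        const float scale = 0.5f;
        if (Vector.IsHardwareAccelerated)
        {
            // Full SIMD-width chunks first...
            var vectors = MemoryMarshal.Cast<float, Vector<float>>(buffer);
            for (var v = 0; v < vectors.Length; v++)
                vectors[v] *= scale;

            // ...then whatever is left over
            for (var i = vectors.Length * Vector<float>.Count; i < buffer.Length; i++)
                buffer[i] *= scale;
        }
        else
        {
            for (var i = 0; i < buffer.Length; i++)
                buffer[i] *= scale;
        }

        foreach (var sample in buffer)
            Console.Write($"{sample} "); // 0 0.5 1 1.5 2 2.5 3 3.5 4 4.5
    }
}
// Note the ownership contract in Process above: it disposes its input SampleBuffer
// before returning the scaled copy, so callers must not dispose the input again.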