public void TestHiHat()
{
    // Renders each hi-hat articulation (closed, pedal, open) at velocity
    // 0xF7 to its own wav file and asserts that every write succeeded.
    void RenderPercussion(PercussionMap percussion, string fileName)
    {
        string targetPath = DataManagement.PathForDataFile("Test", fileName);

        Assert.IsTrue(WaveEncoding.SaveStream(
            filepath: targetPath,
            stream: InstrumentLookup.GetPercussion(percussion, 0xF7)
                .SafeCache()
                .SlowRangeFitter(),
            overwrite: true));
    }

    RenderPercussion(PercussionMap.ClosedHiHat, "ClosedHiHat.wav");
    RenderPercussion(PercussionMap.PedalHiHat, "PedalHiHat.wav");
    RenderPercussion(PercussionMap.OpenHiHat, "OpenHiHat.wav");
}
// Handles the OK button: stops playback, encodes the configured audio
// (skipped entirely in the LOOP_SELECTION_DIALOG_LIB build), then closes
// the dialog with an OK result.
private void btnOkay_Click(object sender, EventArgs e)
{
    Stop();

#if LOOP_SELECTION_DIALOG_LIB
    // Library build: no encoding is performed here.
#else
    // _type selects the converter: 0 = BRSTM, 1 = RSAR wave, 2 = RWAV.
    using (ProgressWindow progress = new ProgressWindow(this,
        String.Format("{0} Converter", _type == 0 ? "Brstm" : "Wave"),
        "Encoding, please wait...", false))
        switch (_type)
        {
            case 0:
                var encoding = (WaveEncoding)ddlEncoding.SelectedItem;
                // Remember the user's encoding choice for the next use of the dialog.
                PreviousEncoding = encoding;
                _audioData = RSTMConverter.Encode(_sourceStream, progress, encoding);
                break;

            case 1:
                _audioData = RSARWaveConverter.Encode(_sourceStream, progress);
                break;

            case 2:
                _audioData = RWAVConverter.Encode(_sourceStream, progress);
                break;
        }
#endif

    DialogResult = DialogResult.OK;
    Close();
}
// Handles the OK button: stops playback, copies any user-adjusted loop
// points back onto the initial stream, encodes only when no initial
// stream was supplied, then closes the dialog with an OK result.
private void btnOkay_Click(object sender, EventArgs e)
{
    Stop();

    // If the edited stream is just a wrapper around the stream we were
    // given, propagate the (possibly modified) loop settings back to it.
    if (_sourceStream is InitialStreamWrapper w && w.BaseStream == _initialStream)
    {
        _initialStream.LoopStartSample = _sourceStream.LoopStartSample;
        _initialStream.LoopEndSample = _sourceStream.LoopEndSample;
        _initialStream.IsLooping = _sourceStream.IsLooping;
    }

    // Only encode when the dialog was opened without an existing stream;
    // otherwise the caller presumably keeps using _initialStream directly.
    if (_initialStream == null)
    {
        // _type selects the converter: 0 = BRSTM, 1 = RSAR wave, 2 = RWAV.
        using (ProgressWindow progress = new ProgressWindow(this,
            String.Format("{0} Converter", _type == 0 ? "Brstm" : "Wave"),
            "Encoding, please wait...", false))
            switch (_type)
            {
                case 0:
                    var encoding = (WaveEncoding)ddlEncoding.SelectedItem;
                    // Remember the user's encoding choice for next time.
                    PreviousEncoding = encoding;
                    _audioData = RSTMConverter.Encode(_sourceStream, progress, encoding);
                    break;

                case 1:
                    _audioData = RSARWaveConverter.Encode(_sourceStream, progress);
                    break;

                case 2:
                    _audioData = RWAVConverter.Encode(_sourceStream, progress);
                    break;
            }
    }

    DialogResult = DialogResult.OK;
    Close();
}
public void TestSingleChannelWave()
{
    // Round-trip test for mono wav encoding: save a generated sine wave,
    // load it back, confirm the channel count survived, then save the
    // loaded samples again to a second file.
    string firstPath = DataManagement.PathForDataFile("Test", "singleChannel.wav");
    string secondPath = DataManagement.PathForDataFile("Test", "singleChannel2.wav");

    float[] sineSamples = CreateSineWave(1);

    Assert.IsTrue(WaveEncoding.SaveFile(
        filepath: firstPath,
        channels: 1,
        sampleRate: 44100,
        samples: sineSamples,
        overwrite: true));

    // Drop our reference so the subsequent load must repopulate it.
    sineSamples = null;

    Assert.IsTrue(File.Exists(firstPath));

    Assert.IsTrue(WaveEncoding.LoadFile(
        filepath: firstPath,
        channels: out int loadedChannels,
        samples: out sineSamples));

    Assert.IsTrue(loadedChannels == 1);
    Assert.IsTrue(sineSamples != null);

    Assert.IsTrue(WaveEncoding.SaveFile(
        filepath: secondPath,
        channels: 1,
        sampleRate: 44100,
        samples: sineSamples,
        overwrite: true));
}
public void TestDualChannelWave()
{
    // Round-trip test for stereo wav encoding: save a generated sine wave,
    // load it back, confirm the channel count survived, then save the
    // loaded samples again to a second file.
    string firstPath = DataManagement.PathForDataFile("Test", "dualChannel.wav");
    string secondPath = DataManagement.PathForDataFile("Test", "dualChannel2.wav");

    float[] sineSamples = CreateSineWave(2);

    Assert.IsTrue(WaveEncoding.SaveFile(
        filepath: firstPath,
        channels: 2,
        sampleRate: 44100,
        samples: sineSamples,
        overwrite: true));

    // Drop our reference so the subsequent load must repopulate it.
    sineSamples = null;

    Assert.IsTrue(File.Exists(firstPath));

    Assert.IsTrue(WaveEncoding.LoadFile(
        filepath: firstPath,
        channels: out int loadedChannels,
        samples: out sineSamples));

    Assert.IsTrue(loadedChannels == 2);
    Assert.IsTrue(sineSamples != null);

    Assert.IsTrue(WaveEncoding.SaveFile(
        filepath: secondPath,
        channels: 2,
        sampleRate: 44100,
        samples: sineSamples,
        overwrite: true));
}
public void TestNoiseVocodedSpeech()
{
    // Noise-vocodes a CRM speech sample at three band counts (6, 11, 22)
    // and writes each result to a wav file.  All three saves must succeed.
    Calibration.Initialize();

    string speechFile = DataManagement.PathForDataFile("Test", "000000.wav");

    if (!File.Exists(speechFile))
    {
        throw new Exception($"Test utilizes CRM missing sentence: {speechFile}");
    }

    WaveEncoding.LoadBGCStream(
        filepath: speechFile,
        stream: out IBGCStream speechStream);

    bool success = WaveEncoding.SaveStream(
        filepath: DataManagement.PathForDataFile("Test", "06BandVocoding.wav"),
        stream: speechStream.NoiseVocode(bandCount: 6).Cache().SlowRangeFitter(),
        overwrite: true);

    // BUGFIX: these previously used |=, so the final assert passed if ANY
    // single save succeeded.  Every save is required, hence &=.
    success &= WaveEncoding.SaveStream(
        filepath: DataManagement.PathForDataFile("Test", "11BandVocoding.wav"),
        stream: speechStream.NoiseVocode(bandCount: 11).Cache().SlowRangeFitter(),
        overwrite: true);

    success &= WaveEncoding.SaveStream(
        filepath: DataManagement.PathForDataFile("Test", "22BandVocoding.wav"),
        stream: speechStream.NoiseVocode(bandCount: 22).Cache().SlowRangeFitter(),
        overwrite: true);

    Assert.IsTrue(success);
}
public void TestPhaseVocoding()
{
    // Phase-vocodes a 10-second window of the source clip at several
    // slowdown factors (5%, 25%, 50%) and writes each result — plus the
    // unmodified window — to its own wav file.
    //string baseFile = "Boston_HitchARide";
    string baseFile = "000000";

    WaveEncoding.LoadBGCSimple(
        filepath: DataManagement.PathForDataFile("Test", $"{baseFile}.wav"),
        simpleAudioClip: out SimpleAudioClip song);

    song = song.Window(10f).Cache();

    // Write a clip out under the given filename suffix.
    void WriteClip(SimpleAudioClip clip, string suffix)
    {
        WaveEncoding.SaveFile(
            filepath: DataManagement.PathForDataFile("Test", $"{baseFile}_{suffix}.wav"),
            channels: clip.Channels,
            sampleRate: (int)clip.SamplingRate,
            samples: clip.Samples,
            overwrite: true);
    }

    // Unmodified reference first, then each slowed version.
    WriteClip(song, "Unmodified");

    float[] slowFactors = { 0.95f, 0.75f, 0.5f };
    string[] slowLabels = { "Slowed_05", "Slowed_25", "Slowed_50" };

    for (int i = 0; i < slowFactors.Length; i++)
    {
        WriteClip(song.PhaseVocode(slowFactors[i]).Cache(), slowLabels[i]);
    }
}
// Builds a PCM wave-format descriptor from the given channel count,
// sample rate, and bit depth.  Block alignment (bytes per frame) and the
// average byte rate are derived from those values; the trailing size
// field is zero for plain PCM.
public WaveFormat(short channels, int samplesPerSecond, short bitsPerSample)
{
    _formatTag = WaveEncoding.Pcm;
    _channels = channels;
    _samplesPerSecond = samplesPerSecond;
    _bitsPerSample = bitsPerSample;

    // One frame holds every channel at bitsPerSample bits each.
    _blockAlign = (short)(channels * bitsPerSample / 8);
    _avgBytesPerSecond = samplesPerSecond * _blockAlign;
    _size = 0;
}
public void TestSnare()
{
    // Renders an acoustic snare hit at velocity 0xF7, shaped with
    // Normalize(80f), Window(1f), and Center(1.5f), and asserts that the
    // wav write succeeded.
    string outputPath = DataManagement.PathForDataFile("Test", "SnareTest.wav");

    IBGCStream snareStream = InstrumentLookup
        .GetPercussion(PercussionMap.AcousticSnare, 0xF7)
        .Normalize(80f)
        .Window(1f)
        .Center(1.5f);

    Assert.IsTrue(WaveEncoding.SaveStream(
        filepath: outputPath,
        stream: snareStream,
        overwrite: true));
}
// Reads the Wave header fields from the given stream and wraps the
// remaining bytes (the sample data) in a SubStream starting at the
// stream's post-header position.
public Wave(Stream stream)
{
    // BUGFIX: the reader previously closed the underlying stream on
    // dispose, so the subsequent stream.Position read and the SubStream
    // operated on a disposed stream.  leaveOpen: true keeps it alive.
    using (var r = new BinaryReader(stream, System.Text.Encoding.UTF8, leaveOpen: true))
    {
        Encoding = (WaveEncoding)r.ReadByte();
        Loops = r.ReadBoolean();
        SampleRate = r.ReadUInt16();
        TimerLen = r.ReadUInt16();
        LoopStart = r.ReadUInt16();
        LoopLength = r.ReadUInt32();
    }

    // Everything after the header is the raw sample data.
    dataStream = new SubStream(stream, stream.Position);
}
public void TestNewSpatialization()
{
    // Spatializes a speech sample at offsets 0, +25, and -25, logging RMS
    // before and after, and writes each spatialized result to its own file.
    string baseFile = "000000";

    WaveEncoding.LoadBGCStream(
        filepath: DataManagement.PathForDataFile("Test", $"{baseFile}.wav"),
        stream: out IBGCStream stream);

    Debug.Log($"Pre RMS: {string.Join(", ", stream.CalculateRMS().Select(x => x.ToString()).ToArray())}");

    float[] positions = { 0f, 25f, -25f };
    string[] fileSuffixes = { "0", "25", "n25" };

    for (int i = 0; i < positions.Length; i++)
    {
        IBGCStream spatialized = stream.Spatialize(positions[i]);

        string rms = string.Join(", ",
            spatialized.CalculateRMS().Select(x => x.ToString()).ToArray());
        Debug.Log($"Post RMS: {rms}");

        //Write to File
        WaveEncoding.SaveStream(
            filepath: DataManagement.PathForDataFile("Test", $"{baseFile}_Spatialized_{fileSuffixes[i]}.wav"),
            stream: spatialized,
            overwrite: true);
    }
}
public void TestCarrierModifiedFakeVoices()
{
    // Builds crude two-band "voices" from every pairing of two carrier
    // types (brown analytic noise and a 120 Hz sawtooth): one copy is
    // bandpass filtered at 500 Hz, the other at 1500 Hz, and their sum is
    // written to a wav file per combination.
    Calibration.Initialize();

    // F2: 500
    // F3: 1000
    // F4: 2000

    double qFactor = 200;

    Func<IBGCStream> makeNoiseCarrier = () =>
        new AnalyticNoiseStream(
            rms: 1.0,
            freqLB: 20,
            freqUB: 10000,
            frequencyCount: 10000,
            distribution: AnalyticNoiseStream.AmplitudeDistribution.Brown)
        .ToBGCStream();

    Func<IBGCStream> makeSawCarrier = () =>
        new SawtoothWave(
            amplitude: 1.0,
            frequency: 120);

    Func<IBGCStream>[] carrierFuncs = { makeNoiseCarrier, makeSawCarrier };

    for (int lowCarrier = 0; lowCarrier < 2; lowCarrier++)
    {
        for (int highCarrier = 0; highCarrier < 2; highCarrier++)
        {
            IBGCStream lowBand = carrierFuncs[lowCarrier]()
                .BiQuadBandpassFilter(
                    centralFrequency: 500,
                    qFactor: qFactor);

            IBGCStream highBand = carrierFuncs[highCarrier]()
                .BiQuadBandpassFilter(
                    centralFrequency: 1500,
                    qFactor: qFactor);

            WaveEncoding.SaveStream(
                filepath: DataManagement.PathForDataFile("Test", $"testVoice{lowCarrier}{highCarrier}.wav"),
                stream: new StreamAdder(lowBand, highBand)
                    .Window(0.5)
                    .SlowRangeFitter(),
                overwrite: true);
        }
    }
}
public void TestTriangleWave()
{
    // Writes a 440 Hz triangle wave, windowed to DURATION and normalized
    // to LEVEL, and asserts the save succeeded.
    Calibration.Initialize();

    IBGCStream triangleStream = new TriangleWave(1f, 440f)
        .Window(DURATION)
        .Normalize(LEVEL);

    Assert.IsTrue(WaveEncoding.SaveStream(
        filepath: DataManagement.PathForDataFile("Test", "triangleWave.wav"),
        stream: triangleStream,
        overwrite: true));
}
public void UpScalingTest()
{
    // Upscales a generated mono sine wave by a factor of two using linear
    // interpolation, then writes the result and asserts the save succeeded.
    string outputPath = DataManagement.PathForDataFile("Test", "upscaled.wav");

    float[] sourceSamples = CreateSineWave(1, 22050f);

    float[] upscaledSamples = LinearInterpolation.FactorUpscaler(
        samples: sourceSamples,
        factor: 2,
        channels: 1);

    Assert.IsTrue(WaveEncoding.SaveStream(
        filepath: outputPath,
        stream: new SimpleAudioClip(
            samples: upscaledSamples,
            channels: 1),
        overwrite: true));
}
public void TestPulses()
{
    // Writes a 3-second, 400 Hz square wave at several duty cycles, one
    // wav file per duty cycle, asserting each save succeeded.
    Calibration.Initialize();

    foreach (double dutyCycle in new[] { 0.1, 0.25, 0.5, 0.75, 0.9 })
    {
        string outputPath = DataManagement.PathForDataFile("Test", $"squareWave({dutyCycle}).wav");

        Assert.IsTrue(WaveEncoding.SaveStream(
            filepath: outputPath,
            stream: new SquareWave(1.0, 400, dutyCycle)
                .Window(3.0),
            overwrite: true));
    }
}
public void TestGuitar()
{
    // Renders jazz electric-guitar E notes across octaves 2-4 and writes
    // each to its own wav file, asserting every save succeeded.
    Calibration.Initialize();

    for (int octave = 2; octave <= 4; octave++)
    {
        // Note number: 12 semitones per octave, +4 selects E.
        byte note = (byte)(12 * octave + 4);
        string outputPath = DataManagement.PathForDataFile("Test", $"guitarE{octave}.wav");

        Assert.IsTrue(WaveEncoding.SaveStream(
            filepath: outputPath,
            stream: InstrumentLookup.GetNote(
                    set: ReservedSoundSet.ElectricGuitar_Jazz,
                    note: note,
                    velocity: 0xF7)
                .SafeCache()
                .SlowRangeFitter(),
            overwrite: true));
    }
}
// Parses the HEAD chunk from the stream: encoding and loop flags, channel
// count, sample rate, loop point, and the block layout (count, size,
// samples per block, last-block size/samples) of the sample data.
private void readHEAD(Stream stream)
{
    // NOTE(review): disposing the BinaryReader also closes the caller's
    // stream — confirm callers do not use the stream after this call.
    using (var r = new BinaryReader(stream))
    {
        encoding = (WaveEncoding)r.ReadByte();
        loop = r.ReadBoolean();
        channels = r.ReadByte();
        r.Skip(1);                       // skipped byte (purpose not shown here)
        sampleRate = r.ReadUInt16();
        r.Skip(2);                       // skipped bytes (purpose not shown here)
        loopPoint = r.ReadUInt32();
        sampleCount = r.ReadUInt32();
        dataPos = r.ReadUInt32();
        nBlock = r.ReadUInt32();
        blockLength = r.ReadUInt32();
        blockSamples = r.ReadUInt32();
        lastBlockLength = r.ReadUInt32();
        lastBlockSamples = r.ReadUInt32();
    }
}
// Creates the sample decoder matching the given wave encoding.
// GEN is rejected explicitly (no decoder exists for it); any other
// unrecognized value raises NotImplementedException.
public static BaseSampleDecoder CreateDecoder(WaveEncoding encoding)
{
    switch (encoding)
    {
        case WaveEncoding.PCM8:
            return new PCM8Decoder();

        case WaveEncoding.PCM16:
            return new PCM16Decoder();

        case WaveEncoding.ADPCM:
            return new ADPCMDecoder();

        case WaveEncoding.GEN:
            // Previously threw a message-less ArgumentException; same type,
            // now with a diagnostic message and parameter name.
            throw new ArgumentException(
                "GEN encoding has no sample decoder.", nameof(encoding));

        default:
            throw new NotImplementedException(
                $"No decoder implemented for encoding: {encoding}");
    }
}
public void TestRenderToccataMidi()
{
    // Loads a toccata midi file, renders it through a MidiFileStream with
    // range fitting, writes the result to a wav file, and verifies the
    // output file exists.
    Calibration.Initialize();

    string midiPath = DataManagement.PathForDataFile("Test", "toccata1.mid");
    string wavPath = DataManagement.PathForDataFile("Test", "toccata1.wav");

    Assert.IsTrue(MidiEncoding.LoadFile(
        filePath: midiPath,
        midiFile: out MidiFile midiFile,
        retainAll: true));

    IBGCStream renderedStream =
        new SlowRangeFitterFilter(new MidiFileStream(midiFile).SafeCache());

    Assert.IsTrue(WaveEncoding.SaveStream(
        filepath: wavPath,
        stream: renderedStream,
        overwrite: true));

    Assert.IsTrue(File.Exists(wavPath));
}
public void TestCarlileShuffler()
{
    // Carlile-shuffles a speech sample and writes the result, logging the
    // clip's RMS and sample count before and after the shuffle.
    string baseFile = "000000";

    WaveEncoding.LoadBGCSimple(
        filepath: DataManagement.PathForDataFile("Test", $"{baseFile}.wav"),
        simpleAudioClip: out SimpleAudioClip song);

    // Logs the RMS of the current song contents under the given label.
    void LogRMS(string label)
    {
        float meanSquare = song.Samples.Sum(x => x * x) / song.Samples.Length;
        Debug.Log($"{label} RMS: {Mathf.Sqrt(meanSquare)} N:{song.Samples.Length}");
    }

    LogRMS("Pre");
    song = song.CarlileShuffle().Cache();
    LogRMS("Post");

    //Write to File
    // NOTE(review): unlike the other SaveFile calls in this file, no
    // sampleRate argument is passed here — confirm the overload's default
    // matches the source clip's rate.
    WaveEncoding.SaveFile(
        filepath: DataManagement.PathForDataFile("Test", $"{baseFile}_Carlile.wav"),
        channels: song.Channels,
        samples: song.Samples,
        overwrite: true);
}
// Reads a wave-format structure from the reader.  size is the chunk size
// in bytes: 16 for the base structure, 18 when the trailing size field is
// present; anything larger must be parsed as WaveFormatFull instead.
internal WaveFormat(BinaryReader reader, int size)
{
    if (size < 16)
    {
        throw new WaveFormatException("WaveFormat data too short.");
    }

    _formatTag = (WaveEncoding)reader.ReadInt16();
    _channels = reader.ReadInt16();
    _samplesPerSecond = reader.ReadInt32();
    _avgBytesPerSecond = reader.ReadInt32();
    _blockAlign = reader.ReadInt16();
    _bitsPerSample = reader.ReadInt16();

    // Optional trailing size field.
    // NOTE(review): a size of 17 would still read two bytes here — confirm
    // callers only ever pass 16 or 18.
    if (size > 16)
    {
        _size = reader.ReadInt16();
    }

    if (size > 18)
    {
        throw new WaveFormatException("Structure is too big, use WaveFormatFull instead.");
    }
}
public void TestFakeVoice()
{
    // Constructs a crude synthetic "voice" from three formant-like bands
    // (F2-F4), each a continuously-filtered copy of a sawtooth +
    // brown-noise carrier, and writes the mixed result to a wav file.
    Calibration.Initialize();

    // Approximate formant targets:
    // F2: 500
    // F3: 1000
    // F4: 2000

    double qFactor = 100;

    // Carrier: 120 Hz sawtooth over a low-level brown-noise bed.
    Func<IBGCStream> makeCarrier = () =>
        new StreamAdder(
            new SawtoothWave(
                amplitude: 1.0,
                frequency: 120),
            new AnalyticNoiseStream(
                rms: 0.2,
                freqLB: 20,
                freqUB: 10000,
                frequencyCount: 10000,
                distribution: AnalyticNoiseStream.AmplitudeDistribution.Brown)
            .ToBGCStream());

    // F2 band: fast sigmoid onset, then held constant.
    IBGCStream f2 = makeCarrier()
        .ContinuousFilter(
            envelopeStream: new Audio.Envelopes.EnvelopeConcatenator(
                new Audio.Envelopes.SigmoidEnvelope(0.1),
                new Audio.Envelopes.ConstantEnvelope(1.0, 0.1)),
            filterType: ContinuousFilter.FilterType.BandPass,
            freqLB: 400,
            freqUB: 700,
            qFactor: qFactor);

    // F3 band: linear sweep envelope.
    IBGCStream f3 = makeCarrier()
        .ContinuousFilter(
            envelopeStream: new Audio.Envelopes.LinearEnvelope(0.05, 0.15),
            filterType: ContinuousFilter.FilterType.BandPass,
            freqLB: 1500,
            freqUB: 1000,
            qFactor: qFactor);

    // F4 band: fixed at 2000 Hz.
    IBGCStream f4 = makeCarrier()
        .ContinuousFilter(
            envelopeStream: new Audio.Envelopes.ConstantEnvelope(1.0, 0.2),
            filterType: ContinuousFilter.FilterType.BandPass,
            freqLB: 2000,
            freqUB: 2000,
            qFactor: qFactor);

    IBGCStream fakeVoice = new StreamAdder(f2, f3, f4)
        .Window(.2)
        .Center(1)
        .SlowRangeFitter();

    // BUGFIX: overwrite: true added for consistency with every other test
    // in this file, so re-runs do not behave differently when the output
    // file already exists.
    WaveEncoding.SaveStream(
        filepath: DataManagement.PathForDataFile("Test", "testVoice.wav"),
        stream: fakeVoice,
        overwrite: true);
}
// Initializes this HEAD section in place: writes the tag and size, fixes
// up the three entry offsets, stores the audio format info, lays out the
// per-channel (ADPCM) info entries, and zero-fills the remainder of the
// section.  Operates directly on unsafe memory rooted at _entries.Address.
public void Set(int size, int channels, WaveEncoding encoding)
{
    RuintList *list;
    VoidPtr offset = _entries.Address;

    // Per-channel data starts after the 0x60-byte fixed region plus 8
    // bytes of pointer slots per channel.
    int dataOffset = 0x60 + channels * 8;

    _tag = Tag;
    _size = size;

    //Set entry offsets
    _entries.Entries[0] = 0x18;
    _entries.Entries[1] = 0x4C;
    _entries.Entries[2] = 0x5C;

    //Audio info
    //HEADPart1* part1 = Part1;

    //Set single channel info
    list = Part2;
    list->_numEntries._data = 1; //Number is little-endian
    list->Entries[0] = 0x58;

    // TODO: This is not actually AudioFormatInfo. Set it as a buint instead.
    *(AudioFormatInfo *)list->Get(offset, 0) = channels == 1 ? new AudioFormatInfo(1, 0, 0, 0) : new AudioFormatInfo(2, 0, 1, 0);

    //Set adpcm infos
    list = Part3;
    list->_numEntries._data = channels; //little-endian

    for (int i = 0; i < channels; i++)
    {
        //Set initial pointer
        list->Entries[i] = dataOffset;

        if (encoding == WaveEncoding.ADPCM)
        {
            //Set embedded pointer
            *(ruint *)(offset + dataOffset) = dataOffset + 8;
            dataOffset += 8;

            //Set info
            //*(ADPCMInfo*)(offset + dataOffset) = info[i];
            dataOffset += ADPCMInfo.Size;

            //Set padding
            //*(short*)(offset + dataOffset) = 0;
            //dataOffset += 2;
        }
        else
        {
            //Set embedded pointer
            *(ruint *)(offset + dataOffset) = 0;
            dataOffset += 8;
        }
    }

    //Fill remaining
    // NOTE(review): the loop initializer advances dataOffset by 8 before
    // the bounds check while p starts at the pre-advance offset, so the
    // fill stops 8 bytes before `size` relative to where writing starts —
    // confirm this offset is intentional.
    int *p = (int *)(offset + dataOffset);
    for (dataOffset += 8; dataOffset < size; dataOffset += 4)
    {
        *p++ = 0;
    }
}
public static unsafe FileMap Encode(IAudioStream stream, IProgressTracker progress, WaveEncoding encoding = WaveEncoding.ADPCM) #endif { int tmp; bool looped = stream.IsLooping; int channels = stream.Channels; int samples; int blocks; int sampleRate = stream.Frequency; int lbSamples, lbSize, lbTotal; int loopPadding, loopStart, totalSamples; short *tPtr; int samplesPerBlock = encoding == WaveEncoding.ADPCM ? 0x3800 : encoding == WaveEncoding.PCM16 ? 0x1000 : 0; if (samplesPerBlock == 0) { throw new ArgumentException("Encoding must be ADPCM or PCM16"); } if (looped) { loopStart = stream.LoopStartSample; samples = stream.LoopEndSample; //Set sample size to end sample. That way the audio gets cut off when encoding. //If loop point doesn't land on a block, pad the stream so that it does. if ((tmp = loopStart % samplesPerBlock) != 0) { loopPadding = samplesPerBlock - tmp; loopStart += loopPadding; } else { loopPadding = 0; } totalSamples = loopPadding + samples; } else { loopPadding = loopStart = 0; totalSamples = samples = stream.Samples; } if (progress != null) { progress.Begin(0, totalSamples * channels * 3, 0); } blocks = (totalSamples + samplesPerBlock - 1) / samplesPerBlock; //Initialize stream info if ((tmp = totalSamples % samplesPerBlock) != 0) { lbSamples = tmp; if (encoding == WaveEncoding.ADPCM) { lbSize = (lbSamples + 13) / 14 * 8; } else if (encoding == WaveEncoding.PCM16) { lbTotal = lbSize = lbSamples * 2; } else if (encoding == WaveEncoding.PCM8) { lbTotal = lbSize = lbSamples; } else { throw new NotImplementedException(); } lbTotal = lbSize.Align(0x20); } else { lbSamples = samplesPerBlock; lbTotal = lbSize = 0x2000; } //Get section sizes int rstmSize = 0x40; int headSize = (0x68 + (channels * (encoding == WaveEncoding.ADPCM ? 0x40 : 0x10))).Align(0x20); int adpcSize = encoding == WaveEncoding.ADPCM ? 
((blocks - 1) * 4 * channels + 0x10).Align(0x20) : 0; int dataSize = ((blocks - 1) * 0x2000 + lbTotal) * channels + 0x20; #if RSTMLIB //Create byte array byte[] array = new byte[rstmSize + headSize + adpcSize + dataSize]; fixed(byte *address = array) { #else //Create file map FileMap map = FileMap.FromTempFile(rstmSize + headSize + adpcSize + dataSize); VoidPtr address = map.Address; #endif //Get section pointers RSTMHeader * rstm = (RSTMHeader *)address; HEADHeader * head = (HEADHeader *)((byte *)rstm + rstmSize); ADPCHeader * adpc = (ADPCHeader *)((byte *)head + headSize); RSTMDATAHeader *data = (RSTMDATAHeader *)((byte *)adpc + adpcSize); //Initialize sections rstm->Set(headSize, adpcSize, dataSize); head->Set(headSize, channels, encoding); if (adpcSize > 0) { adpc->Set(adpcSize); } data->Set(dataSize); //Set HEAD data StrmDataInfo *part1 = head->Part1; part1->_format = new AudioFormatInfo((byte)encoding, (byte)(looped ? 1 : 0), (byte)channels, 0); part1->_sampleRate = (ushort)sampleRate; part1->_blockHeaderOffset = 0; part1->_loopStartSample = loopStart; part1->_numSamples = totalSamples; part1->_dataOffset = rstmSize + headSize + adpcSize + 0x20; part1->_numBlocks = blocks; part1->_blockSize = 0x2000; part1->_samplesPerBlock = samplesPerBlock; part1->_lastBlockSize = lbSize; part1->_lastBlockSamples = lbSamples; part1->_lastBlockTotal = lbTotal; part1->_dataInterval = encoding == WaveEncoding.ADPCM ? samplesPerBlock : 0; part1->_bitsPerSample = encoding == WaveEncoding.ADPCM ? 
4 : 0; if (encoding == WaveEncoding.ADPCM) { //Create one ADPCMInfo for each channel int * adpcData = stackalloc int[channels]; ADPCMInfo **pAdpcm = (ADPCMInfo **)adpcData; for (int i = 0; i < channels; i++) { *(pAdpcm[i] = head->GetChannelInfo(i)) = new ADPCMInfo() { _pad = 0 } } ; //Create buffer for each channel int * bufferData = stackalloc int[channels]; short **channelBuffers = (short **)bufferData; int bufferSamples = totalSamples + 2; //Add two samples for initial yn values for (int i = 0; i < channels; i++) { channelBuffers[i] = tPtr = (short *)Marshal.AllocHGlobal(bufferSamples * 2); //Two bytes per sample //Zero padding samples and initial yn values for (int x = 0; x < (loopPadding + 2); x++) { *tPtr++ = 0; } } //Fill buffers stream.SamplePosition = 0; short *sampleBuffer = stackalloc short[channels]; for (int i = 2; i < bufferSamples; i++) { if (stream.SamplePosition == stream.LoopEndSample && looped) { stream.SamplePosition = stream.LoopStartSample; } stream.ReadSamples(sampleBuffer, 1); for (int x = 0; x < channels; x++) { channelBuffers[x][i] = sampleBuffer[x]; } } //Calculate coefs for (int i = 0; i < channels; i++) { AudioConverter.CalcCoefs(channelBuffers[i] + 2, totalSamples, (short *)pAdpcm[i], progress); } //Encode blocks byte * dPtr = (byte *)data->Data; bshort *pyn = (bshort *)adpc->Data; for (int x = 0; x < channels; x++) { *pyn++ = 0; *pyn++ = 0; } for (int sIndex = 0, bIndex = 1; sIndex < totalSamples; sIndex += samplesPerBlock, bIndex++) { int blockSamples = Math.Min(totalSamples - sIndex, samplesPerBlock); for (int x = 0; x < channels; x++) { short *sPtr = channelBuffers[x] + sIndex; //Set block yn values if (bIndex != blocks) { *pyn++ = sPtr[samplesPerBlock + 1]; *pyn++ = sPtr[samplesPerBlock]; } //Encode block (include yn in sPtr) AudioConverter.EncodeBlock(sPtr, blockSamples, dPtr, (short *)pAdpcm[x]); //Set initial ps if (bIndex == 1) { pAdpcm[x]->_ps = *dPtr; } //Advance output pointer if (bIndex == blocks) { //Fill remaining dPtr 
+= lbSize; for (int i = lbSize; i < lbTotal; i++) { *dPtr++ = 0; } } else { dPtr += 0x2000; } } if (progress != null) { if ((sIndex % samplesPerBlock) == 0) { progress.Update(progress.CurrentValue + (0x7000 * channels)); } } } //Reverse coefs for (int i = 0; i < channels; i++) { short *p = pAdpcm[i]->_coefs; for (int x = 0; x < 16; x++, p++) { *p = p->Reverse(); } } //Write loop states if (looped) { //Can't we just use block states? int loopBlock = loopStart / samplesPerBlock; int loopChunk = (loopStart - (loopBlock * samplesPerBlock)) / 14; dPtr = (byte *)data->Data + (loopBlock * 0x2000 * channels) + (loopChunk * 8); tmp = (loopBlock == blocks - 1) ? lbTotal : 0x2000; for (int i = 0; i < channels; i++, dPtr += tmp) { //Use adjusted samples for yn values tPtr = channelBuffers[i] + loopStart; pAdpcm[i]->_lps = *dPtr; pAdpcm[i]->_lyn2 = *tPtr++; pAdpcm[i]->_lyn1 = *tPtr; } } //Free memory for (int i = 0; i < channels; i++) { Marshal.FreeHGlobal((IntPtr)channelBuffers[i]); } } else if (encoding == WaveEncoding.PCM16) { bshort *destPtr = (bshort *)data->Data; for (int i = 0; i < blocks; i++) { int samplesPerChannel = i < blocks - 1 ? part1->_samplesPerBlock : part1->_lastBlockSamples; int bytesPerChannel = i < blocks - 1 ? 
part1->_blockSize : part1->_lastBlockTotal; short[] sampleData = new short[channels * bytesPerChannel / sizeof(short)]; fixed(short *sampleDataPtr = sampleData) { int read = 0; do { if (stream.SamplePosition == stream.LoopEndSample && looped) { stream.SamplePosition = stream.LoopStartSample; } int s = stream.ReadSamples(sampleDataPtr + read, samplesPerChannel - read); if (s == 0) { throw new Exception("No samples could be read from the stream"); } read += s; }while (read < samplesPerChannel); } for (int j = 0; j < channels; j++) { for (int k = j; k < sampleData.Length; k += channels) { *(destPtr++) = sampleData[k]; } } progress.Update(progress.CurrentValue + (samplesPerChannel * channels * 3)); } } if (progress != null) { progress.Finish(); } #if RSTMLIB } return(array); #else return(map); #endif }
public static unsafe byte[] EncodeToByteArray(IAudioStream stream, IProgressTracker progress, WaveEncoding encoding = WaveEncoding.ADPCM)
public void TestBiQuadFilters()
{
    // Exercises the biquad bandpass and notch filters against square and
    // sine waves at matched and mismatched frequencies, writing each
    // windowed, normalized result to its own wav file.
    Calibration.Initialize();

    // Windows, normalizes, saves, and asserts success for one filtered stream.
    void SaveFiltered(IBGCStream filteredStream, string fileName)
    {
        bool saved = WaveEncoding.SaveStream(
            filepath: DataManagement.PathForDataFile("Test", fileName),
            stream: filteredStream
                .Window(DURATION)
                .Normalize(LEVEL),
            overwrite: true);

        Assert.IsTrue(saved);
    }

    //Bandpass filtered square wave, matched frequency
    SaveFiltered(
        new SquareWave(1f, 400f).BiQuadBandpassFilter(400f),
        "BiQuadMostThroughBandWave.wav");

    //Bandpass filtered square wave, 2x frequency
    SaveFiltered(
        new SquareWave(1f, 400f).BiQuadBandpassFilter(800f),
        "BiQuadTooHighBandWave.wav");

    //Bandpass filtered square wave, half frequency
    SaveFiltered(
        new SquareWave(1f, 400f).BiQuadBandpassFilter(200f),
        "BiQuadTooLowBandWave.wav");

    //Notch filtered square wave, matched frequency
    SaveFiltered(
        new SquareWave(1f, 400f).BiQuadNotchFilter(400f),
        "BiQuadNotchFilteredSquareWave.wav");

    //Notch filtered sine wave, matched frequency
    SaveFiltered(
        new SineWave(1f, 400f).BiQuadNotchFilter(400f),
        "BiQuadNotchFilteredSineWave.wav");

    //Notch filtered sine wave, +10 Hz mismatch
    SaveFiltered(
        new SineWave(1f, 400f).BiQuadNotchFilter(410f),
        "BiQuadNotchFiltered410SineWave.wav");
}
public void TestNewConvolution()
{
    // Multi-convolves a speech sample with two pairs of 150-tap FIR
    // filters and writes each convolved result to a wav file, logging RMS
    // before and after.
    string baseFile = "000000";

    WaveEncoding.LoadBGCStream(
        filepath: DataManagement.PathForDataFile("Test", $"{baseFile}.wav"),
        stream: out IBGCStream stream);

    Debug.Log($"Pre RMS: {string.Join(", ", stream.CalculateRMS().Select(x => x.ToString()).ToArray())}");

    // Convolves with the pair of filters, logs RMS, and writes the file.
    void ConvolveAndSave(float[] filterA, float[] filterB, string fileName)
    {
        IBGCStream convolved = stream.MultiConvolve(filterA, filterB);

        string rms = string.Join(", ",
            convolved.CalculateRMS().Select(x => x.ToString()).ToArray());
        Debug.Log($"Post RMS: {rms}");

        //Write to File
        WaveEncoding.SaveStream(
            filepath: DataManagement.PathForDataFile("Test", fileName),
            stream: convolved,
            overwrite: true);
    }

    {
        // Unit impulse at tap 25, and a flat unit-energy filter.
        float[] impulse = new float[150];
        impulse[25] = 1f;

        float[] flat = new float[150];
        for (int i = 0; i < 150; i++)
        {
            flat[i] = 1f / (float)Math.Sqrt(150);
        }

        ConvolveAndSave(impulse, flat, $"{baseFile}_Convolved.wav");
    }

    {
        // Impulse energy split across taps 25/26, and a flat averaging filter.
        float[] splitImpulse = new float[150];
        splitImpulse[25] = 1f / (float)Math.Sqrt(2);
        splitImpulse[26] = 1f / (float)Math.Sqrt(2);

        float[] average = new float[150];
        for (int i = 0; i < 150; i++)
        {
            average[i] = 1f / 150f;
        }

        ConvolveAndSave(splitImpulse, average, $"{baseFile}_Convolved2.wav");
    }
}
public void TestFunFakeVoice()
{
    // Builds two-band "voices" from a sawtooth + brown-noise carrier by
    // bandpass filtering two copies at different band-center pairings
    // (500/1500 Hz and 750/2000 Hz) and writing one wav file per pairing.
    Calibration.Initialize();

    // F2: 500
    // F3: 1000
    // F4: 2000

    double qFactor = 200;

    // Carrier: 120 Hz sawtooth over a low-level brown-noise bed.
    Func<IBGCStream> makeCarrier = () =>
        new StreamAdder(
            new SawtoothWave(
                amplitude: 1.0,
                frequency: 120),
            new AnalyticNoiseStream(
                rms: 0.2,
                freqLB: 20,
                freqUB: 10000,
                frequencyCount: 10000,
                distribution: AnalyticNoiseStream.AmplitudeDistribution.Brown)
            .ToBGCStream());

    int[] lowBandFrequencies = { 500, 750 };
    int[] highBandFrequencies = { 1500, 2000 };
    string[] voiceFiles = { "testVoiceA.wav", "testVoiceB.wav" };

    for (int voice = 0; voice < voiceFiles.Length; voice++)
    {
        IBGCStream lowBand = makeCarrier()
            .BiQuadBandpassFilter(
                centralFrequency: lowBandFrequencies[voice],
                qFactor: qFactor);

        IBGCStream highBand = makeCarrier()
            .BiQuadBandpassFilter(
                centralFrequency: highBandFrequencies[voice],
                qFactor: qFactor);

        IBGCStream fakeVoice = new StreamAdder(lowBand, highBand)
            .Window(0.2)
            .Center(0.5)
            .SlowRangeFitter();

        WaveEncoding.SaveStream(
            filepath: DataManagement.PathForDataFile("Test", voiceFiles[voice]),
            stream: fakeVoice,
            overwrite: true);
    }
}
public void TestContinuousFilter()
{
    // Runs every ContinuousFilter type over a shared pink-noise clip with
    // six different filter envelopes (sigmoid, linear, slow/fast sine,
    // slow/fast triangle), writing one normalized wav file per
    // envelope/filter combination.
    Calibration.Initialize();

    IBGCStream noiseStream = new NoiseAudioClip(
        duration: 4,
        rms: 1.0,
        freqLB: 20.0,
        freqUB: 10000.0,
        frequencyCount: 10000,
        distribution: NoiseAudioClip.AmplitudeDistribution.Pink);

    for (ContinuousFilter.FilterType filter = 0; filter < ContinuousFilter.FilterType.MAX; filter++)
    {
        // Normalizes and writes one filtered stream under the labeled name.
        void SaveFiltered(string envelopeLabel, IBGCStream filteredStream)
        {
            WaveEncoding.SaveStream(
                filepath: DataManagement.PathForDataFile("Test", $"{envelopeLabel}{filter}FilteredNoise.wav"),
                stream: filteredStream.Normalize(80),
                overwrite: true);
        }

        SaveFiltered("Sigmoid", new ContinuousFilter(
            stream: noiseStream,
            filterEnvelope: new SigmoidEnvelope(4.0, 1.0),
            filterType: filter,
            freqLB: 20,
            freqUB: 10000));

        SaveFiltered("Linear", new ContinuousFilter(
            stream: noiseStream,
            filterEnvelope: new LinearEnvelope(4.0),
            filterType: filter,
            freqLB: 20,
            freqUB: 10000));

        SaveFiltered("SlowSine", new ContinuousFilter(
            stream: noiseStream,
            filterEnvelope: new SineWave(1.0, 1.0),
            filterType: filter,
            freqLB: 20,
            freqUB: 10000));

        SaveFiltered("FastSine", new ContinuousFilter(
            stream: noiseStream,
            filterEnvelope: new SineWave(1.0, 50.0),
            filterType: filter,
            freqLB: 20,
            freqUB: 10000));

        SaveFiltered("SlowTriangle", new ContinuousFilter(
            stream: noiseStream,
            filterEnvelope: new TriangleWave(1.0, 1.0),
            filterType: filter,
            freqLB: 20,
            freqUB: 10000));

        SaveFiltered("FastTriangle", new ContinuousFilter(
            stream: noiseStream,
            filterEnvelope: new TriangleWave(1.0, 50.0),
            filterType: filter,
            freqLB: 20,
            freqUB: 10000));
    }
}