public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    // clear the output buffer, then accumulate every input scaled by its gain
    scalarToArray(0.0F, data);

    for (int j = 0; j < inputs.Length; j++)
    {
        inputs[j].ProcessAudio(audioData, this, sampleNum, channels);

        if (gains.Length > j && gains[j] != null)
        {
            gains[j].ProcessAudio(amplitudeData, this, sampleNum, channels);
        }
        else
        {
            // no gain unit connected: default to unity gain
            scalarToArray(1.0F, amplitudeData);
        }

        for (int i = 0; i < data.Length; i++)
        {
            data[i] += amplitudeData[i] * audioData[i];
        }
    }

    return;
}
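The mixer (and the gate unit further down) relies on a scalarToArray helper that is not part of this listing. A minimal sketch of what it presumably looks like, assuming it is just a buffer-fill utility on the AudioUnit base class:

    // Assumed helper, not from the original listing: fill a buffer with one value.
    protected void scalarToArray(float value, float[] buffer)
    {
        for (int i = 0; i < buffer.Length; i++)
        {
            buffer[i] = value;
        }
    }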
public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    // pull the optional modulation inputs for this buffer
    if (input != null)
    {
        input.ProcessAudio(data, this, sampleNum, channels);
    }
    if (frqInput != null)
    {
        frqInput.ProcessAudio(frqData, this, sampleNum, channels);
    }
    if (pwInput != null)
    {
        pwInput.ProcessAudio(pwData, this, sampleNum, channels);
    }
    if (ampInput != null)
    {
        ampInput.ProcessAudio(ampData, this, sampleNum, channels);
    }

    for (int i = 0; i < data.Length; i++)
    {
        // each parameter is its base value plus the (optional) modulation input
        var frq = frqInput != null ? (frequency + frqData[i]) : frequency;
        var pw = pwInput != null ? (pulseWidth + pwData[i]) : pulseWidth;
        var amp = ampInput != null ? (amplitude + ampData[i]) : amplitude;

        var output = calcWave(frq, pw);
        output = (output * amp) + ampOffset;
        data[i] = (float)output;
    }

    return;
}
public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    // input streams?
    if (frqInput != null)
    {
        frqInput.ProcessAudio(frqData, this, sampleNum, channels);
    }
    if (pwInput != null)
    {
        pwInput.ProcessAudio(pwData, this, sampleNum, channels);
    }

    // output stream
    for (int i = 0; i < data.Length; i += channels)
    {
        // base frequency comes from frqInput (or a 220 Hz default), scaled by frqOffset
        float frq = (frqInput ? frqData[i] : 220.0F) * (float)frqOffset;

        data[i] = (waveForm == WAVEFORM.PULSE) ? calcPulse(frq, pwInput ? pwData[i] : 0.5)
                : (waveForm == WAVEFORM.SAW) ? calcSaw(frq)
                : calcSin(frq);
        data[i] *= (float)gain;

        for (int j = 1; j < channels; j++)
        {
            data[i + j] = data[i];  // duplicate the mono signal to the other channels
        }
    }

    return;
}
public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    // input streams
    if (audioInput != null)
    {
        audioInput.ProcessAudio(audioData, this, sampleNum, channels);
    }
    else
    {
        scalarToArray(0.0F, audioData);
    }
    if (gateInput != null)
    {
        gateInput.ProcessAudio(gateData, this, sampleNum, channels);
    }
    else
    {
        scalarToArray(0.0F, gateData);
    }

    // output stream
    for (int i = 0; i < data.Length; i += channels)
    {
        var g = (float)processSample(gateData[i]);
        for (int j = 0; j < channels; j++)
        {
            data[i + j] = audioData[i + j] * g;
        }
    }

    return;
}
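processSample is the unit's envelope stage and its implementation is not visible in this listing. Purely as a hedged illustration of one way it could work, a one-pole attack/release follower of the gate signal might look like the sketch below; the attack, release, and envelope fields are assumptions, not names from the original code.

    // Hypothetical envelope follower, not the original implementation.
    // 'attack' and 'release' (in seconds) and 'envelope' are assumed fields of this unit.
    double envelope;

    double processSample(double gate)
    {
        // pick a smoothing time depending on whether the gate is rising or falling
        double time = gate > envelope ? attack : release;
        double coeff = Math.Exp(-1.0 / (Math.Max(time, 1e-4) * SampleRate));
        envelope = gate + (coeff * (envelope - gate));
        return envelope;
    }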
public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    if (cutoffInput != null)
    {
        cutoffInput.ProcessAudio(cutoffData, this, sampleNum, channels);
    }
    if (input == null)
    {
        throw new Exception("Must define an AudioInput");
    }
    input.ProcessAudio(data, this, sampleNum, channels);

    for (int i = 0; i < data.Length; i++)
    {
        var cutFrq = cutoffInput == null ? cutoff : (double)cutoffData[i] + cutoff;

        // recompute the filter coefficients only when cutoff or resonance changes
        if (oldCutFrq != cutFrq || oldRes != resonance)
        {
            cutFrq = cutFrq < 0.0 ? 0.0 : cutFrq;
            oldCutFrq = cutFrq;
            oldRes = resonance;
            g = Math.Tan(Math.PI * (cutFrq / SampleRate));
            k = 2.0 - (2.0 * resonance);
            a1 = 1.0 / (1.0 + (g * (g + k)));
            a2 = g * a1;
        }

        // state variable filter core
        v1 = (a1 * ic1eq) + (a2 * (data[i] - ic2eq));
        v2 = ic2eq + (g * v1);
        ic1eq = (2.0 * v1) - ic1eq;
        ic2eq = (2.0 * v2) - ic2eq;

        switch (filterType)
        {
            case FilterType.LOW:
                data[i] = (float)v2;
                break;
            case FilterType.BAND:
                data[i] = (float)v1;
                break;
            case FilterType.HIGH:
                data[i] = (float)((data[i] - (k * v1)) - v2);
                break;
            case FilterType.NOTCH:
                data[i] = (float)(data[i] - (k * v1));
                break;
            case FilterType.PEAK:
                data[i] = (float)((data[i] - (k * v1)) - (2.0 * v2));
                break;
            case FilterType.ALLPASS:
                data[i] = (float)(data[i] - (2.0 * k * v1));
                break;
        }
    }

    return;
}
void Start()
{
    int buflength, numbufs;
    AudioSettings.GetDSPBufferSize(out buflength, out numbufs);
    var channels = AudioUnit.speakerModeToChannels(AudioSettings.speakerMode);
    ampData = new float[buflength * channels];
}
void Start()
{
    SampleRate = AudioSettings.outputSampleRate;
    int buflength, numbufs;
    AudioSettings.GetDSPBufferSize(out buflength, out numbufs);
    var channels = AudioUnit.speakerModeToChannels(AudioSettings.speakerMode);
    cutoffData = new float[buflength * channels];
}
void Start()
{
    // SampleRate = AudioSettings.outputSampleRate;
    int buflength, numbufs;
    AudioSettings.GetDSPBufferSize(out buflength, out numbufs);
    Channels = AudioUnit.speakerModeToChannels(AudioSettings.speakerMode);
    audioData = new float[buflength * Channels];
    amplitudeData = new float[buflength * Channels];
}
void Start()
{
    SampleRate = AudioSettings.outputSampleRate;
    Omega = (1.0 / AudioSettings.outputSampleRate) * TWOPI;
    int buflength, numbufs;
    AudioSettings.GetDSPBufferSize(out buflength, out numbufs);
    Channels = AudioUnit.speakerModeToChannels(AudioSettings.speakerMode);
    frqData = new float[buflength * Channels];
    pwData = new float[buflength * Channels];
}
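The oscillator's calcSin/calcSaw/calcPulse helpers are referenced but not listed. Since Start() precomputes Omega as 2π divided by the sample rate, a phase-accumulator implementation is the natural fit; the sketch below is only an assumption of that shape, and the phase field is hypothetical.

    // Assumed phase-accumulator sine, not from the original listing; 'phase' is a hypothetical field.
    double phase;

    float calcSin(float frq)
    {
        phase += frq * Omega;      // advance by 2*pi*frq/sampleRate each sample
        if (phase > TWOPI)
        {
            phase -= TWOPI;        // keep the accumulator bounded
        }
        return (float)Math.Sin(phase);
    }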
public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    if (audioFile == null || clipSamples < 0)
    {
        return;
    }

    // input streams
    if (gateInput != null)
    {
        gateInput.ProcessAudio(gateData, this, sampleNum, channels);
    }
    if (loopStartInput != null)
    {
        loopStartInput.ProcessAudio(loopStartData, this, sampleNum, channels);
    }
    // keep the loop start inside the clip
    if ((long)(loopStart * SampleRate) < 0 || (loopStart * SampleRate) > clipSamples)
    {
        loopStart = 0.0;
    }

    for (int i = 0; i < data.Length; i += channels)
    {
        // the gate is treated as a boolean: anything above 0.25 counts as "on"
        var g = ((gateInput != null) ? gateData[i] : gate) > 0.25 ? 1.0F : 0.0F;
        var ls = loopStartInput
            ? (long)((loopStartData[i] + loopStart) * SampleRate)
            : (long)(loopStart * SampleRate);

        // diagnostic: the playhead should never pass the end of the clip
        if (playHead > clipSamples)
        {
            Debug.Log("playHead " + playHead.ToString() + " file " + clipSamples);
        }

        data[i] = audioBuffer[playHead * clipChannels] * g;
        if (clipChannels > 1)
        {
            data[i + 1] = audioBuffer[(playHead * clipChannels) + 1] * g;
        }

        // rising gate edge: optionally restart playback from the loop start
        if (gateState < 0.5 && g >= 0.5)
        {
            if (gateRetrigger)
            {
                playHead = Math.Abs(ls);
            }
        }
        gateState = g;

        if (g > 0.5)
        {
            playHead++;
            // wrap at the loop end, or at the end of the clip if no loop duration is set
            if (playHead >= ((loopDuration > 0.0) ? (long)(loopDuration * SampleRate) + ls : clipSamples))
            {
                playHead = Math.Abs(ls);
            }
            if (playHead >= clipSamples)
            {
                playHead = 0;
            }
        }
    }
}
public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    if (input != null)
    {
        input.ProcessAudio(data, this, sampleNum, channels);
    }

    for (int i = 0; i < data.Length; i++)
    {
        // output a constant value, offset by the input signal if one is connected
        var offset = input != null ? data[i] : 0.0;
        data[i] = (float)value + (float)offset;
    }

    return;
}
public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    // only process input once for 'n' outputs
    if ((outputCnt % outputs.Length) == 0)
    {
        input.ProcessAudio(tmpData, this, sampleNum, channels);
    }

    // use tmpData for all outputs
    for (int i = 0; i < data.Length; i++)
    {
        data[i] = tmpData[i];
    }

    outputCnt++;
    return;
}
public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    if (input != null)
    {
        input.ProcessAudio(data, this, sampleNum, channels);
    }
    if (ampInput != null)
    {
        ampInput.ProcessAudio(ampData, this, sampleNum, channels);
    }

    for (int i = 0; i < data.Length; i++)
    {
        // treat a missing audio input as silence rather than whatever the caller's buffer holds
        var offset = input != null ? data[i] : 0.0;
        data[i] = (float)(offset * (ampInput ? ampData[i] + amp : amp));
    }

    return;
}
void Start()
{
    SampleRate = AudioSettings.outputSampleRate;
    int buflength, numbufs;
    AudioSettings.GetDSPBufferSize(out buflength, out numbufs);
    var channels = AudioUnit.speakerModeToChannels(AudioSettings.speakerMode);
    gateData = new float[buflength * channels];
    loopStartData = new float[buflength * channels];

    if (audioFile)
    {
        clipSamples = audioFile.samples;
        clipChannels = audioFile.channels;
        audioBuffer = new float[clipSamples * clipChannels];
        audioFile.GetData(audioBuffer, 0);
    }
}
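Every Start() above sizes its work buffers with AudioUnit.speakerModeToChannels, which is not shown here. A plausible sketch, assuming it simply maps Unity's AudioSpeakerMode to a channel count:

    // Assumed helper on the AudioUnit base class, not from the original listing.
    public static int speakerModeToChannels(AudioSpeakerMode mode)
    {
        switch (mode)
        {
            case AudioSpeakerMode.Mono:        return 1;
            case AudioSpeakerMode.Stereo:      return 2;
            case AudioSpeakerMode.Quad:        return 4;
            case AudioSpeakerMode.Surround:    return 5;
            case AudioSpeakerMode.Mode5point1: return 6;
            case AudioSpeakerMode.Mode7point1: return 8;
            default:                           return 2;
        }
    }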
public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    if (BPM < 40.0)
    {
        BPM = 40.0;
    }

    // work on local copies so both outputs step through the same sequence data
    var tmpCountDown = countDown;
    var tmpStepNumber = stepNumber;
    var tmpStepDuration = stepDuration;

    for (int i = 0; i < data.Length; i += channels)
    {
        if (tmpCountDown < 1)
        {
            tmpStepNumber++;  // increment the step count
            // set the countdown until the next step increment
            tmpCountDown = (long)((SampleRate / (BPM / 60.0)) * durations[tmpStepNumber % durations.Length]);
            tmpStepDuration = tmpCountDown;
        }

        if (caller == gateOutput)
        {
            // gate output: high for the gated fraction of the step, low for the rest
            var gateLength = (float)gates[tmpStepNumber % gates.Length];
            data[i] = ((tmpStepDuration - tmpCountDown) > (long)(gateLength * tmpStepDuration)) ? 0.0F : 1.0F;
            // data[i] = (sampleNum % stepDuration) < ((long) ((SampleRate / (BPM / 60.0)) * gateLength)) ? 1.0F : 0.0F;
        }
        else
        {
            // pitch output: frequency of the current step's note
            data[i] = (float)midi2Frq(pitches[tmpStepNumber % pitches.Length]);
        }

        for (int j = 1; j < channels; j++)
        {
            data[i + j] = data[i];  // i.e. mono
        }

        tmpCountDown--;
    }

    // the sequencer is pulled once per output per buffer, so only commit the
    // advanced state on every second call
    if ((processRunNum % 2) == 1)
    {
        countDown = tmpCountDown;
        stepNumber = tmpStepNumber;
        stepDuration = tmpStepDuration;
    }
    processRunNum++;

    return;
}
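The sequencer converts note numbers to frequencies with midi2Frq, which is not in the listing. The standard equal-temperament conversion (MIDI note 69 = A4 = 440 Hz) is almost certainly what it does; a sketch under that assumption:

    // Assumed implementation of the midi2Frq helper referenced above:
    // standard MIDI-note-to-frequency conversion, equal temperament, A4 = 440 Hz.
    double midi2Frq(double note)
    {
        return 440.0 * Math.Pow(2.0, (note - 69.0) / 12.0);
    }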
public override void ProcessAudio(float[] data, AudioUnit caller, long sampleNum, int channels)
{
    if (input != null)
    {
        input.ProcessAudio(data, this, sampleNum, channels);
    }

    for (int i = 0; i < data.Length; i += channels)
    {
        // left channel: read the delayed sample, mix it with the dry signal, then
        // write the new (dry + feedback) sample back into the delay line
        var leftd = delayLineLeft[playHeadLeft];
        var leftout = data[i] + ((float)mixOut * leftd);
        delayLineLeft[playHeadLeft] = (data[i] * mixIn) + (leftd * feedback);
        playHeadLeft = (playHeadLeft + 1) % (long)(delayTimeLeft * 0.001 * SampleRate);
        data[i] = leftout;

        if (channels > 1)
        {
            // right channel, with its own delay time
            var rightd = delayLineRight[playHeadRight];
            var rightout = data[i + 1] + ((float)mixOut * rightd);
            delayLineRight[playHeadRight] = (data[i + 1] * mixIn) + (rightd * feedback);
            playHeadRight = (playHeadRight + 1) % (long)(delayTimeRight * 0.001 * SampleRate);
            data[i + 1] = rightout;
        }
    }

    return;
}
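The delay lines themselves are allocated elsewhere. Since the playheads wrap at delayTime * 0.001 * SampleRate samples, the corresponding Start() presumably sizes each buffer to exactly one delay period; a sketch under that assumption (the delay times are taken to be in milliseconds, as the 0.001 factor implies):

    // Assumed Start() for the delay unit, not from the original listing.
    void Start()
    {
        SampleRate = AudioSettings.outputSampleRate;
        delayLineLeft = new float[(long)(delayTimeLeft * 0.001 * SampleRate)];
        delayLineRight = new float[(long)(delayTimeRight * 0.001 * SampleRate)];
    }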
public abstract void ProcessAudio(float[] buffer, AudioUnit caller, long sampleNum, int numChannels);
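Every unit implements this single pull-style entry point. One way to drive the whole graph, assuming the final output sits on a GameObject with an AudioSource, is to pull the root unit from Unity's OnAudioFilterRead callback. The class below is a sketch of that wiring, not code from the original project; the name AudioOutput and the null caller argument are assumptions.

    using UnityEngine;

    // Hypothetical output node: pulls the connected graph on Unity's audio thread.
    public class AudioOutput : MonoBehaviour
    {
        public AudioUnit input;   // root of the unit graph to render
        long sampleNum;           // running sample position handed down the graph

        void OnAudioFilterRead(float[] data, int channels)
        {
            if (input == null)
            {
                return;           // nothing connected; leave Unity's buffer untouched
            }
            // 'caller' is only inspected by units with multiple outputs (e.g. the
            // sequencer's gateOutput check), so the final output can pass null here.
            input.ProcessAudio(data, null, sampleNum, channels);
            sampleNum += data.Length / channels;
        }
    }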