Code example #1
    void OnAudioFilterRead(float[] data, int channels)
    {
        currentDspTime = AudioSettings.dspTime;  // current time of the audio system
        dataLen        = data.Length / channels; // sample count per channel
        chunkTime      = dataLen / sampleRate;   // duration of this data chunk in seconds
        dspTimeStep    = chunkTime / dataLen;    // duration of a single sample

        double preciseDspTime;

        for (int i = 0; i < dataLen; i++)
        {
            preciseDspTime = currentDspTime + i * dspTimeStep;
            double signalValue = 0.0;
            double currentFreq = mainFrequency;

            if (useFrequencyModulation)
            {
                double freqOffset = (frequencyModulationOscillatorIntensity * mainFrequency * 0.75) / 100.0;
                currentFreq += mapValueD(frequencyModulationOscillator.calculateSignalValue(preciseDspTime, frequencyModulationOscillatorFrequency), -1.0, 1.0, -freqOffset, freqOffset);
                frequencyModulationRangeOut = (float)frequencyModulationOscillator.calculateSignalValue(preciseDspTime, frequencyModulationOscillatorFrequency) * 0.5f + 0.5f;
            }
            else
            {
                frequencyModulationRangeOut = 0.0f;
            }

            if (useSinusAudioWave)
            {
                signalValue += sinusAudioWave.calculateSignalValue(preciseDspTime, currentFreq);
            }

            if (useAmplitudeModulation)
            {
                signalValue *= mapValueD(amplitudeModulationOscillator.calculateSignalValue(preciseDspTime, amplitudeModulationOscillatorFrequency), -1.0, 1.0, 0.0, 1.0);
            }

            float x = masterVolume * 0.5f * (float)signalValue;

            for (int j = 0; j < channels; j++)
            {
                data[i * channels + j] = x;
            }
        }
    }
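
Note: all four examples call a mapValueD helper that is not shown in any of the snippets. A minimal sketch consistent with how it is used here (linearly remapping an oscillator's [-1, 1] output into another range) could look like the following; the original implementation is an assumption.

    // Hypothetical reconstruction of the mapValueD helper used in these examples:
    // linearly remaps value from [inMin, inMax] to [outMin, outMax].
    double mapValueD(double value, double inMin, double inMax, double outMin, double outMax)
    {
        return outMin + (value - inMin) * (outMax - outMin) / (inMax - inMin);
    }

With this definition, mapValueD(s, -1.0, 1.0, 0.0, 1.0) maps a bipolar oscillator sample s to the unipolar [0, 1] gain used by the amplitude-modulation branch.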
Code example #2
    /*private void LogFrequency ()
     * {
     *  Debug.Log("zeroL: " + zerosL.Count + "\t zeroR: " + zerosR.Count + "\t aligned:" + zerosAlign);
     *  ZeroCrossing l = zerosL.Last<ZeroCrossing>();
     *  ZeroCrossing r = zerosR.Last<ZeroCrossing>();
     *  zerosL.Clear();
     *  zerosR.Clear();
     *  zerosL.Add(l);
     *  zerosR.Add(r);
     *  zerosAlign = 0;
     * }
     */

    // Update is called once per frame
    //void Update()
    //{
    //    // Debug.Log("avg : " + audioRange.Average() + " | max: " + audioRange.Max() + " | min: " + audioRange.Min());
    //}

    private void OnAudioFilterRead(float[] data, int channels)
    {
        currentDspTime = AudioSettings.dspTime;
        dataLen        = data.Length / channels;
        chunkTime      = dataLen / sampleRate;
        dspTimeStep    = chunkTime / dataLen;

        double preciseDspTime;
        double f  = (double)frequency;          // base frequency, written to the first channel
        double bf = f + (double)brainwave;      // base frequency plus brainwave offset, written to the second channel

        for (int i = 0; i < dataLen; i++)
        {
            preciseDspTime = currentDspTime + i * dspTimeStep;
            double stepPercentage = (double)i / dataLen;
            double signalValueA   = sinAAudio.calculateSignalValue(preciseDspTime, f, (double)amplitude, stepPercentage);
            double signalValueB   = (double)amplitude * sinBAudio.calculateSignalValue(preciseDspTime, bf, (double)amplitude, stepPercentage);
            data[i * channels]     = (float)signalValueA; // left channel: base frequency
            data[i * channels + 1] = (float)signalValueB; // right channel: offset frequency (assumes at least two channels)
        }
    }
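
Note: example #2 assumes sinAAudio and sinBAudio oscillators whose calculateSignalValue takes four arguments (time, frequency, amplitude, and a per-chunk progress fraction); their implementation is not shown. A minimal sine oscillator matching that signature might look like the sketch below; the original may use stepPercentage to smooth parameter changes between chunks, which is omitted here.

    // Hypothetical oscillator matching the four-argument signature in example #2.
    class SineOscillator
    {
        public double calculateSignalValue(double time, double frequency, double amplitude, double stepPercentage)
        {
            // stepPercentage (progress through the current chunk) is unused in this sketch.
            return amplitude * System.Math.Sin(2.0 * System.Math.PI * frequency * time);
        }
    }

Writing two slightly detuned sines to separate channels (f on the left, f + brainwave on the right) produces a beating effect between the two channels.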
Code example #3
    void OnAudioFilterRead(float[] data, int channels)
    {
        /* This is called by the system
         * suppose: sampleRate = 48000
         * suppose: data.Length = 2048
         * suppose: channels = 2
         * then:
         * dataLen = 2048/2 = 1024
         * chunkTime = 1024 / 48000 = 0.0213333..., so each chunk lasts about 21.3 milliseconds.
         * dspTimeStep = 0.0213333 / 1024 = 2.083333... * 10^(-5) sec = 0.02083 milliseconds
         *      note that 1 / dspTimeStep = 48000, i.e. the sample rate.
         */

        currentDspTime = AudioSettings.dspTime;
        dataLen        = data.Length / channels;        // the actual data length for each channel
        chunkTime      = dataLen / sampleRate;          // the time that each chunk of data lasts
        dspTimeStep    = chunkTime / dataLen;           // the time of each dsp step. (the time that each individual audio sample (actually a float value) lasts)

        double preciseDspTime;

        for (int i = 0; i < dataLen; i++)                 // go through data chunk
        {
            preciseDspTime = currentDspTime + i * dspTimeStep;
            double signalValue = 0.0;
            double currentFreq = mainFrequency;
            if (useFrequencyModulation)
            {
                double freqOffset = (frequencyModulationOscillatorIntensity * mainFrequency * 0.75) / 100.0;
                currentFreq += mapValueD(frequencyModulationOscillator.calculateSignalValue(preciseDspTime, frequencyModulationOscillatorFrequency), -1.0, 1.0, -freqOffset, freqOffset);
                frequencyModulationRangeOut = (float)frequencyModulationOscillator.calculateSignalValue(preciseDspTime, frequencyModulationOscillatorFrequency) * 0.5f + 0.5f;
            }
            else
            {
                frequencyModulationRangeOut = 0.0f;
            }

            if (useSinusAudioWave)
            {
                signalValue += sinusAudioWaveIntensity * sinusAudioWave.calculateSignalValue(preciseDspTime, currentFreq);
            }
            if (useSawAudioWave)
            {
                signalValue += sawAudioWaveIntensity * sawAudioWave.calculateSignalValue(preciseDspTime, currentFreq);
            }
            if (useSquareAudioWave)
            {
                signalValue += squareAudioWaveIntensity * squareAudioWave.calculateSignalValue(preciseDspTime, currentFreq);
            }

            if (useAmplitudeModulation)
            {
                signalValue *= mapValueD(amplitudeModulationOscillator.calculateSignalValue(preciseDspTime, amplitudeModulationOscillatorFrequency), -1.0, 1.0, 0.0, 1.0);
                amplitudeModulationRangeOut = (float)amplitudeModulationOscillator.calculateSignalValue(preciseDspTime, amplitudeModulationOscillatorFrequency) * 0.5f + 0.5f;
            }
            else
            {
                amplitudeModulationRangeOut = 0.0f;
            }

            float x = masterVolume * 0.5f * (float)signalValue;

            for (int j = 0; j < channels; j++)
            {
                data[i * channels + j] = x;
            }
        }
    }
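
Note: the sinusAudioWave, sawAudioWave, and squareAudioWave objects are assumed by examples #1, #3, and #4; their two-argument calculateSignalValue(time, frequency) implementations are not shown. One plausible stateless sketch, returning samples in [-1, 1], is:

    // Hypothetical stateless waveform generators matching the two-argument
    // calculateSignalValue(time, frequency) calls above; all return values in [-1, 1].
    static class Waves
    {
        public static double Sine(double time, double frequency)
        {
            return System.Math.Sin(2.0 * System.Math.PI * frequency * time);
        }

        public static double Saw(double time, double frequency)
        {
            double phase = frequency * time;                        // cycles elapsed
            return 2.0 * (phase - System.Math.Floor(phase + 0.5));  // ramp in [-1, 1)
        }

        public static double Square(double time, double frequency)
        {
            return Sine(time, frequency) >= 0.0 ? 1.0 : -1.0;
        }
    }

Because currentFreq changes on every sample while frequency modulation is active, a stateless sin(2*pi*f*t) formulation produces phase jumps when f changes; a production oscillator would more likely accumulate phase incrementally (phase += 2*pi*f*dspTimeStep) instead.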
Code example #4
    /// <summary>
    ///  OnAudioFilterRead lets us intercept what the audio source attached to this object is playing. This way we can modify
    ///  what the audio source is playing, either by writing new samples into the buffer or by changing the audio clip the audio source is playing.
    /// </summary>
    /// <param name="data">Buffer containing the audio source's samples; we can write into it to make the audio source object play sounds.</param>
    /// <param name="channels">Number of channels available from the Unity audio engine. For stereo audio, channels = 2.</param>
    void OnAudioFilterRead(float[] data, int channels)
    {
        if (_GM.isActive)
        {
            /*
             * This is "the current time of the audio system", as given
             * by Unity. It is updated every time the OnAudioFilterRead() function
             * is called. It's usually every 1024 samples.
             *
             */

            currentDspTime = AudioSettings.dspTime;  // the current time of the audio system
            dataLen        = data.Length / channels; // the actual data length for each channel
            chunkTime      = dataLen / sampleRate;   // the time that each chunk of data lasts
            dspTimeStep    = chunkTime / dataLen;    // the time of each dsp step. (the time that each individual audio sample (actually a float value) lasts)

            double preciseDspTime;                   //  used to get a precise approximation of the time
            nextOutput = 0;
            sinOutput  = 0;
            sawOutput  = 0;
            sqrOutput  = 0;

            for (int i = 0; i < dataLen; i++)                      // go through data chunk
            {
                preciseDspTime = currentDspTime + i * dspTimeStep; //  we calculate the current dsp time adding the time of every step

                double currentFreq = frequency;                    //  this lets us modulate the frequency

                //  Applies Frequency Modulation
                if (useFrequencyModulation)
                {
                    double freqOffset = (frequencyModulationOscillatorIntensity * frequency * 0.75) / 100.0;
                    currentFreq += mapValueD(frequencyModulationOscillator.calculateSignalValue(preciseDspTime, frequencyModulationOscillatorFrequency), -1.0, 1.0, -freqOffset, freqOffset);
                    frequencyModulationRangeOut = (float)frequencyModulationOscillator.calculateSignalValue(preciseDspTime, frequencyModulationOscillatorFrequency) * 0.5f + 0.5f;
                }
                else
                {
                    frequencyModulationRangeOut = 0.0f;
                }

                //  the samples calculated for the sine wave
                sinOutput = (float)(sinWeight * sinusAudioWave.calculateSignalValue(preciseDspTime, currentFreq));
                //  the samples calculated for the saw wave
                sawOutput = (float)(sawWeight * sawAudioWave.calculateSignalValue(preciseDspTime, currentFreq));
                //  the samples calculated for the square wave
                sqrOutput = (float)(sqrWeight * squareAudioWave.calculateSignalValue(preciseDspTime, currentFreq));


                /*      Mix all the outputs together
                 * http://www.vttoth.com/CMS/index.php/technical-notes/68
                 * Let's say we have two signals, A and B. If A is quiet, we want to hear B on the output in unaltered form. If B
                 * is quiet, we want to hear A on the output (i.e., A and B are treated symmetrically). If both A and B have a non-zero amplitude,
                 * the mixed signal must have an amplitude between the greater of A and B and the maximum permissible amplitude.
                 * If we take A and B to have values between 0 and 1, there is actually a simple equation that satisfies all of the
                 * above conditions:       Z = A + B − AB.
                 * Simple, isn't it! Moreover, it can be easily adapted for more than two signals.
                 * Consider what happens if we mix another signal, C, into Z:  T = Z + C − ZC = A + B + C − AB − AC − BC + ABC.
                 * (A generalized sketch of this rule follows after this example.)
                 */
                nextOutput = sinOutput + sawOutput + sqrOutput - (sinOutput * sawOutput) -
                             (sinOutput * sqrOutput) - (sawOutput * sqrOutput) + (sinOutput * sawOutput * sqrOutput);



                //  Applies Amplitude Modulation
                if (useAmplitudeModulation)
                {
                    nextOutput *= (float)mapValueD(amplitudeModulationOscillator.calculateSignalValue(preciseDspTime, amplitudeModulationOscillatorFrequency), -1.0, 1.0, 0.0, 1.0);
                    amplitudeModulationRangeOut = (float)amplitudeModulationOscillator.calculateSignalValue(preciseDspTime, amplitudeModulationOscillatorFrequency) * 0.5f + 0.5f;
                }
                else
                {
                    amplitudeModulationRangeOut = 0.0f;
                }


                //  regulates the output based on the current volume of the synth
                float x = volume * (float)nextOutput;

                //  Copies the samples on every available channels of the sound system
                for (int j = 0; j < channels; j++)
                {
                    data[i * channels + j] = x;
                }
            }
        }
    }
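
Note: the three-signal expansion in example #4 follows from applying Z = A + B − AB pairwise; the derivation in the linked note assumes signals in [0, 1], while the wave outputs here are bipolar. As an illustrative sketch (not part of the original source), the rule can be folded over any number of signals:

    // Illustrative generalization of the Z = A + B - AB mixing rule from example #4,
    // applied pairwise over any number of signals (derivation assumes values in [0, 1]).
    static float MixSignals(params float[] signals)
    {
        float mix = 0.0f;
        foreach (float s in signals)
        {
            mix = mix + s - mix * s; // Z = A + B - AB, folded left to right
        }
        return mix;
    }

For the three inputs used above, MixSignals(sinOutput, sawOutput, sqrOutput) expands to exactly the T = A + B + C − AB − AC − BC + ABC expression computed inline.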