Example #1
0
    void Update()
    {
        // Real-time: sample the playing audio each frame, run spectral-flux
        // analysis on it, and refresh the live plot.
        if (realTimeSamples)
        {
            audioSource.GetSpectrumData(realTimeSpectrum, 0, FFTWindow.BlackmanHarris);
            realTimeSpectralFluxAnalyzer.analyzeSpectrum(realTimeSpectrum, audioSource.time);
            realTimePlotController.updatePlot(realTimeSpectralFluxAnalyzer.spectralFluxSamples);
        }

        // Preprocessed: map the current playback time to the matching
        // 1024-sample FFT window index and plot that sample.
        if (preProcessSamples)
        {
            int indexToPlot = getIndexFromTime(audioSource.time) / 1024;
            preProcessedPlotController.updatePlot(preProcessedSpectralFluxAnalyzer.spectralFluxSamples, indexToPlot);
        }

        // 3D spectrum add-on: build the graph exactly once, as soon as the
        // background analysis thread reports completion.
        if (backgroundThreadCompleted && !threeDimensionalSpectrumBuild)
        {
            Debug.Log("calling spectrumBoy to build the thing!");
            threeDimensionalSpectrumBuild = true;
            // BUG FIX: the format string was missing its {0} placeholder, so the
            // flag value passed to string.Format was silently dropped.
            Debug.Log(string.Format("threeDimensionalSpectrumBuild = {0}", threeDimensionalSpectrumBuild));
            spectrumBoy.buildSpectrumGraph();
        }
        // Once built, keep both 3D spectrum graphs updated every frame.
        if (backgroundThreadCompleted && threeDimensionalSpectrumBuild)
        {
            spectrumBoy.updateSpectrumGraph(audioSource.time);
            spectrumRealTime.updateRealTimeSpectrumGraph();
        }
    }
Example #2
0
    void Update()
    {
        // PERF: GetComponent is comparatively expensive and was previously
        // called up to twice per frame for the same component; cache the
        // pause flag once per frame instead.
        bool paused = GetComponent<AudioManipulation>().pausePressed;

        // Real-time: while not paused, sample the playing audio, analyze
        // its spectral flux, and refresh the live plot.
        if (realTimeSamples && !paused)
        {
            audioSource.GetSpectrumData(realTimeSpectrum, 0, FFTWindow.BlackmanHarris);
            realTimeSpectralFluxAnalyzer.analyzeSpectrum(realTimeSpectrum, audioSource.time);
            realTimePlotController.updatePlot(realTimeSpectralFluxAnalyzer.spectralFluxSamples);
        }

        // Preprocessed: while not paused, map the playback time to the
        // matching 1024-sample FFT window index and plot it.
        if (preProcessSamples && !paused)
        {
            int indexToPlot = getIndexFromTime(audioSource.time) / 1024;
            preProcessedPlotController.updatePlot(preProcessedSpectralFluxAnalyzer.spectralFluxSamples, indexToPlot);
        }
    }
    void Update()
    {
        // Real-time path: pull the current spectrum from the audio source,
        // run spectral-flux analysis on it, and redraw the live plot.
        if (realTimeSamples)
        {
            audioSource.GetSpectrumData(realTimeSpectrum, 0, FFTWindow.BlackmanHarris);
            realTimeSpectralFluxAnalyzer.analyzeSpectrum(realTimeSpectrum, audioSource.time);
            realTimePlotController.updatePlot(realTimeSpectralFluxAnalyzer.spectralFluxSamples);
        }

        // Preprocessed path: translate the playback position into the index
        // of the corresponding 4096-sample analysis window and plot it.
        if (preProcessSamples)
        {
            int windowIndex = getIndexFromTime(audioSource.time) / 4096;
            preProcessedPlotController.updatePlot(preProcessedSpectralFluxAnalyzer.spectralFluxSamples, windowIndex);
        }
    }
Example #4
0
    /// <summary>
    /// Runs an FFT over the preprocessed (channel-combined) sample data in
    /// 1024-sample chunks and feeds each magnitude spectrum to the spectral
    /// flux analyzer for peak detection. Exceptions are caught and logged.
    /// </summary>
    /// <param name="samples">Raw sample buffer handed to GetOutputData.</param>
    private void GetSpectrumData(float[] samples)
    {
        try
        {
            float[] preProcessedSamples = GetOutputData(samples);

            Debug.Log("Combine Channels done");
            Debug.Log(preProcessedSamples.Length);

            // Once the audio sample data is prepared, execute an FFT to get
            // the spectrum data over the time domain.
            int spectrumSampleSize = 1024;
            int iterations         = preProcessedSamples.Length / spectrumSampleSize;

            FFT fft = new FFT();
            fft.Initialize((UInt32)spectrumSampleSize);

            // PERF FIX: the window coefficients and scale factor depend only on
            // the chunk size; they were previously recomputed (and a new array
            // allocated) on every loop iteration.
            double[] windowCoefs = DSP.Window.Coefficients(DSP.Window.Type.Hanning, (uint)spectrumSampleSize);
            double   scaleFactor = DSP.Window.ScaleFactor.Signal(windowCoefs);

            Debug.Log(string.Format("Processing {0} time domain samples for FFT", iterations));
            double[] sampleChunk = new double[spectrumSampleSize];
            for (int i = 0; i < iterations; i++)
            {
                // Grab the current 1024-sample chunk of audio data.
                Array.Copy(preProcessedSamples, i * spectrumSampleSize, sampleChunk, 0, spectrumSampleSize);

                // Apply the chosen FFT window.
                double[] scaledSpectrumChunk = DSP.Math.Multiply(sampleChunk, windowCoefs);

                // Perform the FFT and convert the complex output to magnitudes.
                Complex[] fftSpectrum       = fft.Execute(scaledSpectrumChunk);
                double[]  scaledFFTSpectrum = DSPLib.DSP.ConvertComplex.ToMagnitude(fftSpectrum);
                scaledFFTSpectrum = DSP.Math.Multiply(scaledFFTSpectrum, scaleFactor);

                // These 1024 magnitude values correspond (roughly) to a single
                // point in the audio timeline.
                float curSongTime = getTimeFromIndex(i) * spectrumSampleSize;

                // Send the magnitudes to the spectral flux analyzer for peak detection.
                preProcessedSpectralFluxAnalyzer.analyzeSpectrum(Array.ConvertAll(scaledFFTSpectrum, x => (float)x), curSongTime);
            }

            // BUG FIX: these completion messages were inside the loop and fired
            // once per chunk; they belong after all chunks are processed.
            Debug.Log("Spectrum Analysis done");
            Debug.Log("Background Thread Completed");
        }
        catch (Exception e)
        {
            Debug.Log(e.ToString());
        }
    }
Example #5
0
    /// <summary>
    /// Background-thread entry point: averages the interleaved multi-channel
    /// samples down to one channel, runs a chunked FFT over the result, feeds
    /// each magnitude spectrum to the spectral flux analyzer, and reports
    /// progress (0-100) through <paramref name="callback"/>.
    /// </summary>
    /// <param name="callback">Receives percentage progress; called with 100 on completion.</param>
    public void getFullSpectrumThreaded(Action <float> callback)
    {
        try
        {
            // We only need to retain the samples for combined channels over the time domain.
            float[] preProcessedSamples = new float[this._numTotalSamples];

            int   numProcessed           = 0;
            float combinedChannelAverage = 0f;
            for (int i = 0; i < _multiChannelSamples.Length; i++)
            {
                combinedChannelAverage += _multiChannelSamples[i];

                // Each time all channel samples for one point in time have been
                // accumulated, store the average of the channels combined.
                if ((i + 1) % this._numChannels == 0)
                {
                    preProcessedSamples[numProcessed] = combinedChannelAverage / this._numChannels;
                    numProcessed++;
                    combinedChannelAverage = 0f;
                }
            }

            // Once the audio sample data is prepared, execute an FFT to get
            // the spectrum data over the time domain.
            int spectrumSampleSize = 1024;
            int iterations         = preProcessedSamples.Length / spectrumSampleSize;

            FFT fft = new FFT();
            fft.Initialize((UInt32)spectrumSampleSize);

            // PERF FIX: window coefficients and scale factor depend only on the
            // chunk size; previously they were recomputed (and a fresh array
            // allocated) on every loop iteration.
            double[] windowCoefs = DSP.Window.Coefficients(DSP.Window.Type.Hanning, (uint)spectrumSampleSize);
            double   scaleFactor = DSP.Window.ScaleFactor.Signal(windowCoefs);

            double[] sampleChunk = new double[spectrumSampleSize];
            for (int i = 0; i < iterations; i++)
            {
                // Grab the current 1024-sample chunk of audio data.
                Array.Copy(preProcessedSamples, i * spectrumSampleSize, sampleChunk, 0, spectrumSampleSize);

                // Apply the chosen FFT window.
                double[] scaledSpectrumChunk = DSP.Math.Multiply(sampleChunk, windowCoefs);

                // Perform the FFT and convert the complex output to magnitudes.
                Complex[] fftSpectrum       = fft.Execute(scaledSpectrumChunk);
                double[]  scaledFFTSpectrum = DSPLib.DSP.ConvertComplex.ToMagnitude(fftSpectrum);
                scaledFFTSpectrum = DSP.Math.Multiply(scaledFFTSpectrum, scaleFactor);

                // These 1024 magnitude values correspond (roughly) to a single
                // point in the audio timeline.
                float curSongTime = getTimeFromIndex(i) * spectrumSampleSize;

                // Send the magnitudes to the spectral flux analyzer for peak detection.
                _preProcessedSpectralFluxAnalyzer.analyzeSpectrum(Array.ConvertAll(scaledFFTSpectrum, x => (float)x), curSongTime);
                callback((i * 100) / iterations);
            }

            callback(100);
            _loadComplete = true;
        }
        catch (Exception e)
        {
            // Catch exceptions here since the background thread won't always
            // surface the exception to the main thread.
            Debug.Log(e.ToString());
        }
    }
Example #6
0
    /// <summary>
    /// Offline analysis of the loaded sound file: averages the interleaved
    /// channel samples to mono, runs a chunked FFT with spectral-flux peak
    /// detection, and records detected peaks into <c>importantMoments</c> and
    /// <c>freqCurve</c> animation curves keyed by song time.
    /// </summary>
    void AnalyzeAudio()
    {
        if (soundFile)
        {
            // Averaged sample data: (L,R,L,R,...) collapses to ((L+R)/2, ...).
            float[] preprocessedSamples = new float[numSamples];

            int   numberOfSamplesProcessed = 0;
            float combinedChannelAverage   = 0f;

            Debug.Log("Starting sample processing...");
            for (int i = 0; i < allChannelsSamples.Length; i++)
            {
                combinedChannelAverage += allChannelsSamples[i];

                // Each time all channel samples for one point in time have been
                // accumulated, store the average of the channels combined.
                if ((i + 1) % numChannels == 0)
                {
                    preprocessedSamples[numberOfSamplesProcessed] = combinedChannelAverage / numChannels;
                    numberOfSamplesProcessed++;
                    combinedChannelAverage = 0f;
                }
            }

            int      specSampSize = 1024;
            int      iterations   = preprocessedSamples.Length / specSampSize;
            double[] sampleChunk  = new double[specSampSize];

            FFT fft = new FFT();
            fft.Initialize((System.UInt32)specSampSize);

            // PERF FIX: window coefficients and scale factor depend only on the
            // chunk size; previously they were recomputed (and a fresh array
            // allocated) on every loop iteration.
            double[] windowCoefs = DSP.Window.Coefficients(DSP.Window.Type.Hanning, (uint)specSampSize);
            double   scaleFactor = DSP.Window.ScaleFactor.Signal(windowCoefs);

            SpectralFluxAnalyzer preproAnalyzer = new SpectralFluxAnalyzer();

            for (int i = 0; i < iterations; ++i)
            {
                // Grab the current chunk of mono sample data.
                System.Array.Copy(preprocessedSamples, i * specSampSize, sampleChunk, 0, specSampSize);

                // Apply the Hanning window.
                double[] scaledSpectrumChunk = DSP.Math.Multiply(sampleChunk, windowCoefs);

                // Perform the FFT and convert the complex output to magnitudes.
                System.Numerics.Complex[] fftSpectrum = fft.Execute(scaledSpectrumChunk);
                double[] scaledFFTSpectrum            = DSP.ConvertComplex.ToMagnitude(fftSpectrum);
                scaledFFTSpectrum = DSP.Math.Multiply(scaledFFTSpectrum, scaleFactor);

                // These magnitudes correspond (roughly) to a single point in
                // the audio timeline.
                float currTime = getTimeFromIndex(i) * specSampSize;
                preproAnalyzer.analyzeSpectrum(System.Array.ConvertAll(scaledFFTSpectrum, x => (float)x), currTime);
            }

            // Rebuild the output curves from scratch for this analysis pass.
            importantMoments = new AnimationCurve();
            freqCurve        = new AnimationCurve();

            Debug.Log("Starting graph processing...");
            for (int i = 0; i < preproAnalyzer.spectralFluxSamples.Count; i++)
            {
                if (preproAnalyzer.spectralFluxSamples[i].isPeak)
                {
                    importantMoments.AddKey(preproAnalyzer.spectralFluxSamples[i].time, 1);
                    freqCurve.AddKey(preproAnalyzer.spectralFluxSamples[i].time, preproAnalyzer.spectralFluxSamples[i].spectralFlux);
                }
            }

            Debug.Log("Done!");
            Debug.Log(numberOfSamplesProcessed);
        }
    }
Example #7
0
    /// <summary>
    /// Not used currently; adapted from
    /// https://medium.com/giant-scam/algorithmic-beat-mapping-in-unity-preprocessed-audio-analysis-d41c339c135a
    /// (well visualised, but overly sensitive — doesn't seem to target lower
    /// frequencies as it should). Averages stereo samples to mono, runs a
    /// chunked FFT, and feeds each magnitude spectrum to the spectral flux
    /// analyzer.
    /// </summary>
    public void getFullSpectrumThreaded()
    {
        try
        {
            // We only need to retain the samples for combined channels over the
            // time domain. J- check here to see if needs doubling/halving.
            float[] preProcessedSamples = new float[this.samplesTotal];

            // NOTE(review): channel count is hard-coded to stereo here; named
            // constant makes the assumption explicit. TODO: derive from the clip.
            const int channelCount = 2;

            int   numProcessed           = 0;
            float combinedChannelAverage = 0f;
            for (int i = 0; i < CC.Length; i++)
            {
                combinedChannelAverage += CC[i];

                // Each time all channel samples for one point in time have been
                // accumulated, store the average of the channels combined.
                if ((i + 1) % channelCount == 0)
                {
                    preProcessedSamples[numProcessed] = combinedChannelAverage / channelCount;
                    numProcessed++;
                    combinedChannelAverage = 0f;
                }
            }

            Debug.Log("Combine Channels done");
            Debug.Log(preProcessedSamples.Length);

            // Once the audio sample data is prepared, execute an FFT to get
            // the spectrum data over the time domain.
            int spectrumSampleSize = 1024;
            int iterations         = preProcessedSamples.Length / spectrumSampleSize;

            FFT fft = new FFT();
            fft.Initialize((UInt32)spectrumSampleSize);

            // PERF FIX: window coefficients and scale factor depend only on the
            // chunk size; previously they were recomputed on every iteration.
            double[] windowCoefs = DSP.Window.Coefficients(DSP.Window.Type.Hanning, (uint)spectrumSampleSize);
            double   scaleFactor = DSP.Window.ScaleFactor.Signal(windowCoefs);

            Debug.Log(string.Format("Processing {0} time domain samples for FFT", iterations));
            double[] sampleChunk = new double[spectrumSampleSize];
            for (int i = 0; i < iterations; i++)
            {
                // Grab the current 1024-sample chunk of audio data.
                Array.Copy(preProcessedSamples, i * spectrumSampleSize, sampleChunk, 0, spectrumSampleSize);

                // Apply the chosen FFT window.
                double[] scaledSpectrumChunk = DSP.Math.Multiply(sampleChunk, windowCoefs);

                // Perform the FFT and convert the complex output to magnitudes.
                System.Numerics.Complex[] fftSpectrum = fft.Execute(scaledSpectrumChunk);
                double[] scaledFFTSpectrum            = DSPLib.DSP.ConvertComplex.ToMagnitude(fftSpectrum);
                scaledFFTSpectrum = DSP.Math.Multiply(scaledFFTSpectrum, scaleFactor);

                // These 1024 magnitude values correspond (roughly) to a single
                // point in the audio timeline.
                float curSongTime = getTimeFromIndex(i) * spectrumSampleSize;

                // Send the magnitudes to the spectral flux analyzer for peak detection.
                preProcessedSpectralFluxAnalyzer.analyzeSpectrum(Array.ConvertAll(scaledFFTSpectrum, x => (float)x), curSongTime);
            }

            Debug.Log("Spectrum Analysis done");
            Debug.Log("Background Thread Completed");
        }
        catch (Exception e)
        {
            // Catch exceptions here since the background thread won't always
            // surface the exception to the main thread.
            Debug.Log(e.ToString());
        }
    }
Example #8
0
    void Update()
    {
        // Real-time: sample the playing audio each frame, analyze spectral
        // flux, and refresh the live plot.
        if (realTimeSamples)
        {
            audioSource.GetSpectrumData(realTimeSpectrum, 0, FFTWindow.BlackmanHarris);
            realTimeSpectralFluxAnalyzer.analyzeSpectrum(realTimeSpectrum, audioSource.time);
            realTimePlotController.updatePlot(realTimeSpectralFluxAnalyzer.spectralFluxSamples);
        }

        // Preprocessed: map playback time to the matching 1024-sample FFT
        // window and spawn cubes on detected beats.
        if (preProcessSamples)
        {
            int indexToPlot = getIndexFromTime(audioSource.time) / 1024;

            // ROBUSTNESS: guard the index so a playback position past the last
            // analyzed window can't throw ArgumentOutOfRangeException.
            if (songProcessed && indexToPlot >= 0 && indexToPlot < preProcessedSpectralFluxAnalyzer.spectralFluxSamples.Count)
            {
                if (preProcessedSpectralFluxAnalyzer.spectralFluxSamples[indexToPlot].isPeak)
                {
                    // Alternate colors: a red spawn toggles to blue next, and
                    // vice versa; high/low lane is a coin flip.
                    float highOrLow = UnityEngine.Random.Range(0f, 1f);

                    if (spawnRed)
                    {
                        if (highOrLow > 0.5f)
                        {
                            redSpawnEngineLow.GetComponent<SpawnEngine>().onBeat = true;
                        }
                        else
                        {
                            redSpawnEngineHigh.GetComponent<SpawnEngine>().onBeat = true;
                        }
                        spawnRed = false;
                    }
                    else
                    {
                        if (highOrLow > 0.5f)
                        {
                            blueSpawnEngineLow.GetComponent<SpawnEngine>().onBeat = true;
                        }
                        else
                        {
                            blueSpawnEngineHigh.GetComponent<SpawnEngine>().onBeat = true;
                        }
                        // BUG FIX: the blue-high branch previously left spawnRed
                        // false, breaking the red/blue alternation that every
                        // other branch maintains; after a blue spawn the next
                        // beat should spawn red.
                        spawnRed = true;
                    }
                }
                else
                {
                    // No beat this frame: clear all spawn triggers.
                    blueSpawnEngineLow.GetComponent<SpawnEngine>().onBeat  = false;
                    redSpawnEngineLow.GetComponent<SpawnEngine>().onBeat   = false;
                    blueSpawnEngineHigh.GetComponent<SpawnEngine>().onBeat = false;
                    redSpawnEngineHigh.GetComponent<SpawnEngine>().onBeat  = false;
                }
            }
        }
    }