// Use this for initialization
void Start()
{
    audioSource = GetComponent<AudioSource>();
    preProcessedSpectralFluxAnalyzer = new SpectralFluxAnalyzer();
    preProcessedPlotController = GameObject.Find("PreprocessedPlot").GetComponent<PlotController>();

    // Grab every sample of the clip up front. Stereo clips come back with the
    // channels interleaved: [L, R, L, R, L, R]
    var clip = audioSource.clip;
    multiChannelSamples = new float[clip.samples * clip.channels];
    numChannels = clip.channels;
    numTotalSamples = clip.samples;
    clipLength = clip.length;

    // The clip is analyzed offline rather than during playback, so we need the
    // clip's own sampling rate, not the output device's.
    sampleRate = clip.frequency;

    clip.GetData(multiChannelSamples, 0);

    // Run the heavy FFT pass off the main thread.
    var processSampleThread = new Thread(new ThreadStart(getFullSpectrumThreaded));
    processSampleThread.Start();
}
// Use this for initialization.
// Keep this method lean: anything initialized here must also be re-run
// whenever the clip is updated.
void Start()
{
    preProcessedSpectralFluxAnalyzer = new SpectralFluxAnalyzer();
    InitialiseFrequencyBounds();
    localAudioSource = GetComponent<AudioSource>();
    // Must run after the references above are assigned, otherwise clip
    // lookups inside it will fail.
    UpdateInternalVariables();
}
void Start()
{
    audioSource = GetComponent<AudioSource>();

    // Analyze the entire audio file ahead of time when preprocessing is enabled.
    if (preProcessSamples)
    {
        preProcessedSpectralFluxAnalyzer = new SpectralFluxAnalyzer();
        preProcessedSpectralFluxAnalyzer.thresholdMultiplier = Sensitivity;

        // Pull every sample out of the clip. Stereo data arrives with the
        // channels interleaved: [L, R, L, R, L, R]
        var clip = audioSource.clip;
        multiChannelSamples = new float[clip.samples * clip.channels];
        numChannels = clip.channels;
        numTotalSamples = clip.samples;
        clipLength = clip.length;

        // We are not evaluating the audio as Unity plays it, so use the
        // clip's own sampling rate.
        this.sampleRate = clip.frequency;

        clip.GetData(multiChannelSamples, 0);
        Debug.Log("GetData done");

        // Run the FFT pass on a background thread.
        var bgThread = new Thread(this.getFullSpectrumThreaded);
        Debug.Log("Starting Background Thread");
        bgThread.Start();
    }
}
/// <summary>
/// Attaches the freshly loaded clip to the audio source and, when a cached
/// spectrum file exists, restores the pre-processed flux analyzer from it.
/// </summary>
/// <param name="_loadedClip">Clip that has just finished loading/extraction.</param>
public void Enable(AudioClip _loadedClip)
{
    // Bail out while the serializer is still extracting the clip.
    if (!Serializer.IsExtratingClip && Serializer.ClipExtratedComplete)
    {
        audioSource.clip = _loadedClip;
        audioSourceLoaded = true;
        m_Slider.maxValue = audioSource.clip.length;
        m_Slider.value = 0;

        if (IsSpectrumCached())
        {
            try
            {
                using (FileStream file = File.OpenRead(spectrumCachePath + Serializer.CleanInput(Serializer.ChartData.AudioName + spc_ext)))
                {
                    // SECURITY NOTE(review): BinaryFormatter is unsafe on data that can be
                    // tampered with, and is removed in .NET 9. The cache format should
                    // migrate to a safe serializer (e.g. JSON); kept here only for
                    // compatibility with existing cache files on disk.
                    System.Runtime.Serialization.Formatters.Binary.BinaryFormatter bf = new System.Runtime.Serialization.Formatters.Binary.BinaryFormatter();
                    preProcessedSpectralFluxAnalyzer = (SpectralFluxAnalyzer)bf.Deserialize(file);
                }

                EndSpectralAnalyzer();
                Debug.Log("Spectrum loaded from cache");
            }
            catch (Exception ex)
            {
                // Fixed typo in the user-facing message ("dezerializing").
                Miku_DialogManager.ShowDialog(Miku_DialogManager.DialogType.Alert,
                    "Error while deserializing Spectrum " + ex.ToString());
                Debug.Log(ex.ToString());
            }
        }
        else
        {
            Debug.Log("Spectrum data not cached!");
        }
    }
}
/// <summary>
/// Runs a chunked FFT over the whole clip and feeds each magnitude spectrum
/// into the analyzer; invokes the callback when the clip has been consumed.
/// </summary>
/// <param name="analyzer">Receives one spectrum per FFT-sized chunk.</param>
/// <param name="clip">Thread-safe snapshot of the clip's samples and metadata.</param>
/// <param name="callback">Called with the analyzer once processing completes.</param>
private void ProcessFullSpectrum(
    SpectralFluxAnalyzer analyzer,
    ThreadSafeAudioClip clip,
    OnAudioClipProcessed callback)
{
    // Fold the interleaved channels down to a single mono signal.
    var processedSamples = GetChannelsCombined(clip.Samples, clip.ChannelCount, clip.SampleCount);
    Debug.Log("Channels have been combined");

    var iterations = processedSamples.Length / _fftSampleSize;
    var fft = new FFT();
    fft.Initialize(_fftSampleSize);

    // FIX: the Hamming window and its scale factor depend only on the FFT size,
    // so compute them once instead of re-allocating them on every iteration.
    var windowCoefficients = DSP.Window.Coefficients(DSP.Window.Type.Hamming, _fftSampleSize);
    var scaleFactor = DSP.Window.ScaleFactor.Signal(windowCoefficients);

    var chunk = new double[_fftSampleSize];
    for (var i = 0; i < iterations; ++i)
    {
        Array.Copy(processedSamples, i * _fftSampleSize, chunk, 0, _fftSampleSize);
        var scaledSpectrumChunk = DSP.Math.Multiply(chunk, windowCoefficients);

        // FFT -> magnitude spectrum, rescaled to undo the window's attenuation.
        var fftSpectrum = fft.Execute(scaledSpectrumChunk);
        var scaledFftSpectrum = DSP.ConvertComplex.ToMagnitude(fftSpectrum);
        scaledFftSpectrum = DSP.Math.Multiply(scaledFftSpectrum, scaleFactor);

        var currentSongTime = GetTimeFromIndex(i, clip.Frequency) * _fftSampleSize;
        analyzer.AnalyzeSpectrum(Array.ConvertAll(scaledFftSpectrum, x => (float)x), currentSongTime);
    }

    callback(analyzer);
}
// Use this for initialization
private void Start()
{
    objectPooler = ObjectPooler.Instance;
    _audioSource = GetComponent<AudioSource>();
    _audioProcessor = new AudioProcessor(1024);

    var clip = _audioSource.clip;
    var frequency = clip.frequency;

    // Kick off one background analysis per frequency band; each callback
    // stores its finished analyzer when the worker completes.

    // Bass
    _audioProcessor.ProcessClip(
        clip,
        new SpectralFluxAnalyzer(1024, frequency, 2000, 2499),
        analyzer => { _bassAnalyzer = analyzer; });

    // Midrange
    _audioProcessor.ProcessClip(
        clip,
        new SpectralFluxAnalyzer(1024, frequency, 2500, 4000),
        analyzer => { _midRangeAnalyzer = analyzer; });

    // High
    _audioProcessor.ProcessClip(
        clip,
        new SpectralFluxAnalyzer(1024, frequency, 4000, 20000),
        analyzer => { _highRangeAnalyzer = analyzer; });
}
public void processSignal(AudioSource audio)
{
    audioSource = audio;

    // Only do the up-front analysis when preprocessing is enabled.
    if (!preProcessSamples)
    {
        return;
    }

    preProcessedSpectralFluxAnalyzer = new SpectralFluxAnalyzer();

    // Copy every sample out of the clip. Stereo data arrives with the
    // channels interleaved: [L, R, L, R, L, R]
    var clip = audioSource.clip;
    numChannels = clip.channels;
    numTotalSamples = clip.samples;
    clipLength = clip.length;

    // Offline analysis: use the clip's own sampling rate, not the mixer's.
    this.sampleRate = clip.frequency;

    multiChannelSamples = new float[clip.samples * clip.channels];
    clip.GetData(multiChannelSamples, 0); // fills multiChannelSamples
    Debug.Log("GetData done");

    // Run the FFT pass on a background thread.
    Thread bgThread = new Thread(this.getFullSpectrumThreaded);
    Debug.Log("Starting Background Thread");
    bgThread.Start();
}
/// <summary>
/// Snapshots the clip into a thread-safe container and analyzes it on a
/// background thread, invoking the callback when done.
/// </summary>
/// <param name="clip">Clip to analyze (read on the calling/main thread).</param>
/// <param name="spectralFluxAnalyzer">Analyzer that accumulates the results.</param>
/// <param name="callback">Invoked with the analyzer once processing finishes.</param>
public void ProcessClip(AudioClip clip, SpectralFluxAnalyzer spectralFluxAnalyzer, OnAudioClipProcessed callback)
{
    // Copy the clip data into a plain container so the worker thread never
    // touches the Unity API.
    var threadClip = new ThreadSafeAudioClip(clip);
    _threadClip = threadClip;

    // FIX: capture the LOCAL, not the field. C# lambdas capture variables by
    // reference, so if ProcessClip is called again before this thread runs
    // (e.g. back-to-back calls for bass/mid/high bands), the field would
    // already point at a different clip by the time the lambda executes.
    var t = new Thread(() =>
    {
        ProcessFullSpectrum(
            spectralFluxAnalyzer,
            threadClip,
            callback);
    });
    t.Start();
}
// Caches clip metadata and raw interleaved sample data for offline beat analysis.
// spectrumSampleSize: FFT chunk size; thresholdWindowSize: analyzer window length.
private BeatAnalyzer(AudioClipData clipData, int spectrumSampleSize = 1024, int thresholdWindowSize = 50)
{
    this.spectrumSampleSize = spectrumSampleSize;
    this.thresholdWindowSize = thresholdWindowSize;
    fluxAnalyzer = new SpectralFluxAnalyzer(spectrumSampleSize, thresholdWindowSize);
    // Interleaved multi-channel buffer: [L, R, L, R, ...] for stereo.
    multiChannelSamples = new float[clipData.lengthSamples * clipData.channels];
    frequency = clipData.frequency;
    channels = clipData.channels;
    lengthSamples = clipData.lengthSamples;
    // Duration in seconds = sample frames / sample rate.
    clipLength = (float)clipData.lengthSamples / clipData.frequency;
    timePerSample = 1f / frequency;
    // NOTE(review): CopyTo's second argument is the DESTINATION start index.
    // Passing offsetSamples here looks like it may have been intended to skip
    // into the SOURCE data instead — confirm against AudioClipData's contract;
    // as written, a non-zero offset shifts the copy within multiChannelSamples
    // and can overflow the destination array.
    clipData.data.CopyTo(multiChannelSamples, clipData.offsetSamples);
}
void Start()
{
    audioSource = GetComponent<AudioSource>();

    // Spectrograph add-on wiring: both builders live on the same object, so
    // resolve it once.
    var spectographObject = GameObject.Find("Spectograph");
    spectrumBoy = spectographObject.GetComponent<SpectrumBuilder>();
    spectrumRealTime = spectographObject.GetComponent<RealtimeSpectrumBuilder>();
    backgroundThreadCompleted = false;
    threeDimensionalSpectrumBuild = false;

    // Live path: sample the output spectrum as the song plays.
    if (realTimeSamples)
    {
        realTimeSpectrum = new float[1024];
        realTimeSpectralFluxAnalyzer = new SpectralFluxAnalyzer();
        realTimePlotController = GameObject.Find("RealtimePlot").GetComponent<PlotController>();
        this.sampleRate = AudioSettings.outputSampleRate;
    }

    // Offline path: crunch the whole clip up front.
    if (preProcessSamples)
    {
        preProcessedSpectralFluxAnalyzer = new SpectralFluxAnalyzer();
        preProcessedPlotController = GameObject.Find("PreprocessedPlot").GetComponent<PlotController>();

        // Stereo clips return their samples with the channels interleaved:
        // [L, R, L, R, L, R]
        var clip = audioSource.clip;
        multiChannelSamples = new float[clip.samples * clip.channels];
        numChannels = clip.channels;
        numTotalSamples = clip.samples;
        clipLength = clip.length;

        // Not analyzing live output, so use the clip's own sampling rate.
        this.sampleRate = clip.frequency;

        clip.GetData(multiChannelSamples, 0); // fills multiChannelSamples
        Debug.Log("GetData done");

        Thread bgThread = new Thread(this.getFullSpectrumThreaded);
        Debug.Log("Starting Background Thread");
        bgThread.Start();
    }
}
void Start()
{
    audioSource = GetComponent<AudioSource>();

    // Cache the four cube spawners.
    redSpawnEngineLow = GameObject.Find("RedSpawner_Low");
    blueSpawnEngineLow = GameObject.Find("BlueSpawner_Low");
    redSpawnEngineHigh = GameObject.Find("RedSpawner_High");
    blueSpawnEngineHigh = GameObject.Find("BlueSpawner_High");

    spawnRed = true; // start with red cube
    songProcessed = false;

    // Live path: sample the output spectrum as the song plays.
    if (realTimeSamples)
    {
        realTimeSpectrum = new float[1024];
        realTimeSpectralFluxAnalyzer = new SpectralFluxAnalyzer();
        realTimePlotController = GameObject.Find("RealtimePlot").GetComponent<PlotController>();
        this.sampleRate = AudioSettings.outputSampleRate;
    }

    // Offline path: crunch the whole clip up front.
    if (preProcessSamples)
    {
        preProcessedSpectralFluxAnalyzer = new SpectralFluxAnalyzer();
        preProcessedPlotController = GameObject.Find("PreprocessedPlot").GetComponent<PlotController>();

        // Stereo clips return their samples with the channels interleaved:
        // [L, R, L, R, L, R]
        var clip = audioSource.clip;
        multiChannelSamples = new float[clip.samples * clip.channels];
        numChannels = clip.channels;
        numTotalSamples = clip.samples;
        clipLength = clip.length;

        // Not analyzing live output, so use the clip's own sampling rate.
        this.sampleRate = clip.frequency;

        clip.GetData(multiChannelSamples, 0);
        Debug.Log("GetData done");

        Thread bgThread = new Thread(this.getFullSpectrumThreaded);
        Debug.Log("Starting Background Thread");
        bgThread.Start();
    }
}
void Start()
{
    // Resolve the selected song and start playback immediately.
    SongObjectScript song = findSong();
    audioSource = song.GetAudioSource();
    Debug.Log(song.GetBPM());
    song.PlayAudio();

    // Live path: sample the output spectrum as the song plays.
    if (realTimeSamples)
    {
        realTimeSpectrum = new float[1024];
        realTimeSpectralFluxAnalyzer = new SpectralFluxAnalyzer();
        realTimePlotController = GameObject.Find("RealtimePlot").GetComponent<NoteGenerator>();
        this.sampleRate = AudioSettings.outputSampleRate;
    }

    // Offline path: crunch the whole clip up front.
    if (preProcessSamples)
    {
        preProcessedSpectralFluxAnalyzer = new SpectralFluxAnalyzer();
        preProcessedPlotController = GameObject.Find("PreprocessedPlot").GetComponent<NoteGenerator>();

        // Stereo clips return their samples with the channels interleaved:
        // [L, R, L, R, L, R]
        var clip = audioSource.clip;
        multiChannelSamples = new float[clip.samples * clip.channels];
        numChannels = clip.channels;
        numTotalSamples = clip.samples;
        clipLength = clip.length;

        // Not analyzing live output, so use the clip's own sampling rate.
        this.sampleRate = clip.frequency;

        clip.GetData(multiChannelSamples, 0);
        Debug.Log("GetData done");

        Thread bgThread = new Thread(this.getFullSpectrumThreaded);
        Debug.Log("Starting Background Thread");
        bgThread.Start();
    }
}
void Start()
{
    cubes = new Dictionary<int, GameObject>();
    rumbled = new Dictionary<int, GameObject>();

    // Locate the points readout among every TMP text in the scene.
    var textElements = FindObjectsOfType<TextMeshProUGUI>();
    pointsText = textElements.First(t => t.name == "Points");

    SetSongData();

    // Raw interleaved samples for every channel of the clip.
    multiChannelSamples = new float[source.clip.samples * source.clip.channels];
    source.clip.GetData(multiChannelSamples, 0);
    print("GetData done");

    FluxAnalyzer = new SpectralFluxAnalyzer(
        source.clip.channels,
        source.clip.samples,
        source.clip.frequency,
        source.clip.length,
        multiChannelSamples);

    // Run the FFT pass off the main thread.
    Thread bgThread = new Thread(FluxAnalyzer.GetFullSpectrumThreaded);
    Debug.Log("Starting Background Thread");
    bgThread.Start();
}
public void PrepareTrack(AudioClip clip, Action<float> callback)
{
    _mockSource.clip = clip;
    _hearableSource.clip = clip;
    _preProcessedSpectralFluxAnalyzer = new SpectralFluxAnalyzer();

    // Stereo clips return their samples with the channels interleaved:
    // [L, R, L, R, L, R]
    var loadedClip = _mockSource.clip;
    _multiChannelSamples = new float[loadedClip.samples * loadedClip.channels];
    _numChannels = loadedClip.channels;
    _numTotalSamples = loadedClip.samples;
    _clipLength = loadedClip.length;

    // Offline analysis, so take the clip's own sampling rate.
    this._sampleRate = loadedClip.frequency;

    loadedClip.GetData(_multiChannelSamples, 0);

    // Hand the progress callback through to the background FFT worker.
    var bgThread = new Thread(() => this.getFullSpectrumThreaded(callback));
    bgThread.Start();
}
private void SpectralFluxAnalysis()
{
    // Look up the song the player selected on the menu.
    Sound s = Array.Find(songs, song => song.name == PlayerPrefs.GetString("selectedSong"));

    preProcessedSpectralFluxAnalyzer = new SpectralFluxAnalyzer();

    var clip = s.source.clip;
    samplingRate = clip.frequency;
    channels = clip.channels;
    numOfSamples = clip.samples;
    clipLength = clip.length;
    Debug.Log("clip length: " + clipLength);

    // Sample data is stored per channel interleaved: [L, R, L, R, L, R, ...]
    samples = new float[clip.samples * clip.channels];
    clip.GetData(samples, 0);

    GetSpectrumData(samples);
    Debug.Log("samples length: " + samples.Length);
    Debug.Log("flux samples length: " + preProcessedSpectralFluxAnalyzer.spectralFluxSamples.Count);

    // Keep only the detected peaks, then reduce those to the strongest ones.
    List<SpectralFluxInfo> peakSamples = preProcessedSpectralFluxAnalyzer.spectralFluxSamples.FindAll(IsPeakSample);
    Debug.Log("Peak Samples Length: " + peakSamples.Count);

    GetPeakOfPeakSamples(peakSamples);
    Debug.Log("Peak of peak samples length: " + peakOfPeakSamples.Count);

    OutputOnsetJson();
}
/// <summary>
/// Offline analysis of the loaded sound file: downmixes to mono, runs a
/// chunked Hanning-windowed FFT through a spectral-flux analyzer, and bakes
/// the detected peaks into the importantMoments / freqCurve animation curves.
/// </summary>
void AnalyzeAudio()
{
    if (!soundFile)
    {
        return; // nothing loaded, nothing to analyze
    }

    // Downmix the interleaved channel samples (L,R,L,R,...) to mono:
    // each frame becomes (L+R)/2.
    float[] preprocessedSamples = new float[numSamples];
    int numberOfSamplesProcessed = 0;
    float combinedChannelAverage = 0f;
    Debug.Log("Starting sample processing...");
    for (int i = 0; i < allChannelsSamples.Length; i++)
    {
        combinedChannelAverage += allChannelsSamples[i];

        // Once every channel of the current frame has been summed, store the average.
        if ((i + 1) % numChannels == 0)
        {
            preprocessedSamples[numberOfSamplesProcessed] = combinedChannelAverage / numChannels;
            numberOfSamplesProcessed++;
            combinedChannelAverage = 0f;
        }
    }

    int specSampSize = 1024;
    int iterations = preprocessedSamples.Length / specSampSize;
    double[] sampleChunk = new double[specSampSize];
    FFT fft = new FFT();
    fft.Initialize((System.UInt32)specSampSize);
    SpectralFluxAnalyzer preproAnalyzer = new SpectralFluxAnalyzer();

    // FIX: the Hanning window and its scale factor depend only on the chunk
    // size, so compute them once instead of re-allocating them every iteration.
    double[] windowCoefs = DSP.Window.Coefficients(DSP.Window.Type.Hanning, (uint)specSampSize);
    double scaleFactor = DSP.Window.ScaleFactor.Signal(windowCoefs);

    for (int i = 0; i < iterations; ++i)
    {
        System.Array.Copy(preprocessedSamples, i * specSampSize, sampleChunk, 0, specSampSize);
        double[] scaledSpectrumChunk = DSP.Math.Multiply(sampleChunk, windowCoefs);

        // FFT -> magnitude spectrum, rescaled to undo the window's attenuation.
        System.Numerics.Complex[] fftSpectrum = fft.Execute(scaledSpectrumChunk);
        double[] scaledFFTSpectrum = DSP.ConvertComplex.ToMagnitude(fftSpectrum);
        scaledFFTSpectrum = DSP.Math.Multiply(scaledFFTSpectrum, scaleFactor);

        float currTime = getTimeFromIndex(i) * specSampSize;
        preproAnalyzer.analyzeSpectrum(System.Array.ConvertAll(scaledFFTSpectrum, x => (float)x), currTime);
    }

    // Rebuild the animation curves from the detected peaks.
    importantMoments = new AnimationCurve();
    freqCurve = new AnimationCurve();
    Debug.Log("Starting graph processing...");
    for (int i = 0; i < preproAnalyzer.spectralFluxSamples.Count; i++)
    {
        if (preproAnalyzer.spectralFluxSamples[i].isPeak)
        {
            importantMoments.AddKey(preproAnalyzer.spectralFluxSamples[i].time, 1);
            freqCurve.AddKey(preproAnalyzer.spectralFluxSamples[i].time, preproAnalyzer.spectralFluxSamples[i].spectralFlux);
        }
    }
    Debug.Log("Done!");
    Debug.Log(numberOfSamplesProcessed);
}