public static string DecodeKey(string resp)
{
    // Evaluate the bundled JavaScript and grab its decodeKey function (NiL.JS API)
    var context = new Context();
    context.Eval(JS);
    var decodeFunction = context.GetVariable("decodeKey").As<Function>();

    // Call the JS function with the raw response, then parse the comma-separated byte list
    string key = decodeFunction.Call(new Arguments { resp }).ToString();
    byte[] v = Array.ConvertAll(key.Trim(',').Split(','), s => (byte)int.Parse(s));

    // Base64-encode the byte sequence to produce the final key
    string realKey = Convert.ToBase64String(v);
    return realKey;
}
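For context, a minimal sketch of the pieces DecodeKey assumes, using NiL.JS (Context and Arguments from NiL.JS.Core, Function from NiL.JS.BaseLibrary). The JS constant must hold a script that defines decodeKey; the body below is a hypothetical stand-in for illustration, not the real script.

using System;
using NiL.JS.Core;
using NiL.JS.BaseLibrary;

// Hypothetical stand-in: the real JS constant would contain the actual decodeKey script.
// This identity version lets the flow be exercised end to end.
private const string JS = "function decodeKey(resp) { return resp; }";

// Usage with a made-up comma-separated response:
// DecodeKey("77,97,110") -> base64 of bytes { 77, 97, 110 } ("Man") -> "TWFu"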
public void getFullSpectrumThreaded()
{
    try
    {
        // We only need to retain the samples for combined channels over the time domain
        float[] preProcessedSamples = new float[this.numTotalSamples];

        int numProcessed = 0;
        float combinedChannelAverage = 0f;

        for (int i = 0; i < multiChannelSamples.Length; i++)
        {
            combinedChannelAverage += multiChannelSamples[i];

            // Each time we have processed all channel samples for a point in time, store the average of the combined channels
            if ((i + 1) % this.numChannels == 0)
            {
                preProcessedSamples[numProcessed] = combinedChannelAverage / this.numChannels;
                numProcessed++;
                combinedChannelAverage = 0f;
            }
        }

        Debug.Log("Combine Channels done");
        Debug.Log(preProcessedSamples.Length);

        // Once our audio sample data is prepared, we can execute an FFT to return the spectrum data over the time domain
        int spectrumSampleSize = 1024;
        int iterations = preProcessedSamples.Length / spectrumSampleSize;

        FFT fft = new FFT();
        fft.Initialize((System.UInt32)spectrumSampleSize);

        Debug.Log(string.Format("Processing {0} time domain samples for FFT", iterations));

        double[] sampleChunk = new double[spectrumSampleSize];
        for (int i = 0; i < iterations; i++)
        {
            // Grab the current 1024-sample chunk of audio data
            Array.Copy(preProcessedSamples, i * spectrumSampleSize, sampleChunk, 0, spectrumSampleSize);

            // Apply our chosen FFT window
            double[] windowCoefs = DSP.Window.Coefficients(DSP.Window.Type.Hanning, (uint)spectrumSampleSize);
            double[] scaledSpectrumChunk = DSP.Math.Multiply(sampleChunk, windowCoefs);
            double scaleFactor = DSP.Window.ScaleFactor.Signal(windowCoefs);

            // Perform the FFT and convert the complex output to magnitudes
            Complex[] fftSpectrum = fft.Execute(scaledSpectrumChunk);
            double[] scaledFFTSpectrum = DSP.ConvertComplex.ToMagnitude(fftSpectrum);
            scaledFFTSpectrum = DSP.Math.Multiply(scaledFFTSpectrum, scaleFactor);

            // These 1024 magnitude values correspond (roughly) to a single point in the audio timeline
            float curSongTime = getTimeFromIndex(i) * spectrumSampleSize;

            // Send our magnitude data off to our Spectral Flux Analyzer to be analyzed for peaks
            audPP.analyzeSpectrum(Array.ConvertAll(scaledFFTSpectrum, x => (float)x), curSongTime);
        }

        Debug.Log("Spectrum Analysis done");
        Debug.Log("Background Thread Completed");
        Debug.Log("Number of Points: " + audPP.spectralFluxSamples.Count);

        // Keep every 20th flux sample after the first two seconds as a beat time
        for (int i = 0; i < audPP.spectralFluxSamples.Count; i++)
        {
            if (audPP.spectralFluxSamples[i].time < 2)
            {
                continue;
            }
            if (i % 20 == 0)
            {
                times.Add(audPP.spectralFluxSamples[i].time);
            }
        }

        startSong = true;
    }
    catch (System.Exception e)
    {
        // Catch exceptions here since the background thread won't always surface the exception to the main thread
        Debug.Log(e.ToString());
    }
}
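The method leans on several members declared elsewhere in the class. A hedged sketch of those dependencies follows; the names are taken from the method body above, but the implementations are assumptions modeled on a typical Unity AudioClip setup (requires using System.Collections.Generic; for the list).

// Assumed fields; values would typically be captured on the main thread from an AudioClip,
// since Unity's audio API cannot be touched from a background thread.
private float[] multiChannelSamples;            // interleaved samples, e.g. from AudioClip.GetData
private int numChannels;                        // e.g. AudioClip.channels
private int numTotalSamples;                    // samples per channel
private int sampleRate;                         // e.g. AudioClip.frequency
private SpectralFluxAnalyzer audPP;             // must expose analyzeSpectrum() and spectralFluxSamples
private List<float> times = new List<float>();  // beat times consumed elsewhere
private volatile bool startSong;                // flag polled by the main thread

// Assumed helper: maps a per-channel sample index back to seconds into the clip.
private float getTimeFromIndex(int index)
{
    return (1f / (float)this.sampleRate) * index;
}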
public static byte[] ToByteArray(this string value, char separator)
{
    // Split on the separator and parse each token as an unsigned byte (0-255)
    return Array.ConvertAll(value.Split(separator), s => byte.Parse(s));
}
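A quick usage check (the method must live in a static class to be callable as an extension):

byte[] bytes = "10,20,30".ToByteArray(',');   // { 10, 20, 30 }

Note the contrast with DecodeKey above: byte.Parse throws an OverflowException on tokens outside 0-255, whereas (byte)int.Parse(s) silently wraps out-of-range values.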