Example #1

        private void AddSample(float[] samples, int offset, int channels)
        {
            // mono source - copy to both channels
            if (channels == 1)
            {
                fft[0].Add(samples[offset]);
                fft[1].Add(samples[offset]);

                fft2.Add(samples[offset]);
            }
            else
            {
                // stereo source - feed each channel FFT its own sample,
                // and the combined FFT the average (mono mix) of both channels
                fft[0].Add(samples[offset]);
                fft[1].Add(samples[offset + 1]);

                fft2.Add((samples[offset] + samples[offset + 1]) * 0.5f);
            }

            sampleCounter++;

            // after a full frame interval's worth of samples, generate and publish the spectra
            if (sampleCounter > frameInterval)
            {
                // combined spectrum buffer: SPECTRUMRES bins for each of the MAXCHANNELS channels
                float[] f = new float[Globals.SPECTRUMRES * MAXCHANNELS];

                for (int i = 0; i < MAXCHANNELS; i++)
                {
                    fft[i].GenerateTo(f, i, Globals.SPECTRUMRES, MAXCHANNELS);
                }

                // secondary spectrum generated from the mono mix fed to fft2 above
                float[] f2 = new float[Globals.SPECTRUM2RES];
                fft2.GenerateTo(f2, 0, Globals.SPECTRUM2RES);

                // package both spectra and an empty data buffer into a single analysis sample
                var analysisSample = new AudioAnalysisSample(f, f2, new float[Globals.AUDIODATASIZE], frameInterval /*, analyser.OutputNames*/);

                analyser.Process(analysisSample);

                // notify listeners that a new spectrum frame is ready
                SpectrumReady?.Invoke(this, new FftEventArgs(analysisSample));

                sampleCounter = 0;
            }
        }
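
AddSample expects one interleaved frame per call: offset points at the first channel's sample of that frame, and the stereo branch also reads samples[offset + 1]. A minimal driver sketch under that assumption is shown below; the method name OnSamplesAvailable and its parameters are illustrative and not part of the original source.

        // Hypothetical caller: "buffer" holds interleaved float samples, "samplesRead" is the
        // number of valid floats in it, "channels" is 1 (mono) or 2 (stereo).
        private void OnSamplesAvailable(float[] buffer, int samplesRead, int channels)
        {
            // step one whole frame (all channels) at a time so AddSample can safely read
            // samples[offset] and, for stereo, samples[offset + 1]
            for (int offset = 0; offset + channels <= samplesRead; offset += channels)
            {
                AddSample(buffer, offset, channels);
            }
        }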
Example #2
 private void Spectrum_SpectrumReady(object sender, FftEventArgs e)
 {
     // re-raise the inner spectrum provider's event on this class, unchanged
     SpectrumReady?.Invoke(sender, e);
 }
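
Example #2 is a plain event-forwarding handler: a wrapper class subscribes to an inner spectrum provider and re-raises the event under its own SpectrumReady, so consumers only need a reference to the wrapper. A minimal sketch of that wiring, assuming hypothetical AudioEngine and SpectrumProvider type names that do not appear in the examples above:

 // Hypothetical wrapper; only FftEventArgs comes from the examples above, while
 // AudioEngine and SpectrumProvider are assumed names. Requires using System;
 public class AudioEngine
 {
     public event EventHandler<FftEventArgs> SpectrumReady;

     public AudioEngine(SpectrumProvider spectrum)
     {
         // bubble the inner provider's event up to this class's subscribers
         spectrum.SpectrumReady += Spectrum_SpectrumReady;
     }

     private void Spectrum_SpectrumReady(object sender, FftEventArgs e)
     {
         SpectrumReady?.Invoke(sender, e);
     }
 }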