        /// <summary>
        /// Create a new sound by changing the speed/pitch of the old sound. 2.0 = an octave up, 1.0 = the same, 0.5 = down an octave
        /// </summary>
        public static WaveAudio ScalePitchAndDuration(WaveAudio w, double factor)
        {
            if (factor < 0) throw new ArgumentException("Factor must be >= 0");
            WaveAudio res = new WaveAudio(w.getSampleRate(), w.getNumChannels());

            // do operation for all channels
            for (int i = 0; i < w.getNumChannels(); i++)
                res.data[i] = scalePitchAndDurationChannel(w.data[i], factor);

            return res;
        }
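A minimal usage sketch (it assumes this code sits in the same class; the file name is a placeholder, and WaveAudio's file constructor and SaveWaveFile appear in the I/O tests later in this listing):

        // Hypothetical usage: load a clip, shift it up an octave, and write a 16-bit file.
        public static void ScalePitchAndDurationDemo()
        {
            WaveAudio source = new WaveAudio("drum.wav");              // placeholder path
            WaveAudio octaveUp = ScalePitchAndDuration(source, 2.0);   // twice the pitch, half the length
            octaveUp.SaveWaveFile("drum_octaveup.wav", 16);            // second argument is bit depth
        }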
Example #2
 public static WaveAudio hiPassFilter(WaveAudio w, double factor) // e.g. factor = 0.5
 {
     WaveAudio ret = new WaveAudio(w.getSampleRate(), w.getNumChannels());
     ret.LengthInSamples = w.LengthInSamples;
     for (int ch = 0; ch < w.getNumChannels(); ch++)
     {
         ret.data[ch][0] = w.data[ch][0];
         for (int i = 1; i < ret.data[ch].Length; i++)
              ret.data[ch][i] = factor * ret.data[ch][i - 1] + factor * (w.data[ch][i] - w.data[ch][i - 1]);
     }
     return ret;
 }
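The recurrence above is the standard one-pole RC high-pass, y[i] = factor * (y[i-1] + x[i] - x[i-1]). A hedged sketch of how `factor` could be derived from a cutoff frequency (this helper is not part of the original code):

        // Hypothetical helper: for an RC high-pass, factor = RC / (RC + dt),
        // with RC = 1 / (2*pi*cutoffHz) and dt = 1 / sampleRate.
        private static double highPassFactorFromCutoff(double cutoffHz, int sampleRate)
        {
            double rc = 1.0 / (2.0 * Math.PI * cutoffHz);
            double dt = 1.0 / sampleRate;
            return rc / (rc + dt);
        }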
Example #3
        //could also get continuous with window.


        public static WaveAudio lowPassFilter(WaveAudio w, double factor) // e.g. factor = 0.5
        {
            //http://en.wikipedia.org/wiki/Low-pass_filter
            WaveAudio ret = new WaveAudio(w.getSampleRate(), w.getNumChannels());
            ret.LengthInSamples = w.LengthInSamples;
            for (int ch = 0; ch < w.getNumChannels(); ch++)
            {
                ret.data[ch][0] = w.data[ch][0];
                for (int i = 1; i < ret.data[ch].Length; i++)
                    ret.data[ch][i] = (1 - factor) * ret.data[ch][i - 1] + factor * w.data[ch][i];
            }
            return ret;
        }
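The low-pass recurrence y[i] = (1 - factor) * y[i-1] + factor * x[i] is plain exponential smoothing; per the discretization on the Wikipedia page cited above, factor = dt / (RC + dt). A sketch of the corresponding mapping from cutoff frequency to factor (hypothetical helper, not in the original code):

        // Hypothetical helper: factor = dt / (RC + dt), with RC = 1 / (2*pi*cutoffHz).
        private static double lowPassFactorFromCutoff(double cutoffHz, int sampleRate)
        {
            double rc = 1.0 / (2.0 * Math.PI * cutoffHz);
            double dt = 1.0 / sampleRate;
            return dt / (rc + dt);
        }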
Example #4
        public static WaveAudio hiPassFilter(WaveAudio w, double factor) // e.g. factor = 0.5
        {
            WaveAudio ret = new WaveAudio(w.getSampleRate(), w.getNumChannels());

            ret.LengthInSamples = w.LengthInSamples;
            for (int ch = 0; ch < w.getNumChannels(); ch++)
            {
                ret.data[ch][0] = w.data[ch][0];
                for (int i = 1; i < ret.data[ch].Length; i++)
                {
                    ret.data[ch][i] = factor * ret.data[ch][i - 1] + (factor) * (w.data[ch][i] - w.data[ch][i - 1]);
                }
            }
            return(ret);
        }
Example #5
        public static WaveAudio Tremolo(WaveAudio w, double tremfreq, double amp)
        {
            WaveAudio res = new WaveAudio(w.getSampleRate(), w.getNumChannels());

            res.LengthInSamples = w.LengthInSamples;
            double tremoloFreqScale = 2.0 * Math.PI * tremfreq / (double)w.getSampleRate();

            for (int ch = 0; ch < w.data.Length; ch++)
            {
                for (int i = 0; i < w.data[ch].Length; i++)
                {
                    double val = w.data[ch][i] * (1 + amp * Math.Sin(tremoloFreqScale * i));
                    if (val > 1.0)
                    {
                        val = 1.0;
                    }
                    else if (val < -1.0)
                    {
                        val = -1.0;
                    }
                    res.data[ch][i] = val;
                }
            }
            return(res);
        }
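A usage sketch for Tremolo, assuming an AudioPlayer like the one used in the pitch-detection test near the end of this listing; the file name and settings are placeholders:

        // Hypothetical usage: 5 Hz tremolo at 30% depth, played after the original for comparison.
        public static void TremoloDemo(AudioPlayer pl)
        {
            WaveAudio w = new WaveAudio("voice.wav");    // placeholder path
            WaveAudio shaky = Tremolo(w, 5.0, 0.3);
            pl.Play(w);
            pl.Play(shaky);
        }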
Example #6
        public static WaveAudio Vibrato(WaveAudio wave, double freq, double width)
        {
            if (width < 0)
            {
                throw new ArgumentException("Width must be >= 0");
            }
            WaveAudio newwave = new WaveAudio(wave.getSampleRate(), wave.getNumChannels());

            // do operation for all channels
            for (int i = 0; i < wave.getNumChannels(); i++)
            {
                newwave.data[i] = vibratoChannel(wave.data[i], wave.getSampleRate(), width, freq);
            }

            return(newwave);
        }
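The per-channel helper vibratoChannel is not shown in this listing. Below is a minimal sketch of one plausible implementation, assuming width is a modulation depth in samples (the original may scale it differently):

        // Sketch only: read the input at a position that oscillates around i,
        // with linear interpolation between neighbouring samples.
        private static double[] vibratoChannelSketch(double[] samples, int sampleRate, double width, double freq)
        {
            double[] res = new double[samples.Length];
            double phaseScale = 2.0 * Math.PI * freq / (double)sampleRate;
            for (int i = 0; i < samples.Length; i++)
            {
                double pos = i + width * Math.Sin(phaseScale * i);
                int i0 = (int)Math.Floor(pos);
                int i1 = i0 + 1;
                double frac = pos - i0;
                double s0 = (i0 >= 0 && i0 < samples.Length) ? samples[i0] : 0.0;
                double s1 = (i1 >= 0 && i1 < samples.Length) ? samples[i1] : 0.0;
                res[i] = (1 - frac) * s0 + frac * s1;
            }
            return res;
        }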
Example #7
        /// <summary>
        /// Create a new sound by changing the speed/pitch of the old sound. 2.0 = an octave up, 1.0 = the same, 0.5 = down an octave
        /// </summary>
        public static WaveAudio ScalePitchAndDuration(WaveAudio w, double factor)
        {
            if (factor < 0)
            {
                throw new ArgumentException("Factor must be >= 0");
            }
            WaveAudio res = new WaveAudio(w.getSampleRate(), w.getNumChannels());

            // do operation for all channels
            for (int i = 0; i < w.getNumChannels(); i++)
            {
                res.data[i] = scalePitchAndDurationChannel(w.data[i], factor);
            }

            return(res);
        }
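The per-channel helper scalePitchAndDurationChannel is also not shown here. A hedged sketch of a simple linear-interpolation resampler that matches the documented behaviour (factor = 2.0 reads the input twice as fast, giving double the pitch and half the length); the real implementation may differ:

        // Sketch only: resample by stepping through the input at 'factor' samples per output sample.
        // Assumes factor > 0.
        private static double[] scalePitchAndDurationChannelSketch(double[] samples, double factor)
        {
            int newLength = (int)(samples.Length / factor);
            double[] res = new double[newLength];
            for (int i = 0; i < newLength; i++)
            {
                double pos = i * factor;
                int i0 = (int)pos;
                int i1 = Math.Min(i0 + 1, samples.Length - 1);
                double frac = pos - i0;
                res[i] = (1 - frac) * samples[i0] + frac * samples[i1];
            }
            return res;
        }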
Example #8
        //could also get continuous with window.


        public static WaveAudio lowPassFilter(WaveAudio w, double factor) // e.g. factor = 0.5
        {
            //http://en.wikipedia.org/wiki/Low-pass_filter
            WaveAudio ret = new WaveAudio(w.getSampleRate(), w.getNumChannels());

            ret.LengthInSamples = w.LengthInSamples;
            for (int ch = 0; ch < w.getNumChannels(); ch++)
            {
                ret.data[ch][0] = w.data[ch][0];
                for (int i = 1; i < ret.data[ch].Length; i++)
                {
                    ret.data[ch][i] = (1 - factor) * ret.data[ch][i - 1] + (factor) * w.data[ch][i];
                }
            }
            return(ret);
        }
        public static double[][] SpectrumContentOverTime(WaveAudio w, int nBins, int nSize)
        {
            if (w.getNumChannels() != 1)
            {
                throw new Exception("Only mono supported.");
            }
            if (!isPowerOfTwo((uint)nSize))
            {
                throw new Exception("Size must be power of 2.");
            }

            int nDatapoints = w.LengthInSamples / nSize - 1;

            double[][] res    = new double[nDatapoints][];
            double[]   buffer = new double[nSize];

            for (int i = 0; i < nDatapoints; i++)
            {
                // get samples from this slice of time. Put it into the buffer
                Array.Copy(w.data[0], i * nSize, buffer, 0, nSize);

                res[i] = getSpectrumContent(buffer, nBins);
            }
            return(res);
        }
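A usage sketch that turns SpectrumContentOverTime into a crude text spectrogram; the bin count, window size, and threshold are arbitrary choices:

        // Hypothetical usage: one row per 1024-sample window, one column per frequency bin.
        public static void PrintSpectrogram(WaveAudio mono)
        {
            double[][] frames = SpectrumContentOverTime(mono, 8, 1024);
            foreach (double[] frame in frames)
            {
                foreach (double energy in frame)
                    Console.Write(energy > 0.1 ? '#' : '.');   // arbitrary threshold
                Console.WriteLine();
            }
        }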
        // The following create new audio without modifying original
        public static WaveAudio Concatenate(WaveAudio w1, WaveAudio w2)
        {
            // make sure sample rates match; we could be nicer and convert automatically
            if (w1.m_currentSampleRate != w2.m_currentSampleRate) throw new Exception("Sample rates don't match");
            if (w1.getNumChannels() != w2.getNumChannels()) throw new Exception("Number of channels don't match");

            WaveAudio newwave = new WaveAudio(w1.getSampleRate(), w1.getNumChannels());
            newwave.LengthInSamples = w1.LengthInSamples + w2.LengthInSamples;

            for (int ch = 0; ch < w1.getNumChannels(); ch++)
            {
                //          source    sIndex, destination, destIndex,  length
                Array.Copy(w1.data[ch], 0, newwave.data[ch], 0, w1.data[ch].Length);
                Array.Copy(w2.data[ch], 0, newwave.data[ch], w1.data[ch].Length, w2.data[ch].Length);
            }
            return newwave;
        }
 public static WaveAudio GetSliceSample(WaveAudio wthis, int nStart, int nEnd)
 {
     WaveAudio slice = new WaveAudio(wthis.getSampleRate(), wthis.getNumChannels());
     if (nEnd <= nStart || nEnd > wthis.LengthInSamples || nStart < 0) throw new Exception("Invalid slice");
     for (int ch = 0; ch < slice.data.Length; ch++)
     {
         slice.data[ch] = new double[nEnd - nStart];
         Array.Copy(wthis.data[ch], nStart, slice.data[ch], 0, nEnd - nStart);
     }
     return slice;
 }
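A usage sketch combining GetSliceSample and Concatenate; it assumes the clip is at least one second long (LengthInSamples >= sample rate):

        // Hypothetical usage: loop the first second of a clip a few times.
        public static WaveAudio LoopFirstSecond(WaveAudio w, int repeats)
        {
            WaveAudio firstSecond = GetSliceSample(w, 0, w.getSampleRate());   // one second of samples
            WaveAudio res = firstSecond;
            for (int i = 1; i < repeats; i++)
                res = Concatenate(res, firstSecond);
            return res;
        }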
        // These work by shifting the signal until it seems to correlate with itself.
        // In other words, if the signal looks very similar to (signal shifted 200 samples), then the fundamental period is probably 200 samples.
        // Note that the algorithm only works well when there's only one prominent fundamental.
        // This could be optimized by looking at the rate of change to determine a maximum without testing all periods.
        private static double[] detectPitchCalculation(WaveAudio w, double minHz, double maxHz, int nCandidates, int nResolution, PitchDetectAlgorithm algorithm)
        {
            // note that higher frequency means lower period
            int nLowPeriodInSamples = hzToPeriodInSamples(maxHz, w.getSampleRate());
            int nHiPeriodInSamples = hzToPeriodInSamples(minHz, w.getSampleRate());
            if (nHiPeriodInSamples <= nLowPeriodInSamples) throw new Exception("Bad range for pitch detection.");
            if (w.getNumChannels() != 1) throw new Exception("Only mono supported.");
            double[] samples = w.data[0];
            if (samples.Length < nHiPeriodInSamples) throw new Exception("Not enough samples.");

            // both algorithms work in a similar way
            // they yield an array of data, and then we find the index at which the value is highest.
            double[] results = new double[nHiPeriodInSamples - nLowPeriodInSamples];

            if (algorithm == PitchDetectAlgorithm.Amdf)
            {
                for (int period = nLowPeriodInSamples; period < nHiPeriodInSamples; period += nResolution)
                {
                    double sum = 0;
                    // for each sample, see how close it is to a sample n away. Then sum these.
                    for (int i = 0; i < samples.Length - period; i++)
                        sum += Math.Abs(samples[i] - samples[i + period]);

                    double mean = sum / (double)samples.Length;
                    mean *= -1; //somewhat of a hack. We are trying to find the minimum value, but our findBestCandidates finds the max. value.
                    results[period - nLowPeriodInSamples] = mean;
                }
            }
            else if (algorithm == PitchDetectAlgorithm.Autocorrelation)
            {
                for (int period = nLowPeriodInSamples; period < nHiPeriodInSamples; period += nResolution)
                {
                    double sum = 0;
                    // for each sample, find correlation. (If they are far apart, small)
                    for (int i = 0; i < samples.Length - period; i++)
                        sum += samples[i] * samples[i + period];

                    double mean = sum / (double)samples.Length;
                    results[period - nLowPeriodInSamples] = mean;
                }
            }

            // find the best indices
            int[] bestIndices = findBestCandidates(nCandidates, ref results); //note findBestCandidates modifies parameter
            // convert back to Hz
            double[] res = new double[nCandidates];
            for (int i = 0; i < nCandidates; i++)
                res[i] = periodInSamplesToHz(bestIndices[i] + nLowPeriodInSamples, w.getSampleRate());
            return res;
        }
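The conversion helpers hzToPeriodInSamples and periodInSamplesToHz are not shown in this listing; they presumably just exchange frequency and period, since a tone at f Hz repeats every sampleRate / f samples. Sketches (rounding may differ in the originals):

        // Sketch only: frequency <-> period conversions assumed by detectPitchCalculation.
        private static int hzToPeriodInSamplesSketch(double hz, int sampleRate)
        {
            return (int)(sampleRate / hz);
        }
        private static double periodInSamplesToHzSketch(int periodInSamples, int sampleRate)
        {
            return sampleRate / (double)periodInSamples;
        }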
Example #13
        // The following create new audio without modifying original
        public static WaveAudio Concatenate(WaveAudio w1, WaveAudio w2)
        {
            // make sure sample rates match; we could be nicer and convert automatically
            if (w1.m_currentSampleRate != w2.m_currentSampleRate)
            {
                throw new Exception("Sample rates don't match");
            }
            if (w1.getNumChannels() != w2.getNumChannels())
            {
                throw new Exception("Number of channels don't match");
            }

            WaveAudio newwave = new WaveAudio(w1.getSampleRate(), w1.getNumChannels());

            newwave.LengthInSamples = w1.LengthInSamples + w2.LengthInSamples;

            for (int ch = 0; ch < w1.getNumChannels(); ch++)
            {
                //          source    sIndex, destination, destIndex,  length
                Array.Copy(w1.data[ch], 0, newwave.data[ch], 0, w1.data[ch].Length);
                Array.Copy(w2.data[ch], 0, newwave.data[ch], w1.data[ch].Length, w2.data[ch].Length);
            }
            return(newwave);
        }
Example #14
        public static WaveAudio GetSliceSample(WaveAudio wthis, int nStart, int nEnd)
        {
            WaveAudio slice = new WaveAudio(wthis.getSampleRate(), wthis.getNumChannels());

            if (nEnd <= nStart || nEnd > wthis.LengthInSamples || nStart < 0)
            {
                throw new Exception("Invalid slice");
            }
            for (int ch = 0; ch < slice.data.Length; ch++)
            {
                slice.data[ch] = new double[nEnd - nStart];
                Array.Copy(wthis.data[ch], nStart, slice.data[ch], 0, nEnd - nStart);
            }
            return(slice);
        }
Example #15
        /// <summary>
        /// Find spectrum content of signal. Returns array, where each element represents energy at frequencies.
        /// For example, SpectrumContent(w, 8) returns 8 numbers. The first in the array is the amount of energy at low frequencies, and the last is the energy at highest frequencies.
        /// Note that FFT uses a power of 2 samples, and so all of the signal may not be used.
        /// </summary>
        /// <param name="w">Sound</param>
        /// <param name="nBins">Number of bins to return.</param>
        public static double[] SpectrumContent(WaveAudio w, int nBins)
        {
            if (w.getNumChannels() != 1) throw new Exception("Only mono supported.");
            double[] buffer;

            // FFT uses a power of 2 samples, so we might have to truncate.
            if (!isPowerOfTwo((uint) w.LengthInSamples))
            {
                int nSize = (int)findNearestPowerOfTwo((uint) w.LengthInSamples);
                buffer = new double[nSize];
                Array.Copy(w.data[0], buffer, nSize);
            }
            else 
                buffer = w.data[0];
            return getSpectrumContent(buffer, nBins);
        }
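The power-of-two helpers used here are not shown either. Sketches under the assumption that findNearestPowerOfTwo rounds down (it must, since its result is used as a copy length out of the original buffer):

        // Sketch only: n is a power of two iff it has exactly one bit set.
        private static bool isPowerOfTwoSketch(uint n)
        {
            return n != 0 && (n & (n - 1)) == 0;
        }
        // Sketch only: largest power of two that is <= n (assumes n >= 1).
        private static uint findNearestPowerOfTwoSketch(uint n)
        {
            uint p = 1;
            while (p <= n / 2)
                p *= 2;
            return p;
        }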
Example #16
        public static double[][] SpectrumContentOverTime(WaveAudio w, int nBins, int nSize)
        {
            if (w.getNumChannels() != 1) throw new Exception("Only mono supported.");
            if (!isPowerOfTwo((uint) nSize)) throw new Exception("Size must be power of 2.");

            int nDatapoints = w.LengthInSamples / nSize - 1;
            double[][] res = new double[nDatapoints][];
            double[] buffer = new double[nSize];

            for (int i = 0; i < nDatapoints; i++)
            {
                // get samples from this slice of time. Put it into the buffer
                Array.Copy(w.data[0], i * nSize, buffer, 0, nSize);

                res[i] = getSpectrumContent(buffer, nBins);
            }
            return res;
        }
Example #17
        public static WaveAudio Phaser(WaveAudio wOriginal, double freq, double fb, int depth, int stages, int drywet)
        {
            double startphaseleft  = 0;
            double startphaseright = startphaseleft + Math.PI; //note that left and right channels should start pi out of phase
            double freq_scaled     = 2.0 * Math.PI * freq / (double)wOriginal.getSampleRate();

            WaveAudio w = wOriginal.Clone();

            if (w.getNumChannels() == 1)
            {
                effect_phaseraud_impl(w.data[0], freq_scaled, startphaseleft, fb, depth, stages, drywet);
            }
            else
            {
                effect_phaseraud_impl(w.data[0], freq_scaled, startphaseleft, fb, depth, stages, drywet);
                effect_phaseraud_impl(w.data[1], freq_scaled, startphaseright, fb, depth, stages, drywet);
            }
            return(w);
        }
Example #18
        public static WaveAudio Wahwah(WaveAudio wOriginal, double freq, double depth, double freqofs, double res)
        {
            double startphaseleft  = 0;
            double startphaseright = startphaseleft + Math.PI; //note that left and right channels should start pi out of phase
            double freq_scaled     = 2.0 * Math.PI * freq / (double)wOriginal.getSampleRate();

            WaveAudio w = wOriginal.Clone();

            if (w.getNumChannels() == 1)
            {
                effect_wahwahaud_impl(w.data[0], startphaseleft, freq_scaled, depth, freqofs, res);
            }
            else
            {
                effect_wahwahaud_impl(w.data[0], startphaseleft, freq_scaled, depth, freqofs, res);
                effect_wahwahaud_impl(w.data[1], startphaseright, freq_scaled, depth, freqofs, res);
            }
            return(w);
        }
Example #19
        public static WaveAudio Derivative(WaveAudio wOriginal)
        {
            WaveAudio w = wOriginal.Clone();

            for (int ch = 0; ch < w.getNumChannels(); ch++)
            {
                for (int i = 0; i < w.data[ch].Length; i++)
                {
                    if (i + 1 < w.data[ch].Length)
                    {
                        w.data[ch][i] = w.data[ch][i] - w.data[ch][i + 1];
                    }
                    else
                    {
                        w.data[ch][i] = w.data[ch][i] - 0;
                    }
                }
            }
            return(w);
        }
        public static void propertytests()
        {
            // these aren't the best tests.
            WaveAudio w1 = new WaveAudio(44100, 2);
            asserteq(w1.data.Length, 2, "channels");
            asserteq(w1.getNumChannels(), 2, "channels");
            assert(w1.data[0] != null && w1.data[1] != null, "channels");
            assert(w1.data[0].Length == 1 && w1.data[1].Length == 1, "004");
            asserteq(w1.getSampleRate(), 44100, "005");

            WaveAudio w1m = new WaveAudio(22050, 1);
            asserteq(w1m.data.Length, 1, "channels");
            assert(w1m.data[0] != null, "channels");
            asserteq(w1m.data[0].Length, 1, "004");
            asserteq(w1m.getSampleRate(), 22050, "005");

            // now set some properties
            w1m.LengthInSamples = 100;
            asserteq(w1m.data[0].Length, 100);
            asserteqf(w1m.LengthInSeconds, 100 / (double)w1m.getSampleRate(), 0.001);
        }
        /// <summary>
        /// Find spectrum content of signal. Returns array, where each element represents energy at frequencies.
        /// For example, SpectrumContent(w, 8) returns 8 numbers. The first in the array is the amount of energy at low frequencies, and the last is the energy at highest frequencies.
        /// Note that FFT uses a power of 2 samples, and so all of the signal may not be used.
        /// </summary>
        /// <param name="w">Sound</param>
        /// <param name="nBins">Number of bins to return.</param>
        public static double[] SpectrumContent(WaveAudio w, int nBins)
        {
            if (w.getNumChannels() != 1)
            {
                throw new Exception("Only mono supported.");
            }
            double[] buffer;

            // FFT uses a power of 2 samples, so we might have to truncate.
            if (!isPowerOfTwo((uint)w.LengthInSamples))
            {
                int nSize = (int)findNearestPowerOfTwo((uint)w.LengthInSamples);
                buffer = new double[nSize];
                Array.Copy(w.data[0], buffer, nSize);
            }
            else
            {
                buffer = w.data[0];
            }
            return(getSpectrumContent(buffer, nBins));
        }
        // These work by shifting the signal until it seems to correlate with itself.
        // In other words, if the signal looks very similar to (signal shifted 200 samples), then the fundamental period is probably 200 samples.
        // Note that the algorithm only works well when there's only one prominent fundamental.
        // This could be optimized by looking at the rate of change to determine a maximum without testing all periods.
        private static double[] detectPitchCalculation(WaveAudio w, double minHz, double maxHz, int nCandidates, int nResolution, PitchDetectAlgorithm algorithm)
        {
            // note that higher frequency means lower period
            int nLowPeriodInSamples = hzToPeriodInSamples(maxHz, w.getSampleRate());
            int nHiPeriodInSamples  = hzToPeriodInSamples(minHz, w.getSampleRate());

            if (nHiPeriodInSamples <= nLowPeriodInSamples)
            {
                throw new Exception("Bad range for pitch detection.");
            }
            if (w.getNumChannels() != 1)
            {
                throw new Exception("Only mono supported.");
            }
            double[] samples = w.data[0];
            if (samples.Length < nHiPeriodInSamples)
            {
                throw new Exception("Not enough samples.");
            }

            // both algorithms work in a similar way
            // they yield an array of data, and then we find the index at which the value is highest.
            double[] results = new double[nHiPeriodInSamples - nLowPeriodInSamples];

            if (algorithm == PitchDetectAlgorithm.Amdf)
            {
                for (int period = nLowPeriodInSamples; period < nHiPeriodInSamples; period += nResolution)
                {
                    double sum = 0;
                    // for each sample, see how close it is to a sample n away. Then sum these.
                    for (int i = 0; i < samples.Length - period; i++)
                    {
                        sum += Math.Abs(samples[i] - samples[i + period]);
                    }

                    double mean = sum / (double)samples.Length;
                    mean *= -1; //somewhat of a hack. We are trying to find the minimum value, but our findBestCandidates finds the max. value.
                    results[period - nLowPeriodInSamples] = mean;
                }
            }
            else if (algorithm == PitchDetectAlgorithm.Autocorrelation)
            {
                for (int period = nLowPeriodInSamples; period < nHiPeriodInSamples; period += nResolution)
                {
                    double sum = 0;
                    // for each sample, find correlation. (If they are far apart, small)
                    for (int i = 0; i < samples.Length - period; i++)
                    {
                        sum += samples[i] * samples[i + period];
                    }

                    double mean = sum / (double)samples.Length;
                    results[period - nLowPeriodInSamples] = mean;
                }
            }

            // find the best indices
            int[] bestIndices = findBestCandidates(nCandidates, ref results); //note findBestCandidates modifies parameter
            // convert back to Hz
            double[] res = new double[nCandidates];
            for (int i = 0; i < nCandidates; i++)
            {
                res[i] = periodInSamplesToHz(bestIndices[i] + nLowPeriodInSamples, w.getSampleRate());
            }
            return(res);
        }
Example #23
        // Helper function. It's long and gross because either sound could be longer.
        // I could be more clever and use Math.Max / Min to pick the longer and shorter WaveAudio,
        // but at least now it is readable.
        /// <summary>
        /// Element-wise combination of two audio clips. For example, adding, or modulation.
        /// </summary>
        internal static WaveAudio elementWiseCombination(WaveAudio w1, WaveAudio w2, ElementWiseCombinationFn fn)
        {
            if (w1.m_currentSampleRate != w2.m_currentSampleRate)
            {
                throw new Exception("Sample rates don't match");
            }
            if (w1.getNumChannels() != w2.getNumChannels())
            {
                throw new Exception("Number of channels don't match");
            }

            WaveAudio newwave = new WaveAudio(w1.getSampleRate(), w1.getNumChannels());

            newwave.LengthInSamples = Math.Max(w1.LengthInSamples, w2.LengthInSamples);
            double val;

            for (int ch = 0; ch < w1.getNumChannels(); ch++)
            {
                if (w1.LengthInSamples > w2.LengthInSamples)
                {
                    for (int i = 0; i < w1.LengthInSamples; i++)
                    {
                        if (i >= w2.LengthInSamples)
                        {
                            val = fn(w1.data[ch][i], 0);
                        }
                        else
                        {
                            val = fn(w1.data[ch][i], w2.data[ch][i]);
                        }

                        if (val > 1.0)
                        {
                            val = 1.0;
                        }
                        else if (val < -1.0)
                        {
                            val = -1.0;
                        }
                        newwave.data[ch][i] = val;
                    }
                }
                else
                {
                    for (int i = 0; i < w2.LengthInSamples; i++)
                    {
                        if (i >= w1.LengthInSamples)
                        {
                            val = fn(0, w2.data[ch][i]);
                        }
                        else
                        {
                            val = fn(w1.data[ch][i], w2.data[ch][i]);
                        }

                        if (val > 1.0)
                        {
                            val = 1.0;
                        }
                        else if (val < -1.0)
                        {
                            val = -1.0;
                        }
                        newwave.data[ch][i] = val;
                    }
                }
            }
            return(newwave);
        }
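The ElementWiseCombinationFn delegate itself does not appear in this listing; it is presumably a delegate taking two doubles and returning one. A hypothetical mixer built on elementWiseCombination (averaging keeps the sum within [-1, 1] before the clamp):

        // Hypothetical usage: mix two clips by averaging corresponding samples.
        public static WaveAudio MixSketch(WaveAudio w1, WaveAudio w2)
        {
            return elementWiseCombination(w1, w2, (a, b) => 0.5 * (a + b));
        }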
        public static WaveAudio Vibrato(WaveAudio wave, double freq, double width)
        {
            if (width < 0) throw new ArgumentException("Width must be >= 0");
            WaveAudio newwave = new WaveAudio(wave.getSampleRate(), wave.getNumChannels());
            
            // do operation for all channels
            for (int i = 0; i < wave.getNumChannels(); i++)
                newwave.data[i] = vibratoChannel(wave.data[i], wave.getSampleRate(), width, freq);

            return newwave;
        }
        static void iotests_perfile(string strFilename, int nBits, int nChannels, int nRate)
        {
            WaveAudio w01 = new WaveAudio(strFilename);
            asserteq(w01.getNumChannels(), nChannels);
            asserteq(w01.getSampleRate(), nRate, "011");
            asserteqf(w01.LengthInSamples, 90725 * (nRate / 22050), 1.0, "012"); //note give 1.0 tolerance
            asserteqf(w01.LengthInSeconds, 4.1145124, "013");

            asserteq(w01.data.Length, nChannels);
            asserteq(w01.data[0].Length, w01.LengthInSamples);
            for (int i = 0; i < nChannels; i++)
                asserteq(w01.data[i].Length, w01.LengthInSamples);

            // test converting to other rates / quality
            w01.SaveWaveFile("..\\..\\testout\\o_" + nRate + "_" + nBits + "_" + nRate + "_" + nChannels + ".wav", nBits);
            nBits = (nBits == 8) ? 16 : 8;
            w01.SaveWaveFile("..\\..\\testout\\ot_" + nRate + "_" + nBits + "_" + nRate + "_" + nChannels + ".wav", nBits);
        }
 public static WaveAudio Tremolo(WaveAudio w, double tremfreq, double amp)
 {
     WaveAudio res = new WaveAudio(w.getSampleRate(), w.getNumChannels());
     res.LengthInSamples = w.LengthInSamples;
     double tremoloFreqScale = 2.0 * Math.PI * tremfreq / (double)w.getSampleRate();
     for (int ch = 0; ch < w.data.Length; ch++)
     {
         for (int i = 0; i < w.data[ch].Length; i++)
         {
             double val = w.data[ch][i] * (1 + amp * Math.Sin(tremoloFreqScale * i));
             if (val > 1.0) val = 1.0;
             else if (val < -1.0) val = -1.0;
             res.data[ch][i] = val;
         }
     }
     return res;
 }
 // Test between 50Hz and 500Hz. This works very well, although it is hard to eliminate octave errors
 // This test will play original, then a sine at the frequency it detected. The two should line up.
 private static void pitchdetectwav(AudioPlayer pl, string strFilename, PitchDetection.PitchDetectAlgorithm algorithm)
 {
     string strInstdir = mediadirpitch;
     WaveAudio w = new WaveAudio(strInstdir + strFilename);
     if (w.getNumChannels() != 1) w.setNumChannels(1, true);
     double dfreq = PitchDetection.DetectPitch(w, 50, 500, algorithm);
     WaveAudio testPitch = new Triangle(dfreq, 0.7).CreateWaveAudio(1.0);
     pl.Play(w);
     pl.Play(testPitch);
 }
        // Helper function. It's long and gross because either sound could be longer.
        // I could be more clever and use Math.Max / Min to pick the longer and shorter WaveAudio,
        // but at least now it is readable.
        /// <summary>
        /// Element-wise combination of two audio clips. For example, adding, or modulation.
        /// </summary>
        internal static WaveAudio elementWiseCombination(WaveAudio w1, WaveAudio w2, ElementWiseCombinationFn fn)
        {
            if (w1.m_currentSampleRate != w2.m_currentSampleRate) throw new Exception("Sample rates don't match");
            if (w1.getNumChannels() != w2.getNumChannels()) throw new Exception("Number of channels don't match");

            WaveAudio newwave = new WaveAudio(w1.getSampleRate(), w1.getNumChannels());
            newwave.LengthInSamples = Math.Max(w1.LengthInSamples, w2.LengthInSamples);
            double val;
            for (int ch = 0; ch < w1.getNumChannels(); ch++)
            {
                if (w1.LengthInSamples > w2.LengthInSamples)
                {
                    for (int i = 0; i < w1.LengthInSamples; i++)
                    {
                        if (i >= w2.LengthInSamples)
                            val = fn(w1.data[ch][i], 0);
                        else
                            val = fn(w1.data[ch][i], w2.data[ch][i]);

                        if (val > 1.0) val = 1.0;
                        else if (val < -1.0) val = -1.0;
                        newwave.data[ch][i] = val;
                    }
                }
                else
                {
                    for (int i = 0; i < w2.LengthInSamples; i++)
                    {
                        if (i >= w1.LengthInSamples)
                            val = fn(0, w2.data[ch][i]);
                        else
                            val = fn(w1.data[ch][i], w2.data[ch][i]);

                        if (val > 1.0) val = 1.0;
                        else if (val < -1.0) val = -1.0;
                        newwave.data[ch][i] = val;
                    }
                }
            }
            return newwave;
        }