/// <summary>
/// Decodes the given WAV file and down-mixes it to a single-channel (mono) signal.
/// </summary>
/// <param name="arquivoAudio">Path to the WAV audio file to decode.</param>
/// <returns>The decoded signal converted to mono.</returns>
public static Signal ObterSinalMono(string arquivoAudio)
{
    // The decoder owns the underlying file stream, so it must be disposed
    // along with the intermediate multi-channel signal; the original code
    // leaked the decoder. MonoFilter.Apply returns a new Signal, so the
    // returned value does not depend on either disposed object.
    using (var audioDecoder = new WaveDecoder(arquivoAudio))
    using (var originalSignal = audioDecoder.Decode())
    {
        var monoFilter = new MonoFilter();
        return monoFilter.Apply(originalSignal);
    }
}
/// <summary>
/// Unity lifecycle hook: allocates the working buffers and constructs the
/// two stereo filter pairs — the primary pair at frequency[0] and the
/// secondary pair at frequency[1] (used for band-pass / notch modes).
/// </summary>
public override void Awake()
{
    base.Awake();

    filters = new MonoFilter[NUM_FILTERS];
    bufferCopy = new float[MAX_BUFFER_LENGTH];
    controlBuffer = new float[MAX_BUFFER_LENGTH];

    // Filters 0/1 form the primary stereo pair and 2/3 the secondary pair,
    // so pair index i / 2 selects the cutoff frequency for each channel pair.
    for (int i = 0; i < 4; i++)
    {
        filters[i] = new MonoFilter(frequency[i / 2], resonance);
    }
}
/// <summary>
/// Performs the actual feature extraction: the signal is down-mixed to mono
/// if needed, split into analysis frames of m_wlen samples taken every
/// m_fshift samples, and each frame is converted into an MFCC descriptor.
/// </summary>
/// <param name="signal">Input audio signal (any number of channels).</param>
/// <returns>One MFCC descriptor per analysis frame.</returns>
protected override IList<MelFrequencyCepstrumCoefficientDescriptor> InnerTransform(Signal signal)
{
    // MFCC analysis operates on a single channel; down-mix multi-channel input.
    if (signal.NumberOfChannels > 1)
    {
        signal = new MonoFilter().Apply(signal);
    }

    float[] samples = new float[signal.Length];
    signal.CopyTo(samples);

    // NOTE(review): assumes m_fshift is a fractional frame shift in samples;
    // the last frame may start at or past the end of the signal, producing an
    // empty (fully zero-padded) frame — same as the original behavior.
    int frameCount = (int)(signal.Length / m_fshift + 1);
    var mfcc = new MelFrequencyCepstrumCoefficientDescriptor[frameCount];

    for (int frame = 0; frame < frameCount; frame++)
    {
        int start = (int)System.Math.Round(frame * m_fshift);
        int end = System.Math.Min(signal.Length, start + m_wlen);

        // Convert the float samples to 16-bit PCM. Clamp to the Int16 range:
        // the bare scale factor of 32768 overflows Int16 for a full-scale
        // sample of +1.0 and wraps around to -32768.
        Int16[] window = new Int16[end - start];
        for (int i = start, j = 0; i < end; i++, j++)
        {
            window[j] = (Int16)System.Math.Min(32767f, System.Math.Max(-32768f, samples[i] * 32768f));
        }

        // Zero-pad a partial final frame up to the full analysis window
        // length. Array.Resize already zero-fills the appended elements, so
        // the original explicit fill loop was redundant and has been removed.
        if (window.Length < m_wlen)
        {
            Array.Resize(ref window, m_wlen);
        }

        double[] coefficients = frame2s2mfc(window);
        mfcc[frame] = new MelFrequencyCepstrumCoefficientDescriptor(frame, coefficients);
    }

    return mfcc;
}
/// <summary>
/// Renders the right-hand operand of the given filter as its reference
/// string form. The operand must be a ReferenceFilterValue; any other type
/// results in an InvalidCastException, as in the original implementation.
/// </summary>
/// <param name="f">Filter whose Right operand is parsed.</param>
/// <returns>The string representation of the right-hand reference value.</returns>
protected string ParseReferenceStringRight(MonoFilter f)
{
    var rightValue = (ReferenceFilterValue)f.Right;
    return ParseReferenceFilterValue(rightValue);
}