static void Main(string[] args)
{
    // Names of the songs to visualize (looked up under Resources/Songs).
    string[] files = new string[] { "Home.wav", };

    AudioRecognitionLibrary.Recognizer.AudioRecognizer recognizer =
        new AudioRecognitionLibrary.Recognizer.AudioRecognizer();

    foreach (var songFile in files)
    {
        // Load the audio, then compute its BPM and print the result.
        var audio = Recorder.GetAudio($"Resources/Songs/{songFile}");
        Console.WriteLine($"FILE: {songFile}");
        Console.WriteLine(recognizer.GetBPM(audio, true));

        // Collapse to a single channel before visualizing.
        // NOTE(review): return value is discarded — presumably ConvertToMono
        // mutates `audio` in place (its Data/Channels are used below); confirm.
        AudioProcessor.ConvertToMono(audio);

        // Change the last parameter to see other visualisations.
        var window = new global::Visualizer.Visualizer(
            audio.Data, audio.Channels, audio.SampleRate, VisualisationModes.Frequencies);
        window.Run();
    }
}
/// <summary>
/// Creates a fingerprint of the given audio and returns it together with the
/// total number of valid notes (needed to compute recognition accuracy).
/// </summary>
/// <param name="audio">Audio whose fingerprint we want.</param>
/// <returns>
/// Tuple where Item1 is the fingerprint and Item2 is the total number of notes.<br></br>
/// fingerprint: [hash; (absolute anchor times)]<br></br>
/// Note: the address is the hash.
/// </returns>
public Tuple<Dictionary<uint, List<uint>>, int> GetAudioFingerprint(IAudioFormat audio)
{
    // Collapse the (possibly multi-channel) audio into a single mono track,
    // then widen the samples to double for the DSP steps below.
    short[] monoSamples = AudioProcessor.ConvertToMono(audio);
    double[] samples = Array.ConvertAll(monoSamples, s => (double)s);

    // Downsampling coefficient for resampling to Parameters.TargetSamplingRate.
    // NOTE(review): integer division — this assumes SampleRate is an exact
    // multiple of TargetSamplingRate; verify for unusual input sample rates.
    int downsampleCoef = (int)audio.SampleRate / Parameters.TargetSamplingRate;
    double[] resampled = AudioProcessor.DownSample(samples, downsampleCoef, audio.SampleRate);

    // Window size shrinks with the downsampling factor (default: 4096/4 = 1024).
    int bufferSize = Parameters.WindowSize / downsampleCoef;
    var timeFrequencyPoints = CreateTimeFrequencyPoints(bufferSize, resampled);

    // [hash; (absolute anchor times)]
    Dictionary<uint, List<uint>> fingerprint = CreateRecordAddresses(timeFrequencyPoints);

    return new Tuple<Dictionary<uint, List<uint>>, int>(fingerprint, timeFrequencyPoints.Count);
}