Code Example #1
File: AudioStreamFactory.cs Project: hatak30/Aurio
        private static PeakStore CreatePeakStore(AudioTrack audioTrack, bool fileSupport)
        {
            IAudioStream audioInputStream = audioTrack.CreateAudioStream();

            PeakStore peakStore = new PeakStore(SAMPLES_PER_PEAK, audioInputStream.Properties.Channels,
                                                (int)Math.Ceiling((float)audioInputStream.Length / audioInputStream.SampleBlockSize / SAMPLES_PER_PEAK));

            Action peakStoreFillAction = delegate {
                FillPeakStore(audioTrack, fileSupport, audioInputStream, peakStore);
                audioInputStream.Close();
            };

            // add task
            peakStoreQueue.Add(peakStoreFillAction);

            // create consumer/worker threads
            for (; peakStoreQueueThreads < Math.Min(2, Environment.ProcessorCount); peakStoreQueueThreads++)
            {
                Task.Factory.StartNew(() => {
                    // process peakstore actions as long as the queue is not empty
                    Debug.WriteLine("PeakStoreQueue thread started");
                    while (peakStoreQueue.Count > 0)
                    {
                        peakStoreQueue.Take().Invoke();
                    }
                    peakStoreQueueThreads--;
                    Debug.WriteLine("PeakStoreQueue thread stopped");
                });
            }

            return peakStore;
        }
Code Example #2
        private void button1_Click(object sender, RoutedEventArgs e)
        {
            Microsoft.Win32.OpenFileDialog dlg = new Microsoft.Win32.OpenFileDialog();
            dlg.DefaultExt  = ".wav";
            dlg.Multiselect = true;
            dlg.Filter      = "Wave files|*.wav";

            if (dlg.ShowDialog() == true)
            {
                var profile = FingerprintGenerator.GetProfiles()[0];

                spectrogram1.SpectrogramSize = profile.WindowSize / 2;
                spectrogram2.SpectrogramSize = profile.WindowSize / 2;

                ColorGradient gradient = new ColorGradient(0, 1);
                gradient.AddStop(Colors.Black, 0);
                gradient.AddStop(Colors.White, 1);
                var palette = gradient.GetGradientArgbArray(1024);
                // Set zero dB to red, and then set all found peaks to zero dB to make them visible in the spectrogram
                palette[palette.Length - 1] = ColorGradient.ColorToArgb(Colors.Red);

                spectrogram1.ColorPalette = palette;
                spectrogram2.ColorPalette = palette;

                var store = new FingerprintStore(profile);

                Task.Factory.StartNew(() => {
                    foreach (string file in dlg.FileNames)
                    {
                        AudioTrack audioTrack              = new AudioTrack(new FileInfo(file));
                        IAudioStream audioStream           = audioTrack.CreateAudioStream();
                        IProgressReporter progressReporter = ProgressMonitor.GlobalInstance.BeginTask("Generating fingerprints for " + audioTrack.FileInfo.Name, true);
                        int hashCount = 0;

                        FingerprintGenerator fpg = new FingerprintGenerator(profile);
                        fpg.FrameProcessed      += delegate(object sender2, FrameProcessedEventArgs e2) {
                            var spectrum         = (float[])e2.Spectrum.Clone();
                            var spectrumResidual = (float[])e2.SpectrumResidual.Clone();
                            Dispatcher.BeginInvoke((Action) delegate {
                                spectrogram1.AddSpectrogramColumn(spectrum);
                                spectrogram2.AddSpectrogramColumn(spectrumResidual);
                                progressReporter.ReportProgress((double)e2.Index / e2.Indices * 100);
                            });
                        };
                        fpg.SubFingerprintsGenerated += delegate(object sender2, SubFingerprintsGeneratedEventArgs e2) {
                            hashCount += e2.SubFingerprints.Count;
                            store.Add(e2);
                        };

                        fpg.Generate(audioTrack);
                        Debug.WriteLine("{0} hashes (mem {1:0.00} mb)", hashCount, (hashCount * Marshal.SizeOf(typeof(SubFingerprintHash))) / 1024f / 1024f);

                        progressReporter.Finish();
                    }
                    store.FindAllMatches();
                });
            }
        }
Code Example #3
File: AudioStreamFactory.cs Project: hatak30/Aurio
        public static VisualizingStream FromAudioTrackForGUI(AudioTrack audioTrack)
        {
            VisualizingStream visualizingStream =
                new VisualizingStream(audioTrack.CreateAudioStream(),
                                      CreatePeakStore(audioTrack, audioTrack.TimeWarps.Count == 0));

            // TODO if timewarps are added but total length stays the same, the peakstore still has to be refreshed
            audioTrack.LengthChanged += delegate(object sender, ValueEventArgs <TimeSpan> e) {
                visualizingStream.PeakStore = CreatePeakStore(audioTrack, false);
            };

            return visualizingStream;
        }
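
The factory method above wires the peak store to the track's lifecycle: a new peak store is generated whenever the track length changes. As a rough usage sketch (not part of the original listing; the file path and the viewer property are assumptions), it could be called like this when loading a file for display, reusing the AudioTrack(FileInfo) constructor seen in Code Example #2:

            // Hypothetical usage sketch, not taken from the Aurio sources:
            AudioTrack audioTrack = new AudioTrack(new FileInfo(@"C:\audio\example.wav")); // path is a placeholder
            VisualizingStream visualizingStream = AudioStreamFactory.FromAudioTrackForGUI(audioTrack);
            // The peak store is filled asynchronously by the worker threads from Code Example #1,
            // so the stream can be handed to a waveform control right away.
            // waveformView.AudioStream = visualizingStream; // "waveformView" and its property are assumed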
Code Example #4
        private void Benchmark(AudioTrack track, bool warmup)
        {
            Task.Factory.StartNew(() => {
                if (warmup)
                {
                    // "Warmup" reads the whole stream before starting the benchmark procedures
                    // to trigger the file caching in Windows, else the first fingerprinting run
                    // on a file is always slower than the following because the file is cached
                    // on successive runs.
                    var stream    = track.CreateAudioStream();
                    var buffer    = new byte[1024 * 1024];
                    int bytesRead = 0;
                    while ((bytesRead = stream.Read(buffer, 0, buffer.Length)) > 0)
                    {
                        // nothing to do here
                    }
                }

                BenchmarkHaitsmaKalker(track);
                BenchmarkWang(track);
                BenchmarkEchoprint(track);
                BenchmarkChromaprint(track);
            });
        }
Code Example #5
        private void TimeWarp(TimeWarpType type, AudioTrack t1, TimeSpan t1From, TimeSpan t1To, AudioTrack t2, TimeSpan t2From, TimeSpan t2To, bool calculateSimilarity, bool normalizeSimilarity, bool cueIn, bool cueOut)
        {
            IAudioStream s1 = t1.CreateAudioStream();
            IAudioStream s2 = t2.CreateAudioStream();

            s1 = new CropStream(s1, TimeUtil.TimeSpanToBytes(t1From, s1.Properties), TimeUtil.TimeSpanToBytes(t1To, s1.Properties));
            s2 = new CropStream(s2, TimeUtil.TimeSpanToBytes(t2From, s2.Properties), TimeUtil.TimeSpanToBytes(t2To, s2.Properties));

            List <Tuple <TimeSpan, TimeSpan> > path = null;
            DTW dtw = null;

            // execute time warping
            if (type == TimeWarpType.DTW)
            {
                dtw = new DTW(TimeWarpSearchWidth, progressMonitor);
            }
            else if (type == TimeWarpType.OLTW)
            {
                dtw = new OLTW2(TimeWarpSearchWidth, progressMonitor);
            }

            if (TimeWarpDisplay)
            {
                this.Dispatcher.BeginInvoke((Action) delegate {
                    dtwPathViewer = new DtwPathViewer();
                    dtwPathViewer.Show();
                });

                dtw.OltwInit += new DTW.OltwInitDelegate(delegate(int windowSize, IMatrix <double> cellCostMatrix, IMatrix <double> totalCostMatrix) {
                    dtwPathViewer.Dispatcher.BeginInvoke((Action) delegate {
                        dtwPathViewer.DtwPath.Init(windowSize, cellCostMatrix, totalCostMatrix);
                    });
                });
                bool drawing = false;
                dtw.OltwProgress += new DTW.OltwProgressDelegate(delegate(int i, int j, int minI, int minJ, bool force) {
                    if (!drawing || force)
                    {
                        dtwPathViewer.Dispatcher.BeginInvoke((Action) delegate {
                            drawing = true;
                            dtwPathViewer.DtwPath.Refresh(i, j, minI, minJ);
                            drawing = false;
                        });
                    }
                });
            }

            path = dtw.Execute(s1, s2);

            if (path == null)
            {
                return;
            }

            // convert resulting path to matches and filter them
            int               filterSize       = TimeWarpFilterSize; // take every n-th match and drop the rest
            bool              smoothing        = TimeWarpSmoothing;
            int               smoothingWidth   = Math.Max(1, Math.Min(filterSize / 10, filterSize));
            bool              inOutCue         = TimeWarpInOutCue;
            TimeSpan          inOutCueSpan     = TimeWarpSearchWidth;
            List <Match>      matches          = new List <Match>();
            float             maxSimilarity    = 0; // needed for normalization
            IProgressReporter progressReporter = progressMonitor.BeginTask("post-process resulting path...", true);
            double            totalProgress    = path.Count;
            double            progress         = 0;

            /* Leave out matches in the in/out cue areas...
             * Matches in the intervals at the beginning and end of the calculated time warping path, each
             * as wide as the search width, should be left out because they might not be correct: since the
             * time warp path must start at (0,0) in the matrix and end at (m,n), they would only be correct
             * if the path is calculated between two synchronization points. Paths calculated from the start
             * of a track to the first sync point, or from the last sync point to the end of the track, are
             * probably wrong in these intervals because the start and end points don't line up when there
             * is time drift, so it is better to leave them out in those areas... In these short intervals
             * of a few seconds the drift will never be extreme enough for anyone to notice anyway. */
            if (inOutCue)
            {
                int startIndex = 0;
                int endIndex   = path.Count;

                // this needs a temporally ordered mapping path (no matter if ascending or descending)
                foreach (Tuple <TimeSpan, TimeSpan> mapping in path)
                {
                    if (cueIn && (mapping.Item1 < inOutCueSpan || mapping.Item2 < inOutCueSpan))
                    {
                        startIndex++;
                    }
                    if (cueOut && (mapping.Item1 > (t1To - t1From - inOutCueSpan) || mapping.Item2 > (t2To - t2From - inOutCueSpan)))
                    {
                        endIndex--;
                    }
                }
                path = path.GetRange(startIndex, endIndex - startIndex);
            }

            for (int i = 0; i < path.Count; i += filterSize)
            {
                //List<Tuple<TimeSpan, TimeSpan>> section = path.GetRange(i, Math.Min(path.Count - i, filterSize));
                List <Tuple <TimeSpan, TimeSpan> > smoothingSection = path.GetRange(
                    Math.Max(0, i - smoothingWidth / 2), Math.Min(path.Count - i, smoothingWidth));
                Tuple <TimeSpan, TimeSpan> match = path[i];

                if (smoothingSection.Count == 0)
                {
                    throw new InvalidOperationException("must not happen");
                }
                else if (smoothingSection.Count == 1 || !smoothing || i == 0)
                {
                    // do nothing, match doesn't need any processing
                    // the first and last match must not be smoothed since they must sit at the bounds
                }
                else
                {
                    List <TimeSpan> offsets = new List <TimeSpan>(smoothingSection.Select(t => t.Item2 - t.Item1).OrderBy(t => t));
                    int             middle  = offsets.Count / 2;

                    // calculate median
                    // http://en.wikiversity.org/wiki/Primary_mathematics/Average,_median,_and_mode#Median
                    TimeSpan smoothedDriftTime = new TimeSpan((offsets[middle - 1] + offsets[middle]).Ticks / 2);
                    match = new Tuple <TimeSpan, TimeSpan>(match.Item1, match.Item1 + smoothedDriftTime);
                }

                float similarity = calculateSimilarity ? (float)Math.Abs(CrossCorrelation.Correlate(
                                                                             s1, new Interval(match.Item1.Ticks, match.Item1.Ticks + TimeUtil.SECS_TO_TICKS),
                                                                             s2, new Interval(match.Item2.Ticks, match.Item2.Ticks + TimeUtil.SECS_TO_TICKS))) : 1;

                if (similarity > maxSimilarity)
                {
                    maxSimilarity = similarity;
                }

                matches.Add(new Match()
                {
                    Track1     = t1,
                    Track1Time = match.Item1 + t1From,
                    Track2     = t2,
                    Track2Time = match.Item2 + t2From,
                    Similarity = similarity,
                    Source     = type.ToString()
                });

                progressReporter.ReportProgress(progress / totalProgress * 100);
                progress += filterSize;
            }

            // add last match if it hasn't been added
            if (path.Count > 0 && path.Count % filterSize != 1)
            {
                Tuple <TimeSpan, TimeSpan> lastMatch = path[path.Count - 1];
                matches.Add(new Match()
                {
                    Track1     = t1,
                    Track1Time = lastMatch.Item1 + t1From,
                    Track2     = t2,
                    Track2Time = lastMatch.Item2 + t2From,
                    Similarity = 1,
                    Source     = type.ToString()
                });
            }
            progressReporter.Finish();

            multiTrackViewer.Dispatcher.BeginInvoke((Action) delegate {
                foreach (Match match in matches)
                {
                    if (normalizeSimilarity)
                    {
                        match.Similarity /= maxSimilarity; // normalize to 1
                    }
                    multiTrackViewer.Matches.Add(match);
                }
            });

            s1.Close();
            s2.Close();
        }
Code Example #6
        private void AddTrack(AudioTrack audioTrack)
        {
            if (audioTrack.SourceProperties.SampleRate > audioMixer.SampleRate)
            {
                // The newly added track has a higher samplerate than the current tracks, so we adjust
                // the processing samplerate to the highest rate
                ChangeMixingSampleRate(audioTrack.SourceProperties.SampleRate);
            }

            IAudioStream input        = audioTrack.CreateAudioStream();
            IAudioStream baseStream   = new TolerantStream(new BufferedStream(input, 1024 * 256 * input.SampleBlockSize, true));
            OffsetStream offsetStream = new OffsetStream(baseStream)
            {
                Offset = TimeUtil.TimeSpanToBytes(audioTrack.Offset, baseStream.Properties)
            };

            audioTrack.OffsetChanged += new EventHandler <ValueEventArgs <TimeSpan> >(
                delegate(object sender, ValueEventArgs <TimeSpan> e) {
                offsetStream.Offset = TimeUtil.TimeSpanToBytes(e.Value, offsetStream.Properties);
                audioMixer.UpdateLength();
            });

            // Upmix mono inputs to dual channel stereo or downmix surround to allow channel balancing
            // TODO add better multichannel stream support and allow balancing of surround
            IAudioStream mixToStereoStream = offsetStream;

            if (mixToStereoStream.Properties.Channels == 1)
            {
                mixToStereoStream = new MonoStream(mixToStereoStream, 2);
            }
            else if (mixToStereoStream.Properties.Channels > 2)
            {
                mixToStereoStream = new SurroundDownmixStream(mixToStereoStream);
            }

            // control the track phase
            PhaseInversionStream phaseInversion = new PhaseInversionStream(mixToStereoStream)
            {
                Invert = audioTrack.InvertedPhase
            };

            MonoStream monoStream = new MonoStream(phaseInversion, phaseInversion.Properties.Channels)
            {
                Downmix = audioTrack.MonoDownmix
            };

            // necessary to control each track individually
            VolumeControlStream volumeControl = new VolumeControlStream(monoStream)
            {
                Mute    = audioTrack.Mute,
                Volume  = audioTrack.Volume,
                Balance = audioTrack.Balance
            };

            // when the AudioTrack.Mute property changes, just set it accordingly on the audio stream
            audioTrack.MuteChanged += new EventHandler <ValueEventArgs <bool> >(
                delegate(object vsender, ValueEventArgs <bool> ve) {
                volumeControl.Mute = ve.Value;
            });

            // when the AudioTrack.Solo property changes, we have to react in different ways:
            audioTrack.SoloChanged += new EventHandler <ValueEventArgs <bool> >(
                delegate(object vsender, ValueEventArgs <bool> ve) {
                AudioTrack senderTrack  = (AudioTrack)vsender;
                bool isOtherTrackSoloed = false;

                foreach (AudioTrack vaudioTrack in trackList)
                {
                    if (vaudioTrack != senderTrack && vaudioTrack.Solo)
                    {
                        isOtherTrackSoloed = true;
                        break;
                    }
                }

                /* if there's at least one other track that is soloed, we set the mute property of
                 * the current track to the opposite of the solo property:
                 * - if the track is soloed, we unmute it
                 * - if the track is unsoloed, we mute it
                 */
                if (isOtherTrackSoloed)
                {
                    senderTrack.Mute = !ve.Value;
                }

                /* if this is the only soloed track, we mute all other tracks
                 * if this track just got unsoloed, we unmute all other tracks
                 */
                else
                {
                    foreach (AudioTrack vaudioTrack in trackList)
                    {
                        if (vaudioTrack != senderTrack && !vaudioTrack.Solo)
                        {
                            vaudioTrack.Mute = ve.Value;
                        }
                    }
                }
            });

            // when the AudioTrack.Volume property changes, just set it accordingly on the audio stream
            audioTrack.VolumeChanged += new EventHandler <ValueEventArgs <float> >(
                delegate(object vsender, ValueEventArgs <float> ve) {
                volumeControl.Volume = ve.Value;
            });

            audioTrack.BalanceChanged += new EventHandler <ValueEventArgs <float> >(
                delegate(object vsender, ValueEventArgs <float> ve) {
                volumeControl.Balance = ve.Value;
            });

            audioTrack.InvertedPhaseChanged += new EventHandler <ValueEventArgs <bool> >(
                delegate(object vsender, ValueEventArgs <bool> ve) {
                phaseInversion.Invert = ve.Value;
            });
            audioTrack.MonoDownmixChanged += new EventHandler <ValueEventArgs <bool> >(
                delegate(object vsender, ValueEventArgs <bool> ve) {
                monoStream.Downmix = ve.Value;
            });

            // adjust sample rate to mixer output rate
            ResamplingStream resamplingStream = new ResamplingStream(volumeControl,
                                                                     ResamplingQuality.Medium, audioMixer.Properties.SampleRate);

            IAudioStream trackStream = resamplingStream;

            if (trackStream.Properties.Channels == 1 && audioMixer.Properties.Channels > 1)
            {
                trackStream = new MonoStream(trackStream, audioMixer.Properties.Channels);
            }

            audioMixer.Add(trackStream);
            trackListStreams.Add(audioTrack, trackStream);
        }
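
AddTrack above composes the playback path for one track as a chain of decorator streams, each adding a single concern (buffering, offset, channel layout, phase, downmix, volume, resampling). A condensed sketch of that chain, using only the classes and constructors that already appear in the example (property initializers and event wiring omitted), may make the ordering easier to follow:

            // Condensed sketch of the per-track stream chain from AddTrack, innermost to outermost.
            IAudioStream chain = audioTrack.CreateAudioStream();                    // raw track input
            chain = new TolerantStream(new BufferedStream(chain, 1024 * 256 * chain.SampleBlockSize, true));
            chain = new OffsetStream(chain);                                        // shifts the track by its offset
            // ...mono-to-stereo upmix or surround downmix is inserted here when needed...
            chain = new PhaseInversionStream(chain);                                // optional phase flip
            chain = new MonoStream(chain, chain.Properties.Channels);               // optional mono downmix
            chain = new VolumeControlStream(chain);                                 // mute / volume / balance
            chain = new ResamplingStream(chain, ResamplingQuality.Medium, audioMixer.Properties.SampleRate);
            audioMixer.Add(chain);                                                  // mixed with the other tracks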