/// <summary>
/// Decodes the next chunk of audio into <paramref name="samples"/> and advances
/// the stream position by the number of samples produced.
/// </summary>
/// <param name="samples">Destination buffer for the decoded 16-bit samples.</param>
/// <returns>The number of samples actually decoded.</returns>
public int ReadSamples(Span<short> samples)
{
    int decoded = _decoder.Decode(samples);
    Position += decoded;
    return decoded;
}
/// <summary>
/// Fingerprints the given audio file on a background task and reports the
/// fingerprint (and elapsed milliseconds) through <c>ProcessFileCallback</c>.
/// Disables the fingerprint/request buttons while the work is queued.
/// </summary>
/// <param name="file">Path of the audio file to fingerprint.</param>
private void ProcessFile(string file)
{
    // Guard clauses instead of nested ifs; nothing to do without a file or a decoder.
    // (Removed a stale commented-out "btnOpen.Enabled = false;" line.)
    if (!File.Exists(file) || decoder == null)
    {
        return;
    }

    btnFingerPrint.Enabled = false;
    btnRequest.Enabled = false;

    Task.Factory.StartNew(() =>
    {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.Start();

        ChromaContext context = new ChromaContext();
        context.Start(decoder.SampleRate, decoder.Channels);
        // NOTE(review): assuming the second argument limits decoding to 120 seconds
        // of audio — confirm against the decoder's API.
        decoder.Decode(context.Consumer, 120);
        context.Finish();

        stopwatch.Stop();
        ProcessFileCallback(context.GetFingerprint(), stopwatch.ElapsedMilliseconds);
    });
}
/// <summary>
/// Decode a signal from the specified file.
/// </summary>
///
/// <param name="fileName">File name to read signal from.</param>
/// <param name="frameInfo">Information about the decoded signal.</param>
///
/// <returns>Return decoded signal.</returns>
///
/// <exception cref="ArgumentException">
/// No decoder is registered for the file's extension.</exception>
///
public static Signal DecodeFromFile(string fileName, out FrameInfo frameInfo)
{
    string fileExtension = FormatDecoderAttribute.GetNormalizedExtension(fileName);
    IAudioDecoder decoder = FormatDecoderAttribute.GetDecoders(fileExtension, decoderTypes, decoders.Value);

    if (decoder == null)
    {
        // Fixed message: a space was missing between "instantiate" and "at least",
        // and "applicatoin" was a typo for "application".
        throw new ArgumentException(String.Format("No suitable decoder has been found for the file format {0}. If ", fileExtension) +
            "you are trying to decode .wav files, please add a reference to Accord.Audio.DirectSound. You might need to instantiate " +
            "at least one type from this assembly to make sure it has been loaded in the AppDomain of your application.", "fileName");
    }

    // open stream
    using (FileStream stream = new FileStream(fileName, FileMode.Open, FileAccess.Read))
    {
        // open decoder
        decoder.Open(stream);

        try
        {
            // read all audio frames
            Signal signal = decoder.Decode();

            frameInfo = new FrameInfo(signal.NumberOfChannels, signal.SampleRate,
                Signal.GetSampleSize(signal.SampleFormat), 0, signal.Length);

            return signal;
        }
        finally
        {
            // Close the decoder even if Decode() throws, so it never leaks.
            decoder.Close();
        }
    }
}
/// <summary>
/// Decodes one chunk from the underlying decoder into the playback buffer.
/// Skips work while paused/terminated/exhausted or while the buffer is still
/// above the refill threshold; on end-of-stream either loops or flags no-more-data.
/// </summary>
private void _DoDecode()
{
    // Nothing to do while paused, shutting down, or after the stream has ended.
    if (_Paused || _Terminated || _NoMoreData)
    {
        return;
    }

    float timecode;
    byte[] buffer;

    lock (_MutexData)
    {
        // Buffer still holds enough unread data — no need to decode yet.
        if (_Data.BytesNotRead > _BeginRefill)
        {
            return;
        }
    }

    // Decode outside the lock so playback is not blocked during the (slow) decode.
    _Decoder.Decode(out buffer, out timecode);

    if (buffer == null)
    {
        // End of stream: restart from the beginning when looping, otherwise mark exhausted.
        if (_Loop)
        {
            _Start = 0f;
            _DoSkip();
        }
        else
        {
            _NoMoreData = true;
        }

        return;
    }

    lock (_MutexData)
    {
        _Data.Write(buffer);
        _TimeCode = timecode;

        // Still below the refill threshold — signal the decode loop to keep going.
        if (_Data.BytesNotRead < _BeginRefill)
        {
            _EventDecode.Set();
        }
    }
}
/// <summary>
/// Gets audio from queue and process it
/// </summary>
protected virtual void AudioAnalys(IAudioDecoder decoder)
{
    int processedSoFar = 0;
    Audio item;

    // Drain the work queue one item at a time.
    while ((item = GetItemFromQueue()) != null)
    {
        try
        {
            processedSoFar++;

            // Decode the audio source into samples (mp3 tags are extracted as well).
            AudioInfo info;
            using (var sourceStream = item.GetSourceStream())
            {
                info = decoder.Decode(sourceStream, TargetBitrate, item.GetSourceExtension());
            }

            // Bring the volume to a uniform level before processing.
            info.Samples.Normalize();

            // Run every configured sample processor; a failing one is logged and skipped.
            foreach (var processor in factory.CreateSampleProcessors())
            {
                try
                {
                    processor.Process(item, info);
                }
                catch (Exception E)
                {
                    Logger.WarnException("Audio processor exception.", E);
                }
            }

            OnProgress(new ProgressChangedEventArgs(100 * (itemsCount - audioSourceQueue.Count) / itemsCount, null));
            item.State = AudioState.Processed;
        }
        catch (Exception E)
        {
            Logger.ErrorException("Audio processing failed.", E);
            item.State = AudioState.Bad;
        }
    }
}
/// <summary>
/// Decode a signal from the specified file.
/// </summary>
///
/// <param name="fileName">File name to read signal from.</param>
/// <param name="frameInfo">Information about the decoded signal.</param>
///
/// <returns>Return decoded signal.</returns>
///
/// <exception cref="ArgumentException">
/// No decoder is registered for the file's extension.</exception>
///
public static Signal DecodeFromFile(string fileName, out FrameInfo frameInfo)
{
    Signal signal = null;
    string fileExtension = Path.GetExtension(fileName).ToUpperInvariant();

    // The original tested both (!= string.Empty) and (Length != 0), which are
    // the same condition; one check suffices.
    if (fileExtension.Length != 0)
    {
        fileExtension = fileExtension.Substring(1); // strip the leading '.'

        // Lazily discover decoders for this extension on first use.
        if (!decoders.ContainsKey(fileExtension))
        {
            FormatDecoderAttribute.PopulateDictionaryWithDecodersFromAllAssemblies<IAudioDecoder>(decoders, fileExtension);
        }

        if (decoders.ContainsKey(fileExtension))
        {
            IAudioDecoder decoder = (IAudioDecoder)Activator.CreateInstance(decoders[fileExtension]);

            // open stream
            using (FileStream stream = new FileStream(fileName, FileMode.Open, FileAccess.Read))
            {
                // open decoder
                decoder.Open(stream);

                try
                {
                    // read all audio frames
                    signal = decoder.Decode();
                }
                finally
                {
                    // Close the decoder even if Decode() throws, so it never leaks.
                    decoder.Close();
                }
            }

            frameInfo = new FrameInfo(signal.Channels, signal.SampleRate,
                Signal.GetSampleSize(signal.SampleFormat), 0, signal.Length);

            return signal;
        }
    }

    // Typo fixed in the message: "DirectSouond" -> "DirectSound".
    throw new ArgumentException(String.Format("No suitable decoder has been found for the file format {0}. If ", fileExtension) +
        "you are trying to decode .wav files, please add a reference to Accord.Audio.DirectSound.", "fileName");
}
/// <summary>
/// Fingerprints the given file on a background task (reporting through
/// <c>ProcessFileCallback</c>), then performs a lookup.
/// </summary>
/// <param name="file">Path of the audio file to fingerprint.</param>
private void ProcessFile(string file)
{
    if (File.Exists(file))
    {
        if (decoder.Ready)
        {
            Task.Factory.StartNew(() =>
            {
                Stopwatch stopwatch = new Stopwatch();
                stopwatch.Start();

                ChromaContext context = new ChromaContext();
                context.Start(decoder.SampleRate, decoder.Channels);
                // Feed decoded audio to the fingerprinter; the second argument is
                // presumably a max duration in seconds — TODO confirm against the API.
                decoder.Decode(context.Consumer, 120);
                context.Finish();

                stopwatch.Stop();
                ProcessFileCallback(context.GetFingerprint(), stopwatch.ElapsedMilliseconds);
            });
        }

        // NOTE(review): 'fp' and 'dur' are not declared in this method — presumably
        // fields set by ProcessFileCallback. If so, this call races with the background
        // task above, which may not have finished (or even started) yet. Verify the
        // intended ordering; the sibling variant of this method does not call Lookup here.
        Lookup(fp, dur);
    }
}
/// <summary>
/// Decodes one chunk from the underlying decoder into the playback buffer.
/// Runs only when a file is open, playback is active, and the buffer has drained
/// below the refill threshold; on end-of-stream either loops or flags no-more-data.
/// </summary>
private void DoDecode()
{
    // Bail out early when there is nothing sensible to decode.
    if (!_FileOpened)
    {
        return;
    }

    if (_Paused)
    {
        return;
    }

    if (_terminated)
    {
        return;
    }

    float Timecode;
    byte[] Buffer;
    bool DoIt = false;

    lock (MutexData)
    {
        // Only decode when no skip is pending and the unread portion of the buffer
        // has dropped below the refill threshold (BUFSIZE - 10000 bytes).
        if (!_skip && BUFSIZE - 10000L > _data.BytesNotRead)
        {
            DoIt = true;
        }
    }

    if (!DoIt)
    {
        return;
    }

    // Decode outside the lock so playback is not blocked during the (slow) decode.
    _Decoder.Decode(out Buffer, out Timecode);

    if (Buffer == null)
    {
        // End of stream: restart from the beginning when looping, otherwise mark exhausted.
        if (_Loop)
        {
            lock (MutexSyncSignals)
            {
                _CurrentTime = 0f;
                _Start = 0f;
            }

            DoSkip();
        }
        else
        {
            _NoMoreData = true;
        }

        return;
    }

    lock (MutexData)
    {
        _data.Write(Buffer);
        _TimeCode = Timecode;

        // Below the threshold: keep the decode loop running; otherwise wait for
        // playback to drain the buffer before decoding more.
        if (_data.BytesNotRead < BUFSIZE - 10000L)
        {
            _waiting = false;
            EventDecode.Set();
        }
        else
        {
            _waiting = true;
        }
    }
}
/// <summary>
/// Drains the record queue: decodes each audio record, normalizes its volume, and
/// runs the configured sample processors over it, reporting progress after each item.
/// </summary>
/// <param name="decoder">Decoder used to turn each record's source stream into samples.</param>
private void Process(IAudioDecoder decoder)
{
    int counter = 0;
    AudioRecord item;

    while ((item = GetItemFromQueue()) != null)
    {
        try
        {
            counter++;

            // Decode audio source to samples (mp3 tags are extracted as well).
            AudioInfo info = null;
            using (var stream = item.GetSourceStream())
                info = decoder.Decode(stream, targetBitRate, item.GetSourceExtension());

            // Normalize volume level.
            info.Samples.Normalize();

            // Launch sample processors. (Unused 'Exception E' variables removed —
            // they only produced compiler warnings.)
            foreach (var processor in factory.CreateSampleProcessors())
            {
                try
                {
                    processor.Process(item, info);
                }
                catch (Exception)
                {
                    // Swallowed on purpose: one failing processor must not stop the
                    // remaining processors. The logging variant of this loop elsewhere
                    // records the exception; consider doing the same here.
                }
            }

            item.State = RecordState.Processed;
        }
        catch (Exception)
        {
            // Decoding/normalization failed — mark the record bad and keep going.
            item.State = RecordState.Bad;
        }

        OnProcessingProgress(new ProgressChangedEventArgs(100 * (itemsCount - recordsQueue.Count) / itemsCount, null));
    }
}
/// <summary>
/// Gets audio from queue and process it
/// </summary>
protected virtual void AudioAnalys(IAudioDecoder decoder)
{
    int handled = 0;
    Audio current;

    // Pull items until the queue is exhausted.
    while ((current = GetItemFromQueue()) != null)
    {
        try
        {
            handled++;

            // Decode the source stream into samples (this also extracts mp3 tags).
            AudioInfo decoded = null;
            using (var sourceStream = current.GetSourceStream())
            {
                decoded = decoder.Decode(sourceStream, TargetBitrate, current.GetSourceExtension());
            }

            // Normalize the volume level before handing off to the processors.
            decoded.Samples.Normalize();

            // Each processor runs independently; a failure is logged and skipped.
            foreach (var sampleProcessor in factory.CreateSampleProcessors())
            {
                try
                {
                    sampleProcessor.Process(current, decoded);
                }
                catch (Exception E)
                {
                    Logger.WarnException("Audio processor exception.", E);
                }
            }

            OnProgress(new ProgressChangedEventArgs(100 * (itemsCount - audioSourceQueue.Count) / itemsCount, null));
            current.State = AudioState.Processed;
        }
        catch (Exception E)
        {
            Logger.ErrorException("Audio processing failed.", E);
            current.State = AudioState.Bad;
        }
    }
}
// private double _doubleReadCounter;
// private int _doubleReadFloor;

#endregion

#region Methods

/// <summary>
/// Jitter-buffer read: drains overfull queues, opportunistically shrinks latency when
/// traffic is good, then decodes one frame into <paramref name="outputBuffer"/> —
/// interpolating via the decoder when the queue is empty.
/// </summary>
/// <param name="outputBuffer">Destination for the decoded 16-bit samples.</param>
/// <returns>Length *in shorts* of the decoded data</returns>
public int ReadSamples(short[] outputBuffer)
{
    int length = 0;
    bool isSilent = false;

    lock (_queue)
    {
        // If there are too many frames in the queue, pull them out one by one and decode them
        // (so the speex buffer stays OK), but don't bother playing them. This is a case where
        // downsampling would be helpful, but we'll ignore it for now.
        // Note: 'length' is still 0 here, so these throwaway decodes land at the buffer start
        // and are overwritten by the real decode below.
        while (_queue.Count > queuedFramesTargetMax)
        {
            _logger.LogQueueFull();
            var entry = _queue.Dequeue();
            AudioDecoder = _codecFactory.GetAudioDecoder(entry.AudioCodecType);
            AudioDecoder.Decode(entry.Frame, 0, entry.DataLength, outputBuffer, length, entry.IsSilent);
            _entryPool.Recycle(entry);
            _videoQualityController.LogGlitch(1);
        }

        // If we haven't lost any frames since the last check, pull one frame out of the queue
        // to reduce latency. This is a case where downsampling would be helpful, but we'll
        // ignore it for now.
        if (++_framesSinceLastCheck > _framesBetweenChecks && _firstPacketReceived)
        {
            // Keep a record of the queue size, so that we can know how "bad" it is when we
            // miss a packet. It's not a big deal to miss a read when the queue size is stable
            // at < 4 frames, but it's a pretty big deal when the queue size is jumping around
            // between 0 and 50.
            while (_queueSizes.Count > maxQueueSizeEntries)
            {
                _queueSizes.RemoveAt(0);
            }
            _queueSizes.Add(_queue.Count);

            if (_framesLostSinceLastCheck == 0 && _queue.Count > queuedFramesTargetMin)
            {
                // Discard (decode-and-drop) one frame to shrink the queue.
                var entry = _queue.Dequeue();
                AudioDecoder = _codecFactory.GetAudioDecoder(entry.AudioCodecType);
                AudioDecoder.Decode(entry.Frame, 0, entry.DataLength, outputBuffer, length, entry.IsSilent);
                _entryPool.Recycle(entry);
                if (_framesBetweenChecks > framesBetweenChecksMin)
                {
                    _framesBetweenChecks -= goodTrafficAdjustment; // Speed up (slightly) the rate at which we can decrease the queue size.
                }
                _logger.LogQueueReduced();
            }

            _framesLostSinceLastCheck = 0;
            _framesSinceLastCheck = 0;
        }

        // Calculate the number of packets we should retrieve.
        // Here's the logic. Let's say that we're only reading packets every 23.3 milliseconds
        // instead of every 20 milliseconds. This means that for about 3.3/20 = 16.5% of the
        // reads, we actually need to request *two* packets. So each time we read, we add .165
        // to a counter, and then take its floor. As soon as the counter floor rolls over to a
        // new integer, we know that we need to read a second packet.
        // Unfortunately, dammit, it doesn't look like this works. We'll need to create a better
        // approach, presumably using a resampler.
        //_doubleReadCounter += _logger.OverageRatio;
        //int packetsToRead = 1;
        //var newDoubleReadFloor = (int)Math.Floor(_doubleReadCounter);
        //if (newDoubleReadFloor > _doubleReadFloor)
        //{
        //    packetsToRead += newDoubleReadFloor - _doubleReadFloor;
        //    _doubleReadFloor = newDoubleReadFloor;
        //    _logger.LogMultipleRead();
        //}
        //for (int i = 0; i < packetsToRead; i++)
        {
            if (_queue.Count > 0)
            {
                // If we have anything in the queue, fulfill the request.
                var entry = _queue.Dequeue();
                isSilent = entry.IsSilent;
                _lastSequenceNumberRead = entry.SequenceNumber;
                AudioDecoder = _codecFactory.GetAudioDecoder(entry.AudioCodecType);
                length += AudioDecoder.Decode(entry.Frame, 0, entry.DataLength, outputBuffer, length, entry.IsSilent);
                _entryPool.Recycle(entry);
                _firstPacketReceived = true;
            }
            else
            {
                // Record the fact that we missed a read, so the rest of the system can adjust.
                _logger.LogQueueEmpty();
                if (_firstPacketReceived)
                {
                    // Penalize quality in proportion to how unstable the queue size has been.
                    double stdDev = DspHelper.GetStandardDeviation(_queueSizes);
                    _videoQualityController.LogGlitch((int)Math.Floor(stdDev) + 1);
                }

                // If the frame hasn't arrived yet, let the last audio codec interpolate the
                // missing packet. Most likely, the additional frames will arrive in a bunch
                // by the time the next read happens. We may want to investigate our own
                // upsampling algorithm at some point.
                length += AudioDecoder.Decode(null, 0, 0, outputBuffer, length, true);

                if (_framesBetweenChecks < framesBetweenChecksMax && _firstPacketReceived)
                {
                    _framesBetweenChecks += badTrafficAdjustment; // Slow down (substantially) the rate at which we decrease the queue size.
                }
            }
        }
    }

    _logger.LogRead(_queue, _framesBetweenChecks, isSilent);
    return (length);
}
/// <summary>
/// Decodes and returns the full signal from the underlying audio decoder.
/// </summary>
/// <returns>The decoded signal.</returns>
public Signal ObterSinal()
{
    return _audioDecoder.Decode();
}