public EncoderPipeline([NotNull] WaveFormat inputFormat, [NotNull] IVoiceEncoder encoder, [NotNull] ICommsNetwork net)
{
    if (inputFormat == null)
    {
        throw new ArgumentNullException("inputFormat");
    }
    if (encoder == null)
    {
        throw new ArgumentNullException("encoder");
    }
    if (net == null)
    {
        throw new ArgumentNullException("net");
    }

    _net = net;
    _inputFormat = inputFormat;
    _encoder = new ReadonlyLockedValue<IVoiceEncoder>(encoder);

    //Create buffers to store the encoder input (1 frame of floats) and output (twice the equivalent amount of bytes)
    _plainSamples = new float[encoder.FrameSize];
    _encodedBytes = new byte[encoder.FrameSize * sizeof(float) * 2];

    //Input buffer to store raw data from the microphone
    _input = new BufferedSampleProvider(_inputFormat, encoder.FrameSize * 2);

    //Resample data from microphone rate -> encoder rate
    _resampler = new Resampler(_input, encoder.SampleRate);

    //Provides encoder-sized and encoder-rate frames of data
    _output = new SampleToFrameProvider(_resampler, (uint)encoder.FrameSize);
}
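// Worked example of the buffer sizing above, assuming a hypothetical
// encoder producing 20ms frames at 48kHz (FrameSize = 960); the real
// numbers depend on the IVoiceEncoder implementation in use.
int frameSize = 960;                                         // 20ms * 48000Hz
var plainSamples = new float[frameSize];                     // one frame of input samples
var encodedBytes = new byte[frameSize * sizeof(float) * 2];  // 7680 bytes; a generous upper bound for the encoded output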
public NavigationService(IFrameProvider frameProvider, IViewModelBinder viewModelBinder)
{
    frame = frameProvider.CurrentFrame;
    frame.Navigating += OnNavigating;
    frame.Navigated += OnNavigated;

    this.viewModelBinder = viewModelBinder;
}
public PointCloud()
{
    InitializeComponent();
    Closed += PointCloud_Closed;

    //pointCloudWriter = new HttpPointCloudWriter();             // This will transmit the processed point cloud over HTTP
    //pointCloudWriter = new MeshGeometryPointCloudWriter(Mesh); // This will print the processed point cloud on the 3D mesh
    pointCloudWriter = new ImagePointCloudWriter(DepthImage);    // This will print the processed point cloud in the Depth image control
    //pointCloudWriter = new SocketCloudWriter(DepthImage);

    //frameProvider = new KinnectFrameProvider();
    frameProvider = new RecordingFrameProvider(@"C:\Users\cgled\Desktop\ShadowWallRecording1.csv");
    frameProvider.FrameArrived += depthReader_FrameArrived;

    filters = new IPointCloudFilter[]
    {
        //new AgingFilter(),
        new GroundingFilter(),
        //new LightSourceFilter()
    };

    DataContext = this;
}
public void ConsumeFrame(IFrameProvider<IVideoFrame> provider, IVideoFrame frame)
{
    if (IsDisposed)
    {
        throw new ObjectDisposedException(nameof(FileVideoWriter));
    }
    else if (InternalVideoWriter == null)
    {
        throw new InvalidOperationException($"{nameof(FileVideoWriter)} must be started before performing this action.");
    }
    else
    {
        Mat matFrame = frame.ToMat();

        //Resize only when the incoming frame doesn't match the writer's configured resolution
        bool requireResize = frame.EncodingProperties.Resolution != EncodingProperties.Resolution;
        if (requireResize)
        {
            matFrame = matFrame.Resize(EncodingProperties.Resolution.ToOcvSize());
        }

        InternalVideoWriter.Write(matFrame);

        //Only dispose the resized copy we created; otherwise matFrame wraps the caller's frame data
        if (requireResize)
        {
            matFrame.Dispose();
        }
    }
}
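// A minimal standalone sketch of the conditional-resize pattern above,
// assuming OpenCvSharp (which the Mat/ToOcvSize usage suggests).
// WriteScaled and target are hypothetical names used for illustration.
using OpenCvSharp;

static class FrameWriting
{
    public static void WriteScaled(VideoWriter writer, Mat frame, Size target)
    {
        bool requireResize = frame.Size() != target;
        Mat toWrite = requireResize ? frame.Resize(target) : frame;

        writer.Write(toWrite);

        //Only dispose the copy we created; the caller still owns frame
        if (requireResize)
            toWrite.Dispose();
    }
}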
public LogoRecPresenter(ILogoRecView view, LogoRecViewModel model, IFrameAnalyzer analyzer, IFrameProvider frameProvider)
{
    _view = view;
    _model = model;
    _analyzer = analyzer;
    _frameProvider = frameProvider;

    _view.Bind(_model);

    _model.PropertyChanged += _model_PropertyChanged;
    _view.GoButtonPressed += _view_GoButtonPressed;
    _view.StopButtonPressed += _view_StopButtonPressed;
}
public EncoderPipeline(IMicrophoneCapture mic, IVoiceEncoder encoder, ICommsNetwork net, Func<int> channelCount)
{
    _mic = mic;
    _encoder = encoder;
    _net = net;
    _channelCount = channelCount;

    _encodedBytes = new byte[encoder.FrameSize * sizeof(float)];
    _plainSamples = new float[encoder.FrameSize];
    _inputFormat = mic.Format;

    //Create an input buffer with plenty of spare space
    _input = new BufferedSampleProvider(_inputFormat, Math.Max(_encoder.FrameSize * 2, mic.FrameSize * 2));
    _resampler = new Resampler(_input, _encoder.SampleRate);

    //Whatever we did above, we need to read in frame-size chunks
    _output = new SampleToFrameProvider(_resampler, (uint)encoder.FrameSize);
}
/// <param name="micName"></param> /// <param name="source">Source to read frames from</param> private MicrophoneCapture([CanBeNull] string micName, AudioClip source) { if (source == null) { throw new ArgumentNullException("source", Log.PossibleBugMessage("capture source clip is null", "333E11A6-8026-41EB-9B34-EF9ADC54B651")); } _micName = micName; _clip = source; var captureFormat = new WaveFormat(1, source.frequency); _maxReadBufferPower = (byte)Math.Ceiling(Math.Log(0.1f * source.frequency, 2)); _preprocessing = new WebRtcPreprocessingPipeline(captureFormat); _preprocessing.Start(); //Ensure we have enough buffer size to contain several input frames to the preprocessor _rawMicSamples = new BufferedSampleProvider(captureFormat, _preprocessing.InputFrameSize * 4); _rawMicFrames = new SampleToFrameProvider(_rawMicSamples, (uint)_preprocessing.InputFrameSize); Log.Info("Began mic capture (SampleRate:{0} FrameSize:{1}, Buffer Limit:2^{2})", captureFormat.SampleRate, _preprocessing.InputFrameSize, _maxReadBufferPower); }
public MainForm(IFrameProvider provider) : this()
{
    this.provider = provider;
}
/// <summary>
/// Initializes a new instance of the <see cref="EntitySerializer"/> class.
/// </summary>
/// <param name="contextProvider">The JSON-LD @context provider.</param>
/// <param name="frameProvider">The JSON-LD frame provider.</param>
public EntitySerializer(IContextProvider contextProvider, IFrameProvider frameProvider)
    : this(new ContextResolver(contextProvider))
{
    this.frameProvider = frameProvider;
}
public SerializerTestContext()
{
    _contextProvider = A.Fake<IContextProvider>();
    _frameProvider = A.Fake<IFrameProvider>();
    _serializer = new EntitySerializer(_contextProvider, _frameProvider);
}
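// A minimal sketch of how a test might drive these fakes with FakeItEasy
// (assumes the FakeItEasy, Xunit and Newtonsoft.Json.Linq namespaces).
// GetContext, Serialize and Person are hypothetical names used purely for
// illustration; the real IContextProvider/EntitySerializer members may differ.
[Fact]
public void UsesTheContextProviderWhenSerializing()
{
    A.CallTo(() => _contextProvider.GetContext(typeof(Person)))
        .Returns(JToken.Parse("{ '@context': 'http://schema.org/' }"));

    _serializer.Serialize(new Person());

    A.CallTo(() => _contextProvider.GetContext(typeof(Person)))
        .MustHaveHappened();
}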
public virtual WaveFormat StartCapture(string inputMicName)
{
    //Sanity checks
    Log.AssertAndThrowPossibleBug(_clip == null, "1BAD3E74-B451-4B7D-A9B9-35225BE55364", "Attempted to Start microphone capture, but capture is already running");

    //Early exit if there are no microphones connected
    if (Log.AssertAndLogWarn(Microphone.devices.Length > 0, "No microphone detected; disabling voice capture"))
        return null;

    //Check the mic name and default to null if it's invalid (all whitespace or not a known device)
    _micName = ChooseMicName(inputMicName);

    //Get device capabilities and choose a sample rate as close to 48000Hz as possible.
    //If min and max are both zero that indicates we can use any sample rate
    int minFreq;
    int maxFreq;
    Microphone.GetDeviceCaps(_micName, out minFreq, out maxFreq);
    var sampleRate = minFreq == 0 && maxFreq == 0 ? 48000 : Mathf.Clamp(48000, minFreq, maxFreq);

    Log.Debug("GetDeviceCaps name=`{0}` min=`{1}` max=`{2}`", _micName, minFreq, maxFreq);

    //Get the AudioClip from Unity for this microphone (with a fairly large internal buffer)
    _clip = Microphone.Start(_micName, true, 10, sampleRate);
    if (_clip == null)
    {
        Log.Error("Failed to start microphone capture");
        return null;
    }

    //Setup buffers for capture
    _format = new WaveFormat(_clip.frequency, 1);
    _maxReadBufferPower = (byte)Math.Ceiling(Math.Log(0.1f * _clip.frequency, 2));

    //Create/resize the audio buffers to contain 20ms frames of data. Any frame size will work (the pipeline will
    //buffer/split them as necessary) but 20ms is optimal because that's the native frame size the preprocessor
    //works at, so it has to do no extra work to assemble the frames at its desired size.
    var frameSize = (int)(0.02 * _clip.frequency);
    if (_rawMicSamples == null || _rawMicSamples.WaveFormat != _format || _rawMicSamples.Capacity != frameSize || _rawMicFrames.FrameSize != frameSize)
    {
        _rawMicSamples = new BufferedSampleProvider(_format, frameSize * 4);
        _rawMicFrames = new SampleToFrameProvider(_rawMicSamples, (uint)frameSize);
    }
    if (_frame == null || _frame.Length != frameSize)
        _frame = new float[frameSize];

    //Watch for device changes - we need to reset if the audio device changes
    AudioSettings.OnAudioConfigurationChanged += OnAudioDeviceChanged;
    _audioDeviceChanged = false;

    //Reset subscribers to prepare them for another stream of data
    for (var i = 0; i < _subscribers.Count; i++)
        _subscribers[i].Reset();

    Latency = TimeSpan.FromSeconds(frameSize / (float)_format.SampleRate);

    Log.Info("Began mic capture (SampleRate:{0}Hz, FrameSize:{1}, Buffer Limit:2^{2}, Latency:{3}ms, Device:'{4}')",
        _clip.frequency, frameSize, _maxReadBufferPower, Latency.TotalMilliseconds, _micName);

    return _format;
}
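// Worked example of the frame sizing above, assuming the clip reports the
// requested 48000Hz (Unity may pick a different rate, which is why the code
// reads back _clip.frequency rather than trusting the request):
int clipFrequency = 48000;
var frameSize20Ms = (int)(0.02 * clipFrequency);                          // 960 samples per 20ms frame
var bufferCapacity = frameSize20Ms * 4;                                   // _rawMicSamples holds 4 frames (80ms)
var latency = TimeSpan.FromSeconds(frameSize20Ms / (float)clipFrequency); // 20ms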