public int Read(byte[] buffer, int offset, int count)
{
    if (this.transform == null)
    {
        this.transform = this.CreateTransform();
        this.InitializeTransformForStreaming();
    }

    int i = 0;
    if (this.outputBufferCount > 0)
    {
        i += this.ReadFromOutputBuffer(buffer, offset, count - i);
    }

    while (i < count)
    {
        IMFSample iMFSample = this.ReadFromSource();
        if (iMFSample == null)
        {
            this.EndStreamAndDrain();
            i += this.ReadFromOutputBuffer(buffer, offset + i, count - i);
            break;
        }

        if (!this.initializedForStreaming)
        {
            this.InitializeTransformForStreaming();
        }

        this.transform.ProcessInput(0, iMFSample, 0);
        Marshal.ReleaseComObject(iMFSample);
        this.ReadFromTransform();
        i += this.ReadFromOutputBuffer(buffer, offset + i, count - i);
    }

    return i;
}
/// <summary>
/// Disposes the <see cref="MFResampler"/> object.
/// </summary>
public void Dispose()
{
    if (this.resampler != null)
    {
        Marshal.ReleaseComObject(this.resampler);
        this.resampler = null;
    }

    if (this.inputBuffer != null)
    {
        Marshal.ReleaseComObject(this.inputBuffer);
        this.inputBuffer = null;
    }

    if (this.inputSample != null)
    {
        Marshal.ReleaseComObject(this.inputSample);
        this.inputSample = null;
    }

    if (this.outputBuffer != null)
    {
        Marshal.ReleaseComObject(this.outputBuffer);
        this.outputBuffer = null;
    }

    if (this.outputSample != null)
    {
        Marshal.ReleaseComObject(this.outputSample);
        this.outputSample = null;
    }
}
public IMFAttributes GetVideoAttributes()
{
    IMFAttributes ia = null;

    if (m_VideoNode != null)
    {
        HResult hr;
        object o;

        hr = m_VideoNode.GetObject(out o);
        if (Succeeded(hr) && o != null)
        {
            IMFTransform t = o as IMFTransform;
            if (t != null)
            {
                hr = t.GetAttributes(out ia);
                //SafeRelease(t);
            }
            //SafeRelease(o);
        }
    }

    return ia;
}
private void CreateH264Decoder()
{
    HResult hr;

    // create H.264 decoder
    var comobject = new ResamplerMediaComObject();
    decodertransform = (IMFTransform)comobject;

    // set up the input media type for the decoder manually
    MFExtern.MFCreateMediaType(out decinputmediatype);
    IMFMediaType testdecinputmediatype, testdecoutputmediatype;
    decinputmediatype.SetGUID(MFAttributesClsid.MF_MT_MAJOR_TYPE, MFMediaType.Video);
    decinputmediatype.SetGUID(MFAttributesClsid.MF_MT_SUBTYPE, MFMediaType.H264);
    decinputmediatype.SetUINT32(MFAttributesClsid.MF_MT_INTERLACE_MODE, (int)MFVideoInterlaceMode.Progressive);
    MFExtern.MFSetAttributeSize(decinputmediatype, MFAttributesClsid.MF_MT_FRAME_SIZE, VIDEO_SAMPLE_WIDTH, VIDEO_SAMPLE_HEIGHT);

    // fixed sample size for the Y, U and V planes (Y height rounded up to a multiple of 16)
    uint fixedSampleSize = VIDEO_SAMPLE_WIDTH * (16 * ((VIDEO_SAMPLE_HEIGHT + 15) / 16))
                         + VIDEO_SAMPLE_WIDTH * (VIDEO_SAMPLE_HEIGHT / 2);
    decinputmediatype.SetUINT32(MFAttributesClsid.MF_MT_SAMPLE_SIZE, fixedSampleSize);
    decinputmediatype.SetUINT32(MFAttributesClsid.MF_MT_DEFAULT_STRIDE, VIDEO_SAMPLE_WIDTH);
    decinputmediatype.SetUINT32(MFAttributesClsid.MF_MT_FIXED_SIZE_SAMPLES, 1);
    decinputmediatype.SetUINT32(MFAttributesClsid.MF_MT_ALL_SAMPLES_INDEPENDENT, 1);
    MFExtern.MFSetAttributeRatio(decinputmediatype, MFAttributesClsid.MF_MT_PIXEL_ASPECT_RATIO, 1, 1);
    hr = decodertransform.SetInputType(0, decinputmediatype, 0);
    decodertransform.GetInputAvailableType(0, 0, out testdecinputmediatype);

    // set up the media type for the decoder output
    MFExtern.MFCreateMediaType(out decoutputmediatype);
    decoutputmediatype.SetGUID(MFAttributesClsid.MF_MT_MAJOR_TYPE, MFMediaType.Video);
    decoutputmediatype.SetGUID(MFAttributesClsid.MF_MT_SUBTYPE, MFMediaType.IYUV);
    MFExtern.MFSetAttributeSize(decoutputmediatype, MFAttributesClsid.MF_MT_FRAME_SIZE, VIDEO_SAMPLE_WIDTH, VIDEO_SAMPLE_HEIGHT);
    MFExtern.MFSetAttributeRatio(decoutputmediatype, MFAttributesClsid.MF_MT_FRAME_RATE, 30, 1);
    MFExtern.MFSetAttributeRatio(decoutputmediatype, MFAttributesClsid.MF_MT_PIXEL_ASPECT_RATIO, 1, 1);
    decoutputmediatype.SetUINT32(MFAttributesClsid.MF_MT_INTERLACE_MODE, (int)MFVideoInterlaceMode.Progressive);
    hr = decodertransform.SetOutputType(0, decoutputmediatype, 0);
    decodertransform.GetOutputAvailableType(0, 0, out testdecoutputmediatype);

    decodertransform.GetInputStatus(0, out mftStatus);
    if (mftStatus != MFTInputStatusFlags.AcceptData)
    {
        Debug.WriteLine("Decoder does not accept input data");
        return;
    }
    else
    {
        Debug.WriteLine("Decoder ready to accept input >>>> " + mftStatus);
    }

    decodertransform.ProcessMessage(MFTMessageType.CommandFlush, (IntPtr)null);
    decodertransform.ProcessMessage(MFTMessageType.NotifyBeginStreaming, (IntPtr)null);
    decodertransform.ProcessMessage(MFTMessageType.NotifyStartOfStream, (IntPtr)null);
}
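// Hedged companion sketch (not part of the original source): after the setup above,
// decoded frames are pulled with a ProcessOutput loop. This decoder does not allocate
// output samples for the caller, so we supply the sample/buffer pair ourselves; the
// method name DrainDecodedFrame and the reuse of fixedSampleSize as the buffer size
// are assumptions for illustration.
private IMFSample DrainDecodedFrame(uint fixedSampleSize)
{
    IMFSample outSample;
    MFExtern.MFCreateSample(out outSample);
    IMFMediaBuffer outBuffer;
    MFExtern.MFCreateMemoryBuffer((int)fixedSampleSize, out outBuffer);
    outSample.AddBuffer(outBuffer);

    var outputBuffers = new MFTOutputDataBuffer[1];
    outputBuffers[0].pSample = Marshal.GetIUnknownForObject(outSample);

    ProcessOutputStatus status;
    HResult hr = decodertransform.ProcessOutput(MFTProcessOutputFlags.None, 1, outputBuffers, out status);
    if (hr == HResult.MF_E_TRANSFORM_NEED_MORE_INPUT)
    {
        // The decoder needs more H.264 input before it can emit a frame.
        Marshal.ReleaseComObject(outSample);
        return null;
    }

    return outSample;
}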
/// <summary>
/// Removes an effect that was added with the IMFPMediaPlayer.InsertEffect method.
/// </summary>
/// <param name="mediaPlayer">A valid IMFPMediaPlayer instance.</param>
/// <param name="transform">The Media Foundation transform (MFT) previously added.</param>
/// <returns>If this function succeeds, it returns S_OK. Otherwise, it returns the HResult member that describes the error.</returns>
public static HResult RemoveEffect(this IMFPMediaPlayer mediaPlayer, IMFTransform transform)
{
    if (mediaPlayer == null)
    {
        throw new ArgumentNullException("mediaPlayer");
    }

    return mediaPlayer.RemoveEffect(transform);
}
/// <summary>
/// Inserts a video effect.
/// </summary>
/// <param name="mediaEngine">A valid IMFMediaEngineEx instance.</param>
/// <param name="transform">A Media Foundation transform (MFT) that implements the video effect.</param>
/// <param name="optional">
/// <list type="bullet">
/// <item>
/// <term>True</term>
/// <description>The effect is optional. If the Media Engine cannot add the effect, it ignores the effect and continues playback.</description>
/// </item>
/// <item>
/// <term>False</term>
/// <description>The effect is required. If the Media Engine object cannot add the effect, a playback error occurs.</description>
/// </item>
/// </list>
/// </param>
/// <returns>If this function succeeds, it returns S_OK. Otherwise, it returns the HResult member that describes the error.</returns>
public static HResult InsertVideoEffect(this IMFMediaEngineEx mediaEngine, IMFTransform transform, bool optional)
{
    if (mediaEngine == null)
    {
        throw new ArgumentNullException("mediaEngine");
    }

    return mediaEngine.InsertVideoEffect(transform, optional);
}
public static HResult SetObject(this IMFTopologyNode topologyNode, IMFTransform transform)
{
    if (topologyNode == null)
    {
        throw new ArgumentNullException("topologyNode");
    }

    return topologyNode.SetObject(transform);
}
/// <summary>
/// Disposes the <see cref="WasapiCaptureClient"/> object.
/// </summary>
public void Dispose()
{
    if (this.captureThread != null)
    {
        this.shutdownEvent.Set();
        this.captureThread.Join();
        this.captureThread = null;
    }

    if (this.shutdownEvent != null)
    {
        this.shutdownEvent.Close();
        this.shutdownEvent = null;
    }

    if (this.audioClient != null)
    {
        Marshal.ReleaseComObject(this.audioClient);
        this.audioClient = null;
    }

    if (this.captureClient != null)
    {
        Marshal.ReleaseComObject(this.captureClient);
        this.captureClient = null;
    }

    if (this.resampler != null)
    {
        Marshal.ReleaseComObject(this.resampler);
        this.resampler = null;
    }

    if (this.inputBuffer != null)
    {
        Marshal.ReleaseComObject(this.inputBuffer);
        this.inputBuffer = null;
    }

    if (this.inputSample != null)
    {
        Marshal.ReleaseComObject(this.inputSample);
        this.inputSample = null;
    }

    if (this.outputBuffer != null)
    {
        Marshal.ReleaseComObject(this.outputBuffer);
        this.outputBuffer = null;
    }

    if (this.outputSample != null)
    {
        Marshal.ReleaseComObject(this.outputSample);
        this.outputSample = null;
    }
}
/// <summary>
/// Reads data out of the source, passing it through the transform
/// </summary>
/// <param name="buffer">Output buffer</param>
/// <param name="offset">Offset within buffer to write to</param>
/// <param name="count">Desired byte count</param>
/// <returns>Number of bytes read</returns>
public int Read(byte[] buffer, int offset, int count)
{
    if (transform == null)
    {
        transform = CreateTransform();
        InitializeTransformForStreaming();
    }

    // strategy will be to always read 1 second from the source, and give it to the resampler
    int bytesWritten = 0;

    // read in any leftovers from last time
    if (outputBufferCount > 0)
    {
        bytesWritten += ReadFromOutputBuffer(buffer, offset, count - bytesWritten);
    }

    while (bytesWritten < count)
    {
        var sample = ReadFromSource();
        if (sample == null) // reached the end of our input
        {
            // be good citizens and send some end messages:
            EndStreamAndDrain();
            // resampler might have given us a little bit more to return
            bytesWritten += ReadFromOutputBuffer(buffer, offset + bytesWritten, count - bytesWritten);
            ClearOutputBuffer();
            break;
        }

        // might need to resurrect the stream if the user has read all the way to the end,
        // and then repositioned the input backwards
        if (!initializedForStreaming)
        {
            InitializeTransformForStreaming();
        }

        // give the input to the resampler
        // can get MF_E_NOTACCEPTING if we didn't drain the buffer properly
        transform.ProcessInput(0, sample, 0);

        Marshal.ReleaseComObject(sample);

        int readFromTransform;
        // n.b. in theory we ought to loop here, although we'd need to be careful as the next time into ReadFromTransform there could
        // still be some leftover bytes in outputBuffer, which would get overwritten. Only introduce this if we find a transform that
        // needs it. For most transforms, alternating read/write should be OK
        //do
        //{
        // keep reading from transform
        readFromTransform = ReadFromTransform();
        bytesWritten += ReadFromOutputBuffer(buffer, offset + bytesWritten, count - bytesWritten);
        //} while (readFromTransform > 0);
    }

    return bytesWritten;
}
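// For context, a hedged sketch of what the EndStreamAndDrain call above typically does
// in NAudio-style code (the real implementation may differ in detail): notify end of
// stream, ask the MFT to drain, pull whatever output remains, then reset the streaming
// state so the transform can be restarted after a reposition.
private void EndStreamAndDrainSketch()
{
    transform.ProcessMessage(MFT_MESSAGE_TYPE.MFT_MESSAGE_NOTIFY_END_OF_STREAM, IntPtr.Zero);
    transform.ProcessMessage(MFT_MESSAGE_TYPE.MFT_MESSAGE_COMMAND_DRAIN, IntPtr.Zero);

    // keep reading until the transform reports it has nothing left to give us
    int read;
    do
    {
        read = ReadFromTransform();
    } while (read > 0);

    transform.ProcessMessage(MFT_MESSAGE_TYPE.MFT_MESSAGE_NOTIFY_END_STREAMING, IntPtr.Zero);
    initializedForStreaming = false;
}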
/// <summary>
/// Applies an audio or video effect to playback.
/// </summary>
/// <param name="mediaPlayer">A valid IMFPMediaPlayer instance.</param>
/// <param name="transform">A Media Foundation transform (MFT) that implements the effect.</param>
/// <param name="optional">
/// <list type="bullet">
/// <item>
/// <term>True</term>
/// <description>The effect is optional. If the MFPlay player object cannot add the effect, it ignores the effect and continues playback.</description>
/// </item>
/// <item>
/// <term>False</term>
/// <description>The effect is required. If the MFPlay player object cannot add the effect, a playback error occurs.</description>
/// </item>
/// </list>
/// </param>
/// <returns>If this function succeeds, it returns S_OK. Otherwise, it returns the HResult member that describes the error.</returns>
public static HResult InsertEffect(this IMFPMediaPlayer mediaPlayer, IMFTransform transform, bool optional)
{
    if (mediaPlayer == null)
    {
        throw new ArgumentNullException("mediaPlayer");
    }

    return mediaPlayer.InsertEffect(transform, optional);
}
public H264Decoder()
{
    var obj = Activator.CreateInstance(Type.GetTypeFromCLSID(CLSID_CMSH264DecoderMFT));
    if (obj == null)
    {
        // todo: warn that no MFT is available
    }
    pDecoderTransform = obj as IMFTransform;

    // Create H.264 decoder (the original C++ equivalent):
    //CHECK_HR(CoCreateInstance(CLSID_CMSH264DecoderMFT, NULL, CLSCTX.CLSCTX_INPROC_SERVER, IID_IUnknown, out spDecTransformUnk), "Failed to create H264 decoder MFT.\n");
    //CHECK_HR(spDecTransformUnk.QueryInterface(IID_PPV_ARGS(&pDecoderTransform)), "Failed to get IMFTransform interface from H264 decoder MFT object.\n");
}
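// For reference, a hedged sketch of the CLSID field used above. This is the standard
// value of CLSID_CMSH264DecoderMFT from wmcodecdsp.h; verify it against your SDK
// headers before relying on it.
private static readonly Guid CLSID_CMSH264DecoderMFT =
    new Guid("62CE7E72-4C71-4D20-B15D-452831A87D9D");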
public static ComObject<IMFAttributes> GetAttributes(this IMFTransform input)
{
    if (input == null)
    {
        throw new ArgumentNullException(nameof(input));
    }

    if (input.GetAttributes(out var atts).IsError)
    {
        return null;
    }

    return new ComObject<IMFAttributes>(atts);
}
protected override IMFTransform CreateTransform()
{
    object obj = this.CreateResamplerComObject();
    IMFTransform transform = (IMFTransform)obj;

    IMFMediaType inputMediaType = MediaFoundationApi.CreateMediaTypeFromWaveFormat(this.sourceProvider.WaveFormat);
    transform.SetInputType(0, inputMediaType, _MFT_SET_TYPE_FLAGS.None);
    Marshal.ReleaseComObject(inputMediaType);

    IMFMediaType outputMediaType = MediaFoundationApi.CreateMediaTypeFromWaveFormat(this.outputWaveFormat);
    transform.SetOutputType(0, outputMediaType, _MFT_SET_TYPE_FLAGS.None);
    Marshal.ReleaseComObject(outputMediaType);

    ((IWMResamplerProps)obj).SetHalfFilterLength(this.ResamplerQuality);
    return transform;
}
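// Usage sketch: the method above appears to be decompiled from NAudio's
// MediaFoundationResampler, where ResamplerQuality feeds
// IWMResamplerProps.SetHalfFilterLength. Valid values are 1 (linear
// interpolation, cheapest) through 60 (best quality); sourceStream and
// outFormat here are assumed to exist in the caller.
var resampler = new MediaFoundationResampler(sourceStream, outFormat)
{
    ResamplerQuality = 60 // highest quality
};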
public static HResult GetObject(this IMFTopologyNode topologyNode, out IMFTransform transform)
{
    if (topologyNode == null)
    {
        throw new ArgumentNullException("topologyNode");
    }

    object tmp;
    HResult hr = topologyNode.GetObject(out tmp);
    transform = hr.Succeeded() ? tmp as IMFTransform : null;
    return hr;
}
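// Hedged usage sketch for the extension above: topologyNode is assumed to be a
// transform node that was previously configured via the SetObject extension.
IMFTransform nodeTransform;
HResult hr = topologyNode.GetObject(out nodeTransform);
if (hr.Succeeded() && nodeTransform != null)
{
    // ... use the transform ...
    Marshal.ReleaseComObject(nodeTransform);
}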
/// <summary>
/// Initialize the resampler.
/// </summary>
/// <param name="targetLatencyInMs">
/// The target maximum number of milliseconds of acceptable lag between
/// input and resampled output audio samples.
/// </param>
/// <param name="inFormat">
/// The input format of the audio to be resampled.
/// </param>
/// <param name="outFormat">
/// The output format of the resampled audio.
/// </param>
/// <param name="callback">
/// Callback delegate which will receive the resampled data.
/// </param>
public void Initialize(int targetLatencyInMs, WaveFormat inFormat, WaveFormat outFormat, AudioDataAvailableCallback callback)
{
    // Buffer sizes are calculated from the target latency.
    this.bufferLengthInMs = targetLatencyInMs;
    this.inputBytesPerSecond = (int)inFormat.AvgBytesPerSec;
    this.inputBufferSize = (int)(this.bufferLengthInMs * inFormat.AvgBytesPerSec / 1000);
    this.outputBufferSize = (int)(this.bufferLengthInMs * outFormat.AvgBytesPerSec / 1000);

    // Activate native Media Foundation COM objects on a thread-pool thread to ensure that they are in an MTA
    Task.Run(() =>
    {
        DeviceUtil.CreateResamplerBuffer(this.inputBufferSize, out this.inputSample, out this.inputBuffer);
        DeviceUtil.CreateResamplerBuffer(this.outputBufferSize, out this.outputSample, out this.outputBuffer);

        // Create resampler object
        this.resampler = DeviceUtil.CreateResampler(inFormat, outFormat);
    }).Wait();

    // Set the callback function
    this.dataAvailableCallback = callback;
}
/// <summary>
/// Create a Media Foundation transform that resamples audio in the specified input format
/// into the specified output format.
/// </summary>
/// <param name="inputFormat">
/// Wave format input to the resampling operation.
/// </param>
/// <param name="outputFormat">
/// Wave format output from the resampling operation.
/// </param>
/// <returns>
/// Media transform object that will resample audio.
/// </returns>
internal static IMFTransform CreateResampler(WaveFormat inputFormat, WaveFormat outputFormat)
{
    IMFTransform resampler = null;
    IMFMediaType inputType = null;
    IMFMediaType outputType = null;

    try
    {
        resampler = (IMFTransform)new CResamplerMediaObject();
        inputType = CreateMediaType(inputFormat);
        resampler.SetInputType(0, inputType, 0);
        outputType = CreateMediaType(outputFormat);
        resampler.SetOutputType(0, outputType, 0);
    }
    finally
    {
        // Null checks guard against ReleaseComObject throwing when an earlier
        // call failed before the media type was created.
        if (inputType != null)
        {
            Marshal.ReleaseComObject(inputType);
        }

        if (outputType != null)
        {
            Marshal.ReleaseComObject(outputType);
        }
    }

    return resampler;
}
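// Hedged usage sketch for CreateResampler above (names assumed, and the message
// enum mirrors the MF.Net style used by the H.264 snippet earlier on this page):
// create the transform, send the standard begin-streaming messages, then stream
// buffers through ProcessInput/ProcessOutput, releasing the COM object when done.
IMFTransform resampler = DeviceUtil.CreateResampler(inputFormat, outputFormat);
try
{
    resampler.ProcessMessage(MFTMessageType.NotifyBeginStreaming, IntPtr.Zero);
    resampler.ProcessMessage(MFTMessageType.NotifyStartOfStream, IntPtr.Zero);
    // ... ProcessInput / ProcessOutput loop ...
}
finally
{
    Marshal.ReleaseComObject(resampler);
}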
/// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=
/// <summary>
/// Demonstrates the client/transform communications. Displays the
/// frame count in the rotator transform by calling the get accessor
/// of a property.
///
/// This function uses late binding and expects the rotator transform
/// to be instantiated.
/// </summary>
/// <history>
///    01 Nov 18  Cynic - Originally Written
/// </history>
private void buttonGetFCViaProperty_Click(object sender, EventArgs e)
{
    LogMessage("buttonGetFCViaProperty_Click called");

    // get the transform
    IMFTransform transformObject = ctlTantaEVRFilePlayer1.GetTransform();
    if (transformObject == null)
    {
        LogMessage("buttonGetFCViaProperty: transformObject == null");
        OISMessageBox("No transform object. Is the video running?");
        return;
    }

    // get the real type of the transform. This assumes it is a .NET
    // based transform - otherwise it will probably just be a generic
    // _ComObject and the code below will fail.
    Type transformObjectType = transformObject.GetType();

    // set up to invoke the FrameCountAsPropertyDemonstrator. Note that
    // we have to know the name of the property we are calling and the
    // type it takes.
    try
    {
        object frameCount = transformObjectType.InvokeMember("FrameCountAsPropertyDemonstrator",
            BindingFlags.GetProperty,
            null,
            transformObject,
            null);
        if ((frameCount is int) == true)
        {
            LogMessage("The frame count is " + frameCount.ToString());
            OISMessageBox("FrameCount=" + frameCount.ToString());
        }
    }
    catch (Exception ex)
    {
        OISMessageBox("An error occurred; please see the logfile");
        LogMessage(ex.Message);
        LogMessage(ex.StackTrace);
    }
}
protected int m_TokenCounter; // Counter. Incremented whenever we create new samples.

#endregion Fields

#region Constructors

/// <summary>
/// Constructor
/// </summary>
public EVRCustomPresenter()
{
    if (System.Threading.Thread.CurrentThread.GetApartmentState() != System.Threading.ApartmentState.MTA)
    {
        throw new Exception("Unsupported threading model");
    }

    m_iDiscarded = 0;
    m_pClock = null;
    m_pMixer = null;
    m_pMediaEventSink = null;
    m_h2 = null;
    m_pMediaType = null;

    m_bSampleNotify = false;
    m_bRepaint = false;
    m_bEndStreaming = false;
    m_bPrerolled = false;

    m_RenderState = RenderState.Shutdown;
    m_fRate = 1.0f;
    m_TokenCounter = 0;

    m_pD3DPresentEngine = new D3DPresentEngine();
    m_FrameStep = new FrameStep(); // Frame-stepping information.

    m_nrcSource = new MFVideoNormalizedRect(0.0f, 0.0f, 1.0f, 1.0f);
    m_scheduler = new Scheduler(D3DPresentEngine.PRESENTER_BUFFER_COUNT, m_pD3DPresentEngine); // Manages scheduling of samples.
    m_SamplePool = new SamplePool(D3DPresentEngine.PRESENTER_BUFFER_COUNT); // Pool of allocated samples.

    // Force load of mf.dll now, rather than when we try to start streaming
    DllCanUnloadNow();
}
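// The apartment check above matters because the presenter creates Media Foundation
// and Direct3D objects that must live in an MTA. A hedged sketch of one way a caller
// can satisfy it (thread-pool threads are MTA, mirroring the Task.Run pattern used
// by the resampler Initialize elsewhere on this page):
EVRCustomPresenter presenter = null;
Task.Run(() => { presenter = new EVRCustomPresenter(); }).Wait();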
/// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=
/// <summary>
/// Demonstrates the client/transform communications. Displays the
/// frame count in the rotator transform by calling a function.
/// The function requires two parameters: a leading string and a ref string
/// which is the output. A boolean is returned to indicate success. The
/// frame count is appended to the user supplied leading string.
///
/// This function uses late binding and expects the rotator transform
/// to be instantiated.
/// </summary>
/// <history>
///    01 Nov 18  Cynic - Originally Written
/// </history>
private void buttonGetFCViaFunction_Click(object sender, EventArgs e)
{
    LogMessage("buttonGetFCViaFunction_Click called");

    // get the transform
    IMFTransform transformObject = ctlTantaEVRFilePlayer1.GetTransform();
    if (transformObject == null)
    {
        LogMessage("buttonGetFCViaFunction: transformObject == null");
        OISMessageBox("No transform object. Is the video running?");
        return;
    }

    // get the real type of the transform. This assumes it is a .NET
    // based transform - otherwise it will probably just be a generic
    // _ComObject and the code below will fail.
    Type transformObjectType = transformObject.GetType();

    // set up our parameters. both are strings, the second is a ref string
    object[] parameters = new object[2];
    string outText = "Unknown FrameCount";
    parameters[0] = "I just checked, the frame count is ";
    parameters[1] = outText;

    // set up our parameter modifiers. This is how we tell the InvokeMember
    // call that one of our parameters is a ref
    ParameterModifier paramMods = new ParameterModifier(2);
    paramMods[1] = true;
    ParameterModifier[] paramModifierArray = { paramMods };

    try
    {
        // set up to invoke the FrameCountAsFunctionDemonstrator. Note that
        // we have to know the name of the function we are calling, the return
        // type and its parameter types
        object retVal = transformObjectType.InvokeMember("FrameCountAsFunctionDemonstrator",
            BindingFlags.InvokeMethod,
            null,
            transformObject,
            parameters,
            paramModifierArray,
            null,
            null);
        if ((retVal is bool) == false)
        {
            LogMessage("buttonGetFCViaFunction_Click: call to FrameCountAsFunctionDemonstrator failed.");
            OISMessageBox("call to FrameCountAsFunctionDemonstrator failed.");
            return;
        }
    }
    catch (Exception ex)
    {
        OISMessageBox("An error occurred; please see the logfile");
        LogMessage(ex.Message);
        LogMessage(ex.StackTrace);
        return;
    }

    if (parameters[1] == null)
    {
        LogMessage("buttonGetFCViaFunction_Click: Null value returned for ref parameter.");
        OISMessageBox("Null value returned for ref parameter.");
        return;
    }

    if ((parameters[1] is string) == false)
    {
        LogMessage("buttonGetFCViaFunction_Click: Reference value is not a string");
        OISMessageBox("Reference value is not a string.");
        return;
    }

    LogMessage("buttonGetFCViaFunction_Click: " + (parameters[1] as string));
    OISMessageBox(parameters[1] as string);
}
protected void SetMixerSourceRect(IMFTransform pMixer, MFVideoNormalizedRect nrcSource)
{
    if (pMixer == null)
    {
        throw new COMException("SetMixerSourceRect", E_Pointer);
    }

    int hr;
    IMFAttributes pAttributes = null;

    hr = pMixer.GetAttributes(out pAttributes);
    MFError.ThrowExceptionForHR(hr);

    Utils.MFSetBlob(pAttributes, MFAttributesClsid.VIDEO_ZOOM_RECT, nrcSource);

    SafeRelease(pAttributes);
    pAttributes = null;
}
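// Hedged usage sketch for SetMixerSourceRect above: MFVideoNormalizedRect coordinates
// are normalized to [0,1] relative to the source frame, so this zooms into the
// top-left quadrant. m_pMixer is assumed to be the EVR mixer obtained in
// InitServicePointers below.
SetMixerSourceRect(m_pMixer, new MFVideoNormalizedRect(0.0f, 0.0f, 0.5f, 0.5f));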
//public void InitServicePointers(IMFTopologyServiceLookup pLookup)
public int InitServicePointers(IntPtr p1Lookup)
{
    // Make sure we *never* leave this entry point with an exception
    try
    {
        TRACE("InitServicePointers");

        int hr;
        int dwObjectCount = 0;
        IMFTopologyServiceLookup pLookup = null;
        IHack h1 = (IHack)new Hack();

        try
        {
            h1.Set(p1Lookup, typeof(IMFTopologyServiceLookup).GUID, true);
            pLookup = (IMFTopologyServiceLookup)h1;

            lock (this)
            {
                // Do not allow initializing when playing or paused.
                if (IsActive())
                {
                    throw new COMException("EVRCustomPresenter::InitServicePointers", MFError.MF_E_INVALIDREQUEST);
                }

                SafeRelease(m_pClock);
                m_pClock = null;
                SafeRelease(m_pMixer);
                m_pMixer = null;
                SafeRelease(m_h2);
                m_h2 = null;
                m_pMediaEventSink = null; // SafeRelease(m_pMediaEventSink);

                dwObjectCount = 1;
                object[] o = new object[1];

                try
                {
                    // Ask for the clock. Optional, because the EVR might not have a clock.
                    hr = pLookup.LookupService(
                        MFServiceLookupType.Global,         // Not used.
                        0,                                  // Reserved.
                        MFServices.MR_VIDEO_RENDER_SERVICE, // Service to look up.
                        typeof(IMFClock).GUID,              // Interface to look up.
                        o,
                        ref dwObjectCount                   // Number of elements in the previous parameter.
                        );
                    MFError.ThrowExceptionForHR(hr);
                    m_pClock = (IMFClock)o[0];
                }
                catch { }

                // Ask for the mixer. (Required.)
                dwObjectCount = 1;
                hr = pLookup.LookupService(
                    MFServiceLookupType.Global,
                    0,
                    MFServices.MR_VIDEO_MIXER_SERVICE,
                    typeof(IMFTransform).GUID,
                    o,
                    ref dwObjectCount
                    );
                MFError.ThrowExceptionForHR(hr);
                m_pMixer = (IMFTransform)o[0];

                // Make sure that we can work with this mixer.
                ConfigureMixer(m_pMixer);

                // Ask for the EVR's event-sink interface. (Required.)
                dwObjectCount = 1;
                IMFTopologyServiceLookupAlt pLookup2 = (IMFTopologyServiceLookupAlt)pLookup;
                IntPtr[] p2 = new IntPtr[1];
                hr = pLookup2.LookupService(
                    MFServiceLookupType.Global,
                    0,
                    MFServices.MR_VIDEO_RENDER_SERVICE,
                    typeof(IMediaEventSink).GUID,
                    p2,
                    ref dwObjectCount
                    );
                MFError.ThrowExceptionForHR(hr);

                m_h2 = (IHack)new Hack();
                m_h2.Set(p2[0], typeof(IMediaEventSink).GUID, false);
                m_pMediaEventSink = (IMediaEventSink)m_h2;

                // Successfully initialized. Set the state to "stopped."
                m_RenderState = RenderState.Stopped;
            }
        }
        finally
        {
            SafeRelease(h1);
        }

        return S_Ok;
    }
    catch (Exception e)
    {
        return Marshal.GetHRForException(e);
    }
}
public int ReleaseServicePointers()
{
    // Make sure we *never* leave this entry point with an exception
    try
    {
        TRACE("ReleaseServicePointers");

        // Enter the shut-down state.
        lock (this)
        {
            m_RenderState = RenderState.Shutdown;
        }

        // Flush any samples that were scheduled.
        Flush();

        // Clear the media type and release related resources (surfaces, etc).
        SetMediaType(null);

        // Release all services that were acquired from InitServicePointers.
        SafeRelease(m_pClock);
        m_pClock = null;
        SafeRelease(m_pMixer);
        m_pMixer = null;
        SafeRelease(m_h2);
        m_h2 = null;
        m_pMediaEventSink = null; // SafeRelease(m_pMediaEventSink);

        return S_Ok;
    }
    catch (Exception e)
    {
        return Marshal.GetHRForException(e);
    }
}
// Mixer operations

protected void ConfigureMixer(IMFTransform pMixer)
{
    int hr;
    Guid deviceID = Guid.Empty;
    Guid myDeviceId;
    IMFVideoDeviceID pDeviceID = null;

    m_pD3DPresentEngine.GetDeviceID(out myDeviceId);

    try
    {
        // Make sure that the mixer has the same device ID as ourselves.
        pDeviceID = (IMFVideoDeviceID)pMixer;
        hr = pDeviceID.GetDeviceID(out deviceID);
        MFError.ThrowExceptionForHR(hr);

        if (deviceID != myDeviceId)
        {
            throw new COMException("ConfigureMixer", MFError.MF_E_INVALIDREQUEST);
        }

        // Set the zoom rectangle (ie, the source clipping rectangle).
        SetMixerSourceRect(pMixer, m_nrcSource);
    }
    finally
    {
        //SafeRelease(pDeviceID);
    }
}
// Mixer operations
protected HRESULT ConfigureMixer(IMFTransform pMixer)
{
    HRESULT hr = S_OK;

    // Make sure that the mixer has the same device ID as ourselves.
    IntPtr _mixer = Marshal.GetIUnknownForObject(pMixer);
    Guid _guid = typeof(IMFVideoDeviceID).GUID;
    IntPtr _interface;
    hr = (HRESULT)Marshal.QueryInterface(_mixer, ref _guid, out _interface);
    if (hr.Succeeded)
    {
        IMFVideoDeviceID pDeviceID = (IMFVideoDeviceID)Marshal.GetObjectForIUnknown(_interface);
        Guid deviceID = Guid.Empty;
        hr = (HRESULT)pDeviceID.GetDeviceID(out deviceID);
        if (hr.Succeeded)
        {
            if (deviceID != Device.NativeInterface)
            {
                hr = MFHelper.MF_E_INVALIDREQUEST;
            }
        }
        Marshal.Release(_interface);
    }
    Marshal.Release(_mixer);

    // Set the zoom rectangle (ie, the source clipping rectangle).
    MFHelper.SetMixerSourceRect(pMixer, m_nrcSource);

    return hr;
}
/// <summary>
/// Reads data out of the source, passing it through the transform
/// </summary>
/// <param name="buffer">Output buffer</param>
/// <param name="offset">Offset within buffer to write to</param>
/// <param name="count">Desired byte count</param>
/// <returns>Number of bytes read</returns>
public int Read(byte[] buffer, int offset, int count)
{
    if (transform == null)
    {
        transform = CreateTransform();
        InitializeTransformForStreaming();
    }

    // strategy will be to always read 1 second from the source, and give it to the resampler
    int bytesWritten = 0;

    // read in any leftovers from last time
    if (outputBufferCount > 0)
    {
        bytesWritten += ReadFromOutputBuffer(buffer, offset, count - bytesWritten);
    }

    while (bytesWritten < count)
    {
        var sample = ReadFromSource();
        if (sample == null) // reached the end of our input
        {
            // be good citizens and send some end messages:
            EndStreamAndDrain();
            // resampler might have given us a little bit more to return
            bytesWritten += ReadFromOutputBuffer(buffer, offset + bytesWritten, count - bytesWritten);
            break;
        }

        // might need to resurrect the stream if the user has read all the way to the end,
        // and then repositioned the input backwards
        if (!initializedForStreaming)
        {
            InitializeTransformForStreaming();
        }

        // give the input to the resampler
        // can get MF_E_NOTACCEPTING if we didn't drain the buffer properly
        transform.ProcessInput(0, sample, 0);

        Marshal.ReleaseComObject(sample);

        int readFromTransform;
        // n.b. in theory we ought to loop here, although we'd need to be careful as the next time into ReadFromTransform there could
        // still be some leftover bytes in outputBuffer, which would get overwritten. Only introduce this if we find a transform that
        // needs it. For most transforms, alternating read/write should be OK
        //do
        //{
        // keep reading from transform
        readFromTransform = ReadFromTransform();
        bytesWritten += ReadFromOutputBuffer(buffer, offset + bytesWritten, count - bytesWritten);
        //} while (readFromTransform > 0);
    }

    return bytesWritten;
}
/// <summary>
/// Gets an instance of a Media Foundation transform (MFT) for a specified stream.
/// </summary>
/// <param name="sourceReader">A valid IMFSourceReaderEx instance.</param>
/// <param name="streamIndex">The stream to query for the MFT.</param>
/// <param name="transformIndex">The zero-based index of the MFT to retrieve.</param>
/// <param name="guidCategory">Receives a GUID that specifies the category of the MFT.</param>
/// <param name="transform">Receives an instance of the IMFTransform interface for the MFT.</param>
/// <returns>If this function succeeds, it returns S_OK. Otherwise, it returns the HResult member that describes the error.</returns>
public static HResult GetTransformForStream(this IMFSourceReaderEx sourceReader, SourceReaderFirstStream streamIndex, int transformIndex, out Guid guidCategory, out IMFTransform transform)
{
    if (sourceReader == null)
    {
        throw new ArgumentNullException("sourceReader");
    }

    return sourceReader.GetTransformForStream((int)streamIndex, transformIndex, out guidCategory, out transform);
}
public static IEnumerable<KeyValuePair<Guid, _MF_ATTRIBUTE_TYPE>> EnumerateAttributes(this IMFTransform input)
{
    if (input == null || input.GetAttributes(out var atts).IsError)
    {
        return Enumerable.Empty<KeyValuePair<Guid, _MF_ATTRIBUTE_TYPE>>();
    }

    return atts.Enumerate();
}
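// Hedged usage sketch for EnumerateAttributes above: list every attribute the
// transform exposes. transform is assumed to be an existing IMFTransform instance.
foreach (KeyValuePair<Guid, _MF_ATTRIBUTE_TYPE> attribute in transform.EnumerateAttributes())
{
    Console.WriteLine($"{attribute.Key} ({attribute.Value})");
}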
/// <summary>
/// Initialize the capturer.
/// </summary>
/// <param name="engineLatency">
/// Number of milliseconds of acceptable lag between live sound being produced and the recording operation.
/// </param>
/// <param name="gain">
/// The gain to be applied to the audio after capture.
/// </param>
/// <param name="outFormat">
/// The format of the audio to be captured. If this is NULL, the default audio format of the
/// capture device will be used.
/// </param>
/// <param name="callback">
/// Callback function delegate which will handle the captured data.
/// </param>
/// <param name="speech">
/// If true, sets the audio category to speech to optimize the audio pipeline for speech recognition.
/// </param>
public void Initialize(int engineLatency, float gain, WaveFormat outFormat, AudioDataAvailableCallback callback, bool speech)
{
    // Create our shutdown event - we want a manual reset event that starts in the not-signaled state.
    this.shutdownEvent = new ManualResetEvent(false);

    // Now activate an IAudioClient object on our preferred endpoint and retrieve the mix format for that endpoint.
    object obj = this.endpoint.Activate(ref audioClientIID, ClsCtx.INPROC_SERVER, IntPtr.Zero);
    this.audioClient = (IAudioClient)obj;

    // The following block enables the advanced mic array APO pipeline on Windows 10 RS2 builds >= 15004.
    // This must be called before the call to GetMixFormat() in LoadFormat().
    if (speech)
    {
        IAudioClient2 audioClient2 = (IAudioClient2)this.audioClient;
        if (audioClient2 != null)
        {
            AudioClientProperties properties = new AudioClientProperties
            {
                Size = Marshal.SizeOf<AudioClientProperties>(),
                Category = AudioStreamCategory.Speech
            };

            int hr = audioClient2.SetClientProperties(ref properties);
            if (hr != 0)
            {
                Console.WriteLine("Failed to set audio stream category to AudioCategory_Speech: {0}", hr);
            }
        }
        else
        {
            Console.WriteLine("Unable to get IAudioClient2 interface");
        }
    }

    // Load the MixFormat. This may differ depending on the shared mode used.
    this.LoadFormat();

    // Remember our configured latency
    this.engineLatencyInMs = engineLatency;

    // Set the gain
    this.gain = gain;

    // Determine whether or not we need a resampler
    this.resampler = null;

    if (outFormat != null)
    {
        // Check if the desired format is supported
        IntPtr closestMatchPtr;
        IntPtr outFormatPtr = WaveFormat.MarshalToPtr(outFormat);
        int hr = this.audioClient.IsFormatSupported(AudioClientShareMode.Shared, outFormatPtr, out closestMatchPtr);

        // Free outFormatPtr to prevent leaking memory
        Marshal.FreeHGlobal(outFormatPtr);

        if (hr == 0)
        {
            // Replace mixFormat with outFormat. Since it is supported, we will initialize
            // the audio capture client with that format and capture without resampling.
            this.mixFormat = outFormat;
            this.mixFrameSize = (this.mixFormat.BitsPerSample / 8) * this.mixFormat.Channels;
        }
        else
        {
            // In all other cases, we need to resample to outFormat
            if ((hr == 1) && (closestMatchPtr != IntPtr.Zero))
            {
                // Use the closest match suggested by IsFormatSupported() and resample
                this.mixFormat = WaveFormat.MarshalFromPtr(closestMatchPtr);
                this.mixFrameSize = (this.mixFormat.BitsPerSample / 8) * this.mixFormat.Channels;

                // Free closestMatchPtr to prevent leaking memory
                Marshal.FreeCoTaskMem(closestMatchPtr);
            }

            this.inputBufferSize = (int)(this.engineLatencyInMs * this.mixFormat.AvgBytesPerSec / 1000);
            this.outputBufferSize = (int)(this.engineLatencyInMs * outFormat.AvgBytesPerSec / 1000);

            DeviceUtil.CreateResamplerBuffer(this.inputBufferSize, out this.inputSample, out this.inputBuffer);
            DeviceUtil.CreateResamplerBuffer(this.outputBufferSize, out this.outputSample, out this.outputBuffer);

            // Create resampler object
            this.resampler = DeviceUtil.CreateResampler(this.mixFormat, outFormat);
        }
    }

    this.InitializeAudioEngine();

    // Set the callback function
    this.dataAvailableCallback = callback;
}
public static bool IsBuiltinEncoder(this IMFTransform input) => input is IMFObjectInformation;
/// <summary>
/// Adds an effect to a capture stream.
/// </summary>
/// <param name="captureSource">A valid IMFCaptureSource instance.</param>
/// <param name="sourceStreamIndex">The index of the capture stream.</param>
/// <param name="transform">A Media Foundation transform instance.</param>
/// <returns>If this function succeeds, it returns S_OK. Otherwise, it returns the HResult member that describes the error.</returns>
public static HResult AddEffect(this IMFCaptureSource captureSource, int sourceStreamIndex, IMFTransform transform)
{
    if (captureSource == null)
    {
        throw new ArgumentNullException("captureSource");
    }

    return captureSource.AddEffect(sourceStreamIndex, transform);
}
/// <summary>
/// Adds an effect to a capture stream.
/// </summary>
/// <param name="captureSource">A valid IMFCaptureSource instance.</param>
/// <param name="sourceStream">A member of the <see cref="CaptureEngineStreams"/> enumeration.</param>
/// <param name="transform">A Media Foundation transform instance.</param>
/// <returns>If this function succeeds, it returns S_OK. Otherwise, it returns the HResult member that describes the error.</returns>
public static HResult AddEffect(this IMFCaptureSource captureSource, CaptureEngineStreams sourceStream, IMFTransform transform)
{
    if (captureSource == null)
    {
        throw new ArgumentNullException("captureSource");
    }

    return captureSource.AddEffect((int)sourceStream, transform);
}
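// Hedged usage sketch for the typed overload above: attach an existing MFT to the
// first video capture stream. effectTransform and the exact enum member name are
// assumptions; check the CaptureEngineStreams definition in use.
HResult hr = captureSource.AddEffect(CaptureEngineStreams.FirstSourceVideoStream, effectTransform);
if (!hr.Succeeded())
{
    // handle failure
}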
public MftWrapper(object transform)
{
    _mft = (IMFTransform)transform;
}
public static bool IsBuiltinEncoder(this IMFTransform obj) => obj is IMFObjectInformation;
/// <summary>
/// Initialize the renderer.
/// </summary>
/// <param name="engineLatency">
/// Number of milliseconds of acceptable lag between playback of samples and live sound being produced.
/// </param>
/// <param name="gain">
/// The gain to be applied to the audio before rendering.
/// </param>
/// <param name="inFormat">
/// The format of the input audio samples to be rendered. If this is NULL, the current default audio
/// format of the renderer device will be assumed.
/// </param>
/// <param name="callback">
/// Callback function delegate which will supply the data to be rendered.
/// </param>
public void Initialize(int engineLatency, float gain, WaveFormat inFormat, AudioDataRequestedCallback callback)
{
    // Create our shutdown event - we want a manual reset event that starts in the not-signaled state.
    this.shutdownEvent = new ManualResetEvent(false);

    // Now activate an IAudioClient object on our preferred endpoint and retrieve the mix format for that endpoint.
    object obj = this.endpoint.Activate(ref audioClientIID, ClsCtx.INPROC_SERVER, IntPtr.Zero);
    this.audioClient = (IAudioClient)obj;

    // Load the MixFormat. This may differ depending on the shared mode used.
    this.LoadFormat();

    // Remember our configured latency
    this.engineLatencyInMs = engineLatency;

    // Set the gain
    this.gain = gain;

    // Check if the desired format is supported
    IntPtr closestMatchPtr;
    IntPtr inFormatPtr = WaveFormat.MarshalToPtr(inFormat);
    int hr = this.audioClient.IsFormatSupported(AudioClientShareMode.Shared, inFormatPtr, out closestMatchPtr);

    // Free inFormatPtr to prevent leaking memory
    Marshal.FreeHGlobal(inFormatPtr);

    if (hr == 0)
    {
        // Replace mixFormat with inFormat. Since it is supported, we will initialize
        // the audio render client with that format and render without resampling.
        this.mixFormat = inFormat;
        this.mixFrameSize = (this.mixFormat.BitsPerSample / 8) * this.mixFormat.Channels;
    }
    else
    {
        // In all other cases, we need to resample from inFormat to the mix format
        if ((hr == 1) && (closestMatchPtr != IntPtr.Zero))
        {
            // Use the closest match suggested by IsFormatSupported() and resample
            this.mixFormat = WaveFormat.MarshalFromPtr(closestMatchPtr);
            this.mixFrameSize = (this.mixFormat.BitsPerSample / 8) * this.mixFormat.Channels;

            // Free closestMatchPtr to prevent leaking memory
            Marshal.FreeCoTaskMem(closestMatchPtr);
        }
    }

    this.inputBufferSize = (int)(this.engineLatencyInMs * inFormat.AvgBytesPerSec / 1000);
    this.outputBufferSize = (int)(this.engineLatencyInMs * this.mixFormat.AvgBytesPerSec / 1000);

    DeviceUtil.CreateResamplerBuffer(this.inputBufferSize, out this.inputSample, out this.inputBuffer);
    DeviceUtil.CreateResamplerBuffer(this.outputBufferSize, out this.outputSample, out this.outputBuffer);

    // Create resampler object
    this.resampler = DeviceUtil.CreateResampler(inFormat, this.mixFormat);

    this.InitializeAudioEngine();

    // Set the callback function
    this.dataRequestedCallback = callback;
}
/// <summary>
/// Adds a transform, such as an audio or video effect, to a stream.
/// </summary>
/// <param name="sourceReader">A valid IMFSourceReaderEx instance.</param>
/// <param name="streamIndex">The stream to configure.</param>
/// <param name="transform">An instance of a Media Foundation transform (MFT).</param>
/// <returns>If this function succeeds, it returns S_OK. Otherwise, it returns the HResult member that describes the error.</returns>
public static HResult AddTransformForStream(this IMFSourceReaderEx sourceReader, SourceReaderFirstStream streamIndex, IMFTransform transform)
{
    if (sourceReader == null)
    {
        throw new ArgumentNullException("sourceReader");
    }

    return sourceReader.AddTransformForStream((int)streamIndex, transform);
}
private void CaptureStillImages(MediaItem item)
{
    using (var releaser = new ComReleaser())
    {
        MF.CreateVideoDeviceSource(item.DeviceItem.SymLink, out IMFMediaSource source);
        releaser.Add(source);

        source.CreatePresentationDescriptor(out IMFPresentationDescriptor presDesc);
        releaser.Add(presDesc);

        presDesc.GetStreamDescriptorByIndex(item.DescIndex, out bool selected, out IMFStreamDescriptor strmDesc);
        releaser.Add(strmDesc);

        strmDesc.GetMediaTypeHandler(out IMFMediaTypeHandler handler);
        releaser.Add(handler);

        handler.GetMediaTypeByIndex(item.TypeIndex, out IMFMediaType type);
        handler.SetCurrentMediaType(type);

        MF.CreateSourceReaderFromMediaSource(source, out IMFSourceReader reader);
        if (reader == null)
        {
            return;
        }
        releaser.Add(reader);

        IMFTransform transform = null;
        MFTOutputDataBuffer[] outSamples = null;
        IMFSample outRgb24Sample = null;
        IMFMediaBuffer outRgb24Buffer = null;

        int rgbSize = item.Width * item.Height * 3;

        var needToConvert = item.SubType != MFMediaType.RGB24;
        if (needToConvert)
        {
            // The source does not deliver RGB24, so run each sample through the
            // video processor MFT to convert it.
            var processor = new VideoProcessorMFT();
            releaser.Add(processor);

            transform = (IMFTransform)processor;
            HR(transform.SetInputType(0, type, MFTSetTypeFlags.None));

            var rgbMediaType = MF.CreateMediaType();
            releaser.Add(rgbMediaType);
            HR(type.CopyAllItems(rgbMediaType));
            HR(rgbMediaType.SetGUID(MFAttributesClsid.MF_MT_SUBTYPE, MFMediaType.RGB24));
            HR(rgbMediaType.SetUINT32(MFAttributesClsid.MF_MT_DEFAULT_STRIDE, 3 * item.Width));
            HR(rgbMediaType.SetUINT32(MFAttributesClsid.MF_MT_SAMPLE_SIZE, rgbSize));
            HR(transform.SetOutputType(0, rgbMediaType, MFTSetTypeFlags.None));

            outSamples = new MFTOutputDataBuffer[1];
            outSamples[0] = new MFTOutputDataBuffer();

            outRgb24Sample = MF.CreateSample();
            releaser.Add(outRgb24Sample);
            outRgb24Buffer = MF.CreateMemoryBuffer(rgbSize);
            releaser.Add(outRgb24Buffer);
            outRgb24Sample.AddBuffer(outRgb24Buffer);
            outSamples[0].pSample = Marshal.GetIUnknownForObject(outRgb24Sample);
        }

        while (true)
        {
            int frames = 0;

            var hrRS = reader.ReadSample(
                (int)MF_SOURCE_READER.AnyStream,
                MF_SOURCE_READER_CONTROL_FLAG.None,
                out int streamIndex,
                out MF_SOURCE_READER_FLAG flags,
                out long timeStamp,
                out IMFSample sample
                );

            if (sample != null)
            {
                try
                {
                    IMFSample rgbSample = sample;

                    if (transform != null)
                    {
                        transform.ProcessInput(0, sample, 0);
                        while (true)
                        {
                            var hrPO = transform.ProcessOutput(
                                MFTProcessOutputFlags.None,
                                1,
                                outSamples,
                                out ProcessOutputStatus status
                                );
                            if (hrPO.Succeeded())
                            {
                                ConsumeBuffer(outRgb24Buffer, item);
                                frames++;
                                // sample is released by the finally block below
                                return; //break;
                            }
                            else
                            {
                                break;
                            }
                        }
                        //var hrPI = transform.ProcessInput(0, sample, 0);
                        continue;
                    }

                    rgbSample.GetBufferByIndex(0, out IMFMediaBuffer buff);
                    if (ConsumeBuffer(buff, item))
                    {
                        frames++;
                    }
                    else
                    {
                        return;
                    }
                }
                finally
                {
                    Marshal.ReleaseComObject(sample);
                }
                break;
            }
        }
    }
}