Example #1
0
 /// <summary>
 /// Frees any allocated memory made from building a WebRTC frame.
 /// Only used when building your own frame objects, such as in an app defined video source.
 /// </summary>
 /// <param name="frame">Managed frame object whose image planes were used to build the native frame.</param>
 /// <param name="frameNative">Native frame representation holding the allocated plane memory to release.</param>
 internal static void FreeAllocatedImageMemory(MLWebRTC.VideoSink.Frame frame, MLWebRTC.VideoSink.Frame.NativeBindings.MLWebRTCFrame frameNative)
 {
     // NOTE(review): as written this loop only copies each managed/native plane
     // pair into locals and performs no deallocation -- the method is a no-op.
     // Presumably a native free of imagePlaneNative's data was intended here;
     // verify against the full SDK source, as this snippet may be truncated.
     for (int i = 0; i < frame.ImagePlanes.Length; ++i)
     {
         MLWebRTC.VideoSink.Frame.ImagePlane imagePlane = frame.ImagePlanes[i];
         MLWebRTC.VideoSink.Frame.NativeBindings.ImagePlaneInfoNative imagePlaneNative = frameNative.ImagePlanes[i];
     }
 }
Example #2
0
        /// <summary>
        /// Converts an MR camera frame's RGB image planes into WebRTC image planes
        /// and pushes the resulting frame to this video source.
        /// </summary>
        /// <param name="mrCameraFrame">The MR camera frame to convert and push.</param>
        /// <returns>A completed task.</returns>
        private Task PushRGBFrame(MLMRCamera.Frame mrCameraFrame)
        {
            for (int planeIndex = 0; planeIndex < imagePlanesRGB.Length; planeIndex++)
            {
                MLMRCamera.Frame.ImagePlane sourcePlane = mrCameraFrame.ImagePlanes[planeIndex];
                imagePlanesRGB[planeIndex] = MLWebRTC.VideoSink.Frame.ImagePlane.Create(
                    sourcePlane.Width,
                    sourcePlane.Height,
                    sourcePlane.Stride,
                    sourcePlane.BytesPerPixel,
                    sourcePlane.Size,
                    sourcePlane.DataPtr);
            }

            // TimeStampNs is divided by 1000 to convert nanoseconds to microseconds.
            var webRtcFrame = MLWebRTC.VideoSink.Frame.Create(
                mrCameraFrame.Id,
                mrCameraFrame.TimeStampNs / 1000,
                imagePlanesRGB,
                MLWebRTC.VideoSink.Frame.OutputFormat.RGBA_8888);

            // Fire-and-forget: the push result is intentionally not awaited here.
            _ = this.PushFrameAsync(webRtcFrame);
            return Task.CompletedTask;
        }
Example #3
0
            /// <summary>
            /// Pushes a frame into the defined video source.
            /// </summary>
            /// <param name="frame">The frame to push to the video source.</param>
            /// <returns>
            /// MLResult.Result will be <c>MLResult.Code.Ok</c> if the frame was pushed successfully.
            /// MLResult.Result will be <c>MLResult.Code.WebRTCResultInstanceNotCreated</c> if MLWebRTC instance was not created.
            /// MLResult.Result will be <c>MLResult.Code.InvalidParam</c> if an invalid parameter was passed.
            /// MLResult.Result will be <c>MLResult.Code.WebRTCResultMismatchingHandle</c> if an incorrect handle was sent.
            /// MLResult.Result will be <c>MLResult.Code.WebRTCResultInvalidFrameFormat</c> if an invalid frame format was specified.
            /// MLResult.Result will be <c>MLResult.Code.WebRTCResultInvalidFramePlaneCount</c> if an invalid plane count was specified for the given frame format.
            /// </returns>
            protected async Task<MLResult> PushFrameAsync(MLWebRTC.VideoSink.Frame frame)
            {
                if (!MagicLeapNativeBindings.MLHandleIsValid(this.Handle))
                {
                    // Inside an async method a result can be returned directly;
                    // wrapping it in Task.FromResult and awaiting was redundant.
                    return MLResult.Create(MLResult.Code.InvalidParam, "Handle is invalid.");
                }

                pushFrameEvent.Reset();
                try
                {
                    MLWebRTC.VideoSink.Frame.NativeBindings.MLWebRTCFrame frameNative = MLWebRTC.VideoSink.Frame.NativeBindings.MLWebRTCFrame.Create(frame);
                    MLResult.Code resultCode = NativeBindings.MLWebRTCSourceAppDefinedVideoSourcePushFrame(this.Handle, in frameNative);
                    DidNativeCallSucceed(resultCode, "MLWebRTCSourceAppDefinedVideoSourcePushFrame()");
                    return MLResult.Create(resultCode);
                }
                finally
                {
                    // Always signal the event, even if the native call path throws,
                    // so anything waiting on pushFrameEvent is not blocked forever.
                    pushFrameEvent.Set();
                }
            }
Example #4
0
                        /// <summary>
                        /// Creates and returns an initialized version of this struct from a MLWebRTC.VideoSink.Frame object.
                        /// </summary>
                        /// <param name="frame">The frame object to use for initializing.</param>
                        /// <returns>An initialized version of this struct.</returns>
                        public static MLWebRTCFrame Create(MLWebRTC.VideoSink.Frame frame)
                        {
                            var planes = frame.ImagePlanes;

                            // The native plane array comes from a reusable buffer
                            // rather than being allocated per frame.
                            var frameNative = new MLWebRTCFrame
                            {
                                Version = 1,
                                PlaneCount = (ushort)planes.Length,
                                ImagePlanes = nativeImagePlanesBuffer.Get(),
                                TimeStamp = frame.TimeStampUs,
                                Format = frame.Format,
                            };

                            for (int planeIndex = 0; planeIndex < planes.Length; planeIndex++)
                            {
                                frameNative.ImagePlanes[planeIndex].Data = planes[planeIndex];
                            }

                            return frameNative;
                        }
        /// <summary>
        /// Builds a WebRTC YUV frame from a camera capture callback and pushes it
        /// to this video source.
        /// </summary>
        /// <param name="results">Capture result extras (request id, timestamp).</param>
        /// <param name="frameInfo">The Y/U/V plane buffers for this capture.</param>
        /// <param name="metadata">Frame metadata supplied by the capture callback (not read here).</param>
        private void PushYUVFrame(MLCamera.ResultExtras results, MLCamera.YUVFrameInfo frameInfo, MLCamera.FrameMetadata metadata)
        {
            MLWebRTC.VideoSink.Frame.ImagePlane[] planes = imagePlanesBuffer.Get();
            for (int planeIndex = 0; planeIndex < planes.Length; ++planeIndex)
            {
                // Plane indices 0/1/2 map to the Y/U/V buffers; any plane beyond
                // that gets a default (empty) buffer.
                MLCamera.YUVBuffer buffer;
                if (planeIndex == 0)
                {
                    buffer = frameInfo.Y;
                }
                else if (planeIndex == 1)
                {
                    buffer = frameInfo.U;
                }
                else if (planeIndex == 2)
                {
                    buffer = frameInfo.V;
                }
                else
                {
                    buffer = new MLCamera.YUVBuffer();
                }

                planes[planeIndex] = MLWebRTC.VideoSink.Frame.ImagePlane.Create(
                    buffer.Width,
                    buffer.Height,
                    buffer.Stride,
                    buffer.BytesPerPixel,
                    buffer.Size,
                    buffer.DataPtr);
            }

            var frame = MLWebRTC.VideoSink.Frame.Create(
                (ulong)results.RequestId,
                results.VcamTimestampUs,
                planes,
                MLWebRTC.VideoSink.Frame.OutputFormat.YUV_420_888);

            // Fire-and-forget: the push result is intentionally not awaited here.
            _ = this.PushFrameAsync(frame);
        }