/// <summary>
/// Adds an already created server.
/// </summary>
/// <param name="server">The server to add</param>
/// <exception cref="ArgumentNullException">
/// Thrown if <paramref name="server"/> is null.
/// </exception>
public void AddServer(VideoSink server)
{
    // Fail fast with a clear exception: otherwise a null server would
    // surface as a NullReferenceException on server.Name inside the lock.
    if (server == null)
    {
        throw new ArgumentNullException(nameof(server));
    }

    lock (m_lockObject)
    {
        // Dictionary.Add throws ArgumentException on a duplicate name,
        // which surfaces accidental double-registration to the caller.
        m_sinks.Add(server.Name, server);
    }
}
/// <summary>
/// Start automatically capturing images from an existing camera to send to
/// the dashboard.
/// </summary>
/// <remarks>
/// You should call this method to see a camera feed on the dashboard.
/// If you also want to perform vision processing on the roboRIO, use
/// <see cref="GetVideo()"/> to get access to the camera images
/// </remarks>
/// <param name="camera">The camera to stream from.</param>
public void StartAutomaticCapture(VideoSource camera)
{
    // Register the camera with the camera server first.
    AddCamera(camera);

    // Then create a streaming server named after the camera and point it
    // at the newly registered source.
    var streamServer = AddServer($"serve_{camera.Name}");
    streamServer.Source = camera;
}
// Send seek event to change rate.
// Queries the pipeline's current position, builds a flushing/accurate seek
// event at the current Rate, and sends it to the video sink (resolving the
// sink lazily on first use).
static void SendSeekEvent()
{
    var format = Format.Time;
    long position;

    // Obtain the current position, needed for the seek event
    if (!Pipeline.QueryPosition(format, out position))
    {
        Console.WriteLine("Unable to retrieve current position.");
        return;
    }

    // Create the seek event. For forward playback the segment starts at the
    // current position; for reverse playback it runs from 0 up to the
    // current position. Use the `format` declared above consistently instead
    // of repeating the Format.Time literal.
    Event seekEvent;
    if (Rate > 0)
    {
        seekEvent = new Event(Rate, format, SeekFlags.Flush | SeekFlags.Accurate,
            SeekType.Set, position, SeekType.None, 0);
    }
    else
    {
        seekEvent = new Event(Rate, format, SeekFlags.Flush | SeekFlags.Accurate,
            SeekType.Set, 0, SeekType.Set, position);
    }

    if (VideoSink == null)
    {
        // If we have not done so, obtain the sink through which we will send the seek events
        VideoSink = (Element)Pipeline["video-sink"];
    }

    // Send the event
    VideoSink.SendEvent(seekEvent);
    Console.WriteLine("Current rate: {0}", Rate);
}
/// <summary>
/// Creates an initialized VideoSink object.
/// </summary>
/// <param name="result">The MLResult object of the inner platform call(s).</param>
/// <returns>An initialized VideoSink object, or null when creation fails.</returns>
public static VideoSink Create(out MLResult result)
{
    VideoSink videoSink = null;
#if PLATFORM_LUMIN
    // Ask the native layer to allocate a video sink handle.
    ulong sinkHandle = MagicLeapNativeBindings.InvalidHandle;
    MLResult.Code resultCode = NativeBindings.MLWebRTCVideoSinkCreate(out sinkHandle);

    // The result reported to the caller is the native code in every path.
    result = MLResult.Create(resultCode);

    if (!DidNativeCallSucceed(resultCode, "MLWebRTCVideoSinkCreate()"))
    {
        // Native creation failed; no sink to hand back.
        return null;
    }

    videoSink = new VideoSink(sinkHandle);

    // Track the sink in the shared list only when its handle is valid.
    if (MagicLeapNativeBindings.MLHandleIsValid(videoSink.Handle))
    {
        MLWebRTC.Instance.sinks.Add(videoSink);
    }
#else
    result = new MLResult();
#endif
    return videoSink;
}
// Process keyboard input.
// Blocks reading keys until Q is pressed: P toggles play/pause, S halves
// the rate (Shift+S doubles it), D reverses direction, N steps one frame.
static void HandleKeyboard()
{
    var quit = false;

    while (!quit)
    {
        var keyInfo = Console.ReadKey();
        switch (keyInfo.Key)
        {
            case ConsoleKey.P:
                // Toggle between playing and paused.
                Playing = !Playing;
                Pipeline.SetState(Playing ? State.Playing : State.Paused);
                Console.WriteLine("Setting state to {0}", Playing ? "PLAYING" : "PAUSE");
                break;

            case ConsoleKey.S:
                // Shift+S doubles the rate; plain S halves it.
                if (keyInfo.Modifiers == ConsoleModifiers.Shift)
                {
                    Rate *= 2.0;
                }
                else
                {
                    Rate /= 2.0;
                }
                SendSeekEvent();
                break;

            case ConsoleKey.D:
                // Flip the playback direction.
                Rate *= -1.0;
                SendSeekEvent();
                break;

            case ConsoleKey.N:
                if (VideoSink == null)
                {
                    // If we have not done so, obtain the sink through which we will send the step events
                    VideoSink = (Element)Pipeline["video-sink"];
                }

                // Step a single buffer at the current rate.
                var stepEvent = new Event(Format.Buffers, 1, Rate, true, false);
                VideoSink.SendEvent(stepEvent);
                Console.WriteLine("Stepping one frame");
                break;

            case ConsoleKey.Q:
                quit = true;
                break;

            default:
                break;
        }
    }
}
/// <summary>
/// Destroys every video pipeline component that has been created and clears
/// its reference, in a fixed order.
/// </summary>
/// <returns>A completed task; the teardown itself runs synchronously.</returns>
private Task StopVideoStream()
{
    if (!(VideoDepacketizer is null))
    {
        VideoDepacketizer.Destroy();
        VideoDepacketizer = null;
    }

    if (!(VideoDecoder is null))
    {
        VideoDecoder.Destroy();
        VideoDecoder = null;
    }

    if (!(VideoConverter is null))
    {
        VideoConverter.Destroy();
        VideoConverter = null;
    }

    if (!(ResetVideoPipe is null))
    {
        ResetVideoPipe.Destroy();
        ResetVideoPipe = null;
    }

    if (!(VideoEncoder is null))
    {
        VideoEncoder.Destroy();
        VideoEncoder = null;
    }

    if (!(VideoPacketizer is null))
    {
        VideoPacketizer.Destroy();
        VideoPacketizer = null;
    }

    if (!(VideoSink is null))
    {
        VideoSink.Destroy();
        VideoSink = null;
    }

    return Task.CompletedTask;
}