// Builds the visualization side-chain and attaches it to the engine's audio sink bin.
// The basic pipeline we're constructing is:
// .audiotee ! queue ! audioresample ! audioconvert ! fakesink
// On element-creation failure this logs and returns, leaving the object inactive.
internal Visualization (PlayerEngine.AudioSinkBin audiobin)
{
    Element converter, resampler;
    Element audiosinkqueue;
    Pad pad;

    vis_buffer = null;
    // FFT over 2*SLICE_SIZE samples yields SLICE_SIZE+1 complex frequency bins.
    vis_fft = new Gst.FFT.FFTF32 (SLICE_SIZE * 2, false);
    vis_fft_buffer = new GstFFTF32Complex [SLICE_SIZE + 1];
    vis_fft_sample_buffer = new float [SLICE_SIZE];

    // Core elements, if something fails here, it's the end of the world
    audiosinkqueue = ElementFactory.Make ("queue", "vis-queue");
    resampler = ElementFactory.Make ("audioresample", "vis-resample");
    converter = ElementFactory.Make ("audioconvert", "vis-convert");
    Element fakesink = ElementFactory.Make ("fakesink", "vis-sink");

    if (audiosinkqueue == null || resampler == null || converter == null || fakesink == null) {
        Log.Debug ("Could not construct visualization pipeline, a fundamental element could not be created");
        return;
    }

    // Attach the downstream-event probe only after the null guard above;
    // previously GetStaticPad was called on audiosinkqueue before verifying
    // that ElementFactory.Make succeeded, risking a NullReferenceException.
    pad = audiosinkqueue.GetStaticPad ("sink");
    pad.AddProbe (PadProbeType.EventDownstream, EventProbe);
    // GetStaticPad returns a new reference; release the wrapper (the probe
    // stays installed on the underlying pad). Matches the disposal of the
    // tee-link pads below.
    pad.Dispose ();

    //http://gstreamer.freedesktop.org/data/doc/gstreamer/head/gstreamer-plugins/html/gstreamer-plugins-queue.html#GstQueueLeaky
    const int GST_QUEUE_LEAK_DOWNSTREAM = 2;

    // Keep around the 5 most recent seconds of audio so that when resuming
    // visualization we have something to show right away.
    audiosinkqueue ["leaky"] = GST_QUEUE_LEAK_DOWNSTREAM;
    audiosinkqueue ["max-size-buffers"] = 0;
    audiosinkqueue ["max-size-bytes"] = 0;
    audiosinkqueue ["max-size-time"] = ((long)Constants.SECOND) * 5L;

    fakesink.Connect ("handoff", PCMHandoff);

    // This enables the handoff signal.
    fakesink ["signal-handoffs"] = true;
    // Synchronize so we see vis at the same time as we hear it.
    fakesink ["sync"] = true;
    // Drop buffers if they come in too late. This is mainly used when
    // thawing the vis pipeline.
    fakesink ["max-lateness"] = ((long)Constants.SECOND / 120L);
    // Deliver buffers one frame early. This allows for rendering time.
    // (TODO: It would be great to calculate this on-the-fly so we match
    // the rendering time.)
    fakesink ["ts-offset"] = -((long)Constants.SECOND / 60L);
    // Don't go to PAUSED when we freeze the pipeline.
    fakesink ["async"] = false;

    audiobin.Add (audiosinkqueue, resampler, converter, fakesink);

    // Link the queue's sink pad to a freshly requested tee pad, then drop
    // both managed wrappers — the link itself persists in the pipeline.
    pad = audiosinkqueue.GetStaticPad ("sink");
    Pad teepad = audiobin.RequestTeePad ();
    teepad.Link (pad);
    teepad.Dispose ();
    pad.Dispose ();

    Element.Link (audiosinkqueue, resampler, converter);
    converter.LinkFiltered (fakesink, caps);

    vis_buffer = new Adapter ();
    vis_resampler = resampler;
    vis_thawing = false;
    active = false;
}
// Builds the visualization side-chain against the caller-supplied bin and tee pad.
// The basic pipeline we're constructing is:
// .audiotee ! queue ! audioresample ! audioconvert ! fakesink
// On element-creation failure this logs and returns, leaving the object inactive.
public Visualization (Bin audiobin, Pad teepad)
{
    Element converter, resampler;
    Queue audiosinkqueue;
    Pad pad;

    vis_buffer = null;
    // FFT over 2*SLICE_SIZE samples yields SLICE_SIZE+1 complex frequency bins.
    vis_fft = gst_fft_f32_new (SLICE_SIZE * 2, false);
    vis_fft_buffer = new GstFFTF32Complex [SLICE_SIZE + 1];
    vis_fft_sample_buffer = new float [SLICE_SIZE];

    // Core elements, if something fails here, it's the end of the world
    audiosinkqueue = (Queue)ElementFactory.Make ("queue", "vis-queue");
    resampler = ElementFactory.Make ("audioresample", "vis-resample");
    converter = ElementFactory.Make ("audioconvert", "vis-convert");
    FakeSink fakesink = ElementFactory.Make ("fakesink", "vis-sink") as FakeSink;

    // channels * slice size * float size = size of chunks we want
    wanted_size = (uint)(2 * SLICE_SIZE * sizeof(float));

    if (audiosinkqueue == null || resampler == null || converter == null || fakesink == null) {
        Log.Debug ("Could not construct visualization pipeline, a fundamental element could not be created");
        return;
    }

    // Attach the event probe only after the null guard above; previously
    // GetStaticPad was called on audiosinkqueue before verifying that
    // ElementFactory.Make succeeded, risking a NullReferenceException.
    pad = audiosinkqueue.GetStaticPad ("sink");
    pad.AddEventProbe (new PadEventProbeCallback (EventProbe));

    // Keep around the 5 most recent seconds of audio so that when resuming
    // visualization we have something to show right away.
    audiosinkqueue.Leaky = Queue.LeakyType.Downstream;
    audiosinkqueue.MaxSizeBuffers = 0;
    audiosinkqueue.MaxSizeBytes = 0;
    audiosinkqueue.MaxSizeTime = Clock.Second * 5;

    fakesink.Handoff += PCMHandoff;

    // This enables the handoff signal.
    fakesink.SignalHandoffs = true;
    // Synchronize so we see vis at the same time as we hear it.
    fakesink.Sync = true;
    // Drop buffers if they come in too late. This is mainly used when
    // thawing the vis pipeline.
    fakesink.MaxLateness = (long)(Clock.Second / 120);
    // Deliver buffers one frame early. This allows for rendering time.
    // (TODO: It would be great to calculate this on-the-fly so we match
    // the rendering time.)
    fakesink.TsOffset = -(long)(Clock.Second / 60);
    // Don't go to PAUSED when we freeze the pipeline.
    fakesink.Async = false;

    audiobin.Add (audiosinkqueue, resampler, converter, fakesink);

    // NOTE(review): the pads obtained here are not disposed, unlike the
    // newer constructor variant — confirm whether these bindings require
    // explicit disposal of GetStaticPad results.
    pad = audiosinkqueue.GetStaticPad ("sink");
    teepad.Link (pad);

    Element.Link (audiosinkqueue, resampler, converter);
    converter.LinkFiltered (fakesink, caps);

    vis_buffer = new Adapter ();
    vis_resampler = resampler;
    vis_thawing = false;
    active = false;

    // Disable the pipeline till we hear otherwise from managed land.
    Blocked = true;
}