Example #1
        /// <summary>
        /// When an AudioProperty is found in the fed packets, this callback is called.
        /// </summary>
        private void AudioPropertyFound(object sender, PropertyFoundEventArgs args)
        {
            if (args.Property == AudioFileStreamProperty.ReadyToProducePackets)
            {
                Started = false;

                if (OutputQueue != null)
                {
                    OutputQueue.Dispose();
                }

                OutputQueue = new OutputAudioQueue(_audioFileStream.StreamBasicDescription);
                OutputReady?.Invoke(OutputQueue);

                _currentByteCount            = 0;
                OutputQueue.BufferCompleted += HandleBufferCompleted;
                _outputBuffers = new List <AudioBuffer>();

                for (int i = 0; i < MaxBufferCount; i++)
                {
                    OutputQueue.AllocateBuffer(BufferSize, out IntPtr outBuffer);
                    _outputBuffers.Add(new AudioBuffer()
                    {
                        Buffer             = outBuffer,
                        PacketDescriptions = new List <AudioStreamPacketDescription>()
                    });
                }

                _currentBuffer = _outputBuffers.First();

                OutputQueue.MagicCookie = _audioFileStream.MagicCookie;
            }
        }
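Example #1 wires up `HandleBufferCompleted` but does not include it. A minimal sketch of what such a handler might look like, reusing `_outputBuffers` from the example; the `_availableBuffers` recycle queue is a hypothetical addition, and `args.IntPtrBuffer` is assumed to identify the native buffer that finished playing:

        // Sketch only: recycle the buffer that just finished playing so the
        // feeding code can refill it. _availableBuffers is hypothetical.
        private void HandleBufferCompleted(object sender, BufferCompletedEventArgs args)
        {
            lock (_outputBuffers)
            {
                // Find the wrapper whose native buffer the queue just released.
                var buffer = _outputBuffers.FirstOrDefault(b => b.Buffer == args.IntPtrBuffer);
                if (buffer == null)
                    return;

                // Reset it for the next fill.
                buffer.PacketDescriptions.Clear();
                _availableBuffers.Enqueue(buffer); // hypothetical Queue<AudioBuffer>
            }
        }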
Example #2
 void OnPropertyFound(object sender, PropertyFoundEventArgs e)
 {
     if (e.Property == AudioFileStreamProperty.ReadyToProducePackets) {
         outputQueue = new OutputAudioQueue (audioFileStream.StreamBasicDescription);
         outputQueue.OutputCompleted += OnOutputQueueOutputCompleted;
     }
 }
Example #3
        /// <summary>
        /// When an AudioProperty is found in the fed packets, this callback is called.
        /// </summary>
        void AudioPropertyFound(object sender, PropertyFoundEventArgs args)
        {
            switch (args.Property)
            {
            case AudioFileStreamProperty.ReadyToProducePackets:
                Started = false;


                if (OutputQueue != null)
                {
                    OutputQueue.Dispose();
                }

                OutputQueue                  = new OutputAudioQueue(fileStream.StreamBasicDescription);
                currentByteCount             = 0;
                OutputQueue.OutputCompleted += HandleOutputQueueOutputCompleted;
                outputBuffers                = new List <AudioBuffer>();

                for (int i = 0; i < MaxBufferCount; i++)
                {
                    IntPtr outBuffer;
                    OutputQueue.AllocateBuffer(BufferSize, out outBuffer);
                    outputBuffers.Add(new AudioBuffer()
                    {
                        Buffer = outBuffer, PacketDescriptions = new List <AudioStreamPacketDescription>()
                    });
                }

                currentBuffer = outputBuffers.First();

                OutputQueue.MagicCookie = fileStream.MagicCookie;
                break;
            }
        }
Example #4
 void OnPropertyFound(object sender, PropertyFoundEventArgs e)
 {
     if (e.Property == AudioFileStreamProperty.ReadyToProducePackets)
     {
         outputQueue = new OutputAudioQueue(audioFileStream.StreamBasicDescription);
         outputQueue.OutputCompleted += OnOutputQueueOutputCompleted;
     }
 }
Example #5
 // event handler - never executed
 void OnPropertyFound(object sender, PropertyFoundEventArgs e)
 {
     if (e.Property == AudioFileStreamProperty.ReadyToProducePackets)
     {
         oaq = new OutputAudioQueue(afs.StreamBasicDescription);
         oaq.BufferCompleted += OnBufferCompleted;
         OutputReady(oaq);
     }
 }
Example #6
        void StreamPropertyListenerProc(object sender, PropertyFoundEventArgs args)
        {
            if (args.Property == AudioFileStreamProperty.DataFormat) {
                dataFormat = audioFileStream.DataFormat;
                return;
            }

            if (args.Property != AudioFileStreamProperty.ReadyToProducePackets)
                return;

            if (audioQueue != null) {
                // TODO: Dispose
                //throw new NotImplementedException ();
            }

            audioQueue = new OutputAudioQueue (dataFormat);
            audioQueue.VolumeRampTime = 2.0f;
            audioQueue.OutputCompleted += HandleOutputCompleted;
        }
Example #7
        void StreamPropertyListenerProc(object sender, PropertyFoundEventArgs args)
        {
            if (args.Property == AudioFileStreamProperty.DataFormat)
            {
                dataFormat = audioFileStream.DataFormat;
                return;
            }

            if (args.Property != AudioFileStreamProperty.ReadyToProducePackets)
            {
                return;
            }

            if (audioQueue != null)
            {
                // TODO: Dispose
                throw new NotImplementedException();
            }

            audioQueue = new OutputAudioQueue(dataFormat);
            audioQueue.BufferCompleted += HandleBufferCompleted;

            AudioQueueStatus status;

            aqTap = audioQueue.CreateProcessingTap(TapProc, AudioQueueProcessingTapFlags.PreEffects, out status);
            if (status != AudioQueueStatus.Ok)
            {
                throw new ApplicationException("Could not create AQ tap");
            }

            // create an augraph to process in the tap. needs to convert from tapFormat to effect format and back

            /* note: this is invalidname's recipe to do an in-place effect when a format conversion is needed
             * before and after the effect, usually because effects want floats, and everything else in iOS
             * core audio works with ints (or, in rare cases, fixed-point).
             * the graph looks like this:
             * [render-callback] -> [converter] -> [effect] -> [converter] -> [generic-output]
             * prior to calling AudioUnitRender() on generic-output, the tap proc copies off the ioData to a
             * pointer that render-callback knows about, and NULLs the ioData provided to AudioUnitRender().
             * the NULL tells generic-output to pull from its upstream units (ie, the augraph), and copying
             * off the ioData pointer allows the render-callback to provide it to the front of the stream.
             * in some locales, this kind of shell game
             * is described as "batshit crazy", but it seems to work pretty well in practice.
             */

            auGraph = new AUGraph();
            auGraph.Open();
            var effectNode = auGraph.AddNode(AudioComponentDescription.CreateConverter(AudioTypeConverter.NewTimePitch));

            effectUnit = auGraph.GetNodeInfo(effectNode);

            var convertToEffectNode = auGraph.AddNode(AudioComponentDescription.CreateConverter(AudioTypeConverter.AU));
            var convertToEffectUnit = auGraph.GetNodeInfo(convertToEffectNode);

            var convertFromEffectNode = auGraph.AddNode(AudioComponentDescription.CreateConverter(AudioTypeConverter.AU));
            var convertFromEffectUnit = auGraph.GetNodeInfo(convertFromEffectNode);

            var genericOutputNode = auGraph.AddNode(AudioComponentDescription.CreateOutput(AudioTypeOutput.Generic));

            genericOutputUnit = auGraph.GetNodeInfo(genericOutputNode);

            // set the format conversions throughout the graph
            var effectFormat = effectUnit.GetAudioFormat(AudioUnitScopeType.Output);
            var tapFormat    = aqTap.ProcessingFormat;

            convertToEffectUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Input);
            convertToEffectUnit.SetAudioFormat(effectFormat, AudioUnitScopeType.Output);

            convertFromEffectUnit.SetAudioFormat(effectFormat, AudioUnitScopeType.Input);
            convertFromEffectUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Output);

            genericOutputUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Input);
            genericOutputUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Output);

            // set maximum frames per slice higher (4096) so we don't get kAudioUnitErr_TooManyFramesToProcess
            const uint maxFramesPerSlice = 4096;

            if (convertToEffectUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException();
            }
            if (effectUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException();
            }
            if (convertFromEffectUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException();
            }
            if (genericOutputUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException();
            }

            // connect the nodes
            auGraph.ConnnectNodeInput(convertToEffectNode, 0, effectNode, 0);
            auGraph.ConnnectNodeInput(effectNode, 0, convertFromEffectNode, 0);
            auGraph.ConnnectNodeInput(convertFromEffectNode, 0, genericOutputNode, 0);

            // set up the callback into the first convert unit
            if (convertToEffectUnit.SetRenderCallback(ConvertInputRenderCallback, AudioUnitScopeType.Global) != AudioUnitStatus.NoError)
            {
                throw new ApplicationException();
            }

            var res = auGraph.Initialize();

            if (res != AUGraphError.OK)
            {
                throw new ApplicationException();
            }
        }
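`TapProc` and `ConvertInputRenderCallback`, which this example registers, are not shown. A sketch of the shell game the long comment describes, under the assumption that a `preRenderData` field carries the tap's buffer pointer between the two callbacks (verify the `AudioQueueProcessingTap.GetSourceAudio` and `AudioUnit.Render` signatures against your Xamarin.iOS version):

        IntPtr preRenderData; // assumed field: carries the tap's buffer into the render callback

        uint TapProc(AudioQueueProcessingTap audioQueueTap, uint numberOfFrames,
                     ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags,
                     AudioBuffers data)
        {
            // Pull this slice's source audio from the queue into 'data'.
            audioQueueTap.GetSourceAudio(numberOfFrames, ref timeStamp,
                                         out AudioQueueProcessingTapFlags sourceFlags,
                                         out uint sourceFrames, data);

            // Stash the buffer pointer and null it out so generic-output pulls
            // through the graph instead of rendering into 'data' directly.
            preRenderData = data[0].Data;
            data.SetData(0, IntPtr.Zero);

            var renderTimeStamp = new AudioTimeStamp { Flags = AudioTimeStamp.AtsFlags.SampleTimeValid };
            AudioUnitRenderActionFlags actionFlags = 0;
            genericOutputUnit.Render(ref actionFlags, renderTimeStamp, 0, numberOfFrames, data);

            return numberOfFrames;
        }

        // Front of the graph: hand the stashed tap buffer to the first converter,
        // completing the in-place round trip.
        AudioUnitStatus ConvertInputRenderCallback(AudioUnitRenderActionFlags actionFlags,
                                                   AudioTimeStamp timeStamp, uint busNumber,
                                                   uint numberFrames, AudioBuffers data)
        {
            data.SetData(0, preRenderData);
            return AudioUnitStatus.NoError;
        }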
Example #8
		/// <summary>
		/// When an AudioProperty is found in the fed packets, this callback is called.
		/// </summary>
		void AudioPropertyFound (object sender, PropertyFoundEventArgs args)
		{
			switch (args.Property) {
			case AudioFileStreamProperty.ReadyToProducePackets:
				Started = false;
				
				
				if (OutputQueue != null)
					OutputQueue.Dispose ();
				
				OutputQueue = new OutputAudioQueue (fileStream.StreamBasicDescription);
				currentByteCount = 0;
				OutputQueue.OutputCompleted += HandleOutputQueueOutputCompleted;
				outputBuffers = new List<AudioBuffer>();
				
				for (int i = 0; i < MaxBufferCount; i++)
				{
					IntPtr outBuffer;
					OutputQueue.AllocateBuffer (BufferSize, out outBuffer);
					outputBuffers.Add (new AudioBuffer () { Buffer = outBuffer, PacketDescriptions = new List<AudioStreamPacketDescription>() });
				}
				
				currentBuffer = outputBuffers.First ();
				
				OutputQueue.MagicCookie = fileStream.MagicCookie;				
				break;
			}
		}
Example #9
		void StreamPropertyListenerProc (object sender, PropertyFoundEventArgs args)
		{
			if (args.Property == AudioFileStreamProperty.DataFormat) {
				dataFormat = audioFileStream.DataFormat;
				return;
			}

			if (args.Property != AudioFileStreamProperty.ReadyToProducePackets) 
				return;

			if (audioQueue != null) {
				// TODO: Dispose
				throw new NotImplementedException ();
			}

			audioQueue = new OutputAudioQueue (dataFormat);
			audioQueue.OutputCompleted += HandleOutputCompleted;

			AudioQueueStatus status;
			aqTap = audioQueue.CreateProcessingTap (TapProc, AudioQueueProcessingTapFlags.PreEffects, out status);
			if (status != AudioQueueStatus.Ok)
				throw new ApplicationException ("Could not create AQ tap");

			// create an augraph to process in the tap. needs to convert from tapFormat to effect format and back
			/* note: this is invalidname's recipe to do an in-place effect when a format conversion is needed
			before and after the effect, usually because effects want floats, and everything else in iOS
			core audio works with ints (or, in rare cases, fixed-point).
			the graph looks like this:
			[render-callback] -> [converter] -> [effect] -> [converter] -> [generic-output]
			prior to calling AudioUnitRender() on generic-output, the tap proc copies off the ioData to a
			pointer that render-callback knows about, and NULLs the ioData provided to AudioUnitRender().
			the NULL tells generic-output to pull from its upstream units (ie, the augraph), and copying
			off the ioData pointer allows the render-callback to provide it to the front of the stream.
			in some locales, this kind of shell game
			is described as "batshit crazy", but it seems to work pretty well in practice.
			*/

			auGraph = new AUGraph ();
			auGraph.Open ();
			var effectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.NewTimePitch));
			effectUnit = auGraph.GetNodeInfo (effectNode);

			var convertToEffectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.AU));
			var convertToEffectUnit = auGraph.GetNodeInfo (convertToEffectNode);

			var convertFromEffectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.AU));
			var convertFromEffectUnit = auGraph.GetNodeInfo (convertFromEffectNode);

			var genericOutputNode = auGraph.AddNode (AudioComponentDescription.CreateOutput (AudioTypeOutput.Generic));
			genericOutputUnit = auGraph.GetNodeInfo (genericOutputNode);

			// set the format conversions throughout the graph
			var effectFormat = effectUnit.GetAudioFormat (AudioUnitScopeType.Output);
			var tapFormat = aqTap.ProcessingFormat;

			convertToEffectUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Input);
			convertToEffectUnit.SetAudioFormat (effectFormat, AudioUnitScopeType.Output);

			convertFromEffectUnit.SetAudioFormat (effectFormat, AudioUnitScopeType.Input);
			convertFromEffectUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Output);

			genericOutputUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Input);
			genericOutputUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Output);

			// set maximum frames per slice higher (4096) so we don't get kAudioUnitErr_TooManyFramesToProcess
			const uint maxFramesPerSlice = 4096;
			if (convertToEffectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (effectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (convertFromEffectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (genericOutputUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();

			// connect the nodes
			auGraph.ConnnectNodeInput (convertToEffectNode, 0, effectNode, 0);
			auGraph.ConnnectNodeInput (effectNode, 0, convertFromEffectNode, 0);
			auGraph.ConnnectNodeInput (convertFromEffectNode, 0, genericOutputNode, 0);

			// set up the callback into the first convert unit
			if (convertToEffectUnit.SetRenderCallback (ConvertInputRenderCallback, AudioUnitScopeType.Global) != AudioUnitStatus.NoError)
				throw new ApplicationException ();

			var res = auGraph.Initialize ();
			if (res != AUGraphError.OK)
				throw new ApplicationException ();
		}
Example #10
        void AudioPropertyFound(object sender, PropertyFoundEventArgs args)
        {
            switch (args.Property){
                //
                // Enough data has been read that we can start producing output
                //
            case AudioFileStreamProperty.ReadyToProducePackets:
                bytesFilled = 0;
                fillBufferIndex = 0;
                packetsFilled = 0;
                started = false;
                OutputQueue = new OutputAudioQueue (fileStream.StreamBasicDescription);
                OutputQueue.OutputCompleted += HandleOutputQueueOutputCompleted;

                outputBuffers = new IntPtr [4];
                inuse = new bool [4];

                // Allocate audio queue buffers
                for (int i = 0; i < outputBuffers.Length; i++)
                    OutputQueue.AllocateBuffer (bufferSize, out outputBuffers [i]);

                OutputQueue.MagicCookie = fileStream.MagicCookie;
                OutputQueue.AddListener (AudioQueueProperty.IsRunning, delegate (AudioQueueProperty p) {
                    var h = Finished;
                    if (h != null)
                        h (this, EventArgs.Empty);
                });

                break;
            }

            Console.WriteLine ("Property: {0}", args);
        }
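The filling side that pairs with this setup is not part of the example. A sketch of how a filled buffer might be handed to the queue, assuming a hypothetical `packetDescriptions` list accumulated by the packet-decoded callback; real code must also wait until `inuse[fillBufferIndex]` is cleared by the completion handler before refilling:

        // Sketch: hand the buffer currently being filled to the queue, then rotate.
        void EnqueueBuffer()
        {
            inuse[fillBufferIndex] = true;
            OutputQueue.EnqueueBuffer(outputBuffers[fillBufferIndex], bytesFilled,
                                      packetDescriptions.ToArray()); // hypothetical accumulator
            packetDescriptions.Clear();

            if (!started)
            {
                started = true;
                OutputQueue.Start();
            }

            // Rotate to the next buffer; bytes/packets counters start over.
            fillBufferIndex = (fillBufferIndex + 1) % outputBuffers.Length;
            bytesFilled = 0;
            packetsFilled = 0;
        }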
Example #11
		/// <summary>
		/// When an AudioProperty is found in the fed packets, this callback is called.
		/// </summary>
		void AudioPropertyFound (object sender, PropertyFoundEventArgs args)
		{
			lock (locker) {
				if (args.Property == AudioFileStreamProperty.ReadyToProducePackets) {
					if (outputQueue != null)
						outputQueue.Dispose ();

					availableBuffers = new Queue<AudioBuffer> ();
					outputBuffers = new Dictionary<IntPtr, AudioBuffer> ();
					outputQueue = new OutputAudioQueue (fileStream.StreamBasicDescription);
					outputQueue.AddListener (AudioQueueProperty.IsRunning, EmitFinishedEvent);
					outputQueue.Volume = Volume;
					outputQueue.AddListener (AudioQueueProperty.ConverterError, (AudioQueueProperty property) => {
						LoggingService.LogInfo ("Got an error reading the file: {0}", outputQueue.ConverterError);
					});
					if (OutputReady != null)
						OutputReady (outputQueue);

					outputQueue.BufferCompleted += HandleBufferCompleted;
					outputQueue.MagicCookie = fileStream.MagicCookie;
				}
			}
		}
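None of these examples show how the parser is fed. A minimal driver sketch, assuming MP3 input and a readable `sourceStream` (e.g., an HTTP response stream); `AudioPacketDecoded` is a hypothetical handler that fills and enqueues buffers:

		void RunPump()
		{
			// Sketch: pump raw bytes into the parser. Once enough data has been
			// parsed, PropertyFound fires with ReadyToProducePackets (above).
			var fileStream = new AudioFileStream(AudioFileType.MP3); // assumed container type
			fileStream.PropertyFound += AudioPropertyFound;
			fileStream.PacketDecoded += AudioPacketDecoded; // hypothetical packet handler

			var buffer = new byte[8192];
			int read;
			while ((read = sourceStream.Read(buffer, 0, buffer.Length)) > 0)
			{
				// ParseBytes wants exactly the bytes read, so trim the last chunk.
				var chunk = buffer;
				if (read != buffer.Length)
				{
					chunk = new byte[read];
					Array.Copy(buffer, chunk, read);
				}
				fileStream.ParseBytes(chunk, false); // false: no discontinuity
			}
		}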