コード例 #1
0
        // Processing-tap callback for the output queue.  Pulls the raw source
        // audio, stashes its buffer pointer for the AUGraph render callback,
        // NULLs the tap buffer so the generic-output unit pulls from upstream,
        // then renders the processed audio back into `data`.
        // Returns the number of frames the source actually provided.
        // NOTE(review): this runs on a CoreAudio callback thread; a managed
        // exception thrown here cannot be caught by normal app code — confirm
        // that failing hard is the intended policy.
        uint TapProc(AudioQueueProcessingTap audioQueueTap, uint numberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data)
        {
            AudioQueueProcessingTapFlags sourceFlags;
            uint sourceFrames;

            // Fetch the unprocessed source audio; fills `data` in place.
            if (audioQueueTap.GetSourceAudio(numberOfFrames, ref timeStamp, out sourceFlags, out sourceFrames, data) != AudioQueueStatus.Ok)
            {
                // Include context so the failure is diagnosable (was a bare throw).
                throw new ApplicationException("TapProc: GetSourceAudio failed");
            }

            // Stash the source buffer pointer for the render callback, then NULL
            // ioData — the NULL tells the generic-output unit to pull from its
            // upstream units rather than render in place.
            preRenderData = data [0].Data;
            data.SetData(0, IntPtr.Zero);

            var renderTimeStamp = new AudioTimeStamp();

            renderTimeStamp.Flags = AudioTimeStamp.AtsFlags.SampleTimeValid;
            AudioUnitRenderActionFlags actionFlags = 0;

            // Drive the AUGraph; the processed audio lands back in `data`.
            var res = genericOutputUnit.Render(ref actionFlags, renderTimeStamp, 0, numberOfFrames, data);

            if (res != AudioUnitStatus.NoError)
            {
                throw new ApplicationException("TapProc: generic output unit Render failed: " + res);
            }

            return sourceFrames;
        }
コード例 #2
0
        // Multi-channel processing-tap callback: like the single-buffer variant,
        // but captures every channel's buffer pointer before driving the graph.
        // Returns the number of frames the source actually provided.
        uint TapProc(AudioQueueProcessingTap audioQueueTap, uint inNumberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data)
        {
            AudioQueueProcessingTapFlags sourceFlags;
            uint sourceFrames;

            // Fetch the unprocessed source audio; fills `data` in place.
            if (audioQueueTap.GetSourceAudio(inNumberOfFrames, ref timeStamp, out sourceFlags, out sourceFrames, data) != AudioQueueStatus.Ok)
            {
                // Give the failure a message (was a bare, undiagnosable throw).
                throw new ApplicationException("TapProc: GetSourceAudio failed");
            }

            // Save each channel's data pointer (preRenderData holds one slot per
            // channel) and NULL ioData so the generic-output unit pulls from its
            // upstream units instead of rendering in place.
            for (int channel = 0; channel < data.Count; channel++)
            {
                preRenderData[channel] = data [channel].Data;
                data.SetData(channel, IntPtr.Zero);
            }

            // renderTimeStamp is a shared field; mark only the sample time valid.
            renderTimeStamp.Flags = AudioTimeStamp.AtsFlags.SampleTimeValid;
            AudioUnitRenderActionFlags actionFlags = 0;

            // Drive the AUGraph; the processed audio lands back in `data`.
            AudioUnitStatus res = genericOutputUnit.Render(ref actionFlags, renderTimeStamp, 0, inNumberOfFrames, data);

            if (res != AudioUnitStatus.NoError)
            {
                throw new ApplicationException("TapProc: generic output unit Render failed: " + res);
            }

            return sourceFrames;
        }
コード例 #3
0
        // AudioFileStream property listener.  Caches the stream's data format;
        // once the stream is ready to produce packets, creates the output queue,
        // installs the processing tap, and builds the AUGraph
        // ([render-callback] -> [converter] -> [effect] -> [converter] -> [generic-output])
        // that processes tapped audio.  Writes the fields: dataFormat, audioQueue,
        // aqTap, auGraph, effectUnit, genericOutputUnit.
        void StreamPropertyListenerProc(object sender, PropertyFoundEventArgs args)
        {
            if (args.Property == AudioFileStreamProperty.DataFormat)
            {
                // Cache the parsed format; needed to create the queue below.
                dataFormat = audioFileStream.DataFormat;
                return;
            }

            if (args.Property != AudioFileStreamProperty.ReadyToProducePackets)
            {
                return;
            }

            if (audioQueue != null)
            {
                // TODO: Dispose
                throw new NotImplementedException();
            }

            audioQueue = new OutputAudioQueue(dataFormat);
            audioQueue.BufferCompleted += HandleBufferCompleted;

            AudioQueueStatus status;

            aqTap = audioQueue.CreateProcessingTap(TapProc, AudioQueueProcessingTapFlags.PreEffects, out status);
            if (status != AudioQueueStatus.Ok)
            {
                throw new ApplicationException("Could not create AQ tap");
            }

            // create an augraph to process in the tap. needs to convert from tapFormat to effect format and back

            /* note: this is invalidname's recipe to do an in-place effect when a format conversion is needed
             * before and after the effect, usually because effects want floats, and everything else in iOS
             * core audio works with ints (or, in rare cases, fixed-point).
             * the graph looks like this:
             * [render-callback] -> [converter] -> [effect] -> [converter] -> [generic-output]
             * prior to calling AudioUnitRender() on generic-output the ioData to a pointer that render-callback
             * knows about, and NULLs the ioData provided to AudioUnitRender(). the NULL tells generic-output to
             * pull from its upstream units (ie, the augraph), and copying off the ioData pointer allows the
             * render-callback	to provide it to the front of the stream. in some locales, this kind of shell game
             * is described as "batshit crazy", but it seems to work pretty well in practice.
             */

            auGraph = new AUGraph();
            auGraph.Open();
            var effectNode = auGraph.AddNode(AudioComponentDescription.CreateConverter(AudioTypeConverter.NewTimePitch));

            effectUnit = auGraph.GetNodeInfo(effectNode);

            var convertToEffectNode = auGraph.AddNode(AudioComponentDescription.CreateConverter(AudioTypeConverter.AU));
            var convertToEffectUnit = auGraph.GetNodeInfo(convertToEffectNode);

            var convertFromEffectNode = auGraph.AddNode(AudioComponentDescription.CreateConverter(AudioTypeConverter.AU));
            var convertFromEffectUnit = auGraph.GetNodeInfo(convertFromEffectNode);

            var genericOutputNode = auGraph.AddNode(AudioComponentDescription.CreateOutput(AudioTypeOutput.Generic));

            genericOutputUnit = auGraph.GetNodeInfo(genericOutputNode);

            // set the format conversions throughout the graph
            var effectFormat = effectUnit.GetAudioFormat(AudioUnitScopeType.Output);
            var tapFormat    = aqTap.ProcessingFormat;

            // NOTE(review): the SetAudioFormat return values below are ignored while
            // every other AudioUnit call in this method is checked — confirm these
            // cannot fail here, or check them like the SetMaximumFramesPerSlice calls.
            convertToEffectUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Input);
            convertToEffectUnit.SetAudioFormat(effectFormat, AudioUnitScopeType.Output);

            convertFromEffectUnit.SetAudioFormat(effectFormat, AudioUnitScopeType.Input);
            convertFromEffectUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Output);

            genericOutputUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Input);
            genericOutputUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Output);

            // set maximum frames per slice higher (4096) so we don't get kAudioUnitErr_TooManyFramesToProcess
            const uint maxFramesPerSlice = 4096;

            if (convertToEffectUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException("Could not set max frames per slice on to-effect converter");
            }
            if (effectUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException("Could not set max frames per slice on effect unit");
            }
            if (convertFromEffectUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException("Could not set max frames per slice on from-effect converter");
            }
            if (genericOutputUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException("Could not set max frames per slice on generic output unit");
            }

            // connect the nodes
            // NOTE(review): ConnnectNodeInput error returns are also unchecked.
            auGraph.ConnnectNodeInput(convertToEffectNode, 0, effectNode, 0);
            auGraph.ConnnectNodeInput(effectNode, 0, convertFromEffectNode, 0);
            auGraph.ConnnectNodeInput(convertFromEffectNode, 0, genericOutputNode, 0);

            // set up the callback into the first convert unit
            if (convertToEffectUnit.SetRenderCallback(ConvertInputRenderCallback, AudioUnitScopeType.Global) != AudioUnitStatus.NoError)
            {
                throw new ApplicationException("Could not set render callback on to-effect converter");
            }

            var res = auGraph.Initialize();

            if (res != AUGraphError.OK)
            {
                throw new ApplicationException("AUGraph.Initialize failed: " + res);
            }
        }
コード例 #4
0
		// AudioFileStream property listener.  Caches the stream's data format and,
		// once the stream is ready to produce packets, creates the output queue,
		// installs the processing tap, and builds the AUGraph
		// ([render-callback] -> [converter] -> [effect] -> [converter] -> [generic-output])
		// that processes tapped audio.  Writes the fields: dataFormat, audioQueue,
		// aqTap, auGraph, effectUnit, genericOutputUnit.
		void StreamPropertyListenerProc (object sender, PropertyFoundEventArgs args)
		{
			if (args.Property == AudioFileStreamProperty.DataFormat) {
				// Cache the parsed format; needed to create the queue below.
				dataFormat = audioFileStream.DataFormat;
				return;
			}

			// Everything below runs only once the parser can emit packets.
			if (args.Property != AudioFileStreamProperty.ReadyToProducePackets) 
				return;

			if (audioQueue != null) {
				// TODO: Dispose
				throw new NotImplementedException ();
			}

			audioQueue = new OutputAudioQueue (dataFormat);
			// NOTE(review): this sample subscribes OutputCompleted where the
			// sibling sample uses BufferCompleted — presumably a different
			// binding version; confirm against the AudioToolbox API in use.
			audioQueue.OutputCompleted += HandleOutputCompleted;

			AudioQueueStatus status;
			// Install TapProc as a pre-effects tap on the queue.
			aqTap = audioQueue.CreateProcessingTap (TapProc, AudioQueueProcessingTapFlags.PreEffects, out status);
			if (status != AudioQueueStatus.Ok)
				throw new ApplicationException ("Could not create AQ tap");

			// create an augraph to process in the tap. needs to convert from tapFormat to effect format and back
			/* note: this is invalidname's recipe to do an in-place effect when a format conversion is needed
			before and after the effect, usually because effects want floats, and everything else in iOS
			core audio works with ints (or, in rare cases, fixed-point).
			the graph looks like this:
			[render-callback] -> [converter] -> [effect] -> [converter] -> [generic-output]
			prior to calling AudioUnitRender() on generic-output the ioData to a pointer that render-callback
			knows about, and NULLs the ioData provided to AudioUnitRender(). the NULL tells generic-output to
			pull from its upstream units (ie, the augraph), and copying off the ioData pointer allows the
			render-callback	to provide it to the front of the stream. in some locales, this kind of shell game
			is described as "batshit crazy", but it seems to work pretty well in practice.
			*/

			auGraph = new AUGraph ();
			auGraph.Open ();
			// Pitch-shift effect node, plus converters into/out of its format.
			var effectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.NewTimePitch));
			effectUnit = auGraph.GetNodeInfo (effectNode);

			var convertToEffectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.AU));
			var convertToEffectUnit = auGraph.GetNodeInfo (convertToEffectNode);

			var convertFromEffectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.AU));
			var convertFromEffectUnit = auGraph.GetNodeInfo (convertFromEffectNode);

			var genericOutputNode = auGraph.AddNode (AudioComponentDescription.CreateOutput (AudioTypeOutput.Generic));
			genericOutputUnit = auGraph.GetNodeInfo (genericOutputNode);

			// set the format conversions throughout the graph
			var effectFormat = effectUnit.GetAudioFormat (AudioUnitScopeType.Output);
			var tapFormat = aqTap.ProcessingFormat;

			// NOTE(review): the SetAudioFormat return values below are ignored
			// while the later AudioUnit calls are checked — confirm intended.
			convertToEffectUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Input);
			convertToEffectUnit.SetAudioFormat (effectFormat, AudioUnitScopeType.Output);

			convertFromEffectUnit.SetAudioFormat (effectFormat, AudioUnitScopeType.Input);
			convertFromEffectUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Output);

			genericOutputUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Input);
			genericOutputUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Output);

			// set maximum frames per slice higher (4096) so we don't get kAudioUnitErr_TooManyFramesToProcess
			const uint maxFramesPerSlice = 4096;
			if (convertToEffectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (effectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (convertFromEffectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (genericOutputUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();

			// connect the nodes
			auGraph.ConnnectNodeInput (convertToEffectNode, 0, effectNode, 0);
			auGraph.ConnnectNodeInput (effectNode, 0, convertFromEffectNode, 0);
			auGraph.ConnnectNodeInput (convertFromEffectNode, 0, genericOutputNode, 0);

			// set up the callback into the first convert unit
			if (convertToEffectUnit.SetRenderCallback (ConvertInputRenderCallback, AudioUnitScopeType.Global) != AudioUnitStatus.NoError)
				throw new ApplicationException ();

			var res = auGraph.Initialize ();
			if (res != AUGraphError.OK)
				throw new ApplicationException ();
		}
コード例 #5
0
		// Processing tap for a single-buffer queue: fetches the source audio,
		// hands its buffer to the AUGraph via the render callback, and lets the
		// generic-output unit write the processed result back into `data`.
		// Returns the number of frames the source actually provided.
		uint TapProc (AudioQueueProcessingTap audioQueueTap, uint numberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data)
		{
			AudioQueueProcessingTapFlags tapSourceFlags;
			uint framesFromSource;

			// Pull the unprocessed source audio into `data`.
			AudioQueueStatus fetchStatus = audioQueueTap.GetSourceAudio (numberOfFrames, ref timeStamp, out tapSourceFlags, out framesFromSource, data);
			if (fetchStatus != AudioQueueStatus.Ok)
				throw new ApplicationException ();

			// Hand the source buffer pointer to the render callback and NULL the
			// tap's ioData so generic-output pulls from its upstream units.
			preRenderData = data [0].Data;
			data.SetData (0, IntPtr.Zero);

			var outputTimeStamp = new AudioTimeStamp {
				Flags = AudioTimeStamp.AtsFlags.SampleTimeValid
			};
			AudioUnitRenderActionFlags renderActionFlags = 0;

			// Drive the graph; processed audio lands back in `data`.
			AudioUnitStatus renderStatus = genericOutputUnit.Render (ref renderActionFlags, outputTimeStamp, 0, numberOfFrames, data);
			if (renderStatus != AudioUnitStatus.NoError)
				throw new ApplicationException ();

			return framesFromSource;
		}
コード例 #6
0
		// Processing-tap callback (multi-channel variant): pulls the source audio
		// from the queue, stashes each channel's buffer pointer for the render
		// callback, NULLs the tap buffers so the generic-output unit pulls from
		// its upstream units, then renders the processed audio back into `data`.
		// Returns the number of frames the source actually provided.
		uint TapProc (AudioQueueProcessingTap audioQueueTap, uint inNumberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data)
		{
			AudioQueueProcessingTapFlags sourceFlags;
			uint sourceFrames;

			// Fetch the unprocessed source audio; fills `data` in place.
			if (audioQueueTap.GetSourceAudio (inNumberOfFrames, ref timeStamp, out sourceFlags, out sourceFrames, data) != AudioQueueStatus.Ok)
				throw new ApplicationException ();

			// Save each channel's data pointer (preRenderData is a field with one
			// slot per channel) and NULL ioData — the NULL tells generic-output
			// to pull from its upstream units instead of rendering in place.
			for (int channel = 0; channel < data.Count; channel++) {
				preRenderData[channel] = data [channel].Data;
				data.SetData (channel, IntPtr.Zero);
			}

			// renderTimeStamp is a shared field; only SampleTimeValid is set here.
			// NOTE(review): SampleTime itself is never advanced in this method —
			// confirm that is intended.
			renderTimeStamp.Flags = AudioTimeStamp.AtsFlags.SampleTimeValid;
			AudioUnitRenderActionFlags actionFlags = 0;

			// Drive the AUGraph; the processed audio lands back in `data`.
			AudioUnitStatus res = genericOutputUnit.Render (ref actionFlags, renderTimeStamp, 0, inNumberOfFrames, data);
			if (res != AudioUnitStatus.NoError)
				throw new ApplicationException ();

			return sourceFrames;
		}