void PrepareExtAudioFile()
        {
            extAudioFile = ExtAudioFile.OpenUrl(url);
            CheckValue(extAudioFile, "ExtAudioFile.OpenUrl failed");

            srcFormat = extAudioFile.FileDataFormat;

            // This is how you say, “When you convert the data, this is the format I’d like to receive.”
            // The client data format must be PCM. In other words, you can’t use a single ExtAudioFile to convert between two compressed formats.
            extAudioFile.ClientDataFormat = dstFormat;

            // getting the total frame count
            TotalFrames = extAudioFile.FileLengthFrames;

            // Allocating AudioBufferList
            buffer = new AudioBuffers(srcFormat.ChannelsPerFrame);
            for (int i = 0; i < buffer.Count; ++i)
            {
                int size = (int)(sizeof(int) * TotalFrames);
                buffer.SetData(i, Marshal.AllocHGlobal(size), size);
            }
            numberOfChannels = srcFormat.ChannelsPerFrame;

            // Reading all frames into the buffer
            ExtAudioFileError status;

            extAudioFile.Read((uint)TotalFrames, buffer, out status);
            if (status != ExtAudioFileError.OK)
            {
                throw new ApplicationException();
            }
        }
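The dstFormat field consumed above is not defined in this snippet. A minimal sketch of how it might be built, assuming the same 32-bit non-interleaved linear PCM client format used by the prepareExtAudioFile variant further down this page (MakeClientFormat is an illustrative name, not part of the original):

        // Sketch only (not part of the original example): derive a 32-bit,
        // non-interleaved linear PCM client format from the file's source format
        // before assigning it to extAudioFile.ClientDataFormat.
        AudioStreamBasicDescription MakeClientFormat(AudioStreamBasicDescription source)
        {
            var fmt = AudioStreamBasicDescription.CreateLinearPCM(
                source.SampleRate, (uint)source.ChannelsPerFrame, 32);
            fmt.FormatFlags |= AudioFormatFlags.IsNonInterleaved;
            return fmt;
        }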
        uint TapProc(AudioQueueProcessingTap audioQueueTap, uint numberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data)
        {
            AudioQueueProcessingTapFlags source_flags;
            uint source_frames;

            if (audioQueueTap.GetSourceAudio(numberOfFrames, ref timeStamp, out source_flags, out source_frames, data) != AudioQueueStatus.Ok)
            {
                throw new ApplicationException();
            }

            // Cache the tapped source audio for the render callback and detach
            // the pointer from the tap's buffer list.
            preRenderData = data [0].Data;
            data.SetData(0, IntPtr.Zero);

            var renderTimeStamp = new AudioTimeStamp();

            renderTimeStamp.Flags = AudioTimeStamp.AtsFlags.SampleTimeValid;
            AudioUnitRenderActionFlags action_flags = 0;

            var res = genericOutputUnit.Render(ref action_flags, renderTimeStamp, 0, numberOfFrames, data);

            if (res != AudioUnitStatus.NoError)
            {
                throw new ApplicationException();
            }

            return(source_frames);
        }
        uint TapProc(AudioQueueProcessingTap audioQueueTap, uint inNumberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data)
        {
            AudioQueueProcessingTapFlags sourceFlags;
            uint sourceFrames;

            if (audioQueueTap.GetSourceAudio(inNumberOfFrames, ref timeStamp, out sourceFlags, out sourceFrames, data) != AudioQueueStatus.Ok)
            {
                throw new ApplicationException();
            }

            // Cache each channel's tapped audio for the render callback and
            // detach the pointers from the tap's buffer list.
            for (int channel = 0; channel < data.Count; channel++)
            {
                preRenderData[channel] = data [channel].Data;
                data.SetData(channel, IntPtr.Zero);
            }

            renderTimeStamp.Flags = AudioTimeStamp.AtsFlags.SampleTimeValid;
            AudioUnitRenderActionFlags actionFlags = 0;

            AudioUnitStatus res = genericOutputUnit.Render(ref actionFlags, renderTimeStamp, 0, inNumberOfFrames, data);

            if (res != AudioUnitStatus.NoError)
            {
                throw new ApplicationException();
            }

            return(sourceFrames);
        }
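Neither TapProc variant shows how the tap itself gets created. A minimal sketch, assuming the Xamarin.iOS AudioQueue.CreateProcessingTap binding and an existing playback queue (AttachProcessingTap is an illustrative name):

        // Sketch only: attach a processing tap whose callback is one of the
        // TapProc methods above. PreEffects taps the audio before queue effects run.
        void AttachProcessingTap(OutputAudioQueue queue)
        {
            AudioQueueStatus status;
            // Keep a reference to the tap for the lifetime of the queue.
            var tap = queue.CreateProcessingTap(TapProc, AudioQueueProcessingTapFlags.PreEffects, out status);
            if (status != AudioQueueStatus.Ok)
                throw new ApplicationException("CreateProcessingTap failed: " + status);
        }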
Example #4
        // Input data proc callback
        AudioConverterError EncoderDataProc(AudioFileIO afio, ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription [] dataPacketDescription)
        {
            // figure out how much to read
            int maxPackets = afio.SrcBufferSize / afio.SrcSizePerPacket;

            if (numberDataPackets > maxPackets)
            {
                numberDataPackets = maxPackets;
            }

            // read from the file
            int outNumBytes = 16384;

            // modified for iOS 7 (ReadPackets is deprecated)
            afio.PacketDescriptions = afio.SourceFile.ReadPacketData(false, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer, ref outNumBytes);

            if (afio.PacketDescriptions.Length == 0 && numberDataPackets > 0)
            {
                throw new ApplicationException("ReadPacketData returned no packet descriptions");
            }

            // advance input file packet position
            afio.SrcFilePos += numberDataPackets;

            // put the data pointer into the buffer list
            data.SetData(0, afio.SrcBuffer, outNumBytes);

            // don't forget the packet descriptions if required
            if (dataPacketDescription is not null)
            {
                dataPacketDescription = afio.PacketDescriptions;
            }

            return(AudioConverterError.None);
        }
 AudioUnitStatus ConvertInputRenderCallback(AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
 {
     renderTimeStamp.SampleTime += numberFrames;
     for (int channel = 0; channel < data.Count; channel++)
     {
         data.SetData(channel, preRenderData [channel]);
     }
     return(AudioUnitStatus.NoError);
 }
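ConvertInputRenderCallback and the TapProc variants shown earlier cooperate through a generic output AudioUnit: the tap stashes the queue's samples in preRenderData, renders through the unit, and the unit's render input pulls them back via this callback. A sketch of the assumed wiring, assuming the Xamarin.iOS AudioComponent.FindComponent/CreateAudioUnit and SetRenderCallback bindings (PrepareGenericOutputUnit is an illustrative name):

 // Sketch only: create the generic output unit whose render input is the
 // callback above, so genericOutputUnit.Render in TapProc pulls the tapped audio.
 void PrepareGenericOutputUnit()
 {
     var component = AudioComponent.FindComponent(AudioTypeOutput.Generic);
     genericOutputUnit = component.CreateAudioUnit();
     genericOutputUnit.SetRenderCallback(ConvertInputRenderCallback, AudioUnitScopeType.Input);
     genericOutputUnit.Initialize();
 }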
Example #6
        public AudioUnitStatus InternalRenderBlockProc(ref AudioUnitRenderActionFlags actionFlags, ref AudioTimeStamp timestamp, uint frameCount, nint outputBusNumber, AudioBuffers outputData, AURenderEventEnumerator realtimeEventListHead, AURenderPullInputBlock pullInputBlock)
        {
            var transportStateFlags = (AUHostTransportStateFlags)0;

            double currentSamplePosition  = 0;
            double cycleStartBeatPosition = 0;
            double cycleEndBeatPosition   = 0;

            var callBack = TransportStateBlock;

            if (callBack != null)
            {
                callBack(ref transportStateFlags, ref currentSamplePosition, ref cycleStartBeatPosition, ref cycleEndBeatPosition);
            }

            var state = Kernel;
            var input = inputBus;

            var             pullFlags = (AudioUnitRenderActionFlags)0;
            AudioUnitStatus err       = input.PullInput(ref pullFlags, timestamp, frameCount, 0, pullInputBlock);

            if (err != AudioUnitStatus.NoError)
            {
                return(err);
            }

            AudioBuffers inAudioBufferList = input.MutableAudioBufferList;

            if (outputData [0].Data == IntPtr.Zero)
            {
                for (int i = 0; i < outputData.Count; i++)
                {
                    outputData.SetData(i, inAudioBufferList [i].Data);
                }
            }

            state.SetBuffers(inAudioBufferList, outputData);
            state.ProcessWithEvents(timestamp, (int)frameCount, realtimeEventListHead);

            return(AudioUnitStatus.NoError);
        }
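In the AUAudioUnit (Audio Unit v3) model, a render proc like the one above is normally handed back from the unit's InternalRenderBlock override. A sketch, assuming this method lives in an AUAudioUnit subclass that defines the Kernel and inputBus members referenced above:

        // Sketch only: expose the render proc above as the unit's internal render block.
        public override AUInternalRenderBlock InternalRenderBlock {
            get {
                return InternalRenderBlockProc;
            }
        }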
Example #7
        // Input data proc callback
        AudioConverterError EncoderDataProc(ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
        {
            // figure out how much to read
            int maxPackets = afio.SrcBufferSize / afio.SrcSizePerPacket;

            if (numberDataPackets > maxPackets)
            {
                numberDataPackets = maxPackets;
            }

            // read from the file
            int outNumBytes;
            var res = afio.SourceFile.ReadPackets(false, out outNumBytes, afio.PacketDescriptions, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer);

            if (res != 0)
            {
                throw new ApplicationException(res.ToString());
            }

            // advance input file packet position
            afio.SrcFilePos += numberDataPackets;

            // put the data pointer into the buffer list
            data.SetData(0, afio.SrcBuffer, outNumBytes);

            // don't forget the packet descriptions if required
            if (dataPacketDescription != null)
            {
                if (afio.PacketDescriptions != null)
                {
                    dataPacketDescription = afio.PacketDescriptions;
                }
                else
                {
                    dataPacketDescription = null;
                }
            }

            return(AudioConverterError.None);
        }
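This EncoderDataProc matches the AudioConverter input-data delegate, so it would typically be pumped by FillComplexBuffer. A minimal sketch of the assumed driver (FillOnce is an illustrative name; converter creation and output buffer allocation are omitted):

        // Sketch only: register the callback above and ask the converter to fill
        // one batch of output packets; outputPackets returns the count produced.
        void FillOnce(AudioConverter converter, AudioBuffers outputBuffers, AudioStreamPacketDescription[] outputPacketDescriptions)
        {
            // subscribe once in real code; shown here for completeness
            converter.InputData += EncoderDataProc;

            int outputPackets = outputPacketDescriptions.Length;
            var err = converter.FillComplexBuffer(ref outputPackets, outputBuffers, outputPacketDescriptions);
            if (err != AudioConverterError.None)
                throw new ApplicationException("FillComplexBuffer failed: " + err);
        }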
Example #8
        void prepareExtAudioFile()
        {
            // Opening Audio File
            _extAudioFile = ExtAudioFile.OpenUrl(_url);

            // Getting file data format
            _srcFormat = _extAudioFile.FileDataFormat;

            // Set the output format to the same channel count as the input format
            _dstFormat              = AudioStreamBasicDescription.CreateLinearPCM(channelsPerFrame: (uint)_srcFormat.ChannelsPerFrame, bitsPerChannel: 32);
            _dstFormat.FormatFlags |= AudioFormatFlags.IsNonInterleaved;

            // Set the reading (client) format to the audio unit canonical format
            _extAudioFile.ClientDataFormat = _dstFormat;

            // getting the total frame count
            _totalFrames = _extAudioFile.FileLengthFrames;

            // Allocating AudioBufferList
            _buffer = new AudioBuffers(_srcFormat.ChannelsPerFrame);
            for (int i = 0; i < _buffer.Count; ++i)
            {
                int size = (int)(sizeof(uint) * _totalFrames);
                _buffer.SetData(i, Marshal.AllocHGlobal(size), size);
            }
            _numberOfChannels = _srcFormat.ChannelsPerFrame;

            // Reading all frames into the buffer
            ExtAudioFileError status;

            _extAudioFile.Read((uint)_totalFrames, _buffer, out status);
            if (status != ExtAudioFileError.OK)
            {
                throw new ApplicationException();
            }
        }
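The per-channel buffers above are allocated with Marshal.AllocHGlobal and are never released in this snippet. A sketch of the cleanup the caller would presumably need (ReleaseBuffers is an illustrative name):

        // Sketch only: free the unmanaged per-channel buffers once the audio data
        // is no longer needed.
        void ReleaseBuffers()
        {
            if (_buffer == null)
                return;

            for (int i = 0; i < _buffer.Count; ++i)
                Marshal.FreeHGlobal(_buffer[i].Data);

            _buffer = null;
        }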
 AudioUnitStatus ConvertInputRenderCallback(AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
 {
     data.SetData(0, preRenderData);
     return(AudioUnitStatus.NoError);
 }
Example #10
        public static bool GetDataFromExtAudioFile(ExtAudioFile ext, AudioStreamBasicDescription outputFormat, int maxBufferSize,
                                                   byte[] dataBuffer, out int dataBufferSize, out ALFormat format, out double sampleRate)
        {
            uint errorStatus        = 0;
            uint bufferSizeInFrames = 0;

            dataBufferSize = 0;
            format         = ALFormat.Mono16;
            sampleRate     = 0;
            /* Compute how many frames will fit into our max buffer size */
            bufferSizeInFrames = (uint)(maxBufferSize / outputFormat.BytesPerFrame);

            if (dataBuffer != null)
            {
                var audioBufferList = new AudioBuffers(maxBufferSize);

                // This is a hack, so if there is a problem speak to kjpou1 -Kenneth.
                // The cleanest way would be to copy the buffer into the pointer already allocated,
                // but what we are going to do is replace the pointer with our own and restore it later.
                //
                GCHandle meBePinned  = GCHandle.Alloc(dataBuffer, GCHandleType.Pinned);
                IntPtr   meBePointer = meBePinned.AddrOfPinnedObject();

                audioBufferList.SetData(0, meBePointer);

                try {
                    // Read the data into an AudioBufferList.
                    // ExtAudioFile.Read returns the number of frames actually read,
                    // and extAudioFileError reports success or failure.
                    ExtAudioFileError extAudioFileError = ExtAudioFileError.OK;
                    errorStatus = ext.Read(bufferSizeInFrames, audioBufferList, out extAudioFileError);
                    if (extAudioFileError == ExtAudioFileError.OK)
                    {
                        /* Success */
                        /* Note: reading 0 frames is a legitimate value meaning we are at EOF. */

                        /* errorStatus holds the number of frames actually read;
                         * convert that back to bytes.
                         */
                        dataBufferSize = (int)errorStatus * outputFormat.BytesPerFrame;

                        // Now we set our format
                        format = outputFormat.ChannelsPerFrame > 1 ? ALFormat.Stereo16 : ALFormat.Mono16;

                        sampleRate = outputFormat.SampleRate;
                    }
                    else
                    {
#if DEBUG
                        Console.WriteLine("ExtAudioFile.Read failed, Error = " + extAudioFileError);
#endif
                        return(false);
                    }
                } catch (Exception exc) {
#if DEBUG
                    Console.WriteLine("ExtAudioFile.Read failed: " + exc.Message);
#endif
                    return(false);
                } finally {
                    // Don't forget to free our dataBuffer memory pointer that was pinned above
                    meBePinned.Free();
                    // and restore what was allocated to begin with
                    audioBufferList.SetData(0, IntPtr.Zero);
                }
            }
            return(true);
        }
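A hypothetical caller for GetDataFromExtAudioFile, assuming the ExtAudioFile has already had its ClientDataFormat set to the 16-bit outputFormat being requested (ReadChunk is an illustrative name):

        // Sketch only: pull one chunk of PCM into a managed buffer. What to do with
        // the bytes afterwards (e.g. queueing them on an OpenAL buffer) is up to the caller.
        static void ReadChunk(ExtAudioFile ext, AudioStreamBasicDescription outputFormat)
        {
            const int maxBufferSize = 0x10000;
            var pcm = new byte[maxBufferSize];

            int dataSize;
            ALFormat format;
            double sampleRate;
            if (GetDataFromExtAudioFile(ext, outputFormat, maxBufferSize, pcm, out dataSize, out format, out sampleRate))
            {
                // dataSize bytes of PCM are now in pcm; 0 bytes means end of file.
            }
        }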
Example #11
		AudioUnitStatus ConvertInputRenderCallback (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
		{
			data.SetData (0, preRenderData);
			return AudioUnitStatus.NoError;
		}
Example #12
		uint TapProc (AudioQueueProcessingTap audioQueueTap, uint numberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data)
		{
			AudioQueueProcessingTapFlags source_flags;
			uint source_frames;

			if (audioQueueTap.GetSourceAudio (numberOfFrames, ref timeStamp, out source_flags, out source_frames, data) != AudioQueueStatus.Ok)
				throw new ApplicationException ();

			preRenderData = data [0].Data;
			data.SetData (0, IntPtr.Zero);

			var renderTimeStamp = new AudioTimeStamp ();
			renderTimeStamp.Flags = AudioTimeStamp.AtsFlags.SampleTimeValid;
			AudioUnitRenderActionFlags action_flags = 0;

			var res = genericOutputUnit.Render (ref action_flags, renderTimeStamp, 0, numberOfFrames, data);
			if (res != AudioUnitStatus.NoError)
				throw new ApplicationException ();

			return source_frames;
		}
Example #13
		unsafe static void RenderAudio (CFUrl sourceUrl, CFUrl destinationUrl)
		{
			AudioStreamBasicDescription dataFormat;
			AudioQueueBuffer *buffer = null;
			long currentPacket = 0;
			int packetsToRead = 0;
			AudioStreamPacketDescription [] packetDescs = null;
			bool flushed = false;
			bool done = false;
			int bufferSize;
			
			using (var audioFile = AudioFile.Open (sourceUrl, AudioFilePermission.Read, (AudioFileType) 0)) {
				dataFormat = audioFile.StreamBasicDescription;
				
				using (var queue = new OutputAudioQueue (dataFormat, CFRunLoop.Current, CFRunLoop.CFRunLoopCommonModes)) {
					queue.OutputCompleted += (sender, e) => 
					{
						HandleOutput (audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
					};
					
					// we need to calculate how many packets we read at a time and how big a buffer we need
					// we base this on the size of the packets in the file and an approximate duration for each buffer
					bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;
					
					// first check to see what the max size of a packet is - if it is bigger
					// than our allocation default size, that needs to become larger
					// adjust buffer size to represent about a second of audio based on this format 
					CalculateBytesForTime (dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);
				
					if (isVBR) {
						packetDescs = new AudioStreamPacketDescription [packetsToRead];
					} else {
						packetDescs = null; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
					}
				
					if (audioFile.MagicCookie.Length != 0)
						queue.MagicCookie = audioFile.MagicCookie;
		
					// allocate the input read buffer
					queue.AllocateBuffer (bufferSize, out buffer);
					
					// prepare the capture format
					var captureFormat = AudioStreamBasicDescription.CreateLinearPCM (dataFormat.SampleRate, (uint) dataFormat.ChannelsPerFrame, 32);
					captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;

					queue.SetOfflineRenderFormat (captureFormat, audioFile.ChannelLayout);
					
					// prepare the target format
					var dstFormat = AudioStreamBasicDescription.CreateLinearPCM (dataFormat.SampleRate, (uint) dataFormat.ChannelsPerFrame);

					using (var captureFile = ExtAudioFile.CreateWithUrl (destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
						captureFile.ClientDataFormat = captureFormat;
						
						int captureBufferSize = bufferSize / 2;
						AudioBuffers captureABL = new AudioBuffers (1);
						
						AudioQueueBuffer *captureBuffer;
						queue.AllocateBuffer (captureBufferSize, out captureBuffer);
						
						captureABL[0] = new AudioBuffer () {
							Data = captureBuffer->AudioData,
							NumberChannels = captureFormat.ChannelsPerFrame
						};

						queue.Start ();

						double ts = 0;
						queue.RenderOffline (ts, captureBuffer, 0);
						
						HandleOutput (audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
						
						while (true) {
							int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;
							
							queue.RenderOffline (ts, captureBuffer, reqFrames);

							captureABL.SetData (0, captureBuffer->AudioData, (int) captureBuffer->AudioDataByteSize);
							var writeFrames = captureABL[0].DataByteSize / captureFormat.BytesPerFrame;
							
							// Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes", 
							//	ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);
							
							captureFile.WriteAsync ((uint) writeFrames, captureABL);
							
							if (flushed)
								break;
							
							ts += writeFrames;
						}
					
						CFRunLoop.Current.RunInMode (CFRunLoop.CFDefaultRunLoopMode, 1, false);
					}
				}
			}
		}
		AudioUnitStatus ConvertInputRenderCallback (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
		{
			renderTimeStamp.SampleTime += numberFrames;
			for (int channel = 0; channel < data.Count; channel++) {
				data.SetData (channel, preRenderData [channel]);
			}
			return AudioUnitStatus.NoError;
		}
		uint TapProc (AudioQueueProcessingTap audioQueueTap, uint inNumberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data)
		{
			AudioQueueProcessingTapFlags sourceFlags;
			uint sourceFrames;

			if (audioQueueTap.GetSourceAudio (inNumberOfFrames, ref timeStamp, out sourceFlags, out sourceFrames, data) != AudioQueueStatus.Ok)
				throw new ApplicationException ();

			for (int channel = 0; channel < data.Count; channel++) {
				preRenderData[channel] = data [channel].Data;
				data.SetData (channel, IntPtr.Zero);
			}

			renderTimeStamp.Flags = AudioTimeStamp.AtsFlags.SampleTimeValid;
			AudioUnitRenderActionFlags actionFlags = 0;

			AudioUnitStatus res = genericOutputUnit.Render (ref actionFlags, renderTimeStamp, 0, inNumberOfFrames, data);
			if (res != AudioUnitStatus.NoError)
				throw new ApplicationException ();

			return sourceFrames;
		}
        // Input data proc callback
        AudioConverterError EncoderDataProc(ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
        {
            // figure out how much to read
            int maxPackets = afio.SrcBufferSize / afio.SrcSizePerPacket;
            if (numberDataPackets > maxPackets)
                numberDataPackets = maxPackets;

            // read from the file
            int outNumBytes = 16384;

            // modified for iOS 7 (ReadPackets is deprecated)
			afio.PacketDescriptions = afio.SourceFile.ReadPacketData(false, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer, ref outNumBytes);

			if (afio.PacketDescriptions.Length == 0 && numberDataPackets > 0)
				throw new ApplicationException("ReadPacketData returned no packet descriptions");

            // advance input file packet position
            afio.SrcFilePos += numberDataPackets;

            // put the data pointer into the buffer list
            data.SetData(0, afio.SrcBuffer, outNumBytes);

            // don't forget the packet descriptions if required
            if (dataPacketDescription != null)
                dataPacketDescription = afio.PacketDescriptions;

            return AudioConverterError.None;
        }
		// Input data proc callback
		AudioConverterError EncoderDataProc (ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
		{
			// figure out how much to read
			int maxPackets = afio.SrcBufferSize / afio.SrcSizePerPacket;
			if (numberDataPackets > maxPackets)
				numberDataPackets = maxPackets;
			
			// read from the file
			int outNumBytes;
			var res = afio.SourceFile.ReadPackets (false, out outNumBytes, afio.PacketDescriptions, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer);
			if (res != 0) {
				throw new ApplicationException (res.ToString ());
			}

			// advance input file packet position
			afio.SrcFilePos += numberDataPackets;
			
			// put the data pointer into the buffer list
			data.SetData (0, afio.SrcBuffer, outNumBytes);

			// don't forget the packet descriptions if required
			if (dataPacketDescription != null) {
				if (afio.PacketDescriptions != null) {
					dataPacketDescription = afio.PacketDescriptions;
				} else {
					dataPacketDescription = null;
				}
			}

			return AudioConverterError.None;
		}
Example #18
        unsafe static void RenderAudio(CFUrl sourceUrl, CFUrl destinationUrl)
        {
            AudioStreamBasicDescription dataFormat;
            AudioQueueBuffer *          buffer = null;
            long currentPacket = 0;
            int  packetsToRead = 0;

            AudioStreamPacketDescription[] packetDescs = null;
            bool flushed = false;
            bool done    = false;
            int  bufferSize;

            using (var audioFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read, (AudioFileType)0)) {
                dataFormat = audioFile.StreamBasicDescription;

                using (var queue = new OutputAudioQueue(dataFormat, CFRunLoop.Current, CFRunLoop.ModeCommon)) {
                    queue.BufferCompleted += (sender, e) => {
                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
                    };

                    // we need to calculate how many packets we read at a time and how big a buffer we need
                    // we base this on the size of the packets in the file and an approximate duration for each buffer
                    bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;

                    // first check to see what the max size of a packet is - if it is bigger
                    // than our allocation default size, that needs to become larger
                    // adjust buffer size to represent about a second of audio based on this format
                    CalculateBytesForTime(dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);

                    if (isVBR)
                    {
                        packetDescs = new AudioStreamPacketDescription [packetsToRead];
                    }
                    else
                    {
                        packetDescs = null;                         // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
                    }

                    if (audioFile.MagicCookie.Length != 0)
                    {
                        queue.MagicCookie = audioFile.MagicCookie;
                    }

                    // allocate the input read buffer
                    queue.AllocateBuffer(bufferSize, out buffer);

                    // prepare the capture format
                    var captureFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame, 32);
                    captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;

                    queue.SetOfflineRenderFormat(captureFormat, audioFile.ChannelLayout);

                    // prepare the target format
                    var dstFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame);

                    using (var captureFile = ExtAudioFile.CreateWithUrl(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
                        captureFile.ClientDataFormat = captureFormat;

                        int          captureBufferSize = bufferSize / 2;
                        AudioBuffers captureABL        = new AudioBuffers(1);

                        AudioQueueBuffer *captureBuffer;
                        queue.AllocateBuffer(captureBufferSize, out captureBuffer);

                        captureABL [0] = new AudioBuffer()
                        {
                            Data           = captureBuffer->AudioData,
                            NumberChannels = captureFormat.ChannelsPerFrame
                        };

                        queue.Start();

                        double ts = 0;
                        queue.RenderOffline(ts, captureBuffer, 0);

                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);

                        while (true)
                        {
                            int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;

                            queue.RenderOffline(ts, captureBuffer, reqFrames);

                            captureABL.SetData(0, captureBuffer->AudioData, (int)captureBuffer->AudioDataByteSize);
                            var writeFrames = captureABL [0].DataByteSize / captureFormat.BytesPerFrame;

                            // Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes",
                            // ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);

                            captureFile.WriteAsync((uint)writeFrames, captureABL);

                            if (flushed)
                            {
                                break;
                            }

                            ts += writeFrames;
                        }

                        CFRunLoop.Current.RunInMode(CFRunLoop.ModeDefault, 1, false);
                    }
                }
            }
        }
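RenderAudio relies on two helpers that are not shown here, CalculateBytesForTime and HandleOutput. A sketch of what CalculateBytesForTime might look like, following the usual Core Audio sample logic of sizing the buffer for roughly the requested duration and clamping it between 16 KB and 64 KB (the exact limits are assumptions, not taken from this page):

        // Sketch only: size the read buffer for about 'seconds' of audio and derive
        // how many packets fit in it. Bounds are the conventional 0x4000/0x10000 limits.
        static void CalculateBytesForTime(AudioStreamBasicDescription desc, int maxPacketSize, double seconds, out int bufferSize, out int packetsToRead)
        {
            const int maxBufferSize = 0x10000;
            const int minBufferSize = 0x4000;

            if (desc.FramesPerPacket > 0)
                bufferSize = (int)(desc.SampleRate / desc.FramesPerPacket * seconds) * maxPacketSize;
            else
                bufferSize = Math.Max(maxBufferSize, maxPacketSize);

            if (bufferSize > maxBufferSize && bufferSize > maxPacketSize)
                bufferSize = maxBufferSize;
            else if (bufferSize < minBufferSize)
                bufferSize = minBufferSize;

            packetsToRead = bufferSize / maxPacketSize;
        }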
        void PrepareExtAudioFile()
        {
			extAudioFile = ExtAudioFile.OpenUrl(url);
			CheckValue (extAudioFile, "ExtAudioFile.OpenUrl failed");

			srcFormat = extAudioFile.FileDataFormat;

			// This is how you say, “When you convert the data, this is the format I’d like to receive.”
			// The client data format must be PCM. In other words, you can’t use a single ExtAudioFile to convert between two compressed formats.
            extAudioFile.ClientDataFormat = dstFormat;

            // getting the total frame count
			TotalFrames = extAudioFile.FileLengthFrames;

            // Allocating AudioBufferList
			buffer = new AudioBuffers(srcFormat.ChannelsPerFrame);
            for (int i = 0; i < buffer.Count; ++i)
            {
                int size = (int)(sizeof(int) * TotalFrames);
                buffer.SetData(i, Marshal.AllocHGlobal(size), size);
            }
			numberOfChannels = srcFormat.ChannelsPerFrame;

            // Reading all frames into the buffer
            ExtAudioFileError status;
            extAudioFile.Read((uint)TotalFrames, buffer, out status);
            if (status != ExtAudioFileError.OK)
                throw new ApplicationException();
        }
		void prepareExtAudioFile ()
		{
			// Opening Audio File
			_extAudioFile = ExtAudioFile.OpenUrl (_url);

			// Getting file data format
			_srcFormat = _extAudioFile.FileDataFormat;

			// Set the output format to the same channel count as the input format
			_dstFormat = AudioStreamBasicDescription.CreateLinearPCM (channelsPerFrame: (uint)_srcFormat.ChannelsPerFrame, bitsPerChannel: 32);
			_dstFormat.FormatFlags |= AudioFormatFlags.IsNonInterleaved;

			// Set the reading (client) format to the audio unit canonical format
			_extAudioFile.ClientDataFormat = _dstFormat;

			// getting the total frame count
			_totalFrames = _extAudioFile.FileLengthFrames;

			// Allocating AudioBufferList
			_buffer = new AudioBuffers (_srcFormat.ChannelsPerFrame);
			for (int i = 0; i < _buffer.Count; ++i) {
				int size = (int)(sizeof(uint) * _totalFrames);
				_buffer.SetData (i, Marshal.AllocHGlobal (size), size);
			}
			_numberOfChannels = _srcFormat.ChannelsPerFrame;

			// Reading all frames into the buffer
			ExtAudioFileError status;
			_extAudioFile.Read ((uint)_totalFrames, _buffer, out status);
			if (status != ExtAudioFileError.OK)
				throw new ApplicationException ();
		}