/// <summary>
/// Queues up a file to record to the next time the beat is played
/// </summary>
/// <param name="fileName">File name.</param>
public void QueueFileRecording(string fileName)
{
    _file = ExtAudioFile.CreateWithUrl(
        new Foundation.NSUrl(fileName, false),
        AudioFileType.WAVE,
        AudioStreamBasicDescription.CreateLinearPCM(),
        AudioFileFlags.EraseFlags,
        out ExtAudioFileError e
    );

    // surface a failure immediately instead of discovering it on the first write
    if (e != ExtAudioFileError.OK)
    {
        throw new ApplicationException($"Could not create {fileName}: {e}");
    }

    _fileRecordingQueued = true;
}
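A minimal usage sketch, assuming a metronome instance that exposes this method and a Play() call that starts the beat; both of those names, and the output path, are illustrative and not part of the code shown above:

var documents = Environment.GetFolderPath(Environment.SpecialFolder.MyDocuments);
var path = System.IO.Path.Combine(documents, "beat.wav");

metronome.QueueFileRecording(path); // recording starts on the next beat
metronome.Play();                   // hypothetical: begins playback/rendering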
unsafe static void RenderAudio(CFUrl sourceUrl, CFUrl destinationUrl)
{
    AudioStreamBasicDescription dataFormat;
    AudioQueueBuffer* buffer = null;
    long currentPacket = 0;
    int packetsToRead = 0;
    AudioStreamPacketDescription[] packetDescs = null;
    bool flushed = false;
    bool done = false;
    int bufferSize;

    using (var audioFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read, (AudioFileType)0))
    {
        dataFormat = audioFile.StreamBasicDescription;

        using (var queue = new OutputAudioQueue(dataFormat, CFRunLoop.Current, CFRunLoop.ModeCommon))
        {
            queue.BufferCompleted += (sender, e) =>
            {
                HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
            };

            // we need to calculate how many packets we read at a time and how big a buffer we need
            // we base this on the size of the packets in the file and an approximate duration for each buffer
            bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;

            // first check to see what the max size of a packet is - if it is bigger
            // than our allocation default size, that needs to become larger
            // adjust buffer size to represent about a second of audio based on this format
            CalculateBytesForTime(dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);

            if (isVBR)
            {
                packetDescs = new AudioStreamPacketDescription[packetsToRead];
            }
            else
            {
                packetDescs = null; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
            }

            if (audioFile.MagicCookie.Length != 0)
            {
                queue.MagicCookie = audioFile.MagicCookie;
            }

            // allocate the input read buffer
            queue.AllocateBuffer(bufferSize, out buffer);

            // prepare the capture format
            var captureFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame, 32);
            captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;
            queue.SetOfflineRenderFormat(captureFormat, audioFile.ChannelLayout);

            // prepare the target format
            var dstFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame);

            using (var captureFile = ExtAudioFile.CreateWithUrl(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags))
            {
                captureFile.ClientDataFormat = captureFormat;

                int captureBufferSize = bufferSize / 2;
                AudioBuffers captureABL = new AudioBuffers(1);
                AudioQueueBuffer* captureBuffer;
                queue.AllocateBuffer(captureBufferSize, out captureBuffer);

                captureABL[0] = new AudioBuffer()
                {
                    Data = captureBuffer->AudioData,
                    NumberChannels = captureFormat.ChannelsPerFrame
                };

                queue.Start();

                double ts = 0;
                queue.RenderOffline(ts, captureBuffer, 0);
                HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);

                while (true)
                {
                    int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;
                    queue.RenderOffline(ts, captureBuffer, reqFrames);
                    captureABL.SetData(0, captureBuffer->AudioData, (int)captureBuffer->AudioDataByteSize);
                    var writeFrames = captureABL[0].DataByteSize / captureFormat.BytesPerFrame;

                    // Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes",
                    //     ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);

                    captureFile.WriteAsync((uint)writeFrames, captureABL);

                    if (flushed)
                    {
                        break;
                    }

                    ts += writeFrames;
                }

                CFRunLoop.Current.RunInMode(CFRunLoop.ModeDefault, 1, false);
            }
        }
    }
}
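A sketch of how this might be driven, assuming a compressed source file named Loop.aac bundled with the app and a temporary path as the destination (both file names are illustrative):

var source = CFUrl.FromFile("Loop.aac");
var destination = CFUrl.FromFile(System.IO.Path.Combine(System.IO.Path.GetTempPath(), "Rendered.caf"));

// decode the source through the offline queue and capture the result as CAF
RenderAudio(source, destination);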
/// <summary>
/// Renders the given number of seconds to the given wav file
/// </summary>
/// <param name="fileName">File name.</param>
/// <param name="seconds">Seconds.</param>
public void RenderToFile(string fileName, double seconds)
{
    long samples = (long)(seconds * Metronome.SampleRate);

    var inputStream = MixerNode.GetAudioFormat(AudioUnitScopeType.Output);
    var outputStream = AudioStreamBasicDescription.CreateLinearPCM(44100, 2);
    AudioConverter converter = AudioConverter.Create(inputStream, outputStream);

    var file = ExtAudioFile.CreateWithUrl(
        new Foundation.NSUrl(fileName, false),
        AudioFileType.WAVE,
        outputStream,
        AudioFileFlags.EraseFlags,
        out ExtAudioFileError e
    );

    if (e != ExtAudioFileError.OK)
    {
        throw new ApplicationException($"Could not create {fileName}: {e}");
    }

    long samplesRead = 0;

    // initialize the buffers: two non-interleaved mono buffers from the mixer
    var buffers = new AudioBuffers(2);
    buffers[0] = new AudioBuffer()
    {
        DataByteSize = BufferSize * 4,
        NumberChannels = 1,
        Data = Marshal.AllocHGlobal(sizeof(float) * BufferSize)
    };
    buffers[1] = new AudioBuffer()
    {
        DataByteSize = BufferSize * 4,
        NumberChannels = 1,
        Data = Marshal.AllocHGlobal(sizeof(float) * BufferSize)
    };

    // one interleaved stereo buffer for the converted output
    var convBuffers = new AudioBuffers(1);
    convBuffers[0] = new AudioBuffer()
    {
        DataByteSize = BufferSize * 4,
        NumberChannels = 2,
        Data = Marshal.AllocHGlobal(sizeof(float) * BufferSize)
    };

    while (samples > 0)
    {
        int numSamples = (int)Math.Min(BufferSize, samples);

        // get samples from the mixer
        Render((uint)numSamples, buffers, samplesRead);

        // convert to the file's format
        converter.ConvertComplexBuffer(numSamples, buffers, convBuffers);

        // write samples to the file
        var error = file.Write((uint)numSamples, convBuffers);
        if (error != ExtAudioFileError.OK)
        {
            throw new ApplicationException();
        }

        samples -= numSamples;
        samplesRead += numSamples;
    }

    // free the unmanaged sample memory before disposing the buffer lists
    Marshal.FreeHGlobal(buffers[0].Data);
    Marshal.FreeHGlobal(buffers[1].Data);
    Marshal.FreeHGlobal(convBuffers[0].Data);

    buffers.Dispose();
    convBuffers.Dispose();
    converter.Dispose();
    file.Dispose();
}
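A usage sketch, assuming a metronome instance exposing this method and a writable documents path (both illustrative):

// hypothetical: bounce ten seconds of the mix to render.wav
var output = System.IO.Path.Combine(
    Environment.GetFolderPath(Environment.SpecialFolder.MyDocuments),
    "render.wav");
metronome.RenderToFile(output, 10.0);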