// Opens the source audio file, sets the client (conversion target) format to
// dstFormat and reads the entire file into per-channel unmanaged buffers.
// NOTE(review): assumes `url` and `dstFormat` are set before this runs — confirm callers.
void PrepareExtAudioFile()
{
    extAudioFile = ExtAudioFile.OpenUrl(url);
    CheckValue(extAudioFile, "ExtAudioFile.OpenUrl failed");
    srcFormat = extAudioFile.FileDataFormat;
    // This is how you say,“When you convert the data, this is the format I’d like to receive.”
    // The client data format must be PCM. In other words, you can’t use a single ExtAudioFile to convert between two compressed formats.
    extAudioFile.ClientDataFormat = dstFormat;
    // getting total frame
    TotalFrames = extAudioFile.FileLengthFrames;
    // Allocating AudioBufferList — one buffer per source channel (non-interleaved layout).
    buffer = new AudioBuffers(srcFormat.ChannelsPerFrame);
    for (int i = 0; i < buffer.Count; ++i) {
        // 4 bytes per frame per channel — assumes dstFormat uses 32-bit samples; TODO confirm.
        int size = (int)(sizeof(int) * TotalFrames);
        buffer.SetData(i, Marshal.AllocHGlobal(size), size);
    }
    numberOfChannels = srcFormat.ChannelsPerFrame;
    // Reading all frame into the buffer
    ExtAudioFileError status;
    extAudioFile.Read((uint)TotalFrames, buffer, out status);
    if (status != ExtAudioFileError.OK) {
        throw new ApplicationException();
    }
}
/// <summary>
/// Opens the audio file at <paramref name="url"/> and configures a 16-bit
/// signed-integer linear PCM client format (preserving the source sample rate
/// and channel count), suitable for reading data destined for OpenAL.
/// </summary>
/// <param name="url">Location of the audio file to open.</param>
/// <param name="audioDescription">Receives the configured client (output) format.</param>
/// <returns>The opened ExtAudioFile, or null when the file has more than two channels.</returns>
public static ExtAudioFile GetExtAudioFile(NSUrl url, out AudioStreamBasicDescription audioDescription)
{
    // Notice the following line that we can not pass a NSUrl to a CFUrl
    //ExtAudioFile ext = ExtAudioFile.OpenUrl(url);

    // Basic Descriptions
    AudioStreamBasicDescription fileFormat;
    AudioStreamBasicDescription outputFormat;

    // So now we create a CFUrl
    CFUrl curl = CFUrl.FromFile(url.Path);

    // Open the file
    ExtAudioFile ext = ExtAudioFile.OpenUrl(curl);

    // Get the audio format
    fileFormat = ext.FileDataFormat;

    // Don't know how to handle sounds with more than 2 channels (i.e. stereo)
    // Remember that OpenAL sound effects must be mono to be spatialized anyway.
    if (fileFormat.ChannelsPerFrame > 2) {
#if DEBUG
        // FIX: the placeholder was "[0]", which Console.WriteLine treats as literal
        // text and silently drops the argument; "{0}" actually prints the count.
        Console.WriteLine("Unsupported Format: Channel count {0} is greater than stereo.", fileFormat.ChannelsPerFrame);
#endif
        audioDescription = new AudioStreamBasicDescription();
        return(null);
    }

    // The output format must be linear PCM because that's the only type OpenAL knows how to deal with.
    // Set the client format to 16 bit signed integer (native-endian) data because that is the most
    // optimal format on iPhone/iPod Touch hardware.
    // Maintain the channel count and sample rate of the original source format.
    outputFormat = new AudioStreamBasicDescription();            // Create our output format description to be converted to
    outputFormat.SampleRate = fileFormat.SampleRate;             // Preserve the original sample rate
    outputFormat.ChannelsPerFrame = fileFormat.ChannelsPerFrame; // Preserve the original number of channels
    outputFormat.Format = AudioFormatType.LinearPCM;             // We want Linear PCM
    // IsBigEndian is causing some problems with distorted sounds on MacOSX
    // outputFormat.FormatFlags = AudioFormatFlags.IsBigEndian
    //     | AudioFormatFlags.IsPacked
    //     | AudioFormatFlags.IsSignedInteger;
    outputFormat.FormatFlags = AudioFormatFlags.IsPacked | AudioFormatFlags.IsSignedInteger;
    outputFormat.FramesPerPacket = 1;                                // We know for linear PCM, the definition is 1 frame per packet
    outputFormat.BitsPerChannel = 16;                                // We know we want 16-bit
    outputFormat.BytesPerPacket = 2 * outputFormat.ChannelsPerFrame; // We know we are using 16-bit, so 2-bytes per channel per frame
    outputFormat.BytesPerFrame = 2 * outputFormat.ChannelsPerFrame;  // For PCM, since 1 frame is 1 packet, it is the same as mBytesPerPacket

    // Set the desired client (output) data format
    ext.ClientDataFormat = outputFormat;

    // Copy the output format to the audio description that was passed in so the
    // info will be returned to the user.
    audioDescription = outputFormat;

    return(ext);
}
// Smoke test: reading ClientDataFormat from a freshly opened CAF file must not throw.
public void ClientDataFormat()
{
    var cafPath = Path.GetFullPath(Path.Combine("AudioToolbox", "1.caf"));
    var cafUrl = CFUrl.FromFile(cafPath);
    using (var audioFile = ExtAudioFile.OpenUrl(cafUrl)) {
        var clientFormat = audioFile.ClientDataFormat;
    }
}
// Smoke test: reading ClientDataFormat from a bundled CAF resource must not throw.
public void ClientDataFormat()
{
    var resourcePath = NSBundle.MainBundle.PathForResource("1", "caf", "AudioToolbox");
    using (var audioFile = ExtAudioFile.OpenUrl(CFUrl.FromFile(resourcePath))) {
        var clientFormat = audioFile.ClientDataFormat;
    }
}
// Loads an audio file (bundled sound resource or user file path) into a single
// mono, native-float 32-bit PCM buffer, storing the unmanaged sample memory in
// Data and the rate-converted frame count in TotalFrames.
protected void LoadAudioFile(StreamInfoProvider info)
{
    // get the path to the file
    string path;
    if (info.IsInternal) {
        path = NSBundle.MainBundle.PathForSoundResource(info.Uri);
    }
    else {
        // file path is the Uri for user sources
        path = info.Uri;
    }
    using (var url = CFUrl.FromFile(path)) {
        using (var file = ExtAudioFile.OpenUrl(url)) {
            // Start from the file's own format, then force mono native-float PCM
            // as the client (conversion target) format.
            var clientFormat = file.FileDataFormat;
            clientFormat.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsNativeFloat;
            clientFormat.ChannelsPerFrame = 1;
            clientFormat.FramesPerPacket = 1;
            clientFormat.BitsPerChannel = 8 * sizeof(float);
            clientFormat.BytesPerPacket = clientFormat.BytesPerFrame = clientFormat.ChannelsPerFrame * sizeof(float);
            file.ClientDataFormat = clientFormat;
            // Size the destination buffer for the sample-rate conversion ratio.
            double rateRatio = Metronome.SampleRate / clientFormat.SampleRate;
            var numFrames = file.FileLengthFrames;
            numFrames = (uint)(numFrames * rateRatio);
            TotalFrames = numFrames;
            UInt32 samples = (uint)(numFrames * clientFormat.ChannelsPerFrame);
            // sizeof(uint) == sizeof(float) == 4, so this matches the float client format.
            var dataSize = (int)(sizeof(uint) * samples);
            Data = Marshal.AllocHGlobal(dataSize);
            // set up a AudioBufferList to read data into
            var bufList = new AudioBuffers(1);
            bufList[0] = new AudioBuffer {
                NumberChannels = 1,
                Data = Data,
                DataByteSize = dataSize
            };
            ExtAudioFileError error;
            file.Read((uint)numFrames, bufList, out error);
            if (error != ExtAudioFileError.OK) {
                throw new ApplicationException();
            }
        }
    }
}
// Opening a bundled CAF via CFUrl must succeed and expose a non-null AudioFile.
public void OpenCFUrlTest()
{
    var resourcePath = NSBundle.MainBundle.PathForResource("1", "caf", "AudioToolbox");
    ExtAudioFileError openError;
    using (var audioFile = ExtAudioFile.OpenUrl(CFUrl.FromFile(resourcePath), out openError)) {
        Assert.IsTrue(openError == ExtAudioFileError.OK, "OpenCFUrlTest");
        Assert.IsNotNull(audioFile.AudioFile, "OpenCFUrlTest");
    }
}
// Opening a CAF by absolute path via CFUrl must succeed and expose a non-null AudioFile.
public void OpenCFUrlTest()
{
    var cafPath = Path.GetFullPath(Path.Combine("AudioToolbox", "1.caf"));
    ExtAudioFileError openError;
    using (var audioFile = ExtAudioFile.OpenUrl(CFUrl.FromFile(cafPath), out openError)) {
        Assert.IsTrue(openError == ExtAudioFileError.OK, "OpenCFUrlTest");
        Assert.IsNotNull(audioFile.AudioFile, "OpenCFUrlTest");
    }
}
// WrapAudioFileID must succeed when handed the AudioFileID of an already-open file.
public void WrapAudioFileID()
{
    var resourcePath = NSBundle.MainBundle.PathForResource("1", "caf", "AudioToolbox");
    using (var audioFile = ExtAudioFile.OpenUrl(CFUrl.FromFile(resourcePath))) {
        Assert.IsNotNull(audioFile.AudioFile, "#1");
        ExtAudioFile wrapped;
        var wrapResult = ExtAudioFile.WrapAudioFileID(audioFile.AudioFile.Value, true, out wrapped);
        Assert.AreEqual(ExtAudioFileError.OK, wrapResult);
    }
}
// Smoke test: reading ClientDataFormat must not throw (resource lookup differs per platform).
public void ClientDataFormat()
{
#if MONOMAC
    var cafPath = NSBundle.MainBundle.PathForResource("1", "caf", "AudioToolbox");
#else
    var cafPath = Path.GetFullPath(Path.Combine("AudioToolbox", "1.caf"));
#endif
    using (var audioFile = ExtAudioFile.OpenUrl(CFUrl.FromFile(cafPath))) {
        var clientFormat = audioFile.ClientDataFormat;
    }
}
// WrapAudioFileID must succeed when handed the AudioFileID of an already-open file.
public void WrapAudioFileID()
{
    var cafPath = Path.GetFullPath(Path.Combine("AudioToolbox", "1.caf"));
    using (var audioFile = ExtAudioFile.OpenUrl(CFUrl.FromFile(cafPath))) {
        Assert.IsNotNull(audioFile.AudioFile, "#1");
        ExtAudioFile wrapped;
        var wrapResult = ExtAudioFile.WrapAudioFileID(audioFile.AudioFile.Value, true, out wrapped);
        Assert.AreEqual(ExtAudioFileError.OK, wrapResult);
    }
}
/// <summary>
/// Queues up a file to record to the next time the beat is played
/// </summary>
/// <param name="fileName">File name.</param>
public void QueueFileRecording(string fileName)
{
    // NOTE(review): the out error `e` is never inspected, so a failed
    // CreateWithUrl still sets _fileRecordingQueued = true — confirm intended.
    _file = ExtAudioFile.CreateWithUrl(
        new Foundation.NSUrl(fileName, false),
        AudioFileType.WAVE,
        AudioStreamBasicDescription.CreateLinearPCM(),
        AudioFileFlags.EraseFlags,
        out ExtAudioFileError e
    );
    _fileRecordingQueued = true;
}
// Opening a CAF via CFUrl must succeed (resource lookup differs per platform).
public void OpenCFUrlTest()
{
#if MONOMAC
    var cafPath = NSBundle.MainBundle.PathForResource("1", "caf", "AudioToolbox");
#else
    var cafPath = Path.GetFullPath(Path.Combine("AudioToolbox", "1.caf"));
#endif
    ExtAudioFileError openError;
    using (var audioFile = ExtAudioFile.OpenUrl(CFUrl.FromFile(cafPath), out openError)) {
        Assert.IsTrue(openError == ExtAudioFileError.OK, "OpenCFUrlTest");
        Assert.IsNotNull(audioFile.AudioFile, "OpenCFUrlTest");
    }
}
// WrapAudioFileID must succeed for an already-open file (path lookup differs per platform).
public void WrapAudioFileID()
{
#if MONOMAC
    var cafPath = NSBundle.MainBundle.PathForResource("1", "caf", "AudioToolbox");
#else
    var cafPath = Path.GetFullPath(Path.Combine("AudioToolbox", "1.caf"));
#endif
    using (var audioFile = ExtAudioFile.OpenUrl(CFUrl.FromFile(cafPath))) {
        Assert.IsNotNull(audioFile.AudioFile, "#1");
        ExtAudioFile wrapped;
        var wrapResult = ExtAudioFile.WrapAudioFileID(audioFile.AudioFile.Value, true, out wrapped);
        Assert.AreEqual(ExtAudioFileError.OK, wrapResult);
    }
}
// load up audio data from the demo files into mSoundBuffer.data used in the render proc
// Each file is converted to mono native-float 32-bit PCM; the unmanaged sample
// memory and frame count are stored in soundBuffer[i].
void LoadFiles()
{
    const int FilesCount = 2;
    for (int i = 0; i < FilesCount; i++) {
        Debug.Print("Loading file #{0}", i);
        using (var file = ExtAudioFile.OpenUrl(sourceURL [i])) {
            var clientFormat = file.FileDataFormat;
            clientFormat.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsNativeFloat;
            clientFormat.ChannelsPerFrame = 1;
            clientFormat.FramesPerPacket = 1;
            // 32 bits per channel; sizeof(int) == sizeof(float) == 4.
            clientFormat.BitsPerChannel = 8 * sizeof(int);
            clientFormat.BytesPerPacket = clientFormat.BytesPerFrame = clientFormat.ChannelsPerFrame * sizeof(int);
            file.ClientDataFormat = clientFormat; // set the client format to be what we want back
            double rateRatio = GraphSampleRate / clientFormat.SampleRate;
            var numFrames = file.FileLengthFrames;
            numFrames = (uint)(numFrames * rateRatio); // account for any sample rate conversion
            Debug.Print("Number of Sample Frames after rate conversion (if any): {0}", numFrames);
            // set up our buffer
            soundBuffer[i].TotalFrames = numFrames;
            UInt32 samples = (uint)(numFrames * clientFormat.ChannelsPerFrame);
            var data_size = (int)(sizeof(uint) * samples);
            soundBuffer[i].Data = Marshal.AllocHGlobal(data_size);
            // set up a AudioBufferList to read data into
            var bufList = new AudioBuffers(1);
            bufList [0] = new AudioBuffer {
                NumberChannels = 1,
                Data = soundBuffer [i].Data,
                DataByteSize = data_size
            };
            ExtAudioFileError error;
            file.Read((uint)numFrames, bufList, out error);
            if (error != ExtAudioFileError.OK) {
                throw new ApplicationException();
            }
        }
    }
}
// Opens the source audio file, sets the AudioUnit canonical format (at
// _sampleRate, preserving the source channel count) as the client format, and
// rewinds to the start of the file. No audio data is read here.
void prepareExtAudioFile()
{
    // Opening Audio File
    _extAudioFile = ExtAudioFile.OpenURL(_url);
    // Getting file data format
    _srcFormat = _extAudioFile.FileDataFormat;
    // Setting the channel number of the output format same to the input format
    _dstFormat = AudioUnitUtils.AUCanonicalASBD(_sampleRate, _srcFormat.ChannelsPerFrame);
    // setting reading format as audio unit canonical format
    _extAudioFile.ClientDataFormat = _dstFormat;
    // getting total frame
    _totalFrames = _extAudioFile.FileLengthFrames;
    // Seeking to the file head
    _extAudioFile.Seek(0);
}
// Opens the source audio file, sets the AudioUnit canonical format as the
// client format, then reads the entire file into a single AudioBufferList.
void prepareExtAudioFile()
{
    // Opening Audio File
    _extAudioFile = ExtAudioFile.OpenURL(_url);
    // Getting file data format
    _srcFormat = _extAudioFile.FileDataFormat;
    // Setting the channel number of the output format same to the input format
    _dstFormat = AudioUnitUtils.AUCanonicalASBD(_sampleRate, _srcFormat.ChannelsPerFrame);
    // setting reading format as audio unit canonical format
    _extAudioFile.ClientDataFormat = _dstFormat;
    // getting total frame
    _totalFrames = _extAudioFile.FileLengthFrames;
    // Allocating AudioBufferList — one buffer per channel, 4 bytes per frame
    // (assumes 32-bit canonical samples; TODO confirm).
    _buffer = new AudioBufferList((uint)_srcFormat.ChannelsPerFrame, (uint)(sizeof(uint) * _totalFrames));
    _numberOfChannels = _srcFormat.ChannelsPerFrame;
    // Reading all frame into the buffer
    _extAudioFile.Read((uint)_totalFrames, _buffer);
}
// Opens the source audio file, configures a non-interleaved 32-bit linear PCM
// client format, then reads the entire file into per-channel unmanaged buffers.
void prepareExtAudioFile()
{
    // Opening Audio File
    _extAudioFile = ExtAudioFile.OpenUrl(_url);
    // Getting file data format
    _srcFormat = _extAudioFile.FileDataFormat;
    // Setting the channel number of the output format same to the input format
    _dstFormat = AudioStreamBasicDescription.CreateLinearPCM(channelsPerFrame: (uint)_srcFormat.ChannelsPerFrame, bitsPerChannel: 32);
    _dstFormat.FormatFlags |= AudioFormatFlags.IsNonInterleaved;
    // setting reading format as audio unit canonical format
    _extAudioFile.ClientDataFormat = _dstFormat;
    // getting total frame
    _totalFrames = _extAudioFile.FileLengthFrames;
    // Allocating AudioBufferList — one buffer per channel, 4 bytes per 32-bit frame.
    _buffer = new AudioBuffers(_srcFormat.ChannelsPerFrame);
    for (int i = 0; i < _buffer.Count; ++i) {
        int size = (int)(sizeof(uint) * _totalFrames);
        _buffer.SetData(i, Marshal.AllocHGlobal(size), size);
    }
    _numberOfChannels = _srcFormat.ChannelsPerFrame;
    // Reading all frame into the buffer
    ExtAudioFileError status;
    _extAudioFile.Read((uint)_totalFrames, _buffer, out status);
    if (status != ExtAudioFileError.OK) {
        throw new ApplicationException();
    }
}
/// <summary>
/// Reads up to maxBufferSize bytes of converted PCM data from <paramref name="ext"/>
/// directly into <paramref name="dataBuffer"/>, reporting the bytes actually read,
/// the matching OpenAL format, and the sample rate.
/// </summary>
/// <returns>false when the read fails or throws; true otherwise (including EOF).</returns>
public static bool GetDataFromExtAudioFile(ExtAudioFile ext, AudioStreamBasicDescription outputFormat, int maxBufferSize, byte[] dataBuffer, out int dataBufferSize, out ALFormat format, out double sampleRate)
{
    int errorStatus = 0;
    int bufferSizeInFrames = 0;
    dataBufferSize = 0;
    format = ALFormat.Mono16;
    sampleRate = 0;
    /* Compute how many frames will fit into our max buffer size */
    bufferSizeInFrames = maxBufferSize / outputFormat.BytesPerFrame;
    if (dataBuffer != null) {
        MutableAudioBufferList audioBufferList = new MutableAudioBufferList(1, maxBufferSize);
        audioBufferList.Buffers [0].DataByteSize = maxBufferSize;
        audioBufferList.Buffers [0].NumberChannels = outputFormat.ChannelsPerFrame;
        // This a hack so if there is a problem speak to kjpou1 -Kenneth
        // the cleanest way is to copy the buffer to the pointer already allocated
        // but what we are going to do is replace the pointer with our own and restore it later
        //
        GCHandle meBePinned = GCHandle.Alloc(dataBuffer, GCHandleType.Pinned);
        IntPtr meBePointer = meBePinned.AddrOfPinnedObject();
        // Let's not use copy for right now while we test this. For very large files this
        // might show some stutter in the sound loading
        //Marshal.Copy(dataBuffer, 0, audioBufferList.Buffers[0].Data, maxBufferSize);
        IntPtr savedDataPtr = audioBufferList.Buffers [0].Data;
        audioBufferList.Buffers [0].Data = meBePointer;
        try {
            // Read the data into an AudioBufferList
            // errorStatus here returns back the amount of information read
            errorStatus = ext.Read(bufferSizeInFrames, audioBufferList);
            if (errorStatus >= 0) {
                /* Success */
                /* Note: 0 == bufferSizeInFrames is a legitimate value meaning we are EOF. */
                /* ExtAudioFile.Read returns the number of frames actually read.
                 * Need to convert back to bytes. */
                // FIX: use the frames ACTUALLY read (errorStatus), not the requested
                // bufferSizeInFrames, which over-reported the final (short) read.
                dataBufferSize = errorStatus * outputFormat.BytesPerFrame;
                // Now we set our format
                format = outputFormat.ChannelsPerFrame > 1 ? ALFormat.Stereo16 : ALFormat.Mono16;
                sampleRate = outputFormat.SampleRate;
            } else {
#if DEBUG
                Console.WriteLine("ExtAudioFile.Read failed, Error = " + errorStatus);
#endif
                return(false);
            }
        } catch (Exception exc) {
#if DEBUG
            Console.WriteLine("ExtAudioFile.Read failed: " + exc.Message);
#endif
            return(false);
        } finally {
            // Don't forget to free our dataBuffer memory pointer that was pinned above
            meBePinned.Free();
            // and restore what was allocated to beginwith
            audioBufferList.Buffers[0].Data = savedDataPtr;
        }
    }
    return(true);
}
// Opens the source audio file, sets the client (conversion target) format to
// dstFormat and reads the entire file into per-channel unmanaged buffers.
// NOTE(review): assumes `url` and `dstFormat` are set before this runs — confirm callers.
void PrepareExtAudioFile()
{
    extAudioFile = ExtAudioFile.OpenUrl(url);
    CheckValue (extAudioFile, "ExtAudioFile.OpenUrl failed");
    srcFormat = extAudioFile.FileDataFormat;
    // This is how you say,“When you convert the data, this is the format I’d like to receive.”
    // The client data format must be PCM. In other words, you can’t use a single ExtAudioFile to convert between two compressed formats.
    extAudioFile.ClientDataFormat = dstFormat;
    // getting total frame
    TotalFrames = extAudioFile.FileLengthFrames;
    // Allocating AudioBufferList — one buffer per source channel (non-interleaved layout).
    buffer = new AudioBuffers(srcFormat.ChannelsPerFrame);
    for (int i = 0; i < buffer.Count; ++i) {
        // 4 bytes per frame per channel — assumes dstFormat uses 32-bit samples; TODO confirm.
        int size = (int)(sizeof(int) * TotalFrames);
        buffer.SetData(i, Marshal.AllocHGlobal(size), size);
    }
    numberOfChannels = srcFormat.ChannelsPerFrame;
    // Reading all frame into the buffer
    ExtAudioFileError status;
    extAudioFile.Read((uint)TotalFrames, buffer, out status);
    if (status != ExtAudioFileError.OK)
        throw new ApplicationException();
}
/// <summary>
/// Reads up to maxBufferSize bytes of converted PCM data from <paramref name="ext"/>
/// directly into <paramref name="dataBuffer"/>, reporting the bytes actually read,
/// the matching OpenAL format, and the sample rate.
/// </summary>
/// <returns>false when the read fails or throws; true otherwise (including EOF).</returns>
public static bool GetDataFromExtAudioFile (ExtAudioFile ext, AudioStreamBasicDescription outputFormat, int maxBufferSize, byte[] dataBuffer, out int dataBufferSize, out ALFormat format, out double sampleRate)
{
    uint framesRead = 0;
    uint bufferSizeInFrames = 0;
    dataBufferSize = 0;
    format = ALFormat.Mono16;
    sampleRate = 0;
    /* Compute how many frames will fit into our max buffer size */
    bufferSizeInFrames = (uint)(maxBufferSize / outputFormat.BytesPerFrame);
    if (dataBuffer != null) {
        var audioBufferList = new AudioBuffers (maxBufferSize);
        // This a hack so if there is a problem speak to kjpou1 -Kenneth
        // the cleanest way is to copy the buffer to the pointer already allocated
        // but what we are going to do is replace the pointer with our own and restore it later
        //
        GCHandle meBePinned = GCHandle.Alloc (dataBuffer, GCHandleType.Pinned);
        IntPtr meBePointer = meBePinned.AddrOfPinnedObject ();
        audioBufferList.SetData (0, meBePointer);
        try {
            // Read the data into an AudioBufferList
            // Read returns the number of frames actually read; the error comes back out.
            ExtAudioFileError extAudioFileError = ExtAudioFileError.OK;
            framesRead = ext.Read (bufferSizeInFrames, audioBufferList, out extAudioFileError);
            // FIX: the previous check was `errorStatus >= 0` on an unsigned value,
            // which is always true and silently swallowed read errors; test the
            // out error code instead.
            if (extAudioFileError == ExtAudioFileError.OK) {
                /* Success */
                /* Note: 0 frames read is a legitimate value meaning we are EOF. */
                /* ExtAudioFile.Read returns the number of frames actually read.
                 * Need to convert back to bytes. */
                // FIX: use the frames ACTUALLY read, not the requested
                // bufferSizeInFrames, which over-reported the final (short) read.
                dataBufferSize = (int) framesRead * outputFormat.BytesPerFrame;
                // Now we set our format
                format = outputFormat.ChannelsPerFrame > 1 ? ALFormat.Stereo16 : ALFormat.Mono16;
                sampleRate = outputFormat.SampleRate;
            } else {
#if DEBUG
                Console.WriteLine ("ExtAudioFile.Read failed, Error = " + extAudioFileError);
#endif
                return false;
            }
        } catch (Exception exc) {
#if DEBUG
            Console.WriteLine ("ExtAudioFile.Read failed: " + exc.Message);
#endif
            return false;
        } finally {
            // Don't forget to free our dataBuffer memory pointer that was pinned above
            meBePinned.Free ();
            // and restore what was allocated to beginwith
            audioBufferList.SetData (0, IntPtr.Zero);
        }
    }
    return true;
}
/// <summary>
/// Reads up to maxBufferSize bytes of converted PCM data from <paramref name="ext"/>
/// directly into <paramref name="dataBuffer"/>, reporting the bytes actually read,
/// the matching OpenAL format, and the sample rate.
/// </summary>
/// <returns>false when the read fails or throws; true otherwise (including EOF).</returns>
public static bool GetDataFromExtAudioFile (ExtAudioFile ext, AudioStreamBasicDescription outputFormat, int maxBufferSize, byte[] dataBuffer, out int dataBufferSize, out ALFormat format, out double sampleRate)
{
    int errorStatus = 0;
    int bufferSizeInFrames = 0;
    dataBufferSize = 0;
    format = ALFormat.Mono16;
    sampleRate = 0;
    /* Compute how many frames will fit into our max buffer size */
    bufferSizeInFrames = maxBufferSize / outputFormat.BytesPerFrame;
    if (dataBuffer != null) {
        MutableAudioBufferList audioBufferList = new MutableAudioBufferList (1, maxBufferSize);
        audioBufferList.Buffers [0].DataByteSize = maxBufferSize;
        audioBufferList.Buffers [0].NumberChannels = outputFormat.ChannelsPerFrame;
        // This a hack so if there is a problem speak to kjpou1 -Kenneth
        // the cleanest way is to copy the buffer to the pointer already allocated
        // but what we are going to do is replace the pointer with our own and restore it later
        //
        GCHandle meBePinned = GCHandle.Alloc (dataBuffer, GCHandleType.Pinned);
        IntPtr meBePointer = meBePinned.AddrOfPinnedObject ();
        // Let's not use copy for right now while we test this. For very large files this
        // might show some stutter in the sound loading
        //Marshal.Copy(dataBuffer, 0, audioBufferList.Buffers[0].Data, maxBufferSize);
        IntPtr savedDataPtr = audioBufferList.Buffers [0].Data;
        audioBufferList.Buffers [0].Data = meBePointer;
        try {
            // Read the data into an AudioBufferList
            // errorStatus here returns back the amount of information read
            errorStatus = ext.Read (bufferSizeInFrames, audioBufferList);
            if (errorStatus >= 0) {
                /* Success */
                /* Note: 0 == bufferSizeInFrames is a legitimate value meaning we are EOF. */
                /* ExtAudioFile.Read returns the number of frames actually read.
                 * Need to convert back to bytes. */
                // FIX: use the frames ACTUALLY read (errorStatus), not the requested
                // bufferSizeInFrames, which over-reported the final (short) read.
                dataBufferSize = errorStatus * outputFormat.BytesPerFrame;
                // Now we set our format
                format = outputFormat.ChannelsPerFrame > 1 ? ALFormat.Stereo16 : ALFormat.Mono16;
                sampleRate = outputFormat.SampleRate;
            } else {
#if DEBUG
                Console.WriteLine ("ExtAudioFile.Read failed, Error = " + errorStatus);
#endif
                return false;
            }
        } catch (Exception exc) {
#if DEBUG
            Console.WriteLine ("ExtAudioFile.Read failed: " + exc.Message);
#endif
            return false;
        } finally {
            // Don't forget to free our dataBuffer memory pointer that was pinned above
            meBePinned.Free ();
            // and restore what was allocated to beginwith
            audioBufferList.Buffers[0].Data = savedDataPtr;
        }
    }
    return true;
}
/// <summary>
/// Reads up to maxBufferSize bytes of converted PCM data from <paramref name="ext"/>
/// directly into <paramref name="dataBuffer"/>, reporting the bytes actually read,
/// the matching OpenAL format, and the sample rate.
/// </summary>
/// <returns>false when the read fails or throws; true otherwise (including EOF).</returns>
public static bool GetDataFromExtAudioFile(ExtAudioFile ext, AudioStreamBasicDescription outputFormat, int maxBufferSize, byte[] dataBuffer, out int dataBufferSize, out ALFormat format, out double sampleRate)
{
    uint framesRead = 0;
    uint bufferSizeInFrames = 0;
    dataBufferSize = 0;
    format = ALFormat.Mono16;
    sampleRate = 0;
    /* Compute how many frames will fit into our max buffer size */
    bufferSizeInFrames = (uint)(maxBufferSize / outputFormat.BytesPerFrame);
    if (dataBuffer != null) {
        var audioBufferList = new AudioBuffers(maxBufferSize);
        // This a hack so if there is a problem speak to kjpou1 -Kenneth
        // the cleanest way is to copy the buffer to the pointer already allocated
        // but what we are going to do is replace the pointer with our own and restore it later
        //
        GCHandle meBePinned = GCHandle.Alloc(dataBuffer, GCHandleType.Pinned);
        IntPtr meBePointer = meBePinned.AddrOfPinnedObject();
        audioBufferList.SetData(0, meBePointer);
        try {
            // Read the data into an AudioBufferList
            // Read returns the number of frames actually read; the error comes back out.
            ExtAudioFileError extAudioFileError = ExtAudioFileError.OK;
            framesRead = ext.Read(bufferSizeInFrames, audioBufferList, out extAudioFileError);
            // FIX: the previous check was `errorStatus >= 0` on an unsigned value,
            // which is always true and silently swallowed read errors; test the
            // out error code instead.
            if (extAudioFileError == ExtAudioFileError.OK) {
                /* Success */
                /* Note: 0 frames read is a legitimate value meaning we are EOF. */
                /* ExtAudioFile.Read returns the number of frames actually read.
                 * Need to convert back to bytes. */
                // FIX: use the frames ACTUALLY read, not the requested
                // bufferSizeInFrames, which over-reported the final (short) read.
                dataBufferSize = (int)framesRead * outputFormat.BytesPerFrame;
                // Now we set our format
                format = outputFormat.ChannelsPerFrame > 1 ? ALFormat.Stereo16 : ALFormat.Mono16;
                sampleRate = outputFormat.SampleRate;
            } else {
#if DEBUG
                Console.WriteLine("ExtAudioFile.Read failed, Error = " + extAudioFileError);
#endif
                return(false);
            }
        } catch (Exception exc) {
#if DEBUG
            Console.WriteLine("ExtAudioFile.Read failed: " + exc.Message);
#endif
            return(false);
        } finally {
            // Don't forget to free our dataBuffer memory pointer that was pinned above
            meBePinned.Free();
            // and restore what was allocated to beginwith
            audioBufferList.SetData(0, IntPtr.Zero);
        }
    }
    return(true);
}
// Decodes the compressed audio at sourceUrl via an offline-rendering
// OutputAudioQueue and writes the rendered PCM to a CAF file at destinationUrl.
unsafe static void RenderAudio(CFUrl sourceUrl, CFUrl destinationUrl)
{
    AudioStreamBasicDescription dataFormat;
    AudioQueueBuffer * buffer = null;
    long currentPacket = 0;
    int packetsToRead = 0;
    AudioStreamPacketDescription[] packetDescs = null;
    bool flushed = false;
    bool done = false;
    int bufferSize;
    using (var audioFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read, (AudioFileType)0)) {
        dataFormat = audioFile.StreamBasicDescription;
        using (var queue = new OutputAudioQueue(dataFormat, CFRunLoop.Current, CFRunLoop.ModeCommon)) {
            queue.BufferCompleted += (sender, e) => {
                // Refill and re-enqueue the read buffer each time the queue finishes with it.
                HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
            };
            // we need to calculate how many packets we read at a time and how big a buffer we need
            // we base this on the size of the packets in the file and an approximate duration for each buffer
            bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;
            // first check to see what the max size of a packet is - if it is bigger
            // than our allocation default size, that needs to become larger
            // adjust buffer size to represent about a second of audio based on this format
            CalculateBytesForTime(dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);
            if (isVBR) {
                packetDescs = new AudioStreamPacketDescription [packetsToRead];
            } else {
                packetDescs = null; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
            }
            if (audioFile.MagicCookie.Length != 0) {
                queue.MagicCookie = audioFile.MagicCookie;
            }
            // allocate the input read buffer
            queue.AllocateBuffer(bufferSize, out buffer);
            // prepare the capture format
            var captureFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame, 32);
            captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;
            queue.SetOfflineRenderFormat(captureFormat, audioFile.ChannelLayout);
            // prepare the target format
            var dstFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame);
            using (var captureFile = ExtAudioFile.CreateWithUrl(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
                captureFile.ClientDataFormat = captureFormat;
                int captureBufferSize = bufferSize / 2;
                AudioBuffers captureABL = new AudioBuffers(1);
                AudioQueueBuffer *captureBuffer;
                queue.AllocateBuffer(captureBufferSize, out captureBuffer);
                captureABL [0] = new AudioBuffer() {
                    Data = captureBuffer->AudioData,
                    NumberChannels = captureFormat.ChannelsPerFrame
                };
                queue.Start();
                double ts = 0;
                // Prime the queue with a zero-frame render, then enqueue the first data.
                queue.RenderOffline(ts, captureBuffer, 0);
                HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
                while (true) {
                    int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;
                    queue.RenderOffline(ts, captureBuffer, reqFrames);
                    captureABL.SetData(0, captureBuffer->AudioData, (int)captureBuffer->AudioDataByteSize);
                    var writeFrames = captureABL [0].DataByteSize / captureFormat.BytesPerFrame;
                    // Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes",
                    //	ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);
                    captureFile.WriteAsync((uint)writeFrames, captureABL);
                    if (flushed) {
                        break;
                    }
                    ts += writeFrames;
                }
                CFRunLoop.Current.RunInMode(CFRunLoop.ModeDefault, 1, false);
            }
        }
    }
}
/// <summary>
/// Renders the given number of seconds to the given wav file
/// </summary>
/// <param name="fileName">File name.</param>
/// <param name="seconds">Seconds.</param>
public void RenderToFile(string fileName, double seconds)
{
    long samples = (long)(seconds * Metronome.SampleRate);
    // Convert from the mixer's output format to 44.1 kHz stereo linear PCM for the WAV file.
    var inputStream = MixerNode.GetAudioFormat(AudioUnitScopeType.Output);
    var outputStream = AudioStreamBasicDescription.CreateLinearPCM(44100, 2);
    AudioConverter converter = AudioConverter.Create(inputStream, outputStream);
    var file = ExtAudioFile.CreateWithUrl(
        new Foundation.NSUrl(fileName, false),
        AudioFileType.WAVE,
        outputStream,
        AudioFileFlags.EraseFlags,
        out ExtAudioFileError e
    );
    // NOTE(review): `e` is never checked, so a failed file creation only surfaces
    // at the Write call below — confirm this is intended.
    long samplesRead = 0;
    // initialize the buffers: two mono buffers for the mixer output...
    var buffers = new AudioBuffers(2);
    buffers[0] = new AudioBuffer() {
        DataByteSize = BufferSize * 4,
        NumberChannels = 1,
        Data = Marshal.AllocHGlobal(sizeof(float) * BufferSize)
    };
    buffers[1] = new AudioBuffer() {
        DataByteSize = BufferSize * 4,
        NumberChannels = 1,
        Data = Marshal.AllocHGlobal(sizeof(float) * BufferSize)
    };
    // ...and one 2-channel buffer for the converted output.
    var convBuffers = new AudioBuffers(1);
    convBuffers[0] = new AudioBuffer() {
        DataByteSize = BufferSize * 4,
        NumberChannels = 2,
        Data = Marshal.AllocHGlobal(sizeof(float) * BufferSize)
    };
    // NOTE(review): no try/finally here — the unmanaged allocations above and the
    // converter/file leak if Render, ConvertComplexBuffer or Write throws.
    while (samples > 0) {
        int numSamples = (int)(Math.Min(BufferSize, samples));
        // get samples from the mixer
        Render((uint)numSamples, buffers, samplesRead);
        // convert to the file's format
        converter.ConvertComplexBuffer(numSamples, buffers, convBuffers);
        // write samples to the file
        var error = file.Write((uint)numSamples, convBuffers);
        if (error != ExtAudioFileError.OK) {
            throw new ApplicationException();
        }
        samples -= BufferSize;
        samplesRead += numSamples;
    }
    buffers.Dispose();
    convBuffers.Dispose();
    converter.Dispose();
    file.Dispose();
}
// Wraps an existing AudioFileID in a new ExtAudioFile via the native
// ExtAudioFileWrapAudioFileID call. On failure, outAudioFile is null and the
// native error code is returned.
public static ExtAudioFileError WrapAudioFileID(IntPtr audioFileID, bool forWriting, out ExtAudioFile outAudioFile)
{
    IntPtr ptr;
    ExtAudioFileError res;
    unsafe {
        // The native call writes the new ExtAudioFileRef through this pointer.
        res = ExtAudioFileWrapAudioFileID (audioFileID, forWriting, (IntPtr)(&ptr));
    }
    if (res != ExtAudioFileError.OK) {
        outAudioFile = null;
        return res;
    }
    outAudioFile = new ExtAudioFile (ptr);
    return res;
}