Example #1
        static int oldRenderCallback(IntPtr inRefCon,
                                     ref AudioUnitRenderActionFlags _ioActionFlags,
                                     ref AudioTimeStamp _inTimeStamp,
                                     int _inBusNumber,
                                     int _inNumberFrames,
                                     AudioBufferList _ioData)
        {
            // Recover the AUGraph instance from the GCHandle passed as inRefCon
            var handler = GCHandle.FromIntPtr(inRefCon);
            var inst    = (AUGraph)handler.Target;

            // Invoke the event handler, if any, with the render arguments
            if (inst.RenderCallback != null)
            {
                var args = new AudioGraphEventArgs(
                    _ioActionFlags,
                    _inTimeStamp,
                    _inBusNumber,
                    _inNumberFrames,
                    _ioData);
                inst.RenderCallback(inst, args);
            }

            return 0; // no error
        }
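The callback recovers its managed target through a GCHandle, so the registration side must allocate one and pass it as inRefCon. A minimal sketch of that pairing, assuming the wrapper names used above (AUGraph, RenderCallback); the registration call itself is elided because it varies by wrapper:

        // Keep the AUGraph instance reachable from native code (sketch; names assumed)
        var graph  = new AUGraph();
        var handle = GCHandle.Alloc(graph);
        IntPtr refCon = GCHandle.ToIntPtr(handle);   // arrives as inRefCon in oldRenderCallback
        // ... install oldRenderCallback with refCon via the native registration API ...
        handle.Free();                               // free only after the callback is removed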
Example #2
        public void WriteAsync(int numberFrames, AudioBufferList data)
        {
            IntPtr buffer = data == null ? IntPtr.Zero : data.ToPointer();
            int    err    = ExtAudioFileWriteAsync(_extAudioFile, numberFrames, buffer);

            // Free the unmanaged buffer from the previous async write so it does not leak
            if (last_async_write_buffer_ptr != IntPtr.Zero)
            {
                Marshal.FreeHGlobal(last_async_write_buffer_ptr);
                last_async_write_buffer_ptr = IntPtr.Zero;
            }

            if (err != 0)
            {
                if (buffer != IntPtr.Zero)
                {
                    Marshal.FreeHGlobal(buffer);
                }
                throw new ArgumentException(String.Format("Error code:{0}", err));
            }

            if (buffer != IntPtr.Zero)
            {
                last_async_write_buffer_ptr = buffer;
            }
        }
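Note the deferred free: ExtAudioFileWriteAsync returns before the write completes, so the unmanaged buffer from the previous call is released only on the next call, when it is presumably no longer in flight.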
Example #3
        public MTAudioProcessingTapError GetSourceAudio(long frames, ref AudioBufferList bufferList, out MTAudioProcessingTapFlags flags,
                                                        out CMTimeRange timeRange, out long framesProvided)
        {
            int outFp;
            var err = MTAudioProcessingTapGetSourceAudio(handle, (int)frames, ref bufferList, out flags, out timeRange, out outFp);

            framesProvided = outFp;
            return err;
        }
Example #4
 static extern int AudioConverterFillComplexBuffer(
     IntPtr inAudioConverter,
     AudioConverterComplexInputDataProc inInputDataProc,
     IntPtr inInputDataProcUserData,
     ref uint ioOutputDataPacketSize,
     AudioBufferList outOutputData,
     AudioStreamPacketDescription[] outPacketDescription);
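Examples #12 and #17 below show this import in use: FillBuffer drives the conversion, while complexInputDataProc supplies input packets to the converter on demand.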
Example #5
 public AudioGraphEventArgs(AudioUnitRenderActionFlags actionFlags,
                            MonoMac.AudioToolbox.AudioTimeStamp timeStamp,
                            int busNumber,
                            int numberFrames,
                            AudioBufferList data)
     : base(actionFlags, timeStamp, busNumber, numberFrames, data)
 {
 }
Example #6
 public _AudioConverterEventArgs(
     uint _NumberDataPackets,
     AudioBufferList _Data,
     MonoTouch.AudioToolbox.AudioStreamPacketDescription[] _DataPacketDescription)
 {
     NumberDataPackets = _NumberDataPackets;
     Data = _Data;
     DataPacketDescription = _DataPacketDescription;
 }
Example #7
        public void WriteAsync(int numberFrames, AudioBufferList data)
        {
            int err = ExtAudioFileWriteAsync(_extAudioFile, numberFrames, data);

            if (err != 0)
            {
                throw new ArgumentException(String.Format("Error code:{0}", err));
            }
        }
Example #8
 public _AudioConverterEventArgs(
     uint _NumberDataPackets,
     AudioBufferList _Data,
     AudioStreamPacketDescription[] _DataPacketDescription)
 {
     NumberDataPackets = _NumberDataPackets;
     Data = _Data;
     DataPacketDescription = _DataPacketDescription;
 }
Example #9
        public int Read(int numberFrames, AudioBufferList data)
        {
            int err = ExtAudioFileRead(_extAudioFile, ref numberFrames, data);

            if (err != 0)
            {
                throw new ArgumentException(String.Format("Error code:{0}", err));
            }

            return numberFrames;
        }
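ExtAudioFileRead reports the frames actually read through ioNumberFrames, so a return value of 0 signals end of file. A hypothetical read loop over this wrapper (the buffer construction mirrors Example #18; channels, bytesPerFrame, and Process are assumed):

        var buffer = new AudioBufferList((uint)channels, (uint)(4096 * bytesPerFrame));
        int frames;
        while ((frames = file.Read(4096, buffer)) > 0)
        {
            Process(buffer, frames);  // hypothetical consumer
        }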
Example #10
 public AudioUnitStatus TryRender(AudioUnitRenderActionFlags flags,
                                  AudioTimeStamp timeStamp,
                                  int outputBusnumber,
                                  int numberFrames, AudioBufferList data)
 {
     return (AudioUnitStatus)AudioUnitRender(handle,
                                             ref flags,
                                             ref timeStamp,
                                             outputBusnumber,
                                             numberFrames,
                                             data);
 }
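Unlike Render in Example #13, TryRender surfaces the status instead of throwing, so callers can branch on the result. A brief sketch (unit, flags, timeStamp, and data are assumed to be prepared elsewhere):

        var status = unit.TryRender(flags, timeStamp, 0, numberFrames, data);
        if ((int)status != 0)
            Console.WriteLine("AudioUnitRender failed: {0}", status);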
Example #11
 public AudioUnitEventArgs(AudioUnitRenderActionFlags actionFlags,
                           AudioTimeStamp timestamp,
                           int busNumber,
                           int frames,
                           AudioBufferList data)
 {
     ActionFlags  = actionFlags;
     TimeStamp    = timestamp;
     BusNumber    = busNumber;
     NumberFrames = frames;
     Data         = data;
 }
Example #12
        public void FillBuffer(AudioBufferList data, uint numberFrames, AudioStreamPacketDescription[] packetDescs)
        {
            uint numPackets = numberFrames;
            int  err        = AudioConverterFillComplexBuffer(
                _audioConverter,
                complexInputDataProc,
                GCHandle.ToIntPtr(_handle),
                ref numPackets,
                data,
                packetDescs);

            if (err != 0 || numPackets == 0)
            {
                throw new InvalidOperationException(String.Format("Error code:{0}", err));
            }
        }
Example #13
        public void Render(AudioUnitRenderActionFlags flags,
                           AudioTimeStamp timeStamp,
                           int outputBusnumber,
                           int numberFrames, AudioBufferList data)
        {
            int err = AudioUnitRender(handle,
                                      ref flags,
                                      ref timeStamp,
                                      outputBusnumber,
                                      numberFrames,
                                      data);

            if (err != 0)
            {
                throw new AudioUnitException(err);
            }
        }
Example #14
        public int Read(int numberFrames, AudioBufferList data)
        {
            if (data == null)
            {
                throw new ArgumentNullException("data");
            }

            var ptr = data.ToPointer();
            int err = ExtAudioFileRead(_extAudioFile, ref numberFrames, ptr);

            Marshal.FreeHGlobal(ptr);
            if (err != 0)
            {
                throw new ArgumentException(String.Format("Error code:{0}", err));
            }

            return numberFrames;
        }
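Unlike Example #9, this variant marshals the AudioBufferList to an unmanaged pointer itself and frees that pointer once the native read returns.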
Example #15
        /// <summary>
        /// Initialise capture devices and connect the appropriate buffers
        /// </summary>
        private void CaptureSetup()
        {
            // Try setting up the capture devices.
            if (config.Audio.CaptureDeviceNumber >= 0)
            {
                InitAudioCapture();
            }
            if (config.Video.CaptureDeviceNumber >= 0)
            {
                InitVideoCapture();
            }

            // Set up buffers
            if (mic != null)
            {
                AudioBuffers        = new AudioBufferList(config);
                mic.FrameAvailable += new EventHandler<AudioDataEventArgs>(AudioBuffers.HandleCapturedSamples);
            }
            else if (config.Audio.CaptureDeviceNumber == -2)                 // dummy mode: buffers but no capture
            {
                AudioBuffers = new AudioBufferList(config);
            }

            if (cam != null)
            {
                ImageBuffers        = new ImageBufferList(config);
                cam.TargetFrameSize = new Size(config.EncoderSettings.OutputWidth, config.EncoderSettings.OutputHeight);
                cam.FrameAvailable += new EventHandler<VideoDataEventArgs>(ImageBuffers.HandleCapturedFrame);
            }
            else if (config.Video.CaptureDeviceNumber == -2)                 // dummy mode: buffers but no capture
            {
                ImageBuffers = new ImageBufferList(config);
            }

            if (ImageBuffers == null && AudioBuffers == null)
            {
                throw new Exception("Neither audio nor video capture was specified");
            }
        }
Example #16
        static int device_renderCallback(IntPtr inRefCon,
                                         ref AudioUnit.AudioUnitRenderActionFlags _ioActionFlags,
                                         ref AudioTimeStamp _inTimeStamp,
                                         uint _inBusNumber,
                                         uint _inNumberFrames,
                                         AudioBufferList _ioData)
        {
            System.Diagnostics.Debug.WriteLine("o");

            var handler = GCHandle.FromIntPtr(inRefCon);
            var inst    = (RemoteOutput)handler.Target;
            var waveDef = inst._waveDef[_inBusNumber];

            double deltaPhase = 2 * Math.PI * waveDef.frequency / waveDef.sampleRate;
            double phase      = waveDef.phase;

            // Get pointers to the left and right channel buffers to be filled
            IntPtr outL = _ioData.mBuffers[0].mData;
            IntPtr outR = _ioData.mBuffers[1].mData;

            // Fill with a sine waveform.
            // AudioUnitSampleType differs between the simulator (float32) and a real device (int32).
            unsafe
            {
                var outLPtr = (int *)outL.ToPointer();
                var outRPtr = (int *)outR.ToPointer();
                for (int i = 0; i < _inNumberFrames; i++)
                {
                    // Sample format is 8.24 fixed point: int.MaxValue / 128 ≈ 1 << 24, i.e. full scale (1.0)
                    int sample = (int)(Math.Sin(phase) * int.MaxValue / 128);
                    *outLPtr++ = sample;
                    *outRPtr++ = sample;
                    phase += deltaPhase;
                }
            }
            waveDef.phase = phase % (2 * Math.PI);
            return 0; // no error
        }
Example #17
        static int complexInputDataProc(
            IntPtr inAudioConverter,
            ref uint ioNumberDataPackets,
            AudioBufferList ioData,
            ref AudioStreamPacketDescription[] outDataPacketDescription, //AudioStreamPacketDescription**
            IntPtr inUserData
            )
        {
            // Recover the _AudioConverter instance from the GCHandle
            var handler = GCHandle.FromIntPtr(inUserData);
            var inst    = (_AudioConverter)handler.Target;

            // Invoke the event handler with the converter arguments
            if (inst.EncoderCallback != null)
            {
                var args = new _AudioConverterEventArgs(
                    ioNumberDataPackets,
                    ioData,
                    outDataPacketDescription);
                inst.EncoderCallback(inst, args);
            }

            return 0; // no error
        }
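The proc pulls its input by raising EncoderCallback, so a client supplies data by filling the provided AudioBufferList in its handler. A hypothetical subscription (the mBuffers field names follow Example #16; srcPtr and srcBytes are assumed to describe the source audio):

        converter.EncoderCallback += (sender, args) =>
        {
            args.Data.mBuffers[0].mData         = srcPtr;    // point the converter at our source bytes
            args.Data.mBuffers[0].mDataByteSize = srcBytes;  // assumed field, mirroring the C struct
        };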
Example #18
        void prepareExtAudioFile()
        {
            // Opening Audio File
            _extAudioFile = ExtAudioFile.OpenURL(_url);

            // Getting file data format
            _srcFormat = _extAudioFile.FileDataFormat;

            // Set the output format's channel count to match the input format
            _dstFormat = AudioUnitUtils.AUCanonicalASBD(_sampleRate, _srcFormat.ChannelsPerFrame);

            // Set the reading (client) format to the audio unit canonical format
            _extAudioFile.ClientDataFormat = _dstFormat;

            // Get the total number of frames
            _totalFrames = _extAudioFile.FileLengthFrames;

            // Allocate the AudioBufferList
            _buffer           = new AudioBufferList((uint)_srcFormat.ChannelsPerFrame, (uint)(sizeof(uint) * _totalFrames));
            _numberOfChannels = _srcFormat.ChannelsPerFrame;

            // Read all frames into the buffer
            _extAudioFile.Read((uint)_totalFrames, _buffer);
        }
Example #19
 static extern int ExtAudioFileRead(IntPtr inExtAudioFile, ref int ioNumberFrames, AudioBufferList ioData);
Example #20
 static extern int /* OSStatus */ ExtAudioFileWriteAsync(IntPtr inExtAudioFile, int /* UInt32 */ inNumberFrames, AudioBufferList ioData);
Example #21
 static extern int ExtAudioFileRead(IntPtr inExtAudioFile, ref int /* UInt32* */ ioNumberFrames, AudioBufferList ioData);
Example #23
        unsafe static void RenderAudio(CFUrl sourceUrl, CFUrl destinationUrl)
        {
            AudioStreamBasicDescription dataFormat;
            AudioQueueBuffer *          buffer = null;
            long currentPacket = 0;
            int  packetsToRead = 0;

            AudioStreamPacketDescription [] packetDescs = null;
            bool flushed = false;
            bool done    = false;
            int  bufferSize;

            using (var audioFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read, (AudioFileType)0)) {
                dataFormat = audioFile.StreamBasicDescription;

                using (var queue = new OutputAudioQueue(dataFormat, CFRunLoop.Current, CFRunLoop.CFRunLoopCommonModes)) {
                    queue.OutputCompleted += (sender, e) =>
                    {
                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
                    };

                    // we need to calculate how many packets we read at a time and how big a buffer we need
                    // we base this on the size of the packets in the file and an approximate duration for each buffer
                    bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;

                    // first check to see what the max size of a packet is - if it is bigger
                    // than our allocation default size, that needs to become larger
                    // adjust buffer size to represent about a second of audio based on this format
                    CalculateBytesForTime(dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);

                    if (isVBR)
                    {
                        packetDescs = new AudioStreamPacketDescription [packetsToRead];
                    }
                    else
                    {
                        packetDescs = null;                         // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
                    }

                    if (audioFile.MagicCookie.Length != 0)
                    {
                        queue.MagicCookie = audioFile.MagicCookie;
                    }

                    // allocate the input read buffer
                    queue.AllocateBuffer(bufferSize, out buffer);

                    // prepare the capture format
                    AudioStreamBasicDescription captureFormat;
                    captureFormat.SampleRate  = dataFormat.SampleRate;
                    captureFormat.Format      = AudioFormatType.LinearPCM;
                    captureFormat.FormatFlags = AudioFormatFlags.IsSignedInteger | AudioFormatFlags.IsPacked |
                                                (AudioFormatFlags)(24 << (int)AudioFormatFlags.LinearPCMSampleFractionShift);
                    captureFormat.ChannelsPerFrame = dataFormat.ChannelsPerFrame;
                    captureFormat.FramesPerPacket  = 1;
                    captureFormat.BitsPerChannel   = 32;
                    captureFormat.BytesPerPacket   = dataFormat.ChannelsPerFrame * 4;
                    captureFormat.BytesPerFrame    = captureFormat.BytesPerPacket;

                    queue.SetOfflineRenderFormat(captureFormat, audioFile.ChannelLayout);

                    // prepare the target format
                    AudioStreamBasicDescription dstFormat;
                    dstFormat.SampleRate       = dataFormat.SampleRate;
                    dstFormat.ChannelsPerFrame = dataFormat.ChannelsPerFrame;
                    dstFormat.Format           = AudioFormatType.LinearPCM;
                    dstFormat.FormatFlags      = AudioFormatFlags.IsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
                    dstFormat.BitsPerChannel   = 16;
                    dstFormat.BytesPerPacket   = 2 * dstFormat.ChannelsPerFrame;
                    dstFormat.BytesPerFrame    = dstFormat.BytesPerPacket;
                    dstFormat.FramesPerPacket  = 1;

                    using (var captureFile = ExtAudioFile.CreateWithUrl(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
                        captureFile.ClientDataFormat = captureFormat;

                        int captureBufferSize = bufferSize / 2;
                        var captureABL        = new AudioBufferList(1);

                        AudioQueueBuffer *captureBuffer;
                        queue.AllocateBuffer(captureBufferSize, out captureBuffer);

                        captureABL.Buffers [0].Data           = captureBuffer->AudioData;
                        captureABL.Buffers [0].NumberChannels = captureFormat.ChannelsPerFrame;

                        queue.Start();

                        double ts = 0;
                        queue.RenderOffline(ts, captureBuffer, 0);

                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);

                        while (true)
                        {
                            int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;

                            queue.RenderOffline(ts, captureBuffer, reqFrames);

                            captureABL.Buffers [0].Data         = captureBuffer->AudioData;
                            captureABL.Buffers [0].DataByteSize = (int)captureBuffer->AudioDataByteSize;
                            int writeFrames = captureABL.Buffers [0].DataByteSize / captureFormat.BytesPerFrame;

                            // Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes",
                            //	ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);

                            captureFile.WriteAsync(writeFrames, captureABL);

                            if (flushed)
                            {
                                break;
                            }

                            ts += writeFrames;
                        }

                        CFRunLoop.Current.RunInMode(CFRunLoop.CFDefaultRunLoopMode, 1, false);
                    }
                }
            }
        }
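Since ts is advanced by the number of frames actually written and passed back to RenderOffline, the offline-render timeline here is measured in sample frames rather than seconds.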
Example #24
 extern static /* OSStatus */ MTAudioProcessingTapError MTAudioProcessingTapGetSourceAudio(
     /* MTAudioProcessingTapRef */ IntPtr tap, int numberFrames,
     /* AudioBufferList* */ ref AudioBufferList bufferListInOut,
     out MTAudioProcessingTapFlags flagsOut, out CMTimeRange timeRangeOut, out int numberFramesOut);
Example #25
 static extern int ExtAudioFileWriteAsync(IntPtr inExtAudioFile, int inNumberFrames, AudioBufferList ioData);
Example #26
 static extern int AudioUnitRender(IntPtr inUnit,
                                   ref AudioUnitRenderActionFlags ioActionFlags,
                                   ref AudioTimeStamp inTimeStamp,
                                   int inOutputBusNumber,
                                   int inNumberFrames,
                                   AudioBufferList ioData);