Example #1
            unsafe void UpdateVolumes(AudioBuffers bufferList, nint numberFrames)
            {
                // Calculate root mean square (RMS) for left and right audio channel.
                // http://en.wikipedia.org/wiki/Root_mean_square
                for (int i = 0; i < bufferList.Count; i++)
                {
                    var  pBuffer  = bufferList[i];
                    long cSamples = numberFrames * (context.IsNonInterleaved ? 1 : pBuffer.NumberChannels);

                    var pData = (float *)(void *)pBuffer.Data;

                    float rms = 0;
                    for (var j = 0; j < cSamples; j++)
                    {
                        rms += pData[j] * pData[j];
                    }

                    if (cSamples > 0)
                    {
                        rms = (float)Math.Sqrt(rms / cSamples);
                    }

                    if (i == 0)
                    {
                        context.LeftChannelVolume = rms;
                    }
                    if (i == 1 || (i == 0 && bufferList.Count == 1))
                    {
                        context.RightChannelVolume = rms;
                    }
                }

                // Pass calculated left and right channel volume to VU meters.
                UpdateVolumes(context.LeftChannelVolume, context.RightChannelVolume);
            }
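The RMS values computed above feed a VU meter. As a minimal sketch (not part of the sample), an RMS amplitude can be converted to a dBFS reading before display; the -160 dB floor below is an arbitrary stand-in for silence:

        static float RmsToDecibels(float rms)
        {
            // 20 * log10(rms) maps a linear RMS amplitude to decibels full scale;
            // clamp pure silence to an arbitrary floor instead of negative infinity.
            return rms > 0 ? 20f * (float)Math.Log10(rms) : -160f;
        }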
        // Input data proc callback
        AudioConverterError EncoderDataProc(ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
        {
            // figure out how much to read
            int maxPackets = afio.SrcBufferSize / afio.SrcSizePerPacket;

            if (numberDataPackets > maxPackets)
            {
                numberDataPackets = maxPackets;
            }

            // read from the file
            int outNumBytes = 16384; // in: capacity of SrcBuffer in bytes, out: bytes actually read

            // modified for iOS7 (ReadPackets deprecated)
            afio.PacketDescriptions = afio.SourceFile.ReadPacketData(false, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer, ref outNumBytes);

            if (afio.PacketDescriptions.Length == 0)
            {
                throw new ApplicationException("ReadPacketData returned no packet descriptions");
            }

            // advance input file packet position
            afio.SrcFilePos += numberDataPackets;

            // put the data pointer into the buffer list
            data.SetData(0, afio.SrcBuffer, outNumBytes);

            // don't forget the packet descriptions if required
            if (dataPacketDescription != null)
            {
                dataPacketDescription = afio.PacketDescriptions;
            }

            return(AudioConverterError.None);
        }
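This input data proc only does something once it is attached to an AudioConverter and driven by FillComplexBuffer; the full conversion loop appears later in this collection (DoConvertFile). A minimal sketch of that wiring, assuming a configured converter, fillBufList, numOutputPackets, and outputPacketDescriptions like the ones set up there:

        // Sketch only: hook the data proc up and pull converted packets through it.
        converter.InputData += EncoderDataProc;
        int ioOutputDataPackets = numOutputPackets;
        var fe = converter.FillComplexBuffer(ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
        if (fe != AudioConverterError.None)
            Console.WriteLine("FillComplexBuffer: {0}", fe);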
Example #3
            unsafe void TapProcess(MTAudioProcessingTap tap, nint numberFrames, MTAudioProcessingTapFlags flags,
                                   AudioBuffers bufferList,
                                   out nint numberFramesOut,
                                   out MTAudioProcessingTapFlags flagsOut)
            {
                numberFramesOut = 0;
                flagsOut        = (MTAudioProcessingTapFlags)0;

                // Skip processing when format not supported.
                if (!context.SupportedTapProcessingFormat)
                {
                    Console.WriteLine("Unsupported tap processing format.");
                    return;
                }

                if (IsBandpassFilterEnabled)
                {
                    // Apply bandpass filter Audio Unit.
                    if (context.AudioUnit != null)
                    {
                        var audioTimeStamp = new AudioTimeStamp
                        {
                            SampleTime = context.SampleCount,
                            Flags      = AudioTimeStamp.AtsFlags.SampleTimeValid
                        };

                        var f      = (AudioUnitRenderActionFlags)0;
                        var status = context.AudioUnit.Render(ref f, audioTimeStamp, 0, (uint)numberFrames, bufferList);
                        if (status != AudioUnitStatus.NoError)
                        {
                            Console.WriteLine("AudioUnitRender(): {0}", status);
                            return;
                        }

                        // Increment sample count for audio unit.
                        context.SampleCount += numberFrames;

                        // Set number of frames out.
                        numberFramesOut = numberFrames;
                    }
                }
                else
                {
                    // Get actual audio buffers from MTAudioProcessingTap (AudioUnitRender() will fill bufferListInOut otherwise).
                    CMTimeRange tr;
                    var         status = tap.GetSourceAudio(numberFrames, bufferList, out flagsOut, out tr, out numberFramesOut);
                    if (status != MTAudioProcessingTapError.None)
                    {
                        Console.WriteLine("MTAudioProcessingTapGetSourceAudio: {0}", status);
                        return;
                    }
                }
                try
                {
                    UpdateVolumes(bufferList, numberFrames);
                }
                catch (Exception)
                {
                    // Ignore metering errors; the VU display is non-critical.
                }
            }
        public unsafe void ConvertData(AudioBuffers audioBufferList, uint frames, float[][] buffers, out AudioStreamPacketDescription packetDescriptions)
        {
            var    totalArrays = buffers.Length;
            var    arraySize   = buffers [0].Length;
            var    pSize       = Marshal.SizeOf(typeof(IntPtr));
            IntPtr pDataBuffers;
            IntPtr pPointerBuffers;

            IntPtrHelper.ConvertToIntPtr <float> (buffers, out pDataBuffers, out pPointerBuffers);

            _ConvertData((IntPtr)audioBufferList, frames, pPointerBuffers, out packetDescriptions);

            for (int i = 0; i < totalArrays; i++)
            {
                var pArray = Marshal.ReadIntPtr(IntPtr.Add(pPointerBuffers, i * pSize));

                fixed(float *arrAddr = &buffers[i][0])
                {
                    IntPtrHelper.MemoryCopy((IntPtr)arrAddr, pArray, (nuint)arraySize);
                }
            }

            Marshal.FreeHGlobal(pDataBuffers);
            Marshal.FreeHGlobal(pPointerBuffers);
        }
        void PrepareExtAudioFile()
        {
            extAudioFile = ExtAudioFile.OpenUrl(url);
            CheckValue(extAudioFile, "ExtAudioFile.OpenUrl failed");

            srcFormat = extAudioFile.FileDataFormat;

            // This is how you say, “When you convert the data, this is the format I’d like to receive.”
            // The client data format must be PCM. In other words, you can’t use a single ExtAudioFile to convert between two compressed formats.
            extAudioFile.ClientDataFormat = dstFormat;

            // get the total frame count
            TotalFrames = extAudioFile.FileLengthFrames;

            // Allocating AudioBufferList
            buffer = new AudioBuffers(srcFormat.ChannelsPerFrame);
            for (int i = 0; i < buffer.Count; ++i)
            {
                int size = (int)(sizeof(int) * TotalFrames);
                buffer.SetData(i, Marshal.AllocHGlobal(size), size);
            }
            numberOfChannels = srcFormat.ChannelsPerFrame;

            // Read all frames into the buffer
            ExtAudioFileError status;

            extAudioFile.Read((uint)TotalFrames, buffer, out status);
            if (status != ExtAudioFileError.OK)
            {
                throw new ApplicationException();
            }
        }
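ClientDataFormat above must be a linear PCM format (dstFormat). Purely as a point of reference, a 32-bit PCM destination format consistent with the sizeof(int)-per-sample buffers allocated here could be built with the CreateLinearPCM helper; the sample rate and channel count below are illustrative assumptions, not values from the sample:

        // Sketch only: one plausible dstFormat for the method above.
        var dstFormat = AudioStreamBasicDescription.CreateLinearPCM(44100, 2, 32);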
Example #6
        static AudioUnitStatus renderCallback(IntPtr inRefCon,
                                              ref AudioUnitRenderActionFlags _ioActionFlags,
                                              ref AudioTimeStamp _inTimeStamp,
                                              uint _inBusNumber,
                                              uint _inNumberFrames,
                                              IntPtr _ioData)
        {
            // get the AUGraph instance back from the GCHandle
            var handler = GCHandle.FromIntPtr(inRefCon);
            var inst    = (AUGraph)handler.Target;
            HashSet <RenderDelegate> renderers = inst.graphUserCallbacks;

            if (renderers.Count != 0)
            {
                using (var buffers = new AudioBuffers(_ioData)) {
                    foreach (RenderDelegate renderer in renderers)
                    {
                        renderer(_ioActionFlags, _inTimeStamp, _inBusNumber, _inNumberFrames, buffers);
                    }
                    return(AudioUnitStatus.OK);
                }
            }

            return(AudioUnitStatus.InvalidParameter);
        }
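The callback recovers the managed AUGraph from inRefCon with GCHandle.FromIntPtr, so the native side must have been handed an IntPtr for a handle allocated earlier. A minimal sketch of that other half of the contract, with graph as a hypothetical name for the AUGraph being registered:

        // Sketch only: produce the inRefCon that renderCallback expects.
        GCHandle gch = GCHandle.Alloc(graph);
        IntPtr inRefCon = GCHandle.ToIntPtr(gch);   // pass this as the callback's user data
        // ...
        gch.Free();                                 // release when the graph is torn down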
        public void AllocateRenderResources(uint inMaxFrames)
        {
            MaxFrames = inMaxFrames;
            pcmBuffer = new AVAudioPcmBuffer(Bus.Format, MaxFrames);

            OriginalAudioBufferList = pcmBuffer.AudioBufferList;
            MutableAudioBufferList  = pcmBuffer.MutableAudioBufferList;
        }
        protected void LoadAudioFile(StreamInfoProvider info)
        {
            // get the path to the file
            string path;

            if (info.IsInternal)
            {
                path = NSBundle.MainBundle.PathForSoundResource(info.Uri);
            }
            else
            {
                // file path is the Uri for user sources
                path = info.Uri;
            }

            using (var url = CFUrl.FromFile(path))
            {
                using (var file = ExtAudioFile.OpenUrl(url))
                {
                    var clientFormat = file.FileDataFormat;
                    clientFormat.FormatFlags       = AudioStreamBasicDescription.AudioFormatFlagsNativeFloat;
                    clientFormat.ChannelsPerFrame  = 1;
                    clientFormat.FramesPerPacket   = 1;
                    clientFormat.BitsPerChannel    = 8 * sizeof(float);
                    clientFormat.BytesPerPacket    =
                        clientFormat.BytesPerFrame = clientFormat.ChannelsPerFrame * sizeof(float);

                    file.ClientDataFormat = clientFormat;

                    double rateRatio = Metronome.SampleRate / clientFormat.SampleRate;

                    var numFrames = file.FileLengthFrames;
                    numFrames = (uint)(numFrames * rateRatio);

                    TotalFrames = numFrames;

                    UInt32 samples  = (uint)(numFrames * clientFormat.ChannelsPerFrame);
                    var    dataSize = (int)(sizeof(float) * samples); // 4 bytes per 32-bit float sample
                    Data = Marshal.AllocHGlobal(dataSize);

                    // set up an AudioBufferList to read data into
                    var bufList = new AudioBuffers(1);
                    bufList[0] = new AudioBuffer
                    {
                        NumberChannels = 1,
                        Data           = Data,
                        DataByteSize   = dataSize
                    };

                    ExtAudioFileError error;
                    file.Read((uint)numFrames, bufList, out error);
                    if (error != ExtAudioFileError.OK)
                    {
                        throw new ApplicationException();
                    }
                }
            }
        }
        public MTAudioProcessingTapError GetSourceAudio(int frames, AudioBuffers bufferList, out MTAudioProcessingTapFlags flags, out CMTimeRange timeRange, out int framesProvided)
        {
            if (bufferList == null)
            {
                throw new ArgumentNullException("bufferList");
            }

            return(MTAudioProcessingTapGetSourceAudio(handle, frames, (IntPtr)bufferList, out flags, out timeRange, out framesProvided));
        }
Example #10
        public ExtAudioFileError Write(uint numberFrames, AudioBuffers audioBufferList)
        {
            if (audioBufferList == null)
            {
                throw new ArgumentNullException("audioBufferList");
            }

            return(ExtAudioFileWrite(_extAudioFile, numberFrames, (IntPtr)audioBufferList));
        }
Example #11
        public ExtAudioFileError Write(uint numberFrames, AudioBuffers audioBufferList)
        {
            if (audioBufferList is null)
            {
                ObjCRuntime.ThrowHelper.ThrowArgumentNullException(nameof(audioBufferList));
            }

            return(ExtAudioFileWrite(_extAudioFile, numberFrames, (IntPtr)audioBufferList));
        }
        /// <summary>
        /// Add a plug-in to the audio buffer's plug-in list
        /// </summary>
        public void RegisterPlugin(IAudioProcessor PlugIn)
        {
            if (AudioBuffers == null)
            {
                throw new Exception("Can't register a plug-in: audio buffer has not been configured");
            }

            AudioBuffers.RegisterPlugin(PlugIn);
        }
Example #13
        public uint Read(uint numberFrames, AudioBuffers audioBufferList, out ExtAudioFileError status)
        {
            if (audioBufferList is null)
            {
                ObjCRuntime.ThrowHelper.ThrowArgumentNullException(nameof(audioBufferList));
            }

            status = ExtAudioFileRead(_extAudioFile, ref numberFrames, (IntPtr)audioBufferList);
            return(numberFrames);
        }
Example #14
        public uint Read(uint numberFrames, AudioBuffers audioBufferList, out ExtAudioFileError status)
        {
            if (audioBufferList == null)
            {
                throw new ArgumentNullException("audioBufferList");
            }

            status = ExtAudioFileRead(_extAudioFile, ref numberFrames, (IntPtr)audioBufferList);
            return(numberFrames);
        }
 /// <summary>
 /// Clear all captured frames from all attached buffers.
 /// Buffers will refill if capture devices are running.
 /// </summary>
 public void ClearBuffers()
 {
     if (ImageBuffers != null)
     {
         ImageBuffers.WipeBuffer();
     }
     if (AudioBuffers != null)
     {
         AudioBuffers.WipeBuffer();
     }
 }
Example #16
        public MTAudioProcessingTapError GetSourceAudio(nint frames, AudioBuffers bufferList, out MTAudioProcessingTapFlags flags, out CMTimeRange timeRange, out nint framesProvided)
        {
            if (bufferList == null)
            {
                throw new ArgumentNullException("bufferList");
            }

            IntPtr result;
            var    r = MTAudioProcessingTapGetSourceAudio(handle, (IntPtr)frames, (IntPtr)bufferList, out flags, out timeRange, out result);

            framesProvided = (nint)result;
            return(r);
        }
        public void Init(AVAudioFormat defaultFormat, uint maxChannels)
        {
            MaxFrames = 0;
            pcmBuffer = null;
            OriginalAudioBufferList = null;
            MutableAudioBufferList  = null;
            NSError error;

            Bus = new AUAudioUnitBus(defaultFormat, out error)
            {
                MaximumChannelCount = maxChannels
            };
        }
Example #18
        /// <summary>
        ///     Initialize from buffer
        /// </summary>
        /// <param name="blob"></param>
        public GCADPCMSound(Span <byte> blob)
        {
            FullBuffer = new Memory <byte>(blob.ToArray());
            Header     = MemoryMarshal.Read <ADPCMSoundHeader>(blob);
            Table      = MemoryMarshal.Cast <byte, GCADPCMSoundInfo>(blob.Slice(Header.ADPCMPointer, Header.ADPCMSize)).ToArray();
            var pointers = MemoryMarshal.Cast <byte, int>(blob.Slice(Header.PointerTablePointer, 4 * Header.Streams));
            var sizes    = MemoryMarshal.Cast <byte, int>(blob.Slice(Header.SizeTablePointer, 4 * Header.Streams));

            for (var i = 0; i < Header.Streams; ++i)
            {
                AudioBuffers.Add(new Memory <byte>(blob.Slice(pointers[i], sizes[i]).ToArray()));
            }
        }
 /// <summary>
 /// Force a timed frame into the encoder's buffers.
 /// May cause unexpected operation. Use with caution!
 /// </summary>
 public void ForceInsertFrame(TimedSample AudioFrame)
 {
     if (AudioBuffers != null)
     {
         AudioBuffers.HandleCapturedSamples(this, new AudioDataEventArgs()
         {
             Samples = AudioFrame.Samples, CaptureTime = AudioFrame.Seconds
         });
     }
     else
     {
         throw new Exception("Can't send audio frame to uninitialised buffer. Please include an audio device in your config.");
     }
 }
		void PrepareInputBufferList ()
		{
			uint byteSize = MaxFrames * sizeof(float);

			MutableAudioBufferList = new AudioBuffers (OriginalAudioBufferList.Count);

			for (int i = 0; i < OriginalAudioBufferList.Count; ++i) {
				MutableAudioBufferList[i] = new AudioBuffer {
					Data = OriginalAudioBufferList [i].Data,
					DataByteSize = (int)byteSize,
					NumberChannels = OriginalAudioBufferList [i].NumberChannels
				};
			}
		}
Example #21
        // load up audio data from the demo files into mSoundBuffer.data used in the render proc
        void LoadFiles()
        {
            const int FilesCount = 2;

            for (int i = 0; i < FilesCount; i++)
            {
                Debug.Print("Loading file #{0}", i);

                using (var file = ExtAudioFile.OpenUrl(sourceURL [i])) {
                    var clientFormat = file.FileDataFormat;
                    clientFormat.FormatFlags       = AudioStreamBasicDescription.AudioFormatFlagsNativeFloat;
                    clientFormat.ChannelsPerFrame  = 1;
                    clientFormat.FramesPerPacket   = 1;
                    clientFormat.BitsPerChannel    = 8 * sizeof(int);
                    clientFormat.BytesPerPacket    =
                        clientFormat.BytesPerFrame = clientFormat.ChannelsPerFrame * sizeof(int);

                    file.ClientDataFormat = clientFormat;

                    // set the client format to be what we want back
                    double rateRatio = GraphSampleRate / clientFormat.SampleRate;

                    var numFrames = file.FileLengthFrames;
                    numFrames = (uint)(numFrames * rateRatio);                     // account for any sample rate conversion
                    Debug.Print("Number of Sample Frames after rate conversion (if any): {0}", numFrames);

                    // set up our buffer
                    soundBuffer[i].TotalFrames = numFrames;

                    UInt32 samples   = (uint)(numFrames * clientFormat.ChannelsPerFrame);
                    var    data_size = (int)(sizeof(uint) * samples);
                    soundBuffer[i].Data = Marshal.AllocHGlobal(data_size);

                    // set up an AudioBufferList to read data into
                    var bufList = new AudioBuffers(1);
                    bufList [0] = new AudioBuffer {
                        NumberChannels = 1,
                        Data           = soundBuffer [i].Data,
                        DataByteSize   = data_size
                    };

                    ExtAudioFileError error;
                    file.Read((uint)numFrames, bufList, out error);
                    if (error != ExtAudioFileError.OK)
                    {
                        throw new ApplicationException();
                    }
                }
            }
        }
Example #22
        private void FetchABL(AudioBuffers audioBuffers, int destOffset, IntPtr[] buffers, int srcOffset, int numBytes)
        {
            int         numChannels = audioBuffers.Count;
            AudioBuffer dest;

            for (int i = 0; i < numChannels; i++)
            {
                dest = audioBuffers[i];
                if (destOffset > dest.DataByteSize)
                {
                    continue;
                }
                InteropHelper.MemCpy(dest.Data + destOffset, buffers[i] + srcOffset, Math.Min(numBytes, dest.DataByteSize - destOffset));
            }
        }
Example #23
        private void ZeroABL(AudioBuffers audioBuffers, int destOffset, int numBytes)
        {
            int         numBuffers = audioBuffers.Count;
            AudioBuffer dest;

            for (int i = 0; i < numBuffers; i++)
            {
                dest = audioBuffers[i];
                if (destOffset > dest.DataByteSize)
                {
                    continue;
                }
                InteropHelper.FillWithZeroes(dest.Data + destOffset, Math.Min(numBytes, dest.DataByteSize - destOffset));
            }
        }
Example #24
        void PrepareInputBufferList()
        {
            uint byteSize = MaxFrames * sizeof(float);

            MutableAudioBufferList = new AudioBuffers(OriginalAudioBufferList.Count);

            for (int i = 0; i < OriginalAudioBufferList.Count; ++i)
            {
                MutableAudioBufferList[i] = new AudioBuffer {
                    Data           = OriginalAudioBufferList [i].Data,
                    DataByteSize   = (int)byteSize,
                    NumberChannels = OriginalAudioBufferList [i].NumberChannels
                };
            }
        }
Example #25
            AudioUnitStatus Render(AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber,
                                   uint numberFrames, AudioBuffers data)
            {
                // Just return audio buffers from MTAudioProcessingTap.
                MTAudioProcessingTapFlags flags;
                CMTimeRange range;
                nint        n;
                var         error =
                    (AudioUnitStatus)(int)audioProcessingTap.GetSourceAudio((nint)numberFrames, data, out flags, out range, out n);

                if (error != AudioUnitStatus.NoError)
                {
                    Console.WriteLine("{0} audioProcessingTap.GetSourceAudio failed", error);
                }
                return(error);
            }
Example #26
        static AudioUnitStatus RenderCallbackImpl(IntPtr clientData, ref AudioUnitRenderActionFlags actionFlags, ref AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, IntPtr data)
        {
            GCHandle gch = GCHandle.FromIntPtr(clientData);
            var      au  = (AUGraph)gch.Target;

            RenderDelegate callback;

            if (!au.nodesCallbacks.TryGetValue(busNumber, out callback))
            {
                return(AudioUnitStatus.InvalidParameter);
            }

            using (var buffers = new AudioBuffers(data)) {
                return(callback(actionFlags, timeStamp, busNumber, numberFrames, buffers));
            }
        }
        AudioUnitStatus RenderCallback(AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
		{
			// getting microphone input signal
			audioUnit.Render (ref actionFlags, timeStamp, 1, numberFrames, data);

			// Getting a pointer to a buffer to be filled
			IntPtr outL = data [0].Data;
			IntPtr outR = data [1].Data;

			// Getting signal level
			// https://en.wikipedia.org/wiki/Root_mean_square
			float sqrSum = 0;
			for (int j = 0;  j < numberFrames; j++) {
				float v = Marshal.ReadInt16(outL, j * sizeof(Int16));
				sqrSum += (v * v);
			}
			SignalLevel = (float)Math.Sqrt (sqrSum / numberFrames);

			if (triggered <= 0 && SignalLevel > Threshold)
				triggered = FramesToPlay;

			// playing sound
			unsafe {
				var outLPtr = (int*)outL.ToPointer ();
				var outRPtr = (int*)outR.ToPointer ();

				for (int i = 0; i < numberFrames; i++) {
					triggered = Math.Max (0, triggered - 1);

					if (triggered > 0) {
						var buf0 = (int*)buffer [0].Data;
						var buf1 = (int*)buffer [numberOfChannels - 1].Data;

						++CurrentFrame;
						*outLPtr++ = buf0 [currentFrame];
						*outRPtr++ = buf1 [currentFrame];
					} else {
						// 0-filling
						*outLPtr++ = 0;
						*outRPtr++ = 0;
					}
				}
			}

			return AudioUnitStatus.NoError;
		}
        //private static void HandleParameterizedThreadStart(object getAudioQueueThreadDelegate)
        //{
        //    var getDelegate = getAudioQueueThreadDelegate as GetAudioQueueThreadDelegate;
        //    RunAudioQueueLoop(getDelegate());
        //}


        private static void RunAudioQueueLoop(InputAudioQueueThread_old audioQueueThread)
        {
            var buffer = audioQueueThread.Buffer;

            //var queue = new TestInputAudioQueue(buffer.AudioUnit.GetAudioFormat(MonoMac.AudioUnit.AudioUnitScopeType.Input));
            //audioQueueThread.AudioQueue = queue;
            //queue.InputCompleted += Queue_InputCompleted;
#if DEBUG
            //PrintAudioQueueDataReadout(queue);
#endif //DEBUG

            //var packetDescription = new AudioStreamPacketDescription
            //{
            //    DataByteSize = queue.AudioStreamDescription.BytesPerPacket
            //};
            //var packetDescriptions = new AudioStreamPacketDescription[] { packetDescription };
            //int bufferSizeInBytes = buffer.BufferLength * queue.DeviceChannels * packetDescription.DataByteSize;
            int bufferSizeInBytes = buffer.AudioUnit.GetAudioFormat(AudioUnitScopeType.Input).FramesPerPacket * sizeof(float);

            //var bufferstuff = new AudioBufferList(buffer.AudioUnit.GetAudioFormat(AudioUnitScopeType.Input).ChannelsPerFrame);
            var bufferstuff = new AudioBuffers(buffer.AudioUnit.GetAudioFormat(AudioUnitScopeType.Input).ChannelsPerFrame);

            int something = buffer.AudioUnit.Initialize();
            audioQueueThread.Buffer.AudioUnit.Start();

            // Allocate, enqueue, and start
            //ErrorHandler.CheckError(queue.AllocateBuffer(bufferSizeInBytes, out buffer.SystemBufferPointer));
            //ErrorHandler.CheckError(queue.EnqueueBuffer(buffer.SystemBufferPointer, packetDescriptions));
            //ErrorHandler.CheckError(queue.Start());

            audioQueueThread.IsRunning = true;
            //CFRunLoop.Current.Run();
            while (!audioQueueThread.StopRequested)
            {
                //Console.WriteLine("Thread is running.");
                Thread.Sleep(50);
                CFRunLoop.Current.Run();
                CFRunLoop.Main.Run();
                //Marshal.Copy(buffer.SystemBufferPointer, dest, 0, audioQueueThread.Buffer.BufferLength);
                //ErrorHandler.CheckError(queue.EnqueueBuffer(buffer.SystemBufferPointer, packetDescriptions));
            }

            Console.WriteLine("Thread is stopping...");
            // May need to clear CoreAudio buffer here.
        }
Example #29
        public AudioUnitStatus InternalRenderBlockProc(ref AudioUnitRenderActionFlags actionFlags, ref AudioTimeStamp timestamp, uint frameCount, nint outputBusNumber, AudioBuffers outputData, AURenderEventEnumerator realtimeEventListHead, AURenderPullInputBlock pullInputBlock)
        {
            var transportStateFlags = (AUHostTransportStateFlags)0;

            double currentSamplePosition  = 0;
            double cycleStartBeatPosition = 0;
            double cycleEndBeatPosition   = 0;

            var callBack = TransportStateBlock;

            if (callBack != null)
            {
                callBack(ref transportStateFlags, ref currentSamplePosition, ref cycleStartBeatPosition, ref cycleEndBeatPosition);
            }

            var state = Kernel;
            var input = inputBus;

            var             pullFlags = (AudioUnitRenderActionFlags)0;
            AudioUnitStatus err       = input.PullInput(ref pullFlags, timestamp, frameCount, 0, pullInputBlock);

            if (err != AudioUnitStatus.NoError)
            {
                return(err);
            }

            AudioBuffers inAudioBufferList = input.MutableAudioBufferList;

            if (outputData [0].Data == IntPtr.Zero)
            {
                for (int i = 0; i < outputData.Count; i++)
                {
                    outputData.SetData(i, inAudioBufferList [i].Data);
                }
            }

            state.SetBuffers(inAudioBufferList, outputData);
            state.ProcessWithEvents(timestamp, (int)frameCount, realtimeEventListHead);

            return(AudioUnitStatus.NoError);
        }
Example #30
        private void StoreABL(IntPtr[] buffers, int destOffset, AudioBuffers audioBuffers, int srcOffset, int numBytes)
        {
            int         numChannels = audioBuffers.Count;
            AudioBuffer src;

            for (int i = 0; i < numChannels; i++)
            {
                src = audioBuffers[i];
                if (srcOffset > src.DataByteSize)
                {
                    continue;
                }
                InteropHelper.MemCpy(buffers[i] + destOffset, src.Data + srcOffset, Math.Min(numBytes, src.DataByteSize - srcOffset));

                int     numNewSamples = (src.DataByteSize - srcOffset) / Marshal.SizeOf(typeof(float));
                float[] newSamples    = new float[numNewSamples];
                Marshal.Copy(src.Data + srcOffset, newSamples, 0, numNewSamples);

                //Console.WriteLine("{0} samples written to CARingBuffer.  Max: {1}", numNewSamples, newSamples.Max());
            }
        }
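FetchABL, ZeroABL, and StoreABL (Examples #22, #23, and #30) read like pieces of a CARingBuffer-style port: StoreABL copies a render's worth of audio into per-channel byte buffers, FetchABL copies it back out into an AudioBuffers list, and ZeroABL blanks whatever a fetch could not cover. A usage sketch only, with the ring buffers and byte offsets as hypothetical names:

        // Sketch only: push input into the ring, then pull it into an output ABL.
        StoreABL(ringBuffers, writeOffsetBytes, inputBufferList, 0, bytesThisRender);
        FetchABL(outputBufferList, 0, ringBuffers, readOffsetBytes, bytesThisRender);
        // Zero-fill anything past the bytes actually fetched.
        ZeroABL(outputBufferList, bytesThisRender, outputBufferList[0].DataByteSize - bytesThisRender);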
Example #31
        /// <summary>
        /// Renders out the beat to the given buffers
        /// </summary>
        /// <returns>The render.</returns>
        /// <param name="samples">Samples.</param>
        /// <param name="buffers">Buffers.</param>
        /// <param name="offset">Offset.</param>
        public void Render(uint samples, AudioBuffers buffers, double offset)
        {
            AudioTimeStamp timeStamp = new AudioTimeStamp();

            timeStamp.SampleTime = offset;

            var flag = AudioUnitRenderActionFlags.DoNotCheckRenderArgs;

            var e = MixerNode.Render(
                ref flag,
                timeStamp,
                0,
                samples,
                buffers
                );

            if (e != AudioUnitStatus.OK)
            {
                throw new ApplicationException();
            }
        }
Example #32
        static AudioConverterError HandleInputData(ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
        {
            int maxPackets = afio.SrcBufferSize / afio.SrcSizePerPacket;

            if (numberDataPackets > maxPackets)
            {
                numberDataPackets = maxPackets;
            }

            // read from the file
            int outNumBytes;
            var res = afio.SourceFile.ReadPackets(false, out outNumBytes, afio.PacketDescriptions, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer);

            if (res != 0)
            {
                throw new ApplicationException(res.ToString());
            }

            // advance input file packet position
            afio.SrcFilePos += numberDataPackets;

            // put the data pointer into the buffer list
            data.SetData(0, afio.SrcBuffer, outNumBytes);

            // don't forget the packet descriptions if required
            if (dataPacketDescription != null)
            {
                if (afio.PacketDescriptions != null)
                {
                    dataPacketDescription = afio.PacketDescriptions;
                }
                else
                {
                    dataPacketDescription = null;
                }
            }

            return(AudioConverterError.None);
        }
Example #33
        AudioUnitStatus AudioInputCallBack(AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioUnit audioUnit)
        {
            MemoryStream ms = new MemoryStream();

            String s = "a000";
            byte[] bufWriter = Encoding.ASCII.GetBytes(s.ToCharArray(), 0, 4);
            ms.Write(bufWriter, 0, 4);

            bufWriter = BitConverter.GetBytes(AudioSessionId);
            if (BitConverter.IsLittleEndian) Array.Reverse(bufWriter);
            ms.Write(bufWriter, 0, 4);

            long time = (long) (DateTime.UtcNow - new DateTime(1970, 1, 1)).TotalMilliseconds;

            //Console.WriteLine ((time - lasttime) + " ms delay");
            lasttime = time;
            bufWriter = BitConverter.GetBytes(time);
            if (BitConverter.IsLittleEndian) Array.Reverse(bufWriter);
            ms.Write(bufWriter, 0, 8);

            var buffer = new AudioBuffer()
                {
                    NumberChannels = 1,
                    DataByteSize = (int)numberFrames * 2,
                    Data = System.Runtime.InteropServices.Marshal.AllocHGlobal((int)numberFrames * 2)
                };

            var bufferList = new AudioBuffers(1);
            bufferList[0] = buffer;

            var status = audioUnit.Render(ref actionFlags, timeStamp, busNumber, numberFrames, bufferList);

            var send = new byte[buffer.DataByteSize];
            System.Runtime.InteropServices.Marshal.Copy(buffer.Data, send, 0, send.Length);

            ms.Write (send, 0, send.Length);

            Console.Write("\n Buffer: ");
            foreach (byte b in send)
                Console.Write("\\x" + b);
            Console.Write("\n");

            System.Runtime.InteropServices.Marshal.FreeHGlobal(buffer.Data);

            byte[] sendbuf = ms.ToArray();
            if (sendbuf.Length > 4096) throw new Exception("Packet size too large!");
            Task tk = Task.Factory.StartNew(() =>
                {
                    try
                    {
                        var aSender = audioCaller.BeginSend(sendbuf, sendbuf.Length, null, null);
                        aSender.AsyncWaitHandle.WaitOne(TimeSpan.FromSeconds(3));
                        if (aSender.IsCompleted) audioCaller.EndSend(aSender);
                    }
                    catch
                    {

                    }
                });

            return AudioUnitStatus.OK;
        }
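The packet built above has a fixed layout: four ASCII bytes ("a000"), a 4-byte big-endian session id, an 8-byte big-endian millisecond timestamp, then the raw 16-bit mono samples. A sketch of decoding it on the receiving end (method name hypothetical, assumes System.Text.Encoding):

        static void ParseAudioPacket(byte[] packet)
        {
            // 4-byte ASCII tag, e.g. "a000".
            string tag = Encoding.ASCII.GetString(packet, 0, 4);

            // 4-byte big-endian session id.
            var idBytes = new byte[4];
            Array.Copy(packet, 4, idBytes, 0, 4);
            if (BitConverter.IsLittleEndian) Array.Reverse(idBytes);
            int sessionId = BitConverter.ToInt32(idBytes, 0);

            // 8-byte big-endian Unix timestamp in milliseconds.
            var timeBytes = new byte[8];
            Array.Copy(packet, 8, timeBytes, 0, 8);
            if (BitConverter.IsLittleEndian) Array.Reverse(timeBytes);
            long timestampMs = BitConverter.ToInt64(timeBytes, 0);

            // Remaining bytes are the raw 16-bit PCM payload starting at offset 16.
            int pcmBytes = packet.Length - 16;
        }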
		unsafe void TapProcess (MTAudioProcessingTap tap, nint numberFrames, MTAudioProcessingTapFlags flags,
		                        AudioBuffers bufferList,
		                        out nint numberFramesOut,
		                        out MTAudioProcessingTapFlags flagsOut)
		{
			numberFramesOut = 0;
			flagsOut = (MTAudioProcessingTapFlags)0;

			// Skip processing when format not supported.
			if (!context.SupportedTapProcessingFormat) {
				Console.WriteLine ("Unsupported tap processing format.");
				return;
			}

			if (IsBandpassFilterEnabled) {
				// Apply bandpass filter Audio Unit.
				if (context.AudioUnit != null) {
					var audioTimeStamp = new AudioTimeStamp {
						SampleTime = context.SampleCount,
						Flags = AudioTimeStamp.AtsFlags.SampleTimeValid
					};

					var f = (AudioUnitRenderActionFlags)0;
					var status = context.AudioUnit.Render (ref f, audioTimeStamp, 0, (uint)numberFrames, bufferList);
					if (status != AudioUnitStatus.NoError) {
						Console.WriteLine ("AudioUnitRender(): {0}", status);
						return;
					}

					// Increment sample count for audio unit.
					context.SampleCount += numberFrames;

					// Set number of frames out.
					numberFramesOut = numberFrames;
				}
			} else {
				// Get actual audio buffers from MTAudioProcessingTap (AudioUnitRender() will fill bufferListInOut otherwise).
				CMTimeRange tr;
				MTAudioProcessingTapError status = tap.GetSourceAudio (numberFrames, bufferList, out flagsOut, out tr, out numberFramesOut);
				if (status != MTAudioProcessingTapError.None) {
					Console.WriteLine ("MTAudioProcessingTapGetSourceAudio: {0}", status);
					return;
				}
			}

			UpdateVolumes (bufferList, numberFrames);
		}
		// Input data proc callback
		AudioConverterError EncoderDataProc (ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
		{
			// figure out how much to read
			int maxPackets = afio.SrcBufferSize / afio.SrcSizePerPacket;
			if (numberDataPackets > maxPackets)
				numberDataPackets = maxPackets;
			
			// read from the file
			int outNumBytes;
			var res = afio.SourceFile.ReadPackets (false, out outNumBytes, afio.PacketDescriptions, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer);
			if (res != 0) {
				throw new ApplicationException (res.ToString ());
			}

			// advance input file packet position
			afio.SrcFilePos += numberDataPackets;
			
			// put the data pointer into the buffer list
			data.SetData (0, afio.SrcBuffer, outNumBytes);

			// don't forget the packet descriptions if required
			if (dataPacketDescription != null) {
				if (afio.PacketDescriptions != null) {
					dataPacketDescription = afio.PacketDescriptions;
				} else {
					dataPacketDescription = null;
				}
			}

			return AudioConverterError.None;
		}
		bool DoConvertFile (CFUrl sourceURL, NSUrl destinationURL, AudioFormatType outputFormat, double outputSampleRate)
		{
			AudioStreamBasicDescription dstFormat = new AudioStreamBasicDescription ();

			// in this sample we should never be on the main thread here
			Debug.Assert (!NSThread.IsMain);

			// transition thread state to State::Running before continuing
			AppDelegate.ThreadStateSetRunning ();
			
			Debug.WriteLine ("DoConvertFile");

			// get the source file
			var sourceFile = AudioFile.Open (sourceURL, AudioFilePermission.Read);
			
			// get the source data format
			var srcFormat = (AudioStreamBasicDescription)sourceFile.DataFormat;

			// setup the output file format
			dstFormat.SampleRate = (outputSampleRate == 0 ? srcFormat.SampleRate : outputSampleRate); // set sample rate
			if (outputFormat == AudioFormatType.LinearPCM) {
				// if the output format is PCM create a 16-bit int PCM file format description as an example
				dstFormat.Format = outputFormat;
				dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
				dstFormat.BitsPerChannel = 16;
				dstFormat.BytesPerPacket = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
				dstFormat.FramesPerPacket = 1;
				dstFormat.FormatFlags = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
			} else {
				// compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
				dstFormat.Format = outputFormat;
				dstFormat.ChannelsPerFrame = (outputFormat == AudioFormatType.iLBC ? 1 : srcFormat.ChannelsPerFrame); // for iLBC num channels must be 1
				
				// use AudioFormat API to fill out the rest of the description
				var fie = AudioStreamBasicDescription.GetFormatInfo (ref dstFormat);
				if (fie != AudioFormatError.None) {
					Debug.Print ("Cannot create destination format {0:x}", fie);

					AppDelegate.ThreadStateSetDone ();
					return false;
				}
			}

			// create the AudioConverter
			AudioConverterError ce;
			var converter = AudioConverter.Create (srcFormat, dstFormat, out ce);
			Debug.Assert (ce == AudioConverterError.None);

			converter.InputData += EncoderDataProc;

			// if the source has a cookie, get it and set it on the Audio Converter
			ReadCookie (sourceFile, converter);

			// get the actual formats back from the Audio Converter
			srcFormat = converter.CurrentInputStreamDescription;
			dstFormat = converter.CurrentOutputStreamDescription;

			// if encoding to AAC set the bitrate to 192k which is a nice value for this demo
			// kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
			if (dstFormat.Format == AudioFormatType.MPEG4AAC) {
				uint outputBitRate = 192000; // 192k

				// ignore errors as setting may be invalid depending on format specifics such as samplerate
				try {
					converter.EncodeBitRate = outputBitRate;
				} catch {
				}

				// get it back and print it out
				outputBitRate = converter.EncodeBitRate;
				Debug.Print ("AAC Encode Bitrate: {0}", outputBitRate);
			}

			// can the Audio Converter resume conversion after an interruption?
			// this property may be queried at any time after construction of the Audio Converter after setting its output format
			// there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
			// construction time since it means less code to execute during or after interruption time
			bool canResumeFromInterruption;
			try {
				canResumeFromInterruption = converter.CanResumeFromInterruption;
				Debug.Print ("Audio Converter {0} continue after interruption!", canResumeFromInterruption ? "CAN" : "CANNOT");
			} catch (Exception e) {
				// if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
				// then the codec being used is not a hardware codec so we're not concerned about codec state
				// we are always going to be able to resume conversion after an interruption

				canResumeFromInterruption = false;
				Debug.Print ("CanResumeFromInterruption: {0}", e.Message);
			}
			
			// create the destination file 
			var destinationFile = AudioFile.Create (destinationURL, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags);

			// set up source buffers and data proc info struct
			afio = new AudioFileIO (32768);
			afio.SourceFile = sourceFile;
			afio.SrcFormat = srcFormat;

			if (srcFormat.BytesPerPacket == 0) {
				// if the source format is VBR, we need to get the maximum packet size
				// use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
				// in the file (without actually scanning the whole file to find the largest packet,
				// as may happen with kAudioFilePropertyMaximumPacketSize)
				afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

				// how many packets can we read for our buffer size?
				afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
				
				// allocate memory for the PacketDescription structures describing the layout of each packet
				afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
			} else {
				// CBR source format
				afio.SrcSizePerPacket = srcFormat.BytesPerPacket;
				afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
			}

			// set up output buffers
			int outputSizePerPacket = dstFormat.BytesPerPacket; // this will be non-zero if the format is CBR
			const int theOutputBufSize = 32768;
			var outputBuffer = Marshal.AllocHGlobal (theOutputBufSize);
			AudioStreamPacketDescription[] outputPacketDescriptions = null;

			if (outputSizePerPacket == 0) {
				// if the destination format is VBR, we need to get max size per packet from the converter
				outputSizePerPacket = (int)converter.MaximumOutputPacketSize;

				// allocate memory for the PacketDescription structures describing the layout of each packet
				outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
			}
			int numOutputPackets = theOutputBufSize / outputSizePerPacket;
			
			// if the destination format has a cookie, get it and set it on the output file
			WriteCookie (converter, destinationFile);
			
			// write destination channel layout
			if (srcFormat.ChannelsPerFrame > 2) {
				WriteDestinationChannelLayout (converter, sourceFile, destinationFile);
			}

			long totalOutputFrames = 0; // used for debugging
			long outputFilePos = 0;
			AudioBuffers fillBufList = new AudioBuffers (1);
			bool error = false;

			// loop to convert data
			Debug.WriteLine ("Converting...");
			while (true) {
				// set up output buffer list
				fillBufList [0] = new AudioBuffer () {
					NumberChannels = dstFormat.ChannelsPerFrame,
					DataByteSize = theOutputBufSize,
					Data = outputBuffer
				};

				// this will block if we're interrupted
				var wasInterrupted = AppDelegate.ThreadStatePausedCheck();
				
				if (wasInterrupted && !canResumeFromInterruption) {
					// this is our interruption termination condition
					// an interruption has occurred but the Audio Converter cannot continue
					Debug.WriteLine ("Cannot resume from interruption");
					error = true;
					break;
				}

				// convert data
				int ioOutputDataPackets = numOutputPackets;
				var fe = converter.FillComplexBuffer (ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
				// if interrupted in the process of the conversion call, we must handle the error appropriately
				if (fe != AudioConverterError.None) {
					Debug.Print ("FillComplexBuffer: {0}", fe);
					error = true;
					break;
				}

				if (ioOutputDataPackets == 0) {
					// this is the EOF condition
					break;
				}

				// write to output file
				var inNumBytes = fillBufList [0].DataByteSize;

				var we = destinationFile.WritePackets (false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
				if (we != 0) {
					Debug.Print ("WritePackets: {0}", we);
					error = true;
					break;
				}

				// advance output file packet position
				outputFilePos += ioOutputDataPackets;
					
				if (dstFormat.FramesPerPacket != 0) { 
					// the format has constant frames per packet
					totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
				} else {
					// variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
					for (var i = 0; i < ioOutputDataPackets; ++i)
						totalOutputFrames += outputPacketDescriptions [i].VariableFramesInPacket;
				}

			}

			Marshal.FreeHGlobal (outputBuffer);

			if (!error) {
				// write out any of the leading and trailing frames for compressed formats only
				if (dstFormat.BitsPerChannel == 0) {
					// our output frame count should jive with
					Debug.Print ("Total number of output frames counted: {0}", totalOutputFrames); 
					WritePacketTableInfo (converter, destinationFile);
				}
					
				// write the cookie again - sometimes codecs will update cookies at the end of a conversion
				WriteCookie (converter, destinationFile);
			}

			converter.Dispose ();
			destinationFile.Dispose ();
			sourceFile.Dispose ();

			// transition thread state to State.Done before continuing
			AppDelegate.ThreadStateSetDone ();

			return !error;
		}
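DoConvertFile asserts it is never called on the main thread, since the conversion loop blocks. A minimal sketch of driving it from a worker thread; the file paths and output settings below are illustrative assumptions, not values from the sample:

		// Sketch only: run the conversion off the main thread.
		ThreadPool.QueueUserWorkItem(_ => {
			var source = CFUrl.FromFile("input.m4a");            // hypothetical paths
			var destination = NSUrl.FromFilename("output.caf");
			bool ok = DoConvertFile(source, destination, AudioFormatType.MPEG4AAC, 0 /* keep source rate */);
			Console.WriteLine("Conversion {0}", ok ? "succeeded" : "failed");
		});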
		AudioUnitStatus Render (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
		{
			// Just return audio buffers from MTAudioProcessingTap.
			MTAudioProcessingTapFlags flags;
			CMTimeRange range;
			nint n;
			var error = (AudioUnitStatus)(int)audioProcessingTap.GetSourceAudio ((nint)numberFrames, data, out flags, out range, out n);
			if (error != AudioUnitStatus.NoError)
				Console.WriteLine ("{0} audioProcessingTap.GetSourceAudio failed", error);
			return error;
		}
Example #38
        public void GrabAudioData(AudioBuffers data)
        {
            if (mAudioBufferSize < data [0].DataByteSize)
                return;

            var audioBuffer = data [0];

            var bytesToCopy = Math.Min (audioBuffer.DataByteSize, mAudioBufferSize - mAudioBufferCurrentIndex);

            memcpy (mAudioBufferCurrentIndex, audioBuffer.Data, bytesToCopy);

            mAudioBufferCurrentIndex += bytesToCopy / sizeof(Int32);

            if (mAudioBufferCurrentIndex >= mAudioBufferSize / sizeof(Int32)) {

                //lock (syncLock) {
                    NeedsNewAudioData = false;
                    hasNewFFTData = true;
                //}
            }
        }
		uint TapProc (AudioQueueProcessingTap audioQueueTap, uint inNumberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data)
		{
			AudioQueueProcessingTapFlags sourceFlags;
			uint sourceFrames;

			if (audioQueueTap.GetSourceAudio (inNumberOfFrames, ref timeStamp, out sourceFlags, out sourceFrames, data) != AudioQueueStatus.Ok)
				throw new ApplicationException ();

			for (int channel = 0; channel < data.Count; channel++) {
				preRenderData[channel] = data [channel].Data;
				data.SetData (channel, IntPtr.Zero);
			}

			renderTimeStamp.Flags = AudioTimeStamp.AtsFlags.SampleTimeValid;
			AudioUnitRenderActionFlags actionFlags = 0;

			AudioUnitStatus res = genericOutputUnit.Render (ref actionFlags, renderTimeStamp, 0, inNumberOfFrames, data);
			if (res != AudioUnitStatus.NoError)
				throw new ApplicationException ();

			return sourceFrames;
		}
		AudioUnitStatus ConvertInputRenderCallback (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
		{
			renderTimeStamp.SampleTime += numberFrames;
			for (int channel = 0; channel < data.Count; channel++) {
				data.SetData (channel, preRenderData [channel]);
			}
			return AudioUnitStatus.NoError;
		}
        void PrepareExtAudioFile()
        {
			extAudioFile = ExtAudioFile.OpenUrl(url);
			CheckValue (extAudioFile, "ExtAudioFile.OpenUrl failed");

			srcFormat = extAudioFile.FileDataFormat;

			// This is how you say, “When you convert the data, this is the format I’d like to receive.”
			// The client data format must be PCM. In other words, you can’t use a single ExtAudioFile to convert between two compressed formats.
            extAudioFile.ClientDataFormat = dstFormat;

            // get the total frame count
			TotalFrames = extAudioFile.FileLengthFrames;

            // Allocating AudioBufferList
			buffer = new AudioBuffers(srcFormat.ChannelsPerFrame);
            for (int i = 0; i < buffer.Count; ++i)
            {
                int size = (int)(sizeof(int) * TotalFrames);
                buffer.SetData(i, Marshal.AllocHGlobal(size), size);
            }
			numberOfChannels = srcFormat.ChannelsPerFrame;

            // Read all frames into the buffer
            ExtAudioFileError status;
            extAudioFile.Read((uint)TotalFrames, buffer, out status);
            if (status != ExtAudioFileError.OK)
                throw new ApplicationException();
        }
		unsafe static void RenderAudio (CFUrl sourceUrl, CFUrl destinationUrl)
		{
			AudioStreamBasicDescription dataFormat;
			AudioQueueBuffer *buffer = null;
			long currentPacket = 0;
			int packetsToRead = 0;
			AudioStreamPacketDescription [] packetDescs = null;
			bool flushed = false;
			bool done = false;
			int bufferSize;
			
			using (var audioFile = AudioFile.Open (sourceUrl, AudioFilePermission.Read, (AudioFileType) 0)) {
				dataFormat = audioFile.StreamBasicDescription;
				
				using (var queue = new OutputAudioQueue (dataFormat, CFRunLoop.Current, CFRunLoop.CFRunLoopCommonModes)) {
					queue.OutputCompleted += (sender, e) => 
					{
						HandleOutput (audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
					};
					
					// we need to calculate how many packets we read at a time and how big a buffer we need
					// we base this on the size of the packets in the file and an approximate duration for each buffer
					bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;
					
					// first check to see what the max size of a packet is - if it is bigger
					// than our allocation default size, that needs to become larger
					// adjust buffer size to represent about a second of audio based on this format 
					CalculateBytesForTime (dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);
				
					if (isVBR) {
						packetDescs = new AudioStreamPacketDescription [packetsToRead];
					} else {
						packetDescs = null; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
					}
				
					if (audioFile.MagicCookie.Length != 0)
						queue.MagicCookie = audioFile.MagicCookie;
		
					// allocate the input read buffer
					queue.AllocateBuffer (bufferSize, out buffer);
					
					// prepare the capture format
					var captureFormat = AudioStreamBasicDescription.CreateLinearPCM (dataFormat.SampleRate, (uint) dataFormat.ChannelsPerFrame, 32);
					captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;

					queue.SetOfflineRenderFormat (captureFormat, audioFile.ChannelLayout);
					
					// prepare the target format
					var dstFormat = AudioStreamBasicDescription.CreateLinearPCM (dataFormat.SampleRate, (uint) dataFormat.ChannelsPerFrame);

					using (var captureFile = ExtAudioFile.CreateWithUrl (destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
						captureFile.ClientDataFormat = captureFormat;
						
						int captureBufferSize = bufferSize / 2;
						AudioBuffers captureABL = new AudioBuffers (1);
						
						AudioQueueBuffer *captureBuffer;
						queue.AllocateBuffer (captureBufferSize, out captureBuffer);
						
						captureABL[0] = new AudioBuffer () {
							Data = captureBuffer->AudioData,
							NumberChannels = captureFormat.ChannelsPerFrame
						};

						queue.Start ();

						double ts = 0;
						queue.RenderOffline (ts, captureBuffer, 0);
						
						HandleOutput (audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
						
						while (true) {
							int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;
							
							queue.RenderOffline (ts, captureBuffer, reqFrames);

							captureABL.SetData (0, captureBuffer->AudioData, (int) captureBuffer->AudioDataByteSize);
							var writeFrames = captureABL[0].DataByteSize / captureFormat.BytesPerFrame;
							
							// Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes", 
							//	ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);
							
							captureFile.WriteAsync ((uint) writeFrames, captureABL);
							
							if (flushed)
								break;
							
							ts += writeFrames;
						}
					
						CFRunLoop.Current.RunInMode (CFRunLoop.CFDefaultRunLoopMode, 1, false);
					}
				}
			}
		}
		AudioUnitStatus _audioUnit_RenderCallback (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
		{
			// getting microphone input signal
			_audioUnit.Render (ref actionFlags,
                timeStamp,
                1, // Remote input
               	numberFrames,
                data);

			// Getting a pointer to a buffer to be filled
			IntPtr outL = data [0].Data;
			IntPtr outR = data [1].Data;

			// Getting signal level and trigger detection
			unsafe {
				var outLPtr = (int*)outL.ToPointer ();
				for (int i = 0; i < numberFrames; i++) {
					// LPF
					float diff = Math.Abs (*outLPtr) - _signalLevel;
					if (diff > 0)
						_signalLevel += diff / 1000f;
					else
						_signalLevel += diff / 10000f;
                    
					diff = Math.Abs (diff);
                    
					// sound trigger detection
					if (_triggered <= 0 && diff > _threshold) {
						_triggered = _playingDuration;
					}
				}
			}                        

			// playing sound
			unsafe {
				var outLPtr = (int*)outL.ToPointer ();
				var outRPtr = (int*)outR.ToPointer ();                
                
				for (int i = 0; i < numberFrames; i++) {                    
					_triggered = Math.Max (0, _triggered - 1);

					if (_triggered <= 0) {
						// 0-filling
						*outLPtr++ = 0;
						*outRPtr++ = 0;
					} else {
						var buf0 = (int*)_buffer [0].Data;
						var buf1 = (_numberOfChannels == 2) ? (int*)_buffer [1].Data : buf0;

						if (_currentFrame >= _totalFrames) {
							_currentFrame = 0;
						}
                        
						++_currentFrame;
						*outLPtr++ = buf0 [_currentFrame];
						*outRPtr++ = buf1 [_currentFrame];
					}
				}
			}

			return AudioUnitStatus.NoError;
		}
		AudioUnitStatus ConvertInputRenderCallback (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
		{
			data.SetData (0, preRenderData);
			return AudioUnitStatus.NoError;
		}
		uint TapProc (AudioQueueProcessingTap audioQueueTap, uint numberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data)
		{
			AudioQueueProcessingTapFlags source_flags;
			uint source_frames;

			if (audioQueueTap.GetSourceAudio (numberOfFrames, ref timeStamp, out source_flags, out source_frames, data) != AudioQueueStatus.Ok)
				throw new ApplicationException ();

			preRenderData = data [0].Data;
			data.SetData (0, IntPtr.Zero);

			var renderTimeStamp = new AudioTimeStamp ();
			renderTimeStamp.Flags = AudioTimeStamp.AtsFlags.SampleTimeValid;
			AudioUnitRenderActionFlags action_flags = 0;

			var res = genericOutputUnit.Render (ref action_flags, renderTimeStamp, 0, numberOfFrames, data);
			if (res != AudioUnitStatus.NoError)
				throw new ApplicationException ("Rendering through the generic output unit failed: " + res);

			return source_frames;
		}
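		// A hedged setup sketch (not taken verbatim from the sample): TapProc above is installed on
		// the output queue as a processing tap, and ConvertInputRenderCallback feeds the tapped
		// samples into the generic output unit that applies the effect. `audioQueue` and `queueTap`
		// are assumed fields of the surrounding class; genericOutputUnit appears in TapProc above.
		void InstallProcessingTapSketch ()
		{
			AudioQueueStatus status;
			queueTap = audioQueue.CreateProcessingTap (TapProc, AudioQueueProcessingTapFlags.PreEffects, out status);
			if (status != AudioQueueStatus.Ok)
				throw new ApplicationException ("Could not create processing tap: " + status);

			// The generic output unit pulls its input from ConvertInputRenderCallback
			genericOutputUnit.SetRenderCallback (ConvertInputRenderCallback, AudioUnitScopeType.Input, 0);
			genericOutputUnit.Initialize ();
		}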
Example #46
0
        // Zero every buffer in the list so the captured input is not echoed back to the output.
        void Silence (AudioBuffers data, int numFrames)
        {
            for (var i = 0; i < data.Count; i++) {

                var _data = data [i].Data;

                // Clear the buffer byte by byte (numFrames is unused but kept for signature compatibility)
                for (var j = 0; j < data [i].DataByteSize; j++)
                {
                    System.Runtime.InteropServices.Marshal.WriteByte (_data, j, 0);
                }
            }
        }
		// load up audio data from the demo files into soundBuffer.Data, used in the render proc
		void LoadFiles ()
		{
			const int FilesCount = 2;

			for (int i = 0; i < FilesCount; i++) {
				Debug.Print ("Loading file #{0}", i);

				using (var file = ExtAudioFile.OpenUrl (sourceURL [i])) {

					var clientFormat = file.FileDataFormat;
					clientFormat.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsAudioUnitCanonical;
					clientFormat.ChannelsPerFrame = 1;
					clientFormat.FramesPerPacket = 1;
					clientFormat.BitsPerChannel = 8 * sizeof (int);
					clientFormat.BytesPerPacket =
						clientFormat.BytesPerFrame = clientFormat.ChannelsPerFrame * sizeof (int);

					// set the client format to be what we want back
					file.ClientDataFormat = clientFormat;

					double rateRatio = GraphSampleRate / clientFormat.SampleRate;

					var numFrames = file.FileLengthFrames;
					numFrames = (uint)(numFrames * rateRatio); // account for any sample rate conversion
					Debug.Print ("Number of Sample Frames after rate conversion (if any): {0}", numFrames);

					// set up our buffer
					soundBuffer[i].TotalFrames = numFrames;

					UInt32 samples = (uint) (numFrames * clientFormat.ChannelsPerFrame);
					var data_size = (int)(sizeof(uint) * samples);
					soundBuffer[i].Data = Marshal.AllocHGlobal (data_size);

					// set up an AudioBufferList to read data into
					var bufList = new AudioBuffers (1);
					bufList [0] = new AudioBuffer {
						NumberChannels = 1,
						Data = soundBuffer [i].Data,
						DataByteSize = data_size
					};

					ExtAudioFileError error;
					file.Read ((uint) numFrames, bufList, out error);
					if (error != ExtAudioFileError.OK)
						throw new ApplicationException ("ExtAudioFile.Read failed: " + error);
				}
			}
		}
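		// The sound buffers above are allocated with Marshal.AllocHGlobal, so they live in unmanaged
		// memory and are never garbage collected. A minimal cleanup sketch (assuming the same
		// soundBuffer array used by LoadFiles), to be called once the graph has been torn down:
		void ReleaseSoundBuffers ()
		{
			for (int i = 0; i < soundBuffer.Length; i++) {
				if (soundBuffer [i].Data != IntPtr.Zero) {
					Marshal.FreeHGlobal (soundBuffer [i].Data);
					soundBuffer [i].Data = IntPtr.Zero;
				}
			}
		}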
Example #48
0
        AudioUnitStatus renderDelegate(AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
        {
            // Pull the microphone samples from the Remote I/O input bus (element 1)
            var err = rioUnit.Render (ref actionFlags, timeStamp, 1, numberFrames, data);

            if (err != AudioUnitStatus.OK) {
                return err;
            }

            if (FFTBufferManager == null)
                return AudioUnitStatus.OK;

            if (FFTBufferManager.NeedsNewAudioData) {
                FFTBufferManager.GrabAudioData (data);
            }

            // Mute the output buffers so the microphone input is not played back through the speaker
            Silence (data, (int)numberFrames);

            return AudioUnitStatus.OK;
        }
		unsafe AudioUnitStatus HandleRenderDelegate (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
		{
			var sndbuf = soundBuffer [busNumber];

			var sample = sndbuf.SampleNum;      // frame number to start from
			var bufSamples = sndbuf.TotalFrames;  // total number of frames in the sound buffer
			var input = (int*) sndbuf.Data;

			var outA = (int*) data [0].Data; // output audio buffer for L channel
			var outB = (int*) data [1].Data; // output audio buffer for R channel

			// For demonstration purposes we've configured 2 stereo input busses for the mixer unit,
			// but each input bus only provides a single channel of data (with silence on the other
			// channel), alternating channels depending on whether bus 0 or bus 1 is being rendered.
			for (var i = 0; i < numberFrames; ++i) {

				if (busNumber == 1) {
					outA [i] = 0;
					outB [i] = input [sample++];
				} else {
					outA [i] = input[sample++];
					outB [i] = 0;
				}

				if (sample >= bufSamples) {
					// start over from the beginning of the data, our audio simply loops
					Debug.Print ("Looping data for bus {0} after {1} source frames rendered", busNumber, sample);
					sample = 0;
				}
			}

			// keep track of where we are in the source data buffer
			sndbuf.SampleNum = sample;

			return AudioUnitStatus.OK;
		}
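		// How HandleRenderDelegate might be attached to the mixer's two input buses. This is a
		// hedged sketch rather than the sample's actual graph setup (the original uses an AUGraph);
		// `mixerUnit` is an assumed field holding the multichannel mixer AudioUnit.
		void ConnectMixerInputsSketch ()
		{
			for (uint bus = 0; bus < 2; bus++)
				mixerUnit.SetRenderCallback (HandleRenderDelegate, AudioUnitScopeType.Input, bus);
		}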
        // Input data proc callback
        AudioConverterError EncoderDataProc(ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
        {
            // figure out how much to read
            int maxPackets = afio.SrcBufferSize / afio.SrcSizePerPacket;
            if (numberDataPackets > maxPackets)
                numberDataPackets = maxPackets;

            // read from the file
            int outNumBytes = 16384;

            // modified for iOS7 (ReadPackets deprecated)
            afio.PacketDescriptions = afio.SourceFile.ReadPacketData (false, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer, ref outNumBytes);

            if (afio.PacketDescriptions.Length == 0 && numberDataPackets > 0)
                throw new ApplicationException ("ReadPacketData returned no packet descriptions");

            // advance input file packet position
            afio.SrcFilePos += numberDataPackets;

            // put the data pointer into the buffer list
            data.SetData(0, afio.SrcBuffer, outNumBytes);

            // don't forget the packet descriptions if required
            if (dataPacketDescription != null)
                dataPacketDescription = afio.PacketDescriptions;

            return AudioConverterError.None;
        }
		unsafe void UpdateVolumes (AudioBuffers bufferList, nint numberFrames)
		{
			// Calculate root mean square (RMS) for left and right audio channel.
			// http://en.wikipedia.org/wiki/Root_mean_square
			for (int i = 0; i < bufferList.Count; i++) {
				AudioBuffer pBuffer = bufferList [i];
				long cSamples = numberFrames * (context.IsNonInterleaved ? 1 : pBuffer.NumberChannels);

				float* pData = (float*)(void*)pBuffer.Data;

				float rms = 0;
				for (int j = 0; j < cSamples; j++)
					rms += pData [j] * pData [j];

				if (cSamples > 0)
					rms = (float)Math.Sqrt (rms / cSamples);

				if (i == 0)
					context.LeftChannelVolume = rms;
				// Right channel, or mono audio (reuse the single channel's level for both meters)
				if (i == 1 || (i == 0 && bufferList.Count == 1))
					context.RightChannelVolume = rms;
			}

			// Pass calculated left and right channel volume to VU meters.
			UpdateVolumes (context.LeftChannelVolume, context.RightChannelVolume);
		}
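		// VU meters usually show level in decibels rather than raw RMS. A small helper sketch (not
		// part of the sample) converting the RMS value computed above to dBFS, clamped so that
		// silence does not map to negative infinity:
		static float RmsToDecibels (float rms)
		{
			const float MinDb = -96f;
			if (rms <= 0)
				return MinDb;
			return (float) Math.Max (MinDb, 20.0 * Math.Log10 (rms));
		}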
		void prepareExtAudioFile ()
		{
			// Opening Audio File
			_extAudioFile = ExtAudioFile.OpenUrl (_url);

			// Getting file data format
			_srcFormat = _extAudioFile.FileDataFormat;

			// Set the output format's channel count to match the input format
			_dstFormat = AudioStreamBasicDescription.CreateLinearPCM (channelsPerFrame: (uint)_srcFormat.ChannelsPerFrame, bitsPerChannel: 32);
			_dstFormat.FormatFlags |= AudioFormatFlags.IsNonInterleaved;

			// Set the reading (client) format to the audio unit canonical format
			_extAudioFile.ClientDataFormat = _dstFormat;

			// Get the total number of frames in the file
			_totalFrames = _extAudioFile.FileLengthFrames;

			// Allocating AudioBufferList
			_buffer = new AudioBuffers (_srcFormat.ChannelsPerFrame);
			for (int i = 0; i < _buffer.Count; ++i) {
				int size = (int)(sizeof(uint) * _totalFrames);
				_buffer.SetData (i, Marshal.AllocHGlobal (size), size);
			}
			_numberOfChannels = _srcFormat.ChannelsPerFrame;

			// Read all frames into the buffer
			ExtAudioFileError status;
			_extAudioFile.Read ((uint)_totalFrames, _buffer, out status);
			if (status != ExtAudioFileError.OK)
				throw new ApplicationException ("ExtAudioFile.Read failed: " + status);
		}
Example #53
0
        AudioUnitStatus AudioInputCallBack(AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioUnit audioUnit)
        {
            // Allocate a temporary buffer for the captured frames (16-bit mono, 2 bytes per frame)
            var buffer = new AudioBuffer()
                {
                    NumberChannels = 1,
                    DataByteSize = (int)numberFrames * 2,
                    Data = System.Runtime.InteropServices.Marshal.AllocHGlobal((int)numberFrames * 2)
                };

            var bufferList = new AudioBuffers(1);
            bufferList[0] = buffer;

            var status = audioUnit.Render(ref actionFlags, timeStamp, busNumber, numberFrames, bufferList);
            if (status != AudioUnitStatus.OK) {
                System.Runtime.InteropServices.Marshal.FreeHGlobal(buffer.Data);
                return status;
            }

            var send = new byte[buffer.DataByteSize];
            System.Runtime.InteropServices.Marshal.Copy(buffer.Data, send, 0, send.Length);

            var handler = DataAvailable;
            if (handler != null)
                handler(this, send);

            Console.Write("\n Buffer: ");
            foreach (byte b in send)
                Console.Write("\\x" + b);
            Console.Write("\n");

            System.Runtime.InteropServices.Marshal.FreeHGlobal(buffer.Data);

            return AudioUnitStatus.OK;
        }
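        // A hedged usage sketch: the callback above raises DataAvailable with the captured PCM bytes,
        // so a consumer might subscribe like this. `recorder` (the object exposing the callback) and
        // `outputStream` are assumed names, not part of the original sample.
        void SubscribeSketch ()
        {
            recorder.DataAvailable += (sender, pcmBytes) => {
                // 16-bit mono samples; append them to a file, ring buffer or network stream
                outputStream.Write (pcmBytes, 0, pcmBytes.Length);
            };
        }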