Example No. 1
 private unsafe void _PlayNext(AudioQueueBuffer *buffer, int offset)
 {
     byte[] data = m_AudioStream.GetNextBuffer(m_SampleFormat, m_BufferFrameCount, m_BufferMs - offset, m_DacLatency);
     // Copy the PCM data into the queue buffer and report how many bytes are actually valid.
     Marshal.Copy(data, 0, buffer->AudioData, data.Length);
     buffer->AudioDataByteSize = (uint)data.Length;
     m_AudioQueue.EnqueueBuffer(buffer, null);
 }
Example No. 2
        //
        // Simple tone generator
        //
        void GenerateTone(AudioQueueBuffer *buffer)
        {
            // Make the buffer length a multiple of the wavelength for the output frequency
            // (a worked example of this rounding follows the listing).
            uint   sampleCount  = buffer->AudioDataBytesCapacity / 2;
            double bufferLength = sampleCount;
            double wavelength   = sampleRate / outputFrequency;
            double repetitions  = Math.Floor(bufferLength / wavelength);

            if (repetitions > 0)
            {
                sampleCount = (uint)Math.Round(wavelength * repetitions);
            }

            double x, y;
            double sd       = 1.0 / sampleRate;
            double amp      = 0.9;
            double max16bit = Int16.MaxValue;
            int    i;
            short *p = (short *)buffer->AudioData;

            for (i = 0; i < sampleCount; i++)
            {
                x = i * sd * outputFrequency;
                switch (outputWaveForm)
                {
                case WaveForm.Sine:
                    y = Math.Sin(x * 2.0 * Math.PI);
                    break;

                case WaveForm.Triangle:
                    x = x % 1.0;
                    if (x < 0.25)
                    {
                        y = x * 4.0;               // up 0.0 to 1.0
                    }
                    else if (x < 0.75)
                    {
                        y = (1.0 - x) * 4.0 - 2.0; // down 1.0 to -1.0
                    }
                    else
                    {
                        y = (x - 1.0) * 4.0;       // up -1.0 to 0.0
                    }
                    break;

                case WaveForm.Sawtooth:
                    y = 0.8 - (x % 1.0) * 1.8;
                    break;

                case WaveForm.Square:
                    y = ((x % 1.0) < 0.5)? 0.7: -0.7;
                    break;

                default: y = 0; break;
                }
                p[i] = (short)(y * max16bit * amp);
            }

            buffer->AudioDataByteSize = sampleCount * 2;
        }
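
The rounding above keeps each buffer an exact number of wave cycles, so the tone stays phase-continuous when the same buffer is recycled by the output callback. A quick check of the arithmetic, assuming a 440 Hz tone at 44.1 kHz and the 2176-byte buffer allocated in Examples No. 5 and 17 (the frequency and sample rate here are illustrative assumptions, not values from the samples):

        using System;

        class WavelengthRoundingCheck
        {
            static void Main()
            {
                double sampleRate      = 44100.0;   // assumed hardware rate
                double outputFrequency = 440.0;     // assumed test tone

                uint   sampleCount  = 2176 / 2;                              // 1088 16-bit samples fit in the buffer
                double wavelength   = sampleRate / outputFrequency;          // ~100.23 samples per cycle
                double repetitions  = Math.Floor(sampleCount / wavelength);  // 10 whole cycles fit

                sampleCount = (uint)Math.Round(wavelength * repetitions);    // 1002 samples, the closest fit to 10 cycles
                Console.WriteLine(sampleCount);                              // prints 1002
            }
        }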
Example No. 3
        unsafe static void HandleOutput(AudioFile audioFile, AudioQueue queue, AudioQueueBuffer *audioQueueBuffer,
                                        ref int packetsToRead, ref long currentPacket, ref bool done, ref bool flushed,
                                        ref AudioStreamPacketDescription[] packetDescriptions)
        {
            int bytes;
            int packets;

            if (done)
            {
                return;
            }

            packets = packetsToRead;
            bytes   = (int)audioQueueBuffer->AudioDataBytesCapacity;

            packetDescriptions = audioFile.ReadPacketData(false, currentPacket, ref packets, audioQueueBuffer->AudioData, ref bytes);

            if (packets > 0)
            {
                audioQueueBuffer->AudioDataByteSize = (uint)bytes;
                queue.EnqueueBuffer(audioQueueBuffer, packetDescriptions);
                currentPacket += packets;
            }
            else
            {
                if (!flushed)
                {
                    queue.Flush();
                    flushed = true;
                }

                queue.Stop(false);
                done = true;
            }
        }
Example No. 4
        public unsafe AudioQueueStatus EnqueueBuffer(AudioQueueBuffer *audioQueueBuffer, AudioStreamPacketDescription [] desc)
        {
            if (audioQueueBuffer == null)
            {
                throw new ArgumentNullException("audioQueueBuffer");
            }

            return(AudioQueueEnqueueBuffer(handle, audioQueueBuffer, desc == null ? 0 : desc.Length, desc));
        }
Example No. 5
		public override bool FinishedLaunching(UIApplication app, NSDictionary options)
		{
			//
			// Setup audio system
			//
			AudioSession.Initialize ();
			AudioSession.Category = AudioSessionCategory.MediaPlayback;


			// 
			// Format description, we generate LinearPCM as short integers
			//
			sampleRate = AudioSession.CurrentHardwareSampleRate;
			var format = new AudioStreamBasicDescription () {
				SampleRate = sampleRate,
				Format = AudioFormatType.LinearPCM,
				FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
				BitsPerChannel = 16,
				ChannelsPerFrame = 1,
				BytesPerFrame = 2,
				BytesPerPacket = 2, 
				FramesPerPacket = 1,
			};

			// 
			// Create an output queue
			//
			var queue = new OutputAudioQueue (format);
			var bufferByteSize = (sampleRate > 16000)? 2176 : 512; // 40.5 Hz : 31.25 Hz 

			// 
			// Create three buffers, generate a tone, and output the tones
			//
			var buffers = new AudioQueueBuffer* [numBuffers];
			for (int i = 0; i < numBuffers; i++){
				queue.AllocateBuffer (bufferByteSize, out buffers [i]);
				GenerateTone (buffers [i]);
				queue.EnqueueBuffer (buffers [i], null);
			}

			//
			// Output callback: invoked when the audio system is done with the
			// buffer, this implementation merely recycles it.
			//
			queue.OutputCompleted += (object sender, OutputCompletedEventArgs e) => {
				if (alternate){
					outputWaveForm +=1;
					if (outputWaveForm > WaveForm.Square)
						outputWaveForm = WaveForm.Sine;
					GenerateTone (e.UnsafeBuffer);
				}
				queue.EnqueueBuffer (e.UnsafeBuffer, null);
			};

			queue.Start ();
			return true;
		}
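
Examples No. 2, 5, and 17 rely on fields and a WaveForm type declared elsewhere in the sample class. A minimal sketch of those supporting declarations, inferred from how the code above uses them (the class name and the concrete values are illustrative assumptions, not taken from the original source):

        // Assumed supporting declarations for the tone-generator samples; values are illustrative.
        enum WaveForm { Sine, Triangle, Sawtooth, Square }

        partial class AppDelegate
        {
            const int numBuffers      = 3;              // "three buffers", per the comment in the samples
            double    sampleRate;                       // filled in from the audio session at startup
            double    outputFrequency = 440.0;          // assumed test-tone frequency
            WaveForm  outputWaveForm  = WaveForm.Sine;  // advanced to the next wave form when 'alternate' is set
            bool      alternate       = false;          // cycle through wave forms on each buffer refill
        }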
Example No. 6
        public unsafe AudioQueueStatus AllocateBuffer(int bufferSize, out AudioQueueBuffer *audioQueueBuffer)
        {
            IntPtr           buf;
            AudioQueueStatus result;

            result           = AudioQueueAllocateBuffer(handle, bufferSize, out buf);
            audioQueueBuffer = (AudioQueueBuffer *)buf;
            return(result);
        }
Example No. 7
        public AudioQueueStatus EnqueueBuffer(IntPtr audioQueueBuffer, int bytes, AudioStreamPacketDescription [] desc)
        {
            if (audioQueueBuffer == IntPtr.Zero)
            {
                throw new ArgumentNullException("audioQueueBuffer");
            }

            unsafe {
                AudioQueueBuffer *buffer = (AudioQueueBuffer *)audioQueueBuffer;
                buffer->AudioDataByteSize = (uint)bytes;
                return(EnqueueBuffer(buffer, desc));
            }
        }
Example No. 8
 static unsafe void ReadBufferProc(void *pUserData, AudioQueue *pQueue, AudioQueueBuffer *pBuffer)
 {
     // Native Core Audio callback: a managed exception must never propagate back into
     // native code, so the refill is wrapped in a catch-all below.
     lock (API.StopLock)
     {
         try
         {
             ReadBufferInternal(pUserData, pQueue, pBuffer);
         }
         catch (Exception e)
         {
             Console.WriteLine(e.ToString());
         }
     }
 }
Example No. 9
        private unsafe void _UnsafeStart(int bufferSize)
        {
            AudioQueueBuffer *[] buffers = new AudioQueueBuffer *[m_NumBuffers];
            for (int i = 0; i < m_NumBuffers; i++)
            {
                m_AudioQueue.AllocateBuffer(bufferSize, out buffers[i]);
                _PlayNext(buffers[i], m_BufferDurationMs * i);
            }

            m_AudioQueue.BufferCompleted += (object sender, BufferCompletedEventArgs e) =>
            {
                _PlayNext(e.UnsafeBuffer, m_BufferDurationMs * (m_NumBuffers - 1));
            };
        }
Example No. 10
        public unsafe AudioQueueStatus RenderOffline(double timeStamp, AudioQueueBuffer *audioQueueBuffer, int frameCount)
        {
            if (audioQueueBuffer == null)
            {
                throw new ArgumentNullException("audioQueueBuffer");
            }

            var stamp = new AudioTimeStamp()
            {
                SampleTime = timeStamp,
                Flags      = AudioTimeStamp.AtsFlags.SampleTimeValid
            };

            return(AudioQueueOfflineRender(handle, ref stamp, audioQueueBuffer, frameCount));
        }
Example No. 11
        unsafe private bool ReadFileIntoBuffer(AudioQueueBuffer *buffer, out uint numBytesReadFromFile, out uint numPackets)
        {
            numBytesReadFromFile = 0;
            numPackets           = (uint)soundEffect.packetsToRead;

            if (AudioFileReadPackets(audioFile.Handle, false, ref numBytesReadFromFile,
                                     packetDescriptionArray, currentPacket, ref numPackets,
                                     buffer->AudioData) != 0)
            {
                // An error occurred
                queue.Stop(false);
                return(false);
            }

            return(true);
        }
Example No. 12
        // THIS HAPPENS ON ANOTHER THREAD
        unsafe bool HandleOutputBuffer(AudioQueueBuffer *buffer, bool priming)
        {
            uint numBytesReadFromFile;
            uint numPackets;

            if (!ReadFileIntoBuffer(buffer, out numBytesReadFromFile, out numPackets))
            {
                return(false);                // ERROR
            }
            if (loop && numPackets == 0)
            {
                currentPacket = 0;                 // Restart from beginning
                if (!ReadFileIntoBuffer(buffer, out numBytesReadFromFile, out numPackets))
                {
                    return(false);                    // ERROR
                }
            }

            if (numPackets > 0)            // have we received data?
            {
                buffer->AudioDataByteSize = numBytesReadFromFile;
                AudioQueueStatus status = AudioQueueEnqueueBuffer(queue.Handle,
                                                                  new IntPtr(buffer), (packetDescriptionArray != IntPtr.Zero ? numPackets : 0),
                                                                  packetDescriptionArray);
                currentPacket += numPackets;
            }
            else
            {
                if (!priming)                // Stop the queue (if priming - queue isn't running anyway)
                {
                    queue.Stop(false);
                }
                return(false);                // No audio remains
            }

            return(true);            // More audio available
        }
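
The priming flag in Example No. 12 implies these buffers are filled once before the queue is started. A hedged sketch of such a priming sequence, reusing the names from the examples above (numBuffers, bufferByteSize, and the PrimeAndStart method name are assumptions, not part of the original source):

        // Hypothetical priming sequence: pre-fill each buffer, then start playback.
        unsafe void PrimeAndStart()
        {
            for (int i = 0; i < numBuffers; i++)
            {
                AudioQueueBuffer *buf;
                queue.AllocateBuffer(bufferByteSize, out buf);

                // HandleOutputBuffer enqueues the buffer itself; stop priming once the file is exhausted.
                if (!HandleOutputBuffer(buf, priming: true))
                    break;
            }
            queue.Start();
        }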
Example No. 13
 public static extern unsafe OSStatus AudioQueueEnqueueBuffer(AudioQueue *pQueue, AudioQueueBuffer *pBuffer, int numPacketDescs, AudioStreamPacketDescription *pDescriptors);
Example No. 14
 extern unsafe static AudioQueueStatus AudioQueueEnqueueBuffer(
     IntPtr AQ,
     AudioQueueBuffer *audioQueueBuffer,
     int nPackets,
     AudioStreamPacketDescription [] desc);
Example No. 15
 public unsafe OutputCompletedEventArgs(AudioQueueBuffer *audioQueueBuffer)
 {
     IntPtrBuffer = (IntPtr)audioQueueBuffer;
 }
Example No. 16
 extern unsafe static AudioQueueStatus AudioQueueOfflineRender(IntPtr aq, ref AudioTimeStamp stamp, AudioQueueBuffer *buffer, int frames);
Example No. 17
        public override bool FinishedLaunching(UIApplication app, NSDictionary options)
        {
            //
            // Setup audio system
            //
            var session = AVAudioSession.SharedInstance();

            session.SetCategory(new NSString("AVAudioSessionCategoryPlayback"), AVAudioSessionCategoryOptions.DefaultToSpeaker, out error);
            if (error != null)
            {
                Console.WriteLine(error);
            }
            //
            // Format description, we generate LinearPCM as short integers
            //
            sampleRate = session.SampleRate;
            var format = new AudioStreamBasicDescription
            {
                SampleRate       = sampleRate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
                BitsPerChannel   = 16,
                ChannelsPerFrame = 1,
                BytesPerFrame    = 2,
                BytesPerPacket   = 2,
                FramesPerPacket  = 1,
            };

            //
            // Create an output queue
            //
            var queue          = new OutputAudioQueue(format);
            var bufferByteSize = (sampleRate > 16000) ? 2176 : 512; // 40.5 Hz : 31.25 Hz

            //
            // Create three buffers, generate a tone, and output the tones
            //
            var buffers = new AudioQueueBuffer * [numBuffers];

            for (int i = 0; i < numBuffers; i++)
            {
                queue.AllocateBuffer(bufferByteSize, out buffers[i]);
                GenerateTone(buffers[i]);
                queue.EnqueueBuffer(buffers[i], null);
            }

            //
            // Output callback: invoked when the audio system is done with the
            // buffer, this implementation merely recycles it.
            //
            queue.BufferCompleted += (object sender, BufferCompletedEventArgs e) =>
            {
                if (alternate)
                {
                    outputWaveForm += 1;
                    if (outputWaveForm > WaveForm.Square)
                    {
                        outputWaveForm = WaveForm.Sine;
                    }
                    GenerateTone(e.UnsafeBuffer);
                }
                queue.EnqueueBuffer(e.UnsafeBuffer, null);
            };

            queue.Start();
            return(true);
        }
Example No. 18
        unsafe static void RenderAudio(CFUrl sourceUrl, CFUrl destinationUrl)
        {
            AudioStreamBasicDescription dataFormat;
            AudioQueueBuffer *          buffer = null;
            long currentPacket = 0;
            int  packetsToRead = 0;

            AudioStreamPacketDescription[] packetDescs = null;
            bool flushed = false;
            bool done    = false;
            int  bufferSize;

            using (var audioFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read, (AudioFileType)0)) {
                dataFormat = audioFile.StreamBasicDescription;

                using (var queue = new OutputAudioQueue(dataFormat, CFRunLoop.Current, CFRunLoop.ModeCommon)) {
                    queue.BufferCompleted += (sender, e) => {
                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
                    };

                    // We need to calculate how many packets we read at a time and how big a buffer we need.
                    // We base this on the size of the packets in the file and an approximate duration for each
                    // buffer (a sketch of CalculateBytesForTime follows this example).
                    bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;

                    // first check to see what the max size of a packet is - if it is bigger
                    // than our allocation default size, that needs to become larger
                    // adjust buffer size to represent about a second of audio based on this format
                    CalculateBytesForTime(dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);

                    if (isVBR)
                    {
                        packetDescs = new AudioStreamPacketDescription [packetsToRead];
                    }
                    else
                    {
                        packetDescs = null;                         // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
                    }

                    if (audioFile.MagicCookie.Length != 0)
                    {
                        queue.MagicCookie = audioFile.MagicCookie;
                    }

                    // allocate the input read buffer
                    queue.AllocateBuffer(bufferSize, out buffer);

                    // prepare the capture format
                    var captureFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame, 32);
                    captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;

                    queue.SetOfflineRenderFormat(captureFormat, audioFile.ChannelLayout);

                    // prepare the target format
                    var dstFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame);

                    using (var captureFile = ExtAudioFile.CreateWithUrl(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
                        captureFile.ClientDataFormat = captureFormat;

                        int          captureBufferSize = bufferSize / 2;
                        AudioBuffers captureABL        = new AudioBuffers(1);

                        AudioQueueBuffer *captureBuffer;
                        queue.AllocateBuffer(captureBufferSize, out captureBuffer);

                        captureABL [0] = new AudioBuffer()
                        {
                            Data           = captureBuffer->AudioData,
                            NumberChannels = captureFormat.ChannelsPerFrame
                        };

                        queue.Start();

                        double ts = 0;
                        queue.RenderOffline(ts, captureBuffer, 0);

                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);

                        while (true)
                        {
                            int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;

                            queue.RenderOffline(ts, captureBuffer, reqFrames);

                            captureABL.SetData(0, captureBuffer->AudioData, (int)captureBuffer->AudioDataByteSize);
                            var writeFrames = captureABL [0].DataByteSize / captureFormat.BytesPerFrame;

                            // Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes",
                            // ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);

                            captureFile.WriteAsync((uint)writeFrames, captureABL);

                            if (flushed)
                            {
                                break;
                            }

                            ts += writeFrames;
                        }

                        CFRunLoop.Current.RunInMode(CFRunLoop.ModeDefault, 1, false);
                    }
                }
            }
        }
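
Example No. 18 calls a CalculateBytesForTime helper that is not part of this listing. A minimal sketch of what that helper typically looks like, following the buffer-sizing logic used in Apple's audio queue playback samples (the 16 KB / 64 KB clamping constants are assumptions):

        // Sketch: size a buffer for roughly 'seconds' of audio and report how many
        // packets of at most 'maxPacketSize' bytes fit into it.
        static void CalculateBytesForTime(AudioStreamBasicDescription desc, int maxPacketSize, double seconds,
                                          out int bufferSize, out int packetCount)
        {
            const int maxBufferSize = 0x10000;   // 64 KB upper bound (assumed)
            const int minBufferSize = 0x4000;    // 16 KB lower bound (assumed)

            if (desc.FramesPerPacket > 0)
            {
                // Fixed frames per packet: estimate how many packets cover 'seconds' of audio.
                double packetsForTime = desc.SampleRate / desc.FramesPerPacket * seconds;
                bufferSize = (int)(packetsForTime * maxPacketSize);
            }
            else
            {
                // No fixed packet duration: fall back to the larger of the default and one packet.
                bufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize;
            }

            // Clamp to the assumed bounds, but never below the size of a single packet.
            if (bufferSize > maxBufferSize && bufferSize > maxPacketSize)
                bufferSize = maxBufferSize;
            else if (bufferSize < minBufferSize)
                bufferSize = minBufferSize;

            packetCount = bufferSize / maxPacketSize;
        }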
Example No. 19
 public static extern unsafe OSStatus AudioQueueFreeBuffer(AudioQueue *pQueue, AudioQueueBuffer *pBuffer);
Example No. 20
        static unsafe void ReadBufferInternal(void *pUserData, AudioQueue *pQueue, AudioQueueBuffer *pBuffer)
        {
            AudioStream *pThis = (AudioStream *)pUserData;

            if (pThis == null)
            {
                Console.WriteLine("ReadBufferProc: pThis is null");
                return;
            }

            if (!pThis->IsRunning)
            {
                return;
            }

            if (pQueue == null)
            {
                Console.WriteLine("ReadBufferProc: pQueue is null");
            }

            if (pBuffer == null)
            {
                Console.WriteLine("ReadBufferProc: pBuffer is null");
                return;
            }

            if (pBuffer->AudioData == null)
            {
                Console.WriteLine("ReadBufferProc: pBuffer->AudioData is null");
            }

            if (pBuffer->PacketDescriptors == null)
            {
                Console.WriteLine("ReadBufferProc: pBuffer->PacketDescriptors is null");
            }

            if (pThis->AudioFile == null)
            {
                Console.WriteLine("ReadBufferProc: pThis->AudioFile is null");
            }

            int numPacketsReadFromFile = pThis->NumPacketsToRead;
            int numBytesReadFromFile   = 0;

            OSStatus status = API.AudioFileReadPackets(pThis->AudioFile, 0, &numBytesReadFromFile, pBuffer->PacketDescriptors, pThis->CurrentPacket, &numPacketsReadFromFile, pBuffer->AudioData);

            API.CheckStatus(status);

            if (status == 0 &&
                numPacketsReadFromFile == 0 &&
                pThis->Looping)
            {
                // we ran out of packets and they are
                // asking to loop, so try and reset
                pThis->CurrentPacket   = 0;
                numPacketsReadFromFile = pThis->NumPacketsToRead;
                numBytesReadFromFile   = 0;
                status = API.AudioFileReadPackets(pThis->AudioFile, 0, &numBytesReadFromFile, pBuffer->PacketDescriptors, pThis->CurrentPacket, &numPacketsReadFromFile, pBuffer->AudioData);
                API.CheckStatus(status);
            }

            if (numPacketsReadFromFile > 0)
            {
                pBuffer->AudioDataByteSize     = numBytesReadFromFile;
                pBuffer->PacketDescriptorCount = numPacketsReadFromFile;

                status = API.AudioQueueEnqueueBuffer(pThis->Queue, pBuffer, (pBuffer->PacketDescriptors != null ? pBuffer->PacketDescriptorCount : 0), pBuffer->PacketDescriptors);
                API.CheckStatus(status);

                pThis->CurrentPacket += numPacketsReadFromFile;
            }
            else
            {
                status = API.AudioQueueStop(pThis->Queue, 0);
                API.CheckStatus(status);
            }
        }
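
Example No. 20 reads its state from an AudioStream struct passed through the user-data pointer. A minimal sketch of the fields the callback relies on, inferred purely from the usage above (the field types and layout are assumptions; the real struct in the original project may differ):

        // Assumed shape of the unmanaged state block behind pUserData; inferred from usage, not original source.
        unsafe struct AudioStream
        {
            public bool        IsRunning;          // false once playback has been stopped
            public bool        Looping;            // restart from packet 0 when the file runs out
            public int         NumPacketsToRead;   // packets requested per buffer refill
            public long        CurrentPacket;      // read cursor into the audio file
            public IntPtr      AudioFile;          // AudioFileID handle
            public AudioQueue *Queue;              // owning audio queue
        }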