public void Setup()
        {
            if (soundEffect.IsDisposed)
            {
                throw new ObjectDisposedException(soundEffect.ToString());
            }

            buffers = new IntPtr[numberOfBuffers];

            queue = new OutputAudioQueue(soundEffect.description);
            queue.OutputCompleted   += new EventHandler <OutputCompletedEventArgs>(HandleOutputBuffer);
            IsRunningChangedCallback = new AudioQueue.AudioQueuePropertyChanged(IsRunningChanged);
            queue.AddListener(AudioQueueProperty.IsRunning, IsRunningChangedCallback);

            // Set hardware mode
            unsafe
            {
                const AudioQueueProperty HardwareCodecPolicy = (AudioQueueProperty)1634820976;                 // 'aqcp'
                const uint PreferSoftware = 3, PreferHardware = 4;
                uint       policy = hardware ? PreferHardware : PreferSoftware;
                queue.SetProperty(HardwareCodecPolicy, Marshal.SizeOf(typeof(uint)), new IntPtr(&policy));
            }

            AllocatePacketDescriptionsArray();
            queue.MagicCookie = audioFile.MagicCookie;
            AllocateBuffers();
            queue.Volume = 1;
            PrimeBuffers();
        }
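
Note: the literal 1634820976 used above for HardwareCodecPolicy is just the four-character code 'aqcp' (Core Audio's hardware codec policy property) packed into a big-endian UInt32. A minimal helper for deriving such values could look like the sketch below; the FourCC name is ours, not part of the AudioToolbox API:

        // Hypothetical helper: packs a four-character ASCII code into the
        // big-endian UInt32 layout Core Audio uses for property IDs,
        // e.g. FourCC("aqcp") == 1634820976.
        static uint FourCC(string code)
        {
            if (code.Length != 4)
                throw new ArgumentException("A four-character code needs exactly 4 characters", nameof(code));
            return ((uint)code[0] << 24) | ((uint)code[1] << 16) | ((uint)code[2] << 8) | (uint)code[3];
        }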
        void prepareAudioQueue()
        {
            _audioFile = AudioFile.Open(_url, AudioFilePermission.Read, AudioFileType.AIFF);

            // Getting AudioStreamBasicDescription
            var audioFormat = _audioFile.StreamBasicDescription;

            // Creating an audio output queue object instance            
            _audioQueue = new OutputAudioQueue(audioFormat);
            _audioQueue.OutputCompleted += new EventHandler<OutputCompletedEventArgs>(_audioQueue_OutputCompleted);

            // Getting packet size
            int maxPacketSize = _audioFile.MaximumPacketSize;
            _startingPacketCount = 0;

            _numPacketsToRead = 1024;
            _bufferByteSize = _numPacketsToRead * maxPacketSize;

            // enqueue buffers
            IntPtr bufferPtr;
            for (int index = 0; index < 3; index++)
            {
                _audioQueue.AllocateBuffer(_bufferByteSize, out bufferPtr);
                outputCallback(bufferPtr);
            }
            _isPrepared = true;
        }
        void prepareAudioQueue()
        {
            AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate       = _samplingRate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.IsPacked,
                FramesPerPacket  = 1,
                ChannelsPerFrame = 1,  // mono
                BitsPerChannel   = 16, // 16-bit
                BytesPerPacket   = 2,
                BytesPerFrame    = 2,
                Reserved         = 0
            };

            _audioQueue = new OutputAudioQueue(audioFormat);
            _audioQueue.OutputCompleted += new EventHandler <OutputCompletedEventArgs>(_audioQueue_OutputCompleted);

            _tmpBuf = new short[_numPacketsToRead];
            //_numPacketsToRead  = 256;
            int    bufferByteSize = _numPacketsToRead * audioFormat.BytesPerPacket;
            IntPtr bufferPtr;

            for (int index = 0; index < 3; index++)
            {
                _audioQueue.AllocateBuffer(bufferByteSize, out bufferPtr);
                outputCallback(bufferPtr);
            }
            _isPrepared = true;
        }
        void outputCallback(IntPtr bufPtr)
        {
            // reading packets
            var buffer = new byte[_bufferByteSize];
            var descs  = _audioFile.ReadPacketData(_startingPacketCount, _numPacketsToRead, buffer);

            if (_startingPacketCount < _audioFile.DataPacketCount)
            {
                unsafe
                {
                    fixed(byte *ptr = buffer)
                    {
                        OutputAudioQueue.FillAudioData(bufPtr, 0, new IntPtr((void *)ptr), 0, _bufferByteSize);
                    }
                }

                _startingPacketCount += _numPacketsToRead;
                // enqueue a buffer
                _audioQueue.EnqueueBuffer(bufPtr, _bufferByteSize, null);
            }
            else
            {
                if (_isPlaying)
                {
                    Stop();
                }
            }
        }
 void OnPropertyFound(object sender, PropertyFoundEventArgs e)
 {
     if (e.Property == AudioFileStreamProperty.ReadyToProducePackets) {
         outputQueue = new OutputAudioQueue (audioFileStream.StreamBasicDescription);
         outputQueue.OutputCompleted += OnOutputQueueOutputCompleted;
     }
 }
        /// <summary>
        /// Called when an AudioProperty is found in the fed packets
        /// </summary>
        private void AudioPropertyFound(object sender, PropertyFoundEventArgs args)
        {
            if (args.Property == AudioFileStreamProperty.ReadyToProducePackets)
            {
                Started = false;

                if (OutputQueue != null)
                {
                    OutputQueue.Dispose();
                }

                OutputQueue = new OutputAudioQueue(_audioFileStream.StreamBasicDescription);
                OutputReady?.Invoke(OutputQueue);

                _currentByteCount            = 0;
                OutputQueue.BufferCompleted += HandleBufferCompleted;
                _outputBuffers = new List <AudioBuffer>();

                for (int i = 0; i < MaxBufferCount; i++)
                {
                    OutputQueue.AllocateBuffer(BufferSize, out IntPtr outBuffer);
                    _outputBuffers.Add(new AudioBuffer()
                    {
                        Buffer             = outBuffer,
                        PacketDescriptions = new List <AudioStreamPacketDescription>()
                    });
                }

                _currentBuffer = _outputBuffers.First();

                OutputQueue.MagicCookie = _audioFileStream.MagicCookie;
            }
        }
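
The code that later fills _currentBuffer and hands it to the queue is not part of this excerpt. A minimal sketch of that step, assuming a bytesFilled count accumulated alongside the collected packet descriptions, might be:

        // Hypothetical sketch: enqueue the buffer that has just been filled,
        // passing along the packet descriptions collected for it, then reset
        // the descriptions so the buffer can be reused after it completes.
        void EnqueueCurrentBuffer(int bytesFilled)
        {
            OutputQueue.EnqueueBuffer(_currentBuffer.Buffer, bytesFilled,
                _currentBuffer.PacketDescriptions.ToArray());
            _currentBuffer.PacketDescriptions.Clear();
        }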
Example #7
        /// <summary>
        /// Stops the currently playing audio
        /// </summary>
        public void StopPlaying()
        {
            //This lock is used to ensure that starting and stopping of songs do not happen at the same time.
            lock (startStopSyncObject)
            {
                if (!IsPlaying)
                {
                    throw new InvalidOperationException("Audio is not playing");
                }
#if __ANDROID__
                //We use pause instead of stop because pause stops playing immediately
                playingTrack.Pause();

                //Lock track disposal so the track is never in a state where it is disposed/released but not null
                lock (trackDisposedOfSyncObject)
                {
                    playingTrack.Release();
                    playingTrack.Dispose();
                    playingTrack = null;
                }
#endif
#if __IOS__
                //Pass true to stop immediately
                audioQueue.Stop(true);

                //Lock track disposal so the track is never in a state where it is disposed but not null
                lock (trackDisposedOfSyncObject)
                {
                    audioQueue.Dispose();
                    audioQueue = null;
                }
#endif
            }
        }
        /// <summary>
        /// Called when an AudioProperty is found in the fed packets
        /// </summary>
        void AudioPropertyFound(object sender, PropertyFoundEventArgs args)
        {
            switch (args.Property)
            {
            case AudioFileStreamProperty.ReadyToProducePackets:
                Started = false;


                if (OutputQueue != null)
                {
                    OutputQueue.Dispose();
                }

                OutputQueue                  = new OutputAudioQueue(fileStream.StreamBasicDescription);
                currentByteCount             = 0;
                OutputQueue.OutputCompleted += HandleOutputQueueOutputCompleted;
                outputBuffers                = new List <AudioBuffer>();

                for (int i = 0; i < MaxBufferCount; i++)
                {
                    IntPtr outBuffer;
                    OutputQueue.AllocateBuffer(BufferSize, out outBuffer);
                    outputBuffers.Add(new AudioBuffer()
                    {
                        Buffer = outBuffer, PacketDescriptions = new List <AudioStreamPacketDescription>()
                    });
                }

                currentBuffer = outputBuffers.First();

                OutputQueue.MagicCookie = fileStream.MagicCookie;
                break;
            }
        }
 void prepareAudioQueue()
 {            
     AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
     {
         SampleRate = _samplingRate,
         Format = AudioFormatType.LinearPCM,
         FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.IsPacked,
         FramesPerPacket = 1,
         ChannelsPerFrame = 1, // mono
         BitsPerChannel = 16, // 16-bit
         BytesPerPacket = 2,
         BytesPerFrame = 2,
         Reserved = 0
     };
     _audioQueue = new OutputAudioQueue( audioFormat );
     _audioQueue.OutputCompleted += new EventHandler<OutputCompletedEventArgs>(_audioQueue_OutputCompleted);
     
     _tmpBuf = new short[_numPacketsToRead];
     //_numPacketsToRead  = 256;
     int bufferByteSize = _numPacketsToRead * audioFormat.BytesPerPacket;
     IntPtr bufferPtr;
     for (int index = 0; index < 3; index++)
     {
         _audioQueue.AllocateBuffer(bufferByteSize, out bufferPtr);
         outputCallback(bufferPtr);
     }
     _isPrepared = true;            
 }
Example #10
        /// <summary>
        /// Cleans up all the native resources
        /// </summary>
        protected virtual void Dispose(bool disposing)
        {
            if (disposing)
            {
                if (OutputQueue != null)
                {
                    OutputQueue.Stop(true);
                }

                if (outputBuffers != null)
                {
                    foreach (var b in outputBuffers)
                    {
                        OutputQueue.FreeBuffer(b.Buffer);
                    }

                    outputBuffers.Clear();
                    outputBuffers = null;
                }

                if (fileStream != null)
                {
                    fileStream.Close();
                    fileStream = null;
                }

                if (OutputQueue != null)
                {
                    OutputQueue.Dispose();
                    OutputQueue = null;
                }
            }
        }
Example #11
		public void Setup()
		{
			if(soundEffect.IsDisposed)
				throw new ObjectDisposedException(soundEffect.ToString());

			buffers = new IntPtr[numberOfBuffers];

			queue = new OutputAudioQueue(soundEffect.description);
			queue.OutputCompleted += new EventHandler<OutputCompletedEventArgs>(HandleOutputBuffer);
			IsRunningChangedCallback = new AudioQueue.AudioQueuePropertyChanged(IsRunningChanged);
			queue.AddListener(AudioQueueProperty.IsRunning, IsRunningChangedCallback);

			// Set hardware mode
			unsafe
			{
				const AudioQueueProperty HardwareCodecPolicy = (AudioQueueProperty)1634820976; // 'aqcp'
				const uint PreferSoftware = 3, PreferHardware = 4;
				uint policy = hardware ? PreferHardware : PreferSoftware;
				queue.SetProperty(HardwareCodecPolicy, Marshal.SizeOf(typeof(uint)), new IntPtr(&policy));
			}

			AllocatePacketDescriptionsArray();
			queue.MagicCookie = audioFile.MagicCookie;
			AllocateBuffers();
			queue.Volume = 1;
			PrimeBuffers();
		}
        void prepareAudioQueue()
        {
            _audioFile = AudioFile.Open(_url, AudioFilePermission.Read, AudioFileType.AIFF);

            // Getting AudioStreamBasicDescription
            var audioFormat = _audioFile.StreamBasicDescription;

            // Creating an audio output queue object instance
            _audioQueue = new OutputAudioQueue(audioFormat);
            _audioQueue.OutputCompleted += new EventHandler <OutputCompletedEventArgs>(_audioQueue_OutputCompleted);

            // Getting packet size
            int maxPacketSize = _audioFile.MaximumPacketSize;

            _startingPacketCount = 0;

            _numPacketsToRead = 1024;
            _bufferByteSize   = _numPacketsToRead * maxPacketSize;

            // enqueue buffers
            IntPtr bufferPtr;

            for (int index = 0; index < 3; index++)
            {
                _audioQueue.AllocateBuffer(_bufferByteSize, out bufferPtr);
                outputCallback(bufferPtr);
            }
            _isPrepared = true;
        }
        /// <summary>
        /// Plays a single note. Separate from the rest of the song playing code
        /// </summary>
        public static void PlayNote(Instrument.Note note)
        {
            lock (syncObj)
            {
#if __ANDROID__
                if (playingTrack != null)
                {
                    //We use pause instead of stop because pause stops playing immediately
                    playingTrack.Pause();
                    playingTrack.Release();
                    playingTrack.Dispose();
                }
#endif
#if __IOS__
                if (audioQueue != null)
                {
                    //Pass true to stop immediately
                    audioQueue.Stop(true);
                    audioQueue.Dispose();
                }
#endif

#if __ANDROID__
                playingTrack = new AudioTrack(
                    // Stream type
                    Android.Media.Stream.Music,
                    // Frequency
                    SongPlayer.PLAYBACK_RATE,
                    // Mono or stereo
                    ChannelOut.Mono,
                    // Audio encoding
                    Android.Media.Encoding.Pcm16bit,
                    // Length of the audio clip in bytes
                    (note.data.Length * 2),
                    // Mode. Stream or static.
                    AudioTrackMode.Static);

                playingTrack.Write(note.data, 0, note.data.Length);
                playingTrack.Play();
#endif
#if __IOS__
                audioQueue = new OutputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM(SongPlayer.PLAYBACK_RATE, 1, 16, false));
                unsafe
                {
                    AudioQueueBuffer *buffer;
                    audioQueue.AllocateBuffer(note.data.Length * 2, out buffer);

                    fixed(short *beatData = note.data)
                    {
                        buffer->CopyToAudioData((IntPtr)beatData, note.data.Length * 2);
                    }

                    audioQueue.EnqueueBuffer((IntPtr)buffer, note.data.Length * 2, null);
                }

                audioQueue.Start();
#endif
            }
        }
Example #14
 void OnPropertyFound(object sender, PropertyFoundEventArgs e)
 {
     if (e.Property == AudioFileStreamProperty.ReadyToProducePackets)
     {
         outputQueue = new OutputAudioQueue(audioFileStream.StreamBasicDescription);
         outputQueue.OutputCompleted += OnOutputQueueOutputCompleted;
     }
 }
Example #15
        public override bool FinishedLaunching(UIApplication application, NSDictionary launchOptions)
        {
            // create a new window instance based on the screen size
            Window = new UIWindow(UIScreen.MainScreen.Bounds);
            UIViewController controller = new UIViewController();

            controller.View.BackgroundColor = UIColor.DarkGray;
            controller.Title = "Xamarin iOS Debugger";

            UINavigationController navController = new UINavigationController(controller);

            Window.RootViewController = navController;

            // make the window visible
            Window.MakeKeyAndVisible();

            //var documents = Environment.GetFolderPath(Environment.SpecialFolder.MyDocuments);
            //var filename = Path.Combine(documents, "Write.txt");
            //File.WriteAllText(filename, "Write this text into a file!");

            OutputAudioQueue audioQueue = new OutputAudioQueue(new AudioStreamBasicDescription()
            {
                Format           = AudioFormatType.MPEG4AAC_HE,
                BytesPerPacket   = 0,
                BitsPerChannel   = 0,
                Reserved         = 0,
                FormatFlags      = 0,
                BytesPerFrame    = 0, //Set this field to 0 for compressed formats.
                SampleRate       = 16000,
                ChannelsPerFrame = 1,
                FramesPerPacket  = 1024 //for AAC.
            });
            const int BufferCountMax  = 1000;
            const int AudioBufferSize = 1024 * 8;

            for (int i = 0; i < BufferCountMax; i++)
            {
                AudioQueueStatus aqs = audioQueue.AllocateBufferWithPacketDescriptors(
                    AudioBufferSize,
                    1,
                    out IntPtr ipBuffer
                    );

                if (aqs == AudioQueueStatus.Ok)
                {
                    //_queueAudioOutputBuffers.Enqueue(ipBuffer);
                    //_qFreeAudioOutputBuffers.Enqueue(ipBuffer);
                    byte[] abData = new byte[512];
                    Marshal.Copy(abData, 0, ipBuffer, abData.Length);
                }
                else
                {
                    Debug.WriteLine("AudioQueueStatus: " + aqs);
                }
            }

            return(true);
        }
Example #16
        public void ChannelAssignments()
        {
            var aq = new OutputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            Assert.AreEqual(AudioQueueStatus.Ok, aq.SetChannelAssignments(
                                new AudioQueueChannelAssignment("11", 0),
                                new AudioQueueChannelAssignment("22", 1)
                                ));
        }
Example #17
		public override bool FinishedLaunching(UIApplication app, NSDictionary options)
		{
			//
			// Setup audio system
			//
			AudioSession.Initialize ();
			AudioSession.Category = AudioSessionCategory.MediaPlayback;


			// 
			// Format description, we generate LinearPCM as short integers
			//
			sampleRate = AudioSession.CurrentHardwareSampleRate;
			var format = new AudioStreamBasicDescription () {
				SampleRate = sampleRate,
				Format = AudioFormatType.LinearPCM,
				FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
				BitsPerChannel = 16,
				ChannelsPerFrame = 1,
				BytesPerFrame = 2,
				BytesPerPacket = 2, 
				FramesPerPacket = 1,
			};

			// 
			// Create an output queue
			//
			var queue = new OutputAudioQueue (format);
			var bufferByteSize = (sampleRate > 16000)? 2176 : 512; // 40.5 Hz : 31.25 Hz 

			// 
			// Create three buffers, generate a tone, and output the tones
			//
			var buffers = new AudioQueueBuffer* [numBuffers];
			for (int i = 0; i < numBuffers; i++){
				queue.AllocateBuffer (bufferByteSize, out buffers [i]);
				GenerateTone (buffers [i]);
				queue.EnqueueBuffer (buffers [i], null);
			}

			//
			// Output callback: invoked when the audio system is done with the
			// buffer, this implementation merely recycles it.
			//
			queue.OutputCompleted += (object sender, OutputCompletedEventArgs e) => {
				if (alternate){
					outputWaveForm +=1;
					if (outputWaveForm > WaveForm.Square)
						outputWaveForm = WaveForm.Sine;
					GenerateTone (e.UnsafeBuffer);
				}
				queue.EnqueueBuffer (e.UnsafeBuffer, null);
			};

			queue.Start ();
			return true;
		}
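
GenerateTone is referenced above but not included in this excerpt. A minimal sketch that fills a buffer with a 16-bit sine tone (assuming a fixed 440 Hz frequency and ignoring the outputWaveForm switching) could look like this:

		unsafe void GenerateTone (AudioQueueBuffer *buffer)
		{
			// Hypothetical sketch: write 16-bit sine samples into the whole
			// buffer and record how many bytes were produced.
			uint sampleCount = buffer->AudioDataBytesCapacity / 2;
			double frequency = 440; // assumed tone frequency
			double theta = 2 * Math.PI * frequency / sampleRate;

			short *samples = (short *) buffer->AudioData;
			for (uint i = 0; i < sampleCount; i++)
				samples [i] = (short) (Math.Sin (theta * i) * short.MaxValue);

			buffer->AudioDataByteSize = sampleCount * 2;
		}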
Example #18
        /// <summary>
        /// Start the platform-specific audio object and give it some initial data (beat0 and beat1)
        /// </summary>
        private void StartStreamingAudio(short[] beat0, short[] beat1)
        {
#if __ANDROID__
            playingTrack = new AudioTrack(
                // Stream type
                Android.Media.Stream.Music,
                // Frequency
                PLAYBACK_RATE,
                // Mono or stereo
                ChannelOut.Mono,
                // Audio encoding
                Android.Media.Encoding.Pcm16bit,
                // Length of the audio clip  in bytes
                (samplesPerBeat * 2) * 2, //Multiply by 2 because we want two beats to fit in the playingTrack's memory
                // Mode. Stream or static.
                AudioTrackMode.Stream);

            //Set up notifications at the end of beats
            playingTrack.PeriodicNotification += OnStreamingAudioPeriodicNotification;
            playingTrack.SetPositionNotificationPeriod(samplesPerBeat);

            //Write the initial data and begin playing
            playingTrack.Write(beat0, 0, beat0.Length);
            playingTrack.Write(beat1, 0, beat1.Length);
            playingTrack.Play();
#endif
#if __IOS__
            audioQueue = new OutputAudioQueue(streamDesc);
            unsafe
            {
                //Allocate two buffers to store audio data
                AudioQueueBuffer *buffer0;
                AudioQueueBuffer *buffer1;
                audioQueue.AllocateBuffer(beat0.Length * 2, out buffer0);
                audioQueue.AllocateBuffer(beat1.Length * 2, out buffer1);

                //Copy initial audio data to the buffers
                fixed(short *beatData0 = beat0)
                {
                    buffer0->CopyToAudioData((IntPtr)beatData0, beat0.Length * 2);
                }

                fixed(short *beatData1 = beat1)
                {
                    buffer1->CopyToAudioData((IntPtr)beatData1, beat1.Length * 2);
                }

                //Add the buffers to the queue
                audioQueue.EnqueueBuffer((IntPtr)buffer0, beat0.Length * 2, null);
                audioQueue.EnqueueBuffer((IntPtr)buffer1, beat1.Length * 2, null);
            }

            //Set up periodic notifications
            audioQueue.BufferCompleted += OnStreamingAudioPeriodicNotification;
            audioQueue.Start();
#endif
        }
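
On iOS, the OnStreamingAudioPeriodicNotification handler subscribed above receives a BufferCompleted event for each finished buffer; it is not shown in this excerpt. A minimal sketch, assuming a GetNextBeat() helper that supplies the next block of samples, could refill and re-enqueue the completed buffer:

#if __IOS__
        unsafe void OnStreamingAudioPeriodicNotification(object sender, BufferCompletedEventArgs e)
        {
            //Hypothetical sketch: copy the next beat into the buffer the queue
            //just finished playing, then hand the buffer back to the queue
            short[] nextBeat = GetNextBeat(); // assumed helper supplying samples

            fixed (short *beatData = nextBeat)
            {
                e.UnsafeBuffer->CopyToAudioData((IntPtr)beatData, nextBeat.Length * 2);
            }

            audioQueue.EnqueueBuffer((IntPtr)e.UnsafeBuffer, nextBeat.Length * 2, null);
        }
#endif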
Example #19
 // event handler - never executed
 void OnPropertyFound(object sender, PropertyFoundEventArgs e)
 {
     if (e.Property == AudioFileStreamProperty.ReadyToProducePackets)
     {
         oaq = new OutputAudioQueue(afs.StreamBasicDescription);
         oaq.BufferCompleted += OnBufferCompleted;
         OutputReady(oaq);
     }
 }
Example #20
 static void IOSAppendStreamingAudio(short[] data, OutputAudioQueue audioQueue)
 {
     unsafe
     {
         fixed(short *p = data)
         {
             audioQueue.EnqueueBuffer((IntPtr)p, data.Length * 2, null);
         }
     }
 }
        public void stop(bool shouldStopImmediate)
        {
            if (null == _audioQueue)
            {
                return;
            }

            _audioQueue.Stop(shouldStopImmediate);
            _audioQueue.Dispose();
            _audioQueue = null;
            _isPrepared = false;
        }
Example #22
        protected virtual void Dispose(bool disposing)
        {
            if (disposing)
            {
                UnregisterRestartable();
                queue.RemoveListener(AudioQueueProperty.IsRunning, IsRunningChangedCallback);
                queue.Dispose();                 // this will dispose of the buffers allocated
                queue = null;
            }

            // Dispose of unmanaged resources...
            Marshal.FreeHGlobal(packetDescriptionArray);
        }
Example #23
        public void ChannelAssignments()
        {
            if (!TestRuntime.CheckSystemAndSDKVersion(6, 0))
            {
                Assert.Inconclusive("Requires iOS 6");
            }

            var aq = new OutputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            Assert.AreEqual(AudioQueueStatus.Ok, aq.SetChannelAssignments(
                                new AudioQueueChannelAssignment("11", 0),
                                new AudioQueueChannelAssignment("22", 1)
                                ));
        }
Example #24
 public void Stop()
 {
     if (OutputQueue == null)
     {
         return;
     }
     OutputQueue.Stop(true);
     OutputQueue.Reset();
     foreach (var buf in Buffers)
     {
         OutputQueue.FreeBuffer(buf.Data);
     }
     Buffers = null;
     Marshal.FreeHGlobal(RawBuffer);
     OutputQueue.Dispose();
     OutputQueue = null;
 }
 public CoreAudioController()
 {
     var ad = new AudioStreamBasicDescription () {
         SampleRate = 44100.0,
         Format = AudioFormatType.LinearPCM,
         FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.IsPacked,
         FramesPerPacket = 1,
         ChannelsPerFrame = 1,
         BitsPerChannel = 16,
         BytesPerPacket = 2,
         BytesPerFrame = 2,
         Reserved = 0};
     audioq = new OutputAudioQueue (ad);
     audioq.OutputCompleted += delegate(object sender, OutputCompletedEventArgs e) {
         EnqueueNextBuffer (e.IntPtrBuffer);
     };
     audioq.AllocateBuffer (synth_buffer.Length * ad.BytesPerPacket, out audio_buffer);
 }
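
EnqueueNextBuffer, used in the OutputCompleted handler above, is not included in this excerpt. A minimal sketch, assuming synth_buffer is a short[] holding the next block of samples to play, might refill the completed buffer and enqueue it again:

 void EnqueueNextBuffer (IntPtr buffer)
 {
     // Hypothetical sketch: copy the current synth output into the buffer the
     // queue just released and hand it back to the queue.
     int byteCount = synth_buffer.Length * 2; // 16-bit samples, 2 bytes each
     unsafe {
         fixed (short *src = synth_buffer) {
             OutputAudioQueue.FillAudioData (buffer, 0, new IntPtr ((void *) src), 0, byteCount);
         }
     }
     audioq.EnqueueBuffer (buffer, byteCount, null);
 }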
Example #26
        public void ResetOutputQueue()
        {
            if (OutputQueue != null)
            {
                OutputQueue.Stop(true);
                OutputQueue.Reset();

                foreach (AudioBuffer buf in outputBuffers)
                {
                    buf.PacketDescriptions.Clear();
                    OutputQueue.FreeBuffer(buf.Buffer);
                }

                outputBuffers = null;
                OutputQueue.Dispose();
                OutputQueue = null;
            }
        }
        void outputCallback(IntPtr bufPtr)
        {
            int    numPackets = _numPacketsToRead;
            int    numBytes   = _numPacketsToRead * 2;
            double freq       = 440 * 2.0 * Math.PI / _samplingRate;

            for (int i = 0; i < _tmpBuf.Length; i++)
            {
                double wave   = Math.Sin(_phase);
                short  sample = (short)(wave * 32767); // 16-bit integer
                _tmpBuf[i] = sample;
                _phase    += freq;
            }

            unsafe
            {
                fixed(short *ptr = _tmpBuf)
                {
                    OutputAudioQueue.FillAudioData(bufPtr, 0, new IntPtr((void *)ptr), 0, numBytes);
                }
            }
            _audioQueue.EnqueueBuffer(bufPtr, numBytes, null);

            /* original
             * var buffer = (AudioQueueBuffer)Marshal.PtrToStructure(bufPtr, typeof(AudioQueueBuffer));
             * int numPackets = _numPacketsToRead;
             * int numBytes = _numPacketsToRead * 2;
             * double freq = 440 * 2.0 * Math.PI / 44100.0;
             * unsafe {
             *  short *output = (short * )buffer.AudioData.ToPointer();
             *  double phase = _phase;
             *  for (int i = 0; i < numPackets; i++)
             *  {
             *      double wave = Math.Sin(phase);
             *      short sample = (short)(wave * 32767); // 16-bit integer
             *      *output++ = sample;
             *      phase = phase + freq;
             *  }
             *  _phase = phase;
             * }
             * buffer.AudioDataByteSize = (uint)numBytes;
             * _audioQueue.EnqueueBuffer(bufPtr, numBytes, null);
             */
        }
Example #28
        void Init(int samplingRate)
        {
            var interleaved = false;
            var desc        = new AudioStreamBasicDescription()
            {
                SampleRate       = samplingRate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
                BitsPerChannel   = 16,
                ChannelsPerFrame = channels,
                BytesPerFrame    = 2 * (interleaved ? channels : 1),
                BytesPerPacket   = 2,
                FramesPerPacket  = 1,
            };

            Descriptions[0].StartOffset  = 0;
            Descriptions[0].DataByteSize = BufferSizeInByte;

            OutputQueue = new OutputAudioQueue(desc);
            if (OutputQueue == null)
            {
                return;
            }

            // AudioQueue.FillAudioData writes raw samples into the AudioQueueBuffer
            RawBuffer = Marshal.AllocHGlobal(BufferSizeInByte);

            Buffers = new List <AudioBuffer>();
            for (int i = 0; i < MaxBufferCount; i++)
            {
                IntPtr outBuffer;
                OutputQueue.AllocateBuffer(BufferSizeInByte, out outBuffer);
                Buffers.Add(new AudioBuffer()
                {
                    Data = outBuffer
                });
            }

            CurBuffIndex = 0;
            BufReadyNum  = 0;

            OutputQueue.BufferCompleted += OutputQueue_BufferCompleted;
        }
Example #29
        protected override void _Start()
        {
            AudioStreamBasicDescription format = new AudioStreamBasicDescription
            {
                SampleRate       = m_SampleFormat.Rate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
                BitsPerChannel   = m_SampleFormat.Bits,
                ChannelsPerFrame = m_SampleFormat.Channels,
                BytesPerFrame    = m_SampleFormat.FrameSize,
                BytesPerPacket   = m_SampleFormat.FrameSize,
                FramesPerPacket  = 1
            };

            m_AudioQueue = new OutputAudioQueue(format);
            m_AudioQueueTimeProvider.Init(m_AudioQueue, m_SampleFormat);
            _OnSettingsUpdated();
            int bufferSize = m_BufferFrameCount * m_SampleFormat.FrameSize;

            _UnsafeStart(bufferSize);
            m_AudioQueue.Start();
        }
Example #30
        public unsafe Task PlayOnce(System.IO.Stream stream)
        {
            return(Task.Run(() =>
            {
                try
                {
                    int sampleRate = 16000;
                    uint channels = 1;
                    uint bitsPerSample = 16;

                    if (_queue != null)
                    {
                        _queue.BufferCompleted -= Queue_BufferCompleted;
                        _queue.Stop(true);
                    }

                    var format = AudioStreamBasicDescription.CreateLinearPCM(sampleRate, channels, bitsPerSample);
                    _queue = new OutputAudioQueue(format);
                    _queue.BufferCompleted += Queue_BufferCompleted;
                    _queue.Volume = 1;

                    var buffer1 = new byte[stream.Length];
                    stream.Read(buffer1, 0, buffer1.Length);
                    _queue.AllocateBuffer(buffer1.Length, out AudioQueueBuffer * buffer);

                    GCHandle pinned = GCHandle.Alloc(buffer1, GCHandleType.Pinned);
                    IntPtr address = pinned.AddrOfPinnedObject();
                    buffer->CopyToAudioData(address, buffer1.Length);
                    buffer->AudioDataByteSize = (uint)buffer1.Length;
                    pinned.Free(); // data has been copied into the queue buffer, so the pin can be released

                    _queue.EnqueueBuffer(buffer, null);
                    _queue.Start();
                }
                catch (Exception ex)
                {
                }
            }));
        }
Example #31
        public void ChannelAssignments()
        {
            var aq = new OutputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            var route   = global::AVFoundation.AVAudioSession.SharedInstance().CurrentRoute;
            var outputs = route.Outputs;

            if (outputs.Length > 0)
            {
                var port        = outputs [0];
                var assignments = new List <AudioQueueChannelAssignment> ();
                var id          = port.UID;
                for (int i = 0; i < aq.AudioStreamDescription.ChannelsPerFrame; i++)
                {
                    assignments.Add(new AudioQueueChannelAssignment(id, (uint)i));
                }
                Assert.AreEqual(AudioQueueStatus.Ok, aq.SetChannelAssignments(assignments.ToArray()));
            }
            else
            {
                Assert.Ignore("No outputs in the current route ({0})", route.Description);
            }
        }
        /// <summary>
        /// Cleans up all the native resources
        /// </summary>
        protected virtual void Dispose(bool disposing)
        {
            if (disposing)
            {
                if (OutputQueue != null)
                {
                    OutputQueue?.Stop(true);
                }

                if (_outputBuffers != null)
                {
                    foreach (var b in _outputBuffers)
                    {
                        if (b != null)
                        {
                            OutputQueue?.FreeBuffer(b.Buffer);
                        }
                    }

                    _outputBuffers.Clear();
                    _outputBuffers = null;
                }

                if (_audioFileStream != null)
                {
                    _audioFileStream.Close();
                    _audioFileStream = null;
                }

                if (OutputQueue != null)
                {
                    OutputQueue?.Dispose();
                    OutputQueue = null;
                }
            }
        }
Example #33
        unsafe static void RenderAudio(CFUrl sourceUrl, CFUrl destinationUrl)
        {
            AudioStreamBasicDescription dataFormat;
            AudioQueueBuffer *          buffer = null;
            long currentPacket = 0;
            int  packetsToRead = 0;

            AudioStreamPacketDescription[] packetDescs = null;
            bool flushed = false;
            bool done    = false;
            int  bufferSize;

            using (var audioFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read, (AudioFileType)0)) {
                dataFormat = audioFile.StreamBasicDescription;

                using (var queue = new OutputAudioQueue(dataFormat, CFRunLoop.Current, CFRunLoop.ModeCommon)) {
                    queue.BufferCompleted += (sender, e) => {
                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
                    };

                    // we need to calculate how many packets we read at a time and how big a buffer we need
                    // we base this on the size of the packets in the file and an approximate duration for each buffer
                    bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;

                    // first check to see what the max size of a packet is - if it is bigger
                    // than our allocation default size, that needs to become larger
                    // adjust buffer size to represent about a second of audio based on this format
                    CalculateBytesForTime(dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);

                    if (isVBR)
                    {
                        packetDescs = new AudioStreamPacketDescription [packetsToRead];
                    }
                    else
                    {
                        packetDescs = null;                         // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
                    }

                    if (audioFile.MagicCookie.Length != 0)
                    {
                        queue.MagicCookie = audioFile.MagicCookie;
                    }

                    // allocate the input read buffer
                    queue.AllocateBuffer(bufferSize, out buffer);

                    // prepare the capture format
                    var captureFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame, 32);
                    captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;

                    queue.SetOfflineRenderFormat(captureFormat, audioFile.ChannelLayout);

                    // prepare the target format
                    var dstFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame);

                    using (var captureFile = ExtAudioFile.CreateWithUrl(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
                        captureFile.ClientDataFormat = captureFormat;

                        int          captureBufferSize = bufferSize / 2;
                        AudioBuffers captureABL        = new AudioBuffers(1);

                        AudioQueueBuffer *captureBuffer;
                        queue.AllocateBuffer(captureBufferSize, out captureBuffer);

                        captureABL [0] = new AudioBuffer()
                        {
                            Data           = captureBuffer->AudioData,
                            NumberChannels = captureFormat.ChannelsPerFrame
                        };

                        queue.Start();

                        double ts = 0;
                        queue.RenderOffline(ts, captureBuffer, 0);

                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);

                        while (true)
                        {
                            int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;

                            queue.RenderOffline(ts, captureBuffer, reqFrames);

                            captureABL.SetData(0, captureBuffer->AudioData, (int)captureBuffer->AudioDataByteSize);
                            var writeFrames = captureABL [0].DataByteSize / captureFormat.BytesPerFrame;

                            // Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes",
                            // ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);

                            captureFile.WriteAsync((uint)writeFrames, captureABL);

                            if (flushed)
                            {
                                break;
                            }

                            ts += writeFrames;
                        }

                        CFRunLoop.Current.RunInMode(CFRunLoop.ModeDefault, 1, false);
                    }
                }
            }
        }
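
CalculateBytesForTime is referenced above but not part of this excerpt. A minimal sketch of the usual sizing heuristic (a buffer holding roughly `seconds` worth of packets, clamped between assumed bounds) could be:

        static void CalculateBytesForTime(AudioStreamBasicDescription desc, int maxPacketSize, double seconds, out int bufferSize, out int packetCount)
        {
            // Hypothetical sketch; the 64 KB / 16 KB bounds are assumptions,
            // not values taken from this project.
            const int maxBufferSize = 0x10000;
            const int minBufferSize = 0x4000;

            if (desc.FramesPerPacket > 0)
            {
                // packets per second = SampleRate / FramesPerPacket
                bufferSize = (int)(desc.SampleRate / desc.FramesPerPacket * seconds * maxPacketSize);
            }
            else
            {
                // no fixed packet duration: fall back to one maximal packet per buffer
                bufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize;
            }

            if (bufferSize > maxBufferSize && bufferSize > maxPacketSize)
            {
                bufferSize = maxBufferSize;
            }
            else if (bufferSize < minBufferSize)
            {
                bufferSize = minBufferSize;
            }

            packetCount = bufferSize / maxPacketSize;
        }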
        protected virtual void Dispose(bool disposing)
        {
            // Release unmanaged buffers, flush output, close files.
            if (disposing){
                if (outputBuffers != null)
                    foreach (var b in outputBuffers)
                        OutputQueue.FreeBuffer (b);

                if (fileStream != null){
                    fileStream.Close ();
                    fileStream = null;
                }
                if (OutputQueue != null){
                    OutputQueue.Dispose ();
                    OutputQueue = null;
                }
            }
        }
        void AudioPropertyFound(object sender, PropertyFoundEventArgs args)
        {
            switch (args.Property){
                //
                // Enough data has been read that we can start producing output
                //
            case AudioFileStreamProperty.ReadyToProducePackets:
                bytesFilled = 0;
                fillBufferIndex = 0;
                packetsFilled = 0;
                started = false;
                OutputQueue = new OutputAudioQueue (fileStream.StreamBasicDescription);
                OutputQueue.OutputCompleted += HandleOutputQueueOutputCompleted;

                outputBuffers = new IntPtr [4];
                inuse = new bool [4];

                // Allocate audio queue buffers
                for (int i = 0; i < outputBuffers.Length; i++)
                    OutputQueue.AllocateBuffer (bufferSize, out outputBuffers [i]);

                OutputQueue.MagicCookie = fileStream.MagicCookie;
                OutputQueue.AddListener (AudioQueueProperty.IsRunning, delegate (AudioQueueProperty p) {
                    var h = Finished;
                    if (h != null)
                        h (this, EventArgs.Empty);
                });

                break;
            }

            Console.WriteLine ("Property: {0}", args);
        }
		unsafe static void RenderAudio (CFUrl sourceUrl, CFUrl destinationUrl)
		{
			AudioStreamBasicDescription dataFormat;
			AudioQueueBuffer *buffer = null;
			long currentPacket = 0;
			int packetsToRead = 0;
			AudioStreamPacketDescription [] packetDescs = null;
			bool flushed = false;
			bool done = false;
			int bufferSize;
			
			using (var audioFile = AudioFile.Open (sourceUrl, AudioFilePermission.Read, (AudioFileType) 0)) {
				dataFormat = audioFile.StreamBasicDescription;
				
				using (var queue = new OutputAudioQueue (dataFormat, CFRunLoop.Current, CFRunLoop.CFRunLoopCommonModes)) {
					queue.OutputCompleted += (sender, e) => 
					{
						HandleOutput (audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
					};
					
					// we need to calculate how many packets we read at a time and how big a buffer we need
					// we base this on the size of the packets in the file and an approximate duration for each buffer
					bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;
					
					// first check to see what the max size of a packet is - if it is bigger
					// than our allocation default size, that needs to become larger
					// adjust buffer size to represent about a second of audio based on this format 
					CalculateBytesForTime (dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);
				
					if (isVBR) {
						packetDescs = new AudioStreamPacketDescription [packetsToRead];
					} else {
						packetDescs = null; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
					}
				
					if (audioFile.MagicCookie.Length != 0)
						queue.MagicCookie = audioFile.MagicCookie;
		
					// allocate the input read buffer
					queue.AllocateBuffer (bufferSize, out buffer);
					
					// prepare the capture format
					var captureFormat = AudioStreamBasicDescription.CreateLinearPCM (dataFormat.SampleRate, (uint) dataFormat.ChannelsPerFrame, 32);
					captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;

					queue.SetOfflineRenderFormat (captureFormat, audioFile.ChannelLayout);
					
					// prepare the target format
					var dstFormat = AudioStreamBasicDescription.CreateLinearPCM (dataFormat.SampleRate, (uint) dataFormat.ChannelsPerFrame);

					using (var captureFile = ExtAudioFile.CreateWithUrl (destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
						captureFile.ClientDataFormat = captureFormat;
						
						int captureBufferSize = bufferSize / 2;
						AudioBuffers captureABL = new AudioBuffers (1);
						
						AudioQueueBuffer *captureBuffer;
						queue.AllocateBuffer (captureBufferSize, out captureBuffer);
						
						captureABL[0] = new AudioBuffer () {
							Data = captureBuffer->AudioData,
							NumberChannels = captureFormat.ChannelsPerFrame
						};

						queue.Start ();

						double ts = 0;
						queue.RenderOffline (ts, captureBuffer, 0);
						
						HandleOutput (audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
						
						while (true) {
							int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;
							
							queue.RenderOffline (ts, captureBuffer, reqFrames);

							captureABL.SetData (0, captureBuffer->AudioData, (int) captureBuffer->AudioDataByteSize);
							var writeFrames = captureABL[0].DataByteSize / captureFormat.BytesPerFrame;
							
							// Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes", 
							//	ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);
							
							captureFile.WriteAsync ((uint) writeFrames, captureABL);
							
							if (flushed)
								break;
							
							ts += writeFrames;
						}
					
						CFRunLoop.Current.RunInMode (CFRunLoop.CFDefaultRunLoopMode, 1, false);
					}
				}
			}
		}
Example #37
		protected virtual void Dispose(bool disposing)
		{
			if(disposing)
			{
				UnregisterRestartable();
				queue.RemoveListener(AudioQueueProperty.IsRunning, IsRunningChangedCallback);
				queue.Dispose(); // this will dispose of the buffers allocated
				queue = null;
			}

			// Dispose of unmanaged resources...
			Marshal.FreeHGlobal(packetDescriptionArray);
		}
        void StreamPropertyListenerProc(object sender, PropertyFoundEventArgs args)
        {
            if (args.Property == AudioFileStreamProperty.DataFormat) {
                dataFormat = audioFileStream.DataFormat;
                return;
            }

            if (args.Property != AudioFileStreamProperty.ReadyToProducePackets)
                return;

            if (audioQueue != null) {
                // TODO: Dispose
                //throw new NotImplementedException ();
            }

            audioQueue = new OutputAudioQueue (dataFormat);
            audioQueue.VolumeRampTime = 2.0f;
            audioQueue.OutputCompleted += HandleOutputCompleted;
        }
Example #39
		void Reset ()
		{
			if (fileStream != null) {
				fileStream.Dispose ();
				fileStream = null;
			}

			if (outputQueue != null) {
				outputQueue.RemoveListener (AudioQueueProperty.IsRunning, EmitFinishedEvent);

				outputQueue.Stop (true);
				outputQueue.Reset ();
				foreach (AudioBuffer buf in outputBuffers.Values) {
					buf.PacketDescriptions.Clear ();
					outputQueue.FreeBuffer (buf.Buffer);
				}
				outputQueue.Dispose ();

				availableBuffers = null;
				outputBuffers = null;
				outputQueue = null;
			}
		}
        public void stop(bool shouldStopImmediate)
        {
            if (null == _audioQueue)
                return;

            _audioQueue.Stop(shouldStopImmediate);
            _audioQueue.Dispose();
            _audioQueue = null;
            _isPrepared = false;
        }
        void StreamPropertyListenerProc(object sender, PropertyFoundEventArgs args)
        {
            if (args.Property == AudioFileStreamProperty.DataFormat)
            {
                dataFormat = audioFileStream.DataFormat;
                return;
            }

            if (args.Property != AudioFileStreamProperty.ReadyToProducePackets)
            {
                return;
            }

            if (audioQueue != null)
            {
                // TODO: Dispose
                throw new NotImplementedException();
            }

            audioQueue = new OutputAudioQueue(dataFormat);
            audioQueue.BufferCompleted += HandleBufferCompleted;

            AudioQueueStatus status;

            aqTap = audioQueue.CreateProcessingTap(TapProc, AudioQueueProcessingTapFlags.PreEffects, out status);
            if (status != AudioQueueStatus.Ok)
            {
                throw new ApplicationException("Could not create AQ tap");
            }

            // create an augraph to process in the tap. needs to convert from tapFormat to effect format and back

            /* note: this is invalidname's recipe to do an in-place effect when a format conversion is needed
             * before and after the effect, usually because effects want floats, and everything else in iOS
             * core audio works with ints (or, in rare cases, fixed-point).
             * the graph looks like this:
             * [render-callback] -> [converter] -> [effect] -> [converter] -> [generic-output]
             * prior to calling AudioUnitRender() on generic-output, it copies the ioData to a pointer that render-callback
             * knows about, and NULLs the ioData provided to AudioUnitRender(). the NULL tells generic-output to
             * pull from its upstream units (ie, the augraph), and copying off the ioData pointer allows the
             * render-callback	to provide it to the front of the stream. in some locales, this kind of shell game
             * is described as "batshit crazy", but it seems to work pretty well in practice.
             */

            auGraph = new AUGraph();
            auGraph.Open();
            var effectNode = auGraph.AddNode(AudioComponentDescription.CreateConverter(AudioTypeConverter.NewTimePitch));

            effectUnit = auGraph.GetNodeInfo(effectNode);

            var convertToEffectNode = auGraph.AddNode(AudioComponentDescription.CreateConverter(AudioTypeConverter.AU));
            var convertToEffectUnit = auGraph.GetNodeInfo(convertToEffectNode);

            var convertFromEffectNode = auGraph.AddNode(AudioComponentDescription.CreateConverter(AudioTypeConverter.AU));
            var convertFromEffectUnit = auGraph.GetNodeInfo(convertFromEffectNode);

            var genericOutputNode = auGraph.AddNode(AudioComponentDescription.CreateOutput(AudioTypeOutput.Generic));

            genericOutputUnit = auGraph.GetNodeInfo(genericOutputNode);

            // set the format conversions throughout the graph
            var effectFormat = effectUnit.GetAudioFormat(AudioUnitScopeType.Output);
            var tapFormat    = aqTap.ProcessingFormat;

            convertToEffectUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Input);
            convertToEffectUnit.SetAudioFormat(effectFormat, AudioUnitScopeType.Output);

            convertFromEffectUnit.SetAudioFormat(effectFormat, AudioUnitScopeType.Input);
            convertFromEffectUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Output);

            genericOutputUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Input);
            genericOutputUnit.SetAudioFormat(tapFormat, AudioUnitScopeType.Output);

            // set maximum frames per slice higher (4096) so we don't get kAudioUnitErr_TooManyFramesToProcess
            const uint maxFramesPerSlice = 4096;

            if (convertToEffectUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException();
            }
            if (effectUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException();
            }
            if (convertFromEffectUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException();
            }
            if (genericOutputUnit.SetMaximumFramesPerSlice(maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
            {
                throw new ApplicationException();
            }

            // connect the nodes
            auGraph.ConnnectNodeInput(convertToEffectNode, 0, effectNode, 0);
            auGraph.ConnnectNodeInput(effectNode, 0, convertFromEffectNode, 0);
            auGraph.ConnnectNodeInput(convertFromEffectNode, 0, genericOutputNode, 0);

            // set up the callback into the first convert unit
            if (convertToEffectUnit.SetRenderCallback(ConvertInputRenderCallback, AudioUnitScopeType.Global) != AudioUnitStatus.NoError)
            {
                throw new ApplicationException();
            }

            var res = auGraph.Initialize();

            if (res != AUGraphError.OK)
            {
                throw new ApplicationException();
            }
        }
Example #42
		/// <summary>
		/// Called when an AudioProperty is found in the fed packets
		/// </summary>
		void AudioPropertyFound (object sender, PropertyFoundEventArgs args)
		{
			lock (locker) {
				if (args.Property == AudioFileStreamProperty.ReadyToProducePackets) {
					if (outputQueue != null)
						outputQueue.Dispose ();

					availableBuffers = new Queue<AudioBuffer> ();
					outputBuffers = new Dictionary<IntPtr, AudioBuffer> ();
					outputQueue = new OutputAudioQueue (fileStream.StreamBasicDescription);
					outputQueue.AddListener (AudioQueueProperty.IsRunning, EmitFinishedEvent);
					outputQueue.Volume = Volume;
					outputQueue.AddListener (AudioQueueProperty.ConverterError, (AudioQueueProperty property) => {
						LoggingService.LogInfo ("Got an error reading the file: {0}", outputQueue.ConverterError);
					});
					if (OutputReady != null)
						OutputReady (outputQueue);

					outputQueue.BufferCompleted += HandleBufferCompleted;
					outputQueue.MagicCookie = fileStream.MagicCookie;
				}
			}
		}
		/// <summary>
		/// Called when an AudioProperty is found in the fed packets
		/// </summary>
		void AudioPropertyFound (object sender, PropertyFoundEventArgs args)
		{
			switch (args.Property) {
			case AudioFileStreamProperty.ReadyToProducePackets:
				Started = false;
				
				
				if (OutputQueue != null)
					OutputQueue.Dispose ();
				
				OutputQueue = new OutputAudioQueue (fileStream.StreamBasicDescription);
				currentByteCount = 0;
				OutputQueue.OutputCompleted += HandleOutputQueueOutputCompleted;
				outputBuffers = new List<AudioBuffer>();
				
				for (int i = 0; i < MaxBufferCount; i++)
				{
					IntPtr outBuffer;
					OutputQueue.AllocateBuffer (BufferSize, out outBuffer);
					outputBuffers.Add (new AudioBuffer () { Buffer = outBuffer, PacketDescriptions = new List<AudioStreamPacketDescription>() });
				}
				
				currentBuffer = outputBuffers.First ();
				
				OutputQueue.MagicCookie = fileStream.MagicCookie;				
				break;
			}
		}
		/// <summary>
		/// Cleans up all the native resources
		/// </summary>
		protected virtual void Dispose (bool disposing)
		{
			if (disposing) {
				if (OutputQueue != null)
					OutputQueue.Stop (false);
				
				if (outputBuffers != null)
					foreach (var b in outputBuffers)
						OutputQueue.FreeBuffer (b.Buffer);
				
				if (fileStream != null) {
					fileStream.Close ();
					fileStream = null;
				}
				
				if (OutputQueue != null) {
					OutputQueue.Dispose ();
					OutputQueue = null;
				}
			}
		}
Example #45
        public override bool FinishedLaunching(UIApplication app, NSDictionary options)
        {
            //
            // Setup audio system
            //
            var session = AVAudioSession.SharedInstance();

            session.SetCategory(new NSString("AVAudioSessionCategoryPlayback"), AVAudioSessionCategoryOptions.DefaultToSpeaker, out error);
            if (error != null)
            {
                Console.WriteLine(error);
            }
            //
            // Format description, we generate LinearPCM as short integers
            //
            sampleRate = session.SampleRate;
            var format = new AudioStreamBasicDescription
            {
                SampleRate       = sampleRate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
                BitsPerChannel   = 16,
                ChannelsPerFrame = 1,
                BytesPerFrame    = 2,
                BytesPerPacket   = 2,
                FramesPerPacket  = 1,
            };

            //
            // Create an output queue
            //
            var queue          = new OutputAudioQueue(format);
            var bufferByteSize = (sampleRate > 16000) ? 2176 : 512; // 40.5 Hz : 31.25 Hz

            //
            // Create three buffers, generate a tone, and output the tones
            //
            var buffers = new AudioQueueBuffer * [numBuffers];

            for (int i = 0; i < numBuffers; i++)
            {
                queue.AllocateBuffer(bufferByteSize, out buffers[i]);
                GenerateTone(buffers[i]);
                queue.EnqueueBuffer(buffers[i], null);
            }

            //
            // Output callback: invoked when the audio system is done with the
            // buffer, this implementation merely recycles it.
            //
            queue.BufferCompleted += (object sender, BufferCompletedEventArgs e) =>
            {
                if (alternate)
                {
                    outputWaveForm += 1;
                    if (outputWaveForm > WaveForm.Square)
                    {
                        outputWaveForm = WaveForm.Sine;
                    }
                    GenerateTone(e.UnsafeBuffer);
                }
                queue.EnqueueBuffer(e.UnsafeBuffer, null);
            };

            queue.Start();
            return(true);
        }
		void StreamPropertyListenerProc (object sender, PropertyFoundEventArgs args)
		{
			if (args.Property == AudioFileStreamProperty.DataFormat) {
				dataFormat = audioFileStream.DataFormat;
				return;
			}

			if (args.Property != AudioFileStreamProperty.ReadyToProducePackets) 
				return;

			if (audioQueue != null) {
				// TODO: Dispose
				throw new NotImplementedException ();
			}

			audioQueue = new OutputAudioQueue (dataFormat);
			audioQueue.OutputCompleted += HandleOutputCompleted;

			AudioQueueStatus status;
			aqTap = audioQueue.CreateProcessingTap (TapProc, AudioQueueProcessingTapFlags.PreEffects, out status);
			if (status != AudioQueueStatus.Ok)
				throw new ApplicationException ("Could not create AQ tap");

			// create an augraph to process in the tap. needs to convert from tapFormat to effect format and back
			/* note: this is invalidname's recipe to do an in-place effect when a format conversion is needed
			before and after the effect, usually because effects want floats, and everything else in iOS
			core audio works with ints (or, in rare cases, fixed-point).
			the graph looks like this:
			[render-callback] -> [converter] -> [effect] -> [converter] -> [generic-output]
			prior to calling AudioUnitRender() on generic-output, it copies the ioData to a pointer that render-callback
			knows about, and NULLs the ioData provided to AudioUnitRender(). the NULL tells generic-output to
			pull from its upstream units (ie, the augraph), and copying off the ioData pointer allows the
			render-callback	to provide it to the front of the stream. in some locales, this kind of shell game
			is described as "batshit crazy", but it seems to work pretty well in practice.
			*/

			auGraph = new AUGraph ();
			auGraph.Open ();
			var effectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.NewTimePitch));
			effectUnit = auGraph.GetNodeInfo (effectNode);

			var convertToEffectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.AU));
			var convertToEffectUnit = auGraph.GetNodeInfo (convertToEffectNode);

			var convertFromEffectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.AU));
			var convertFromEffectUnit = auGraph.GetNodeInfo (convertFromEffectNode);

			var genericOutputNode = auGraph.AddNode (AudioComponentDescription.CreateOutput (AudioTypeOutput.Generic));
			genericOutputUnit = auGraph.GetNodeInfo (genericOutputNode);

			// set the format conversions throughout the graph
			var effectFormat = effectUnit.GetAudioFormat (AudioUnitScopeType.Output);
			var tapFormat = aqTap.ProcessingFormat;

			convertToEffectUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Input);
			convertToEffectUnit.SetAudioFormat (effectFormat, AudioUnitScopeType.Output);

			convertFromEffectUnit.SetAudioFormat (effectFormat, AudioUnitScopeType.Input);
			convertFromEffectUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Output);

			genericOutputUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Input);
			genericOutputUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Output);

			// set maximum frames per slice higher (4096) so we don't get kAudioUnitErr_TooManyFramesToProcess
			const uint maxFramesPerSlice = 4096;
			if (convertToEffectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (effectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (convertFromEffectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (genericOutputUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();

			// connect the nodes
			auGraph.ConnnectNodeInput (convertToEffectNode, 0, effectNode, 0);
			auGraph.ConnnectNodeInput (effectNode, 0, convertFromEffectNode, 0);
			auGraph.ConnnectNodeInput (convertFromEffectNode, 0, genericOutputNode, 0);

			// set up the callback into the first convert unit
			if (convertToEffectUnit.SetRenderCallback (ConvertInputRenderCallback, AudioUnitScopeType.Global) != AudioUnitStatus.NoError)
				throw new ApplicationException ();

			var res = auGraph.Initialize ();
			if (res != AUGraphError.OK)
				throw new ApplicationException ();
		}