Example #1
        public void Init(int sampleRate, int bufferSize)
        {
            CheckMicrophonePermission();
            this.SampleRate = sampleRate;
            this.bufferSize = bufferSize;
            var audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate       = this.SampleRate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsFloat,
                FramesPerPacket  = 1,
                ChannelsPerFrame = 1,
                BitsPerChannel   = this.BitsPerSample,
                BytesPerPacket   = 2,
                BytesPerFrame    = 2,
                Reserved         = 0
            };

            audioQueue = new InputAudioQueue(audioFormat);
            audioQueue.InputCompleted += QueueInputCompleted;

            var bufferByteSize = this.bufferSize * audioFormat.BytesPerPacket;

            IntPtr bufferPtr;

            for (var index = 0; index < 3; index++)
            {
                audioQueue.AllocateBufferWithPacketDescriptors(bufferByteSize, this.bufferSize, out bufferPtr);
                audioQueue.EnqueueBuffer(bufferPtr, bufferByteSize, null);
            }
        }
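The QueueInputCompleted handler wired up above is not part of this example. A minimal sketch of its usual shape, assuming the audioQueue field from Init and a hypothetical ProcessSamples(byte[]) consumer:

        void QueueInputCompleted(object sender, InputCompletedEventArgs e)
        {
            unsafe
            {
                // Copy the captured PCM bytes out of the completed queue buffer.
                AudioQueueBuffer* buffer = e.UnsafeBuffer;
                var samples = new byte[buffer->AudioDataByteSize];
                System.Runtime.InteropServices.Marshal.Copy(buffer->AudioData, samples, 0, samples.Length);
                ProcessSamples(samples); // hypothetical consumer of the recorded data

                // Re-enqueue the buffer so the queue keeps recording into it.
                audioQueue.EnqueueBuffer(e.IntPtrBuffer, samples.Length, null);
            }
        }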
Example #2
        AudioStreamBasicDescription CanonicalASBD(double sampleRate, int channel)
        {
            // setting AudioStreamBasicDescription
            int AudioUnitSampleTypeSize;

            if (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR)
            {
                AudioUnitSampleTypeSize = sizeof(float);
            }
            else
            {
                AudioUnitSampleTypeSize = sizeof(int);
            }
            AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate = sampleRate,
                Format     = AudioFormatType.LinearPCM,
                //    kAudioFormatFlagsCanonical  = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked,
                FormatFlags      = (AudioFormatFlags)((int)AudioFormatFlags.IsSignedInteger | (int)AudioFormatFlags.IsPacked),
                ChannelsPerFrame = channel,
                BytesPerPacket   = AudioUnitSampleTypeSize * channel,
                BytesPerFrame    = AudioUnitSampleTypeSize * channel,
                FramesPerPacket  = 1,
                BitsPerChannel   = 8 * AudioUnitSampleTypeSize,
                Reserved         = 0
            };

            return(audioFormat);
        }
Example #3
        void PrepareAudioQueue(MonoTouch.CoreFoundation.CFUrl url)
        {
            AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate       = _samplingRate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsBigEndian | AudioFormatFlags.LinearPCMIsPacked,
                FramesPerPacket  = 1,
                ChannelsPerFrame = 1,  // mono
                BitsPerChannel   = 16, // 16-bit
                BytesPerPacket   = 2,
                BytesPerFrame    = 2,
                Reserved         = 0
            };

            _audioFile = AudioFile.Create(url, AudioFileType.AIFF, audioFormat, AudioFileFlags.EraseFlags);

            _queue = new InputAudioQueue(audioFormat);
            _queue.InputCompleted += new EventHandler <InputCompletedEventArgs>(_queue_InputCompleted);

            _startingPacketCount = 0;
            _numPacketsToWrite   = 1024;
            _bufferByteSize      = (int)(_numPacketsToWrite * audioFormat.BytesPerPacket);

            // preparing queue buffer
            IntPtr bufferPtr;

            for (int index = 0; index < 3; index++)
            {
                //_queue.AllocateBuffer(_bufferByteSize, out bufferPtr);
                _queue.AllocateBufferWithPacketDescriptors(_bufferByteSize, _numPacketsToWrite, out bufferPtr);
                _queue.EnqueueBuffer(bufferPtr, _bufferByteSize, null);
            }
        }
Example #4
        public SongPlayer()
        {
            playingNotes = new List <PlayingNote>();
#if __IOS__
            streamDesc = AudioStreamBasicDescription.CreateLinearPCM(PLAYBACK_RATE, 1, 16, false);
#endif
        }
Example #5
 void prepareAudioQueue()
 {            
     AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
     {
         SampleRate = _samplingRate,
         Format = AudioFormatType.LinearPCM,
         FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.IsPacked,
         FramesPerPacket = 1,
         ChannelsPerFrame = 1, // mono
         BitsPerChannel = 16, // 16-bit
         BytesPerPacket = 2,
         BytesPerFrame = 2,
         Reserved = 0
     };
     _audioQueue = new OutputAudioQueue( audioFormat );
     _audioQueue.OutputCompleted += new EventHandler<OutputCompletedEventArgs>(_audioQueue_OutputCompleted);
     
     _tmpBuf = new short[_numPacketsToRead];
     //_numPacketsToRead  = 256;
     int bufferByteSize = _numPacketsToRead * audioFormat.BytesPerPacket;
     IntPtr bufferPtr;
     for (int index = 0; index < 3; index++)
     {
         _audioQueue.AllocateBuffer(bufferByteSize, out bufferPtr);
         outputCallback(bufferPtr);
     }
     _isPrepared = true;            
 }
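The outputCallback referenced above is outside this snippet. A rough sketch, assuming the _audioQueue and _tmpBuf fields from prepareAudioQueue and a hypothetical FillTmpBuf() that renders the next block of 16-bit samples into _tmpBuf:

 void outputCallback(IntPtr bufferPtr)
 {
     FillTmpBuf(); // hypothetical: fill _tmpBuf with the next _numPacketsToRead samples

     // Copy the samples into the queue buffer's AudioData region.
     var buffer = (AudioQueueBuffer)System.Runtime.InteropServices.Marshal.PtrToStructure(bufferPtr, typeof(AudioQueueBuffer));
     System.Runtime.InteropServices.Marshal.Copy(_tmpBuf, 0, buffer.AudioData, _tmpBuf.Length);

     // Enqueue the filled buffer; the byte count is samples * 2 bytes (16-bit mono).
     _audioQueue.EnqueueBuffer(bufferPtr, _tmpBuf.Length * sizeof(short), null);
 }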
Example #6
        void InitAudioQueue()
        {
            var audioFormat = new AudioStreamBasicDescription
            {
                SampleRate       = SampleRate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
                FramesPerPacket  = 1,
                ChannelsPerFrame = 1,
                BitsPerChannel   = BitsPerSample,
                BytesPerPacket   = 2,
                BytesPerFrame    = 2,
                Reserved         = 0
            };

            audioQueue = new InputAudioQueue(audioFormat);
            audioQueue.InputCompleted += QueueInputCompleted;

            var bufferByteSize = bufferSize * audioFormat.BytesPerPacket;

            for (var index = 0; index < 3; index++)
            {
                audioQueue.AllocateBufferWithPacketDescriptors(bufferByteSize, bufferSize, out IntPtr bufferPtr);
                audioQueue.EnqueueBuffer(bufferPtr, bufferByteSize, null);
            }
        }
Example #7
        public SoundCapture(int sampleSize, int buffermilliseconds)
        {
            if (buffermilliseconds > 1000)
            {
                throw new ArgumentOutOfRangeException(nameof(buffermilliseconds));
            }
            pushsize = sampleSize / (1000 / buffermilliseconds);

            description = new AudioStreamBasicDescription
            {
                SampleRate       = sampleSize,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.IsSignedInteger,
                BitsPerChannel   = 16,
                ChannelsPerFrame = 1,
                BytesPerFrame    = 2,
                FramesPerPacket  = 1,
                BytesPerPacket   = 2,
                Reserved         = 0
            };

            audioQueue = new InputAudioQueue(description);
            for (var i = 0; i < 3; i++)
            {
                var ptr = IntPtr.Zero;
                audioQueue.AllocateBufferWithPacketDescriptors(pushsize * description.BytesPerPacket, pushsize, out ptr);
                audioQueue.EnqueueBuffer(ptr, pushsize, null);
                bufferPtrs.Add(ptr);
            }
            audioQueue.InputCompleted += AudioQueueOnInputCompleted;
        }
Example #8
        void PrepareExtAudioFile()
        {
            extAudioFile = ExtAudioFile.OpenUrl(url);
            CheckValue(extAudioFile, "ExtAudioFile.OpenUrl failed");

            srcFormat = extAudioFile.FileDataFormat;

            // This is how you say, “When you convert the data, this is the format I’d like to receive.”
            // The client data format must be PCM. In other words, you can’t use a single ExtAudioFile to convert between two compressed formats.
            extAudioFile.ClientDataFormat = dstFormat;

            // getting total frame
            TotalFrames = extAudioFile.FileLengthFrames;

            // Allocating AudioBufferList
            buffer = new AudioBuffers(srcFormat.ChannelsPerFrame);
            for (int i = 0; i < buffer.Count; ++i)
            {
                int size = (int)(sizeof(int) * TotalFrames);
                buffer.SetData(i, Marshal.AllocHGlobal(size), size);
            }
            numberOfChannels = srcFormat.ChannelsPerFrame;

            // Reading all frame into the buffer
            ExtAudioFileError status;

            extAudioFile.Read((uint)TotalFrames, buffer, out status);
            if (status != ExtAudioFileError.OK)
            {
                throw new ApplicationException();
            }
        }
Example #9
        public static ExtAudioFile CreateWithUrl(CFUrl url,
                                                 AudioFileType fileType,
                                                 AudioStreamBasicDescription inStreamDesc,
                                                 //AudioChannelLayout channelLayout,
                                                 AudioFileFlags flag)
        {
            if (url == null)
            {
                throw new ArgumentNullException("url");
            }

            ExtAudioFileError err;
            var audioFile = CreateWithUrl(url.Handle, fileType, inStreamDesc, flag, out err);

            if (err != ExtAudioFileError.OK)             // if (err != 0)  <- to keep old implementation
            {
                throw new ArgumentException(String.Format("Error code:{0}", err));
            }
            if (audioFile == null)             // if (ptr == IntPtr.Zero)  <- to keep old implementation
            {
                throw new InvalidOperationException("Can not get object instance");
            }

            return(audioFile);
        }
Example #10
        public void Init(int sampleRate = 44100, int bufferSize = 8192)
        {
            _sampleRate = sampleRate;
            _bufferSize = bufferSize;

            var description = new AudioStreamBasicDescription
            {
                SampleRate       = sampleRate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.IsSignedInteger,
                BitsPerChannel   = 16,
                ChannelsPerFrame = 1,
                BytesPerFrame    = 2,
                FramesPerPacket  = 1,
                BytesPerPacket   = 2,
                Reserved         = 0
            };

            _audioQueue = new InputAudioQueue(description);
            _bufferPtrs = new List <IntPtr>();
            for (var i = 0; i < 3; i++)
            {
                IntPtr ptr;
                _audioQueue.AllocateBufferWithPacketDescriptors(bufferSize * description.BytesPerPacket, bufferSize,
                                                                out ptr);
                _audioQueue.EnqueueBuffer(ptr, bufferSize, null);
                _bufferPtrs.Add(ptr);
            }
            _audioQueue.InputCompleted += AudioQueueOnInputCompleted;
        }
Example #11
        void PrepareAudioQueue(MonoTouch.CoreFoundation.CFUrl url)
        {
            AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate = _samplingRate,
                Format = AudioFormatType.LinearPCM,
                FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsBigEndian | AudioFormatFlags.LinearPCMIsPacked,
                FramesPerPacket = 1,
                ChannelsPerFrame = 1, // mono
                BitsPerChannel = 16, // 16-bit
                BytesPerPacket = 2,
                BytesPerFrame = 2,
                Reserved = 0
            };

            _audioFile = AudioFile.Create(url, AudioFileType.AIFF, audioFormat, AudioFileFlags.EraseFlags);
            
            _queue = new InputAudioQueue(audioFormat);
            _queue.InputCompleted += new EventHandler<InputCompletedEventArgs>(_queue_InputCompleted);

            _startingPacketCount = 0;
            _numPacketsToWrite = 1024;
            _bufferByteSize = (int)(_numPacketsToWrite * audioFormat.BytesPerPacket);

            // preparing queue buffer
            IntPtr bufferPtr;
            for (int index = 0; index < 3; index++)
            {
                //_queue.AllocateBuffer(_bufferByteSize, out bufferPtr);
                _queue.AllocateBufferWithPacketDescriptors(_bufferByteSize, _numPacketsToWrite, out bufferPtr);
                _queue.EnqueueBuffer(bufferPtr, _bufferByteSize, null);
            }
        }
Example #12
        void prepareAudioQueue()
        {
            AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate       = _samplingRate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.IsPacked,
                FramesPerPacket  = 1,
                ChannelsPerFrame = 1,  // mono
                BitsPerChannel   = 16, // 16-bit
                BytesPerPacket   = 2,
                BytesPerFrame    = 2,
                Reserved         = 0
            };

            _audioQueue = new OutputAudioQueue(audioFormat);
            _audioQueue.OutputCompleted += new EventHandler <OutputCompletedEventArgs>(_audioQueue_OutputCompleted);

            _tmpBuf = new short[_numPacketsToRead];
            //_numPacketsToRead  = 256;
            int    bufferByteSize = _numPacketsToRead * audioFormat.BytesPerPacket;
            IntPtr bufferPtr;

            for (int index = 0; index < 3; index++)
            {
                _audioQueue.AllocateBuffer(bufferByteSize, out bufferPtr);
                outputCallback(bufferPtr);
            }
            _isPrepared = true;
        }
Example #13
        public SoundRecorder(int sampleRate, VoiceActivityDetector vad)
        {
            this.vad = vad;

            if (AVAudioSession.SharedInstance().Category != AVAudioSession.CategoryRecord &&
                AVAudioSession.SharedInstance().Category != AVAudioSession.CategoryPlayAndRecord)
            {
                throw new AIServiceException("AVAudioCategory not set to RECORD or PLAY_AND_RECORD. Please set it using AVAudioSession class.");
            }

            audioStreamDescription = new AudioStreamBasicDescription
            {
                Format      = AudioFormatType.LinearPCM,
                FormatFlags =
                    AudioFormatFlags.LinearPCMIsSignedInteger |
                    AudioFormatFlags.LinearPCMIsPacked,

                SampleRate       = sampleRate,
                BitsPerChannel   = BitsPerChannel,
                ChannelsPerFrame = Channels,
                BytesPerFrame    = (BitsPerChannel / 8) * Channels,
                FramesPerPacket  = 1,
                Reserved         = 0,
            };

            audioStreamDescription.BytesPerPacket = audioStreamDescription.BytesPerFrame * audioStreamDescription.FramesPerPacket;

            inputQueue = CreateInputQueue(audioStreamDescription);
        }
Example #14
        static void CalculateBytesForTime(AudioStreamBasicDescription desc, int maxPacketSize, double seconds, out int bufferSize, out int packetCount)
        {
            const int maxBufferSize = 0x10000;
            const int minBufferSize = 0x4000;

            if (desc.FramesPerPacket > 0)
            {
                bufferSize = (int)(desc.SampleRate / desc.FramesPerPacket * seconds * maxPacketSize);
            }
            else
            {
                bufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize;
            }

            if (bufferSize > maxBufferSize && bufferSize > maxPacketSize)
            {
                bufferSize = maxBufferSize;
            }
            else if (bufferSize < minBufferSize)
            {
                bufferSize = minBufferSize;
            }

            packetCount = bufferSize / maxPacketSize;
        }
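A hypothetical call illustrating the clamping above, for 0.5 s of 16-bit mono PCM at 44.1 kHz (FramesPerPacket = 1, so maxPacketSize = 2 bytes):

        static void CalculateBytesForTimeExample()
        {
            int bufferSize, packetCount;
            CalculateBytesForTime(AudioStreamBasicDescription.CreateLinearPCM(44100, 1, 16, false), 2, 0.5, out bufferSize, out packetCount);
            // 44100 / 1 * 0.5 * 2 = 44100 bytes, which already lies inside
            // [minBufferSize, maxBufferSize] = [0x4000, 0x10000],
            // so bufferSize == 44100 and packetCount == 44100 / 2 == 22050.
        }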
Example #15
        public void ProcessingTap()
        {
            if (!TestRuntime.CheckSystemAndSDKVersion(6, 0))
            {
                Assert.Inconclusive("AudioQueueProcessingTapNew requires iOS 6");
            }

            var aq = new InputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());
            AudioQueueStatus ret;
            bool             called = false;

            using (var tap = aq.CreateProcessingTap(
                       delegate(AudioQueueProcessingTap audioQueueTap, uint numberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data) {
                called = true;
                return(33);
            }, AudioQueueProcessingTapFlags.PreEffects, out ret)) {
                Assert.AreEqual(AudioQueueStatus.Ok, ret, "#1");

                unsafe {
                    AudioQueueBuffer *buffer;
                    Assert.AreEqual(AudioQueueStatus.Ok, aq.AllocateBuffer(5000, out buffer), "#2");
                    Assert.AreEqual(AudioQueueStatus.Ok, aq.EnqueueBuffer(buffer), "#3");
                    //Assert.AreEqual (AudioQueueStatus.Ok, aq.Start (), "#4");
                }
            }

            //Assert.That (called, Is.True, "#10");
        }
Example #16
        public static ExtAudioFile CreateWithUrl(CFUrl url,
                                                 AudioFileType fileType,
                                                 AudioStreamBasicDescription inStreamDesc,
                                                 //AudioChannelLayout channelLayout,
                                                 AudioFileFlags flag)
        {
            if (url == null)
            {
                throw new ArgumentNullException("url");
            }

            int    err;
            IntPtr ptr = new IntPtr();

            unsafe {
                err = ExtAudioFileCreateWithUrl(url.Handle, fileType, ref inStreamDesc, IntPtr.Zero, (uint)flag,
                                                (IntPtr)(&ptr));
            }
            if (err != 0)
            {
                throw new ArgumentException(String.Format("Error code:{0}", err));
            }
            if (ptr == IntPtr.Zero)
            {
                throw new InvalidOperationException("Can not get object instance");
            }

            return(new ExtAudioFile(ptr));
        }
Example #17
        public void CreateLinearPCM()
        {
            var pcm = AudioStreamBasicDescription.CreateLinearPCM();

            Assert.IsNotNull(pcm.FormatName);
            Assert.IsFalse(pcm.IsVariableBitrate);
        }
Example #18
        /// <summary>
        /// Plays a single note. Separate from the rest of the song playing code
        /// </summary>
        public static void PlayNote(Instrument.Note note)
        {
            lock (syncObj)
            {
#if __ANDROID__
                if (playingTrack != null)
                {
                    //We use pause instead of stop because pause stops playing immediately
                    playingTrack.Pause();
                    playingTrack.Release();
                    playingTrack.Dispose();
                }
#endif
#if __IOS__
                if (audioQueue != null)
                {
                    //Pass true to stop immediately
                    audioQueue.Stop(true);
                    audioQueue.Dispose();
                }
#endif

#if __ANDROID__
                playingTrack = new AudioTrack(
                    // Stream type
                    Android.Media.Stream.Music,
                    // Frequency
                    SongPlayer.PLAYBACK_RATE,
                    // Mono or stereo
                    ChannelOut.Mono,
                    // Audio encoding
                    Android.Media.Encoding.Pcm16bit,
                    // Length of the audio clip in bytes
                    (note.data.Length * 2),
                    // Mode. Stream or static.
                    AudioTrackMode.Static);

                playingTrack.Write(note.data, 0, note.data.Length);
                playingTrack.Play();
#endif
#if __IOS__
                audioQueue = new OutputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM(SongPlayer.PLAYBACK_RATE, 1, 16, false));
                unsafe
                {
                    AudioQueueBuffer *buffer;
                    audioQueue.AllocateBuffer(note.data.Length * 2, out buffer);

                    fixed(short *beatData = note.data)
                    {
                        buffer->CopyToAudioData((IntPtr)beatData, note.data.Length * 2);
                    }

                    audioQueue.EnqueueBuffer((IntPtr)buffer, note.data.Length * 2, null);
                }

                audioQueue.Start();
#endif
            }
        }
Example #19
        public void Properties()
        {
            var b = new InputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            b.HardwareCodecPolicy = AudioQueueHardwareCodecPolicy.PreferHardware;

            Assert.That(b.HardwareCodecPolicy, Is.EqualTo(AudioQueueHardwareCodecPolicy.PreferHardware), "#1");
        }
Example #20
        static void PrepareProxy(IntPtr tap, IntPtr maxFrames, ref AudioStreamBasicDescription processingFormat)
        {
            MTAudioProcessingTap apt;

            lock (handles)
                apt = handles [tap];
            apt.callbacks.Prepare(apt, (nint)maxFrames, ref processingFormat);
        }
Example #21
		public override bool FinishedLaunching(UIApplication app, NSDictionary options)
		{
			//
			// Setup audio system
			//
			AudioSession.Initialize ();
			AudioSession.Category = AudioSessionCategory.MediaPlayback;


			// 
			// Format description, we generate LinearPCM as short integers
			//
			sampleRate = AudioSession.CurrentHardwareSampleRate;
			var format = new AudioStreamBasicDescription () {
				SampleRate = sampleRate,
				Format = AudioFormatType.LinearPCM,
				FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
				BitsPerChannel = 16,
				ChannelsPerFrame = 1,
				BytesPerFrame = 2,
				BytesPerPacket = 2, 
				FramesPerPacket = 1,
			};

			// 
			// Create an output queue
			//
			var queue = new OutputAudioQueue (format);
			var bufferByteSize = (sampleRate > 16000)? 2176 : 512; // 40.5 Hz : 31.25 Hz 

			// 
			// Create three buffers, generate a tone, and output the tones
			//
			var buffers = new AudioQueueBuffer* [numBuffers];
			for (int i = 0; i < numBuffers; i++){
				queue.AllocateBuffer (bufferByteSize, out buffers [i]);
				GenerateTone (buffers [i]);
				queue.EnqueueBuffer (buffers [i], null);
			}

			//
			// Output callback: invoked when the audio system is done with the
			// buffer, this implementation merely recycles it.
			//
			queue.OutputCompleted += (object sender, OutputCompletedEventArgs e) => {
				if (alternate){
					outputWaveForm +=1;
					if (outputWaveForm > WaveForm.Square)
						outputWaveForm = WaveForm.Sine;
					GenerateTone (e.UnsafeBuffer);
				}
				queue.EnqueueBuffer (e.UnsafeBuffer, null);
			};

			queue.Start ();
			return true;
		}
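GenerateTone and the waveform state are not included in this example. A simplified, sine-only sketch, assuming the sampleRate field above and a hypothetical double phase field (the original switches on outputWaveForm for other waveforms):

		unsafe void GenerateTone (AudioQueueBuffer *buffer)
		{
			// Fill the whole buffer with 16-bit samples of a 440 Hz sine tone.
			int frames = (int) buffer->AudioDataBytesCapacity / 2;
			short *data = (short *) buffer->AudioData;
			for (int i = 0; i < frames; i++)
				data [i] = (short) (short.MaxValue * Math.Sin (2 * Math.PI * 440 * phase++ / sampleRate));
			buffer->AudioDataByteSize = (uint) (frames * 2);
		}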
Example #22
 public bool Start(IAudioStream stream)
 {
     this.stream = stream;
     this.stream.OnBroadcast += HandleOnBroadcast;
     this.description = new AudioStreamBasicDescription (AudioFormatType.LinearPCM) 
     {
         BitsPerChannel = stream.BitsPerSample / stream.ChannelCount,
     };
     return true; // assumption: the original snippet omits the method's return statement
 }
Example #23
        public static ExtAudioFile? CreateWithUrl(CFUrl url, AudioFileType fileType, AudioStreamBasicDescription inStreamDesc, AudioFileFlags flag, out ExtAudioFileError error)
        {
            if (url is null)
            {
                ObjCRuntime.ThrowHelper.ThrowArgumentNullException(nameof(url));
            }

            return(CreateWithUrl(url.Handle, fileType, inStreamDesc, flag, out error));
        }
Example #24
        unsafe void TapPrepare(MTAudioProcessingTap tap, nint maxFrames, ref AudioStreamBasicDescription processingFormat)
        {
            // Store sample rate for CenterFrequency property
            context.SampleRate = processingFormat.SampleRate;

            /* Verify processing format (this is not needed for Audio Unit, but for RMS calculation). */
            VerifyProcessingFormat(processingFormat);

            if (processingFormat.FormatFlags.HasFlag(AudioFormatFlags.IsNonInterleaved))
            {
                context.IsNonInterleaved = true;
            }

            /* Create bandpass filter Audio Unit */

            var audioComponentDescription = AudioComponentDescription.CreateEffect(AudioTypeEffect.BandPassFilter);
            // TODO: https://trello.com/c/GZUGUyH0
            var audioComponent = AudioComponent.FindNextComponent(null, ref audioComponentDescription);

            if (audioComponent == null)
            {
                return;
            }

            AudioUnitStatus error = AudioUnitStatus.NoError;

            AudioUnit.AudioUnit audioUnit = audioComponent.CreateAudioUnit();
            try {
                audioUnit.SetAudioFormat(processingFormat, AudioUnitScopeType.Input);
                audioUnit.SetAudioFormat(processingFormat, AudioUnitScopeType.Output);
            } catch (AudioUnitException) {
                error = AudioUnitStatus.FormatNotSupported;
            }

            if (error == AudioUnitStatus.NoError)
            {
                error = audioUnit.SetRenderCallback(Render, AudioUnitScopeType.Input);
            }

            if (error == AudioUnitStatus.NoError)
            {
                error = audioUnit.SetMaximumFramesPerSlice((uint)maxFrames, AudioUnitScopeType.Global);
            }

            if (error == AudioUnitStatus.NoError)
            {
                error = (AudioUnitStatus)audioUnit.Initialize();
            }

            if (error != AudioUnitStatus.NoError)
            {
                audioUnit.Dispose();
                audioUnit = null;
            }

            context.AudioUnit = audioUnit;
        }
Example #25
        public void ChannelAssignments()
        {
            var aq = new OutputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            Assert.AreEqual(AudioQueueStatus.Ok, aq.SetChannelAssignments(
                                new AudioQueueChannelAssignment("11", 0),
                                new AudioQueueChannelAssignment("22", 1)
                                ));
        }
Example #26
        public static ExtAudioFile CreateWithUrl(CFUrl url, AudioFileType fileType, AudioStreamBasicDescription inStreamDesc, AudioFileFlags flag, out ExtAudioFileError error)
        {
            if (url == null)
            {
                throw new ArgumentNullException("url");
            }

            return(CreateWithUrl(url.Handle, fileType, inStreamDesc, flag, out error));
        }
Example #27
 public bool Start(IAudioStream stream)
 {
     this.stream              = stream;
     this.stream.OnBroadcast += HandleOnBroadcast;
     this.description         = new AudioStreamBasicDescription(AudioFormatType.LinearPCM)
     {
         BitsPerChannel = stream.BitsPerSample / stream.ChannelCount,
     };
     return true; // assumption: the original snippet omits the method's return statement
 }
Example #28
        public void GetFormatInfo()
        {
            var asbd = new AudioStreamBasicDescription(AudioFormatType.MPEG4AAC);

            Assert.AreEqual(AudioFormatError.None, AudioStreamBasicDescription.GetFormatInfo(ref asbd));

            Assert.IsNotNull(AudioStreamBasicDescription.GetAvailableEncodeChannelLayoutTags(asbd));
            Assert.IsNotNull(AudioStreamBasicDescription.GetAvailableEncodeNumberChannels(asbd));
            Assert.IsNotNull(asbd.GetOutputFormatList());
        }
Example #29
        public void Properties()
        {
            TestRuntime.RequestMicrophonePermission();

            var b = new InputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            b.HardwareCodecPolicy = AudioQueueHardwareCodecPolicy.PreferHardware;

            Assert.That(b.HardwareCodecPolicy, Is.EqualTo(AudioQueueHardwareCodecPolicy.PreferHardware), "#1");
        }
Example #30
        public void GetFirstPlayableFormat()
        {
            var asbd = new AudioStreamBasicDescription(AudioFormatType.MPEG4AAC);

            AudioStreamBasicDescription.GetFormatInfo(ref asbd);

            var ofl = asbd.GetOutputFormatList();

            Assert.IsNotNull(AudioFormat.GetFirstPlayableFormat(ofl));
        }
Example #31
        void prepareAudioUnit()
        {
            // Creating AudioComponentDescription instance of RemoteIO Audio Unit
            AudioComponentDescription cd = new AudioComponentDescription()
            {
                componentType         = AudioComponentDescription.AudioComponentType.kAudioUnitType_Output,
                componentSubType      = AudioComponentDescription.AudioComponentSubType.kAudioUnitSubType_RemoteIO,
                componentManufacturer = AudioComponentDescription.AudioComponentManufacturerType.kAudioUnitManufacturer_Apple,
                componentFlags        = 0,
                componentFlagsMask    = 0
            };

            // Getting AudioComponent from the description
            _component = AudioComponent.FindComponent(cd);

            // Getting AudioUnit
            _audioUnit = AudioUnit.CreateInstance(_component);

            // setting AudioStreamBasicDescription
            int AudioUnitSampleTypeSize;

            if (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR)
            {
                AudioUnitSampleTypeSize = sizeof(float);
            }
            else
            {
                AudioUnitSampleTypeSize = sizeof(int);
            }
            AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate = _sampleRate,
                Format     = AudioFormatType.LinearPCM,
                //kAudioFormatFlagsAudioUnitCanonical = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved | (kAudioUnitSampleFractionBits << kLinearPCMFormatFlagsSampleFractionShift),
                FormatFlags      = (AudioFormatFlags)((int)AudioFormatFlags.IsSignedInteger | (int)AudioFormatFlags.IsPacked | (int)AudioFormatFlags.IsNonInterleaved | (int)(kAudioUnitSampleFractionBits << (int)AudioFormatFlags.LinearPCMSampleFractionShift)),
                ChannelsPerFrame = 2,
                BytesPerPacket   = AudioUnitSampleTypeSize,
                BytesPerFrame    = AudioUnitSampleTypeSize,
                FramesPerPacket  = 1,
                BitsPerChannel   = 8 * AudioUnitSampleTypeSize,
                Reserved         = 0
            };

            _audioUnit.SetAudioFormat(audioFormat, AudioUnit.AudioUnitScopeType.kAudioUnitScope_Input, 0);

            // setting callback

            /*
             * if (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR)
             *  _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs>(simulator_callback);
             * else
             *  _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs>(device_callback);
             * */
            _audioUnit.RenderCallback += new EventHandler <AudioUnitEventArgs>(device_callback);
        }
Example #32
        void ResetFormat(int numChannels)
        {
            var format = new AudioStreamBasicDescription(AudioFormatType.LinearPCM);

            format.FormatFlags      = AudioFormatFlags.IsSignedInteger | AudioFormatFlags.IsPacked;
            format.BitsPerChannel   = 8 * sizeof(short);
            format.ChannelsPerFrame = numChannels;
            format.FramesPerPacket  = 1;
            format.BytesPerFrame    = format.BytesPerPacket = numChannels * sizeof(short);
            _format = format;
        }
Example #33
        void prepareAudioUnit()
        {
            // AudioSession
            AudioSession.Initialize();
            AudioSession.SetActive(true);
            AudioSession.Category = AudioSessionCategory.PlayAndRecord;
            AudioSession.PreferredHardwareIOBufferDuration = 0.01f;            

            // Getting a RemoteIO AudioUnit AudioComponent
            _component = AudioComponent.FindComponent(AudioTypeOutput.Remote);

            // Getting AudioUnit
            _audioUnit = AudioUnit.CreateInstance(_component);

            // turning on microphone
            _audioUnit.SetEnableIO(true,
                AudioUnitScopeType.Input,
                1 // Remote Input
                );

            // setting AudioStreamBasicDescription
            int AudioUnitSampleTypeSize = (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR) ? sizeof(float) : sizeof(uint);
            AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate = _sampleRate,
                Format = AudioFormatType.LinearPCM,
                //kAudioFormatFlagsAudioUnitCanonical = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved | (kAudioUnitSampleFractionBits << kLinearPCMFormatFlagsSampleFractionShift),
                FormatFlags = (AudioFormatFlags)((int)AudioFormatFlags.IsSignedInteger | (int)AudioFormatFlags.IsPacked | (int)AudioFormatFlags.IsNonInterleaved | (int)(kAudioUnitSampleFractionBits << (int)AudioFormatFlags.LinearPCMSampleFractionShift)),
                ChannelsPerFrame = 2,
                BytesPerPacket = AudioUnitSampleTypeSize,
                BytesPerFrame = AudioUnitSampleTypeSize,
                FramesPerPacket = 1,
                BitsPerChannel = 8 * AudioUnitSampleTypeSize,
                Reserved = 0
            };
            _audioUnit.SetAudioFormat(audioFormat, 
                AudioUnitScopeType.Input, 
                0 // Remote output
                );
            _audioUnit.SetAudioFormat(audioFormat, 
                AudioUnitScopeType.Output, 
                1 // Remote input
                );

            // setting callback
            if (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR)
                _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs>(simulator_callback);
            else
                _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs>(device_callback);

            // initialize
            _audioUnit.Initialize();
        }
Example #34
        /// <summary>
        /// Queues up a file to record to the next time the beat is played
        /// </summary>
        /// <param name="fileName">File name.</param>
        public void QueueFileRecording(string fileName)
        {
            _file = ExtAudioFile.CreateWithUrl(
                new Foundation.NSUrl(fileName, false),
                AudioFileType.WAVE,
                AudioStreamBasicDescription.CreateLinearPCM(),
                AudioFileFlags.EraseFlags,
                out ExtAudioFileError e
                );

            _fileRecordingQueued = true;
        }
Example #35
        private InputAudioQueue CreateInputQueue(AudioStreamBasicDescription streamDescription)
        {
            var queue = new InputAudioQueue(streamDescription);

            for (int count = 0; count < CountAudioBuffers; count++)
            {
                IntPtr bufferPointer;
                queue.AllocateBuffer(AudioBufferLength, out bufferPointer);
                queue.EnqueueBuffer(bufferPointer, AudioBufferLength, null);
            }
            queue.InputCompleted += HandleInputCompleted;
            return(queue);
        }
Example #36
 public static _AudioConverter CreateInstance(AudioStreamBasicDescription srcFormat, AudioStreamBasicDescription destFormat)
 {
     _AudioConverter inst = new _AudioConverter();
     int err_code;
     unsafe{
         IntPtr ptr = inst._audioConverter;
         IntPtr pptr =(IntPtr)(&ptr);
         err_code = AudioConverterNew(ref srcFormat, ref destFormat, pptr);
     }
     if (err_code != 0)
     {
         throw new ArgumentException(String.Format("Error code:{0}", err_code));
     }
     return inst;
 }
Example #37
        internal SoundEffect(string assetName, bool isMusic)
        {
            // use of CFUrl.FromFile is necessary in case assetName contains spaces (which must be url-encoded)
            audioFile = AudioFile.Open(CFUrl.FromFile(assetName), AudioFilePermission.Read, 0);

            if(audioFile == null)
                throw new Content.ContentLoadException("Could not open sound effect " + assetName);

            description = audioFile.StreamBasicDescription;
            DeriveBufferSize(0.5);
            isVBR = (description.BytesPerPacket == 0 || description.FramesPerPacket == 0);

            if(!isMusic)
                firstInstance = new SoundEffectInstance(this, false);
        }
Example #38
 public CoreAudioController()
 {
     var ad = new AudioStreamBasicDescription () {
         SampleRate = 44100.0,
         Format = AudioFormatType.LinearPCM,
         FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.IsPacked,
         FramesPerPacket = 1,
         ChannelsPerFrame = 1,
         BitsPerChannel = 16,
         BytesPerPacket = 2,
         BytesPerFrame = 2,
         Reserved = 0};
     audioq = new OutputAudioQueue (ad);
     audioq.OutputCompleted += delegate(object sender, OutputCompletedEventArgs e) {
         EnqueueNextBuffer (e.IntPtrBuffer);
     };
     audioq.AllocateBuffer (synth_buffer.Length * ad.BytesPerPacket, out audio_buffer);
 }
Example #39
        void prepareAUGraph()
        {
            // Creating audio graph instance
            _auGraph = new AUGraph ();
			_auGraph.Open ();

            // getting audio node and audio unit
            var cd = AudioComponentDescription.CreateOutput (AudioTypeOutput.Remote);
            int remoteIONode = _auGraph.AddNode(cd);
            AudioUnit remoteIOUnit = _auGraph.GetNodeInfo(remoteIONode);

            // turning on microphone    
            
            remoteIOUnit.SetEnableIO(true,                
                AudioUnitScopeType.Input,
                1 // remote input                
                );

            // audio canonical format
            AudioStreamBasicDescription audioFormat = CanonicalASBD(44100, 1);
            remoteIOUnit.SetAudioFormat(audioFormat,
                AudioUnitScopeType.Output, // output bus of Remote input
                1 // Remote input
                );
            remoteIOUnit.SetAudioFormat(audioFormat,
                 AudioUnitScopeType.Input,
                 0 // Remote output,
                 );

            // Connecting Remote Input to Remote Output
            _auGraph.ConnnectNodeInput(
                remoteIONode, 1,
                remoteIONode, 0);

            // getting output audio format
            _audioUnitOutputFormat = remoteIOUnit.GetAudioFormat(
                AudioUnitScopeType.Output,  // Remote output bus
                0 // Remote output
                );
            
            _auGraph.RenderCallback += new EventHandler<AudioGraphEventArgs>(_auGraph_RenderCallback);
            // graph initialization
            _auGraph.Initialize();
        }
Example #40
        void StreamPropertyListenerProc(object sender, PropertyFoundEventArgs args)
        {
            if (args.Property == AudioFileStreamProperty.DataFormat) {
                dataFormat = audioFileStream.DataFormat;
                return;
            }

            if (args.Property != AudioFileStreamProperty.ReadyToProducePackets)
                return;

            if (audioQueue != null) {
                // TODO: Dispose
                //throw new NotImplementedException ();
            }

            audioQueue = new OutputAudioQueue (dataFormat);
            audioQueue.VolumeRampTime = 2.0f;
            audioQueue.OutputCompleted += HandleOutputCompleted;
        }
Example #41
        public IOSAudioProcessor()
        {
            var inputComponent = AudioComponent.FindNextComponent(
                null,
                new AudioComponentDescription
                {
                    ComponentFlags = 0,
                    ComponentFlagsMask = 0,
                    ComponentManufacturer = AudioComponentManufacturerType.Apple,
                    ComponentSubType = (int)AudioTypeOutput.Remote,
                    ComponentType = AudioComponentType.Output
                });

            recorder = inputComponent.CreateAudioUnit();
            recorder.SetEnableIO(true, AudioUnitScopeType.Input, inputBus);
            recorder.SetEnableIO(false, AudioUnitScopeType.Output, outputBus);

            var audioFormat = new AudioStreamBasicDescription
                {
                    SampleRate = StudentDemo.Globals.SAMPLERATE,
                    Format = AudioFormatType.LinearPCM,
                    FormatFlags = AudioFormatFlags.IsSignedInteger | AudioFormatFlags.IsPacked,
                    FramesPerPacket = 1,
                    ChannelsPerFrame = 1,
                    BitsPerChannel = 16,
                    BytesPerPacket = 2,
                    BytesPerFrame = 2
                };

            recorder.SetAudioFormat(audioFormat, AudioUnitScopeType.Output, inputBus);
            recorder.SetAudioFormat(audioFormat, AudioUnitScopeType.Input, outputBus);

            recorder.SetInputCallback(AudioInputCallBack, AudioUnitScopeType.Global, inputBus);

            // TODO: Disable buffers (requires interop)
            aBuffer = new AudioBuffer
                {
                    NumberChannels = 1,
                    DataByteSize = 512 * 2,
                    Data = System.Runtime.InteropServices.Marshal.AllocHGlobal(512 * 2)
                };
        }
Example #42
        void prepareExtAudioFile()
        {
            // Opening Audio File
            _extAudioFile = ExtAudioFile.OpenURL(_url);

            // Getting file data format
            _srcFormat = _extAudioFile.FileDataFormat;

            // Setting the channel count of the output format to match the input format
            _dstFormat = AudioUnitUtils.AUCanonicalASBD(_sampleRate, _srcFormat.ChannelsPerFrame);

            // setting the reading format to the audio unit canonical format
            _extAudioFile.ClientDataFormat = _dstFormat;

            // getting total frame
            _totalFrames = _extAudioFile.FileLengthFrames;
            
            // Seeking to the file head
            _extAudioFile.Seek(0);
        }
Example #43
 public static AudioStreamBasicDescription AUCanonicalASBD(double sampleRate, int channel)
 {
     // setting AudioStreamBasicDescription
     //int AudioUnitSampleTypeSize = (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR) ? sizeof(float) : sizeof(int);
     int AudioUnitSampleTypeSize = 4;
     AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
     {
         SampleRate = sampleRate,
         Format = AudioFormatType.LinearPCM,
         //kAudioFormatFlagsAudioUnitCanonical = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved | (kAudioUnitSampleFractionBits << kLinearPCMFormatFlagsSampleFractionShift),
         FormatFlags      = (AudioFormatFlags)((int)AudioFormatFlags.IsSignedInteger | (int)AudioFormatFlags.IsPacked | (int)AudioFormatFlags.IsNonInterleaved | (int)(kAudioUnitSampleFractionBits << (int)AudioFormatFlags.LinearPCMSampleFractionShift)),
         ChannelsPerFrame = channel,
         BytesPerPacket   = AudioUnitSampleTypeSize,
         BytesPerFrame    = AudioUnitSampleTypeSize,
         FramesPerPacket  = 1,
         BitsPerChannel   = 8 * AudioUnitSampleTypeSize,
         Reserved = 0
     };
     return audioFormat;
 }
Example #44
		/// <summary>
		/// Starts the recording.
		/// </summary>
		/// <param name="rate">The rate.</param>
		private void StartRecording(int rate)
		{
			if (Active)
			{
				Clear();
			}

			SampleRate = rate;

			var audioFormat = new AudioStreamBasicDescription
								  {
									  SampleRate = SampleRate,
									  Format = AudioFormatType.LinearPCM,
									  FormatFlags =
										  AudioFormatFlags.LinearPCMIsSignedInteger
										  | AudioFormatFlags.LinearPCMIsPacked,
									  FramesPerPacket = 1,
									  ChannelsPerFrame = 1,
									  BitsPerChannel = BitsPerSample,
									  BytesPerPacket = 2,
									  BytesPerFrame = 2,
									  Reserved = 0
								  };

			_audioQueue = new InputAudioQueue(audioFormat);
			_audioQueue.InputCompleted += QueueInputCompleted;

			var bufferByteSize = _bufferSize * audioFormat.BytesPerPacket;

			IntPtr bufferPtr;
			for (var index = 0; index < 3; index++)
			{
				_audioQueue.AllocateBufferWithPacketDescriptors(bufferByteSize, _bufferSize, out bufferPtr);
				_audioQueue.EnqueueBuffer(bufferPtr, bufferByteSize, null);
			}

			_audioQueue.Start();
		}
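The matching teardown is not shown in this example; a minimal sketch using the same _audioQueue field might be:

		private void StopRecording()
		{
			if (_audioQueue == null)
			{
				return;
			}

			// Pass true to stop immediately instead of letting queued buffers drain.
			_audioQueue.Stop(true);
			_audioQueue.InputCompleted -= QueueInputCompleted;
			_audioQueue.Dispose();
			_audioQueue = null;
		}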
Example #45
 void HandleOnBroadcast (object sender, SimplyMobile.Core.EventArgs<byte[]> e)
 {
     var desc = new AudioStreamBasicDescription (AudioFormatType.LinearPCM);
 }
Example #46
		void StreamPropertyListenerProc (object sender, PropertyFoundEventArgs args)
		{
			if (args.Property == AudioFileStreamProperty.DataFormat) {
				dataFormat = audioFileStream.DataFormat;
				return;
			}

			if (args.Property != AudioFileStreamProperty.ReadyToProducePackets) 
				return;

			if (audioQueue != null) {
				// TODO: Dispose
				throw new NotImplementedException ();
			}

			audioQueue = new OutputAudioQueue (dataFormat);
			audioQueue.OutputCompleted += HandleOutputCompleted;

			AudioQueueStatus status;
			aqTap = audioQueue.CreateProcessingTap (TapProc, AudioQueueProcessingTapFlags.PreEffects, out status);
			if (status != AudioQueueStatus.Ok)
				throw new ApplicationException ("Could not create AQ tap");

			// create an augraph to process in the tap. needs to convert from tapFormat to effect format and back
			/* note: this is invalidname's recipe to do an in-place effect when a format conversion is needed
			before and after the effect, usually because effects want floats, and everything else in iOS
			core audio works with ints (or, in rare cases, fixed-point).
			the graph looks like this:
			[render-callback] -> [converter] -> [effect] -> [converter] -> [generic-output]
			prior to calling AudioUnitRender() on generic-output, the tap proc copies the ioData to a pointer that render-callback
			knows about, and NULLs the ioData provided to AudioUnitRender(). the NULL tells generic-output to
			pull from its upstream units (ie, the augraph), and copying off the ioData pointer allows the
			render-callback	to provide it to the front of the stream. in some locales, this kind of shell game
			is described as "batshit crazy", but it seems to work pretty well in practice.
			*/

			auGraph = new AUGraph ();
			auGraph.Open ();
			var effectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.NewTimePitch));
			effectUnit = auGraph.GetNodeInfo (effectNode);

			var convertToEffectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.AU));
			var convertToEffectUnit = auGraph.GetNodeInfo (convertToEffectNode);

			var convertFromEffectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.AU));
			var convertFromEffectUnit = auGraph.GetNodeInfo (convertFromEffectNode);

			var genericOutputNode = auGraph.AddNode (AudioComponentDescription.CreateOutput (AudioTypeOutput.Generic));
			genericOutputUnit = auGraph.GetNodeInfo (genericOutputNode);

			// set the format conversions throughout the graph
			var effectFormat = effectUnit.GetAudioFormat (AudioUnitScopeType.Output);
			var tapFormat = aqTap.ProcessingFormat;

			convertToEffectUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Input);
			convertToEffectUnit.SetAudioFormat (effectFormat, AudioUnitScopeType.Output);

			convertFromEffectUnit.SetAudioFormat (effectFormat, AudioUnitScopeType.Input);
			convertFromEffectUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Output);

			genericOutputUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Input);
			genericOutputUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Output);

			// set maximum fames per slice higher (4096) so we don't get kAudioUnitErr_TooManyFramesToProcess
			const uint maxFramesPerSlice = 4096;
			if (convertToEffectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (effectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (convertFromEffectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();
			if (genericOutputUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
				throw new ApplicationException ();

			// connect the nodes
			auGraph.ConnnectNodeInput (convertToEffectNode, 0, effectNode, 0);
			auGraph.ConnnectNodeInput (effectNode, 0, convertFromEffectNode, 0);
			auGraph.ConnnectNodeInput (convertFromEffectNode, 0, genericOutputNode, 0);

			// set up the callback into the first convert unit
			if (convertToEffectUnit.SetRenderCallback (ConvertInputRenderCallback, AudioUnitScopeType.Global) != AudioUnitStatus.NoError)
				throw new ApplicationException ();

			var res = auGraph.Initialize ();
			if (res != AUGraphError.OK)
				throw new ApplicationException ();
		}
Example #47
        public MonoTouch.AudioToolbox.AudioStreamBasicDescription GetAudioFormat(AudioUnitScopeType scope, uint audioUnitElement)
        {
            MonoTouch.AudioToolbox.AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription();
            uint size = (uint)Marshal.SizeOf(audioFormat);
            
            int err = AudioUnitGetProperty(_audioUnit,
                AudioUnitPropertyIDType.kAudioUnitProperty_StreamFormat,
                scope,
                audioUnitElement,
                ref audioFormat,
                ref size);
            if (err != 0)
                throw new ArgumentException(String.Format("Error code:{0}", err));

            return audioFormat;
        }
Example #48
        void PrepareExtAudioFile()
        {
			extAudioFile = ExtAudioFile.OpenUrl(url);
			CheckValue (extAudioFile, "ExtAudioFile.OpenUrl failed");

			srcFormat = extAudioFile.FileDataFormat;

			// This is how you say, “When you convert the data, this is the format I’d like to receive.”
			// The client data format must be PCM. In other words, you can’t use a single ExtAudioFile to convert between two compressed formats.
            extAudioFile.ClientDataFormat = dstFormat;

            // getting total frame
			TotalFrames = extAudioFile.FileLengthFrames;

            // Allocating AudioBufferList
			buffer = new AudioBuffers(srcFormat.ChannelsPerFrame);
            for (int i = 0; i < buffer.Count; ++i)
            {
                int size = (int)(sizeof(int) * TotalFrames);
                buffer.SetData(i, Marshal.AllocHGlobal(size), size);
            }
			numberOfChannels = srcFormat.ChannelsPerFrame;

            // Reading all frame into the buffer
            ExtAudioFileError status;
            extAudioFile.Read((uint)TotalFrames, buffer, out status);
            if (status != ExtAudioFileError.OK)
                throw new ApplicationException();
        }
Example #49
		void PrepareAudioUnit()
		{
			// All iPhones and iPads have microphones, but early iPod touches did not
			if (!AudioSession.AudioInputAvailable) {
				var noInputAlert = new UIAlertView ("No audio input", "No audio input device is currently attached", null, "Ok");
				noInputAlert.Show ();
				return;
			}

			// Getting AudioComponent Remote output
			audioComponent = AudioComponent.FindComponent(AudioTypeOutput.Remote);
			CheckValue (audioComponent);

			// creating an audio unit instance
			audioUnit = new AudioUnit.AudioUnit(audioComponent);

			AudioUnitStatus status;
			status = audioUnit.SetEnableIO(true, AudioUnitScopeType.Input, 1);
			CheckStatus (status);
			status = audioUnit.SetEnableIO(true, AudioUnitScopeType.Output, 0);
			CheckStatus (status);

			dstFormat = new AudioStreamBasicDescription {
				SampleRate = AudioSession.CurrentHardwareSampleRate,
				Format = AudioFormatType.LinearPCM,
				FormatFlags = AudioFormatFlags.IsSignedInteger | AudioFormatFlags.IsNonInterleaved,
				BytesPerPacket = 4,
				FramesPerPacket = 1,
				BytesPerFrame = 4,
				ChannelsPerFrame = 2,
				BitsPerChannel = 16
			};

			audioUnit.SetAudioFormat(dstFormat, AudioUnitScopeType.Input, 0);
			audioUnit.SetAudioFormat(dstFormat, AudioUnitScopeType.Output, 1);

			status = audioUnit.SetRenderCallback(RenderCallback, AudioUnitScopeType.Input, 0);
			CheckStatus (status);
		}
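The RenderCallback registered above is not part of this snippet. For a straight microphone-to-speaker pass-through it can be as small as the following sketch (an assumed shape, using the audioUnit field above and input element 1):

		AudioUnitStatus RenderCallback (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
		{
			// Pull the captured samples from the input element straight into the
			// output buffers supplied for this render cycle.
			return audioUnit.Render (ref actionFlags, timeStamp, 1, numberFrames, data);
		}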
Example #50
		static void CalculateBytesForTime (AudioStreamBasicDescription desc, int maxPacketSize, double seconds, out int bufferSize, out int packetCount)
		{
			const int maxBufferSize = 0x10000;
			const int minBufferSize = 0x4000;
			
			if (desc.FramesPerPacket > 0) {
				bufferSize = (int) (desc.SampleRate / desc.FramesPerPacket * seconds * maxPacketSize);
			} else {
				bufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize;
			}
			
			if (bufferSize > maxBufferSize && bufferSize > maxPacketSize) {
				bufferSize = maxBufferSize;
			} else if (bufferSize < minBufferSize) {
				bufferSize = minBufferSize;
			}
			
			packetCount = bufferSize / maxPacketSize;
		}
Example #51
        void prepareAUGraph()
        {
            // Creating audio graph instance
            _auGraph = AUGraph.CreateInstance();

            // getting audio node and audio unit
            AudioComponentDescription cd = new AudioComponentDescription()
            {
                componentType = AudioComponentDescription.AudioComponentType.kAudioUnitType_Output,
                componentSubType = AudioComponentDescription.AudioComponentSubType.kAudioUnitSubType_RemoteIO,
                componentManufacturer = AudioComponentDescription.AudioComponentManufacturerType.kAudioUnitManufacturer_Apple,
                componentFlags = 0,
                componentFlagsMask = 0
            };
            int remoteIONode = _auGraph.AddNode(cd);
            AudioUnit remoteIOUnit = _auGraph.GetNodeInfo(remoteIONode);

            // turning on microphone    
            
            remoteIOUnit.SetEnableIO(true,                
                AudioUnit.AudioUnitScopeType.kAudioUnitScope_Input,
                1 // remote input                
                );

            // audio canonical format
            AudioStreamBasicDescription audioFormat = CanonicalASBD(44100, 1);
            remoteIOUnit.SetAudioFormat(audioFormat,
                AudioUnit.AudioUnitScopeType.kAudioUnitScope_Output, // output bus of Remote input
                1 // Remote input
                );
            remoteIOUnit.SetAudioFormat(audioFormat,
                 AudioUnit.AudioUnitScopeType.kAudioUnitScope_Input,
                 0 // Remote output,
                 );

            // Connecting Remote Input to Remote Output
            _auGraph.ConnnectNodeInput(
                remoteIONode, 1,
                remoteIONode, 0);

            // getting output audio format
            _audioUnitOutputFormat = remoteIOUnit.GetAudioFormat(
                AudioUnit.AudioUnitScopeType.kAudioUnitScope_Output,  // Remote output bus
                0 // Remote output
                );
            
            _auGraph.RenderCallback += new EventHandler<AudioGraphEventArgs>(_auGraph_RenderCallback);
            // graph initialization
            _auGraph.Initialize();
        }
Example #52
 static extern int ExtAudioFileCreateWithURL(IntPtr inURL,
     [MarshalAs(UnmanagedType.U4)] AudioFileType inFileType,
     ref AudioStreamBasicDescription inStreamDesc,
     IntPtr inChannelLayout, //AudioChannelLayout inChannelLayout, AudioChannelLayout results in compilation error (error code 134.)
     UInt32 flags,
     IntPtr outExtAudioFile);            
Example #53
 static extern int ExtAudioFileSetProperty(
     IntPtr inExtAudioFile,
     ExtAudioFilePropertyIDType inPropertyID,
     uint ioPropertyDataSize,
     ref AudioStreamBasicDescription outPropertyData);
Example #54
 AudioStreamBasicDescription CanonicalASBD(double sampleRate, int channel)
 {
     // setting AudioStreamBasicDescription
     int AudioUnitSampleTypeSize;
     if (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR)
     {
         AudioUnitSampleTypeSize = sizeof(float);
     }
     else
     {
         AudioUnitSampleTypeSize = sizeof(int);
     }
     AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
     {
         SampleRate = sampleRate,
         Format = AudioFormatType.LinearPCM,
         //    kAudioFormatFlagsCanonical  = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked,
         FormatFlags = (AudioFormatFlags)((int)AudioFormatFlags.IsSignedInteger | (int)AudioFormatFlags.IsPacked),
         ChannelsPerFrame = channel,
         BytesPerPacket = AudioUnitSampleTypeSize * channel,
         BytesPerFrame = AudioUnitSampleTypeSize * channel,
         FramesPerPacket = 1,
         BitsPerChannel = 8 * AudioUnitSampleTypeSize,
         Reserved = 0
     };
     
     return audioFormat;
 }
Example #55
        private void startTalking(UdpClient audioCaller)
        {
            //Stop old recording session

            //Generate new WaveFormat
            //    recorder.WaveFormat = new WaveFormat(16000, 16, 1);
            //    recorder.BufferMilliseconds = 50;
            //    recorder.DataAvailable += SendAudio; //Add event to SendAudio

            //			recorder = new InputAudioQueue (playerFormat);
            //
            //
            //			for (int i = 0; i < BUFFERCOUNT; i++) {
            //				IntPtr aBUff;
            //				//recorder.AllocateBuffer (AUDIOBUFFERSIZE, out aBUff);
            //				byteSize = AUDIOBUFFERSIZE * playerFormat.BytesPerPacket;
            //				recorder.AllocateBufferWithPacketDescriptors (byteSize, AUDIOBUFFERSIZE, out aBUff);
            //				recorder.EnqueueBuffer (aBUff, byteSize, null);
            //				Console.WriteLine ("Buffer allocated, enqueueing");
            //			}

            //New stuffs

            var inputComponent = AudioComponent.FindNextComponent(
                null,
                new AudioComponentDescription
                {
                    ComponentFlags = 0,
                    ComponentFlagsMask = 0,
                    ComponentManufacturer = AudioComponentManufacturerType.Apple,
                    ComponentSubType = (int)AudioTypeOutput.Remote,
                    ComponentType = AudioComponentType.Output
                });

            recorder = inputComponent.CreateAudioUnit();
            recorder.SetEnableIO(true, AudioUnitScopeType.Input, inputBus);
            recorder.SetEnableIO(false, AudioUnitScopeType.Output, outputBus);

            var audioFormat = new AudioStreamBasicDescription
                {
                    SampleRate = Globals.SAMPLERATE,
                    Format = AudioFormatType.LinearPCM,
                    FormatFlags = AudioFormatFlags.IsSignedInteger | AudioFormatFlags.IsPacked,
                    FramesPerPacket = 1,
                    ChannelsPerFrame = 1,
                    BitsPerChannel = 16,
                    BytesPerPacket = 2,
                    BytesPerFrame = 2
                };

            recorder.SetAudioFormat(audioFormat, AudioUnitScopeType.Output, inputBus);
            recorder.SetAudioFormat(audioFormat, AudioUnitScopeType.Input, outputBus);

            recorder.SetInputCallback(AudioInputCallBack, AudioUnitScopeType.Global, inputBus);

            // TODO: Disable buffers (requires interop)
            aBuffer = new AudioBuffer
                {
                    NumberChannels = 1,
                    DataByteSize = 512 * 2,
                    Data = System.Runtime.InteropServices.Marshal.AllocHGlobal(512 * 2)
                };
            isTalking = true;
            //recorder.InputCompleted += SendAudio;
            //recorder.Start ();

            recorder.Initialize ();
            recorder.Start ();
        }
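startTalking registers AudioInputCallBack, but the callback itself is not part of this listing. A minimal sketch of what such a handler might look like, assuming the Xamarin.iOS InputDelegate signature and a hypothetical udpSender field holding the UdpClient passed to startTalking; neither is confirmed by the original code:

        // Sketch only: render the captured samples into aBuffer and ship them over UDP.
        AudioUnitStatus AudioInputCallBack (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioUnit audioUnit)
        {
            // assumes the session's buffer duration keeps numberFrames within aBuffer's 512-sample capacity
            var buffers = new AudioBuffers (1);
            buffers [0] = aBuffer;

            var status = audioUnit.Render (ref actionFlags, timeStamp, busNumber, numberFrames, buffers);
            if (status != AudioUnitStatus.OK)
                return status;

            if (isTalking)
            {
                // copy this cycle's 16-bit mono PCM out of the unmanaged buffer and send it to the peer
                var packet = new byte [(int) numberFrames * 2]; // 2 bytes per frame
                System.Runtime.InteropServices.Marshal.Copy (aBuffer.Data, packet, 0, packet.Length);
                udpSender.Send (packet, packet.Length); // udpSender: assumed field, e.g. kept from the audioCaller parameter
            }

            return AudioUnitStatus.OK;
        }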
Example #56
 public static ExtAudioFile CreateWithURL(MonoTouch.CoreFoundation.CFUrl url,
     AudioFileType fileType, 
     AudioStreamBasicDescription inStreamDesc, 
     //AudioChannelLayout channelLayout, 
     AudioFileFlags flag)
 {             
     int err;
     IntPtr ptr = new IntPtr();
     unsafe {                
         err = ExtAudioFileCreateWithURL(url.Handle, fileType, ref inStreamDesc, IntPtr.Zero, (uint)flag,
             (IntPtr)(&ptr));
     }            
     if (err != 0)
     {
         throw new ArgumentException(String.Format("Error code:{0}", err));
     }
     if (ptr == IntPtr.Zero)
     {
         throw new InvalidOperationException("Can not get object instance");
     }
     
     return new ExtAudioFile(ptr);         
 }
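A hypothetical call of the wrapper above, pairing it with the CanonicalASBD helper from the same source to create an empty CAF file; the class name, destination path, and format choices are assumptions for illustration:

 // Hypothetical usage of CreateWithURL (path and format choices are assumptions).
 var dstFormat = CanonicalASBD (44100, 2);
 var extFile = ExtAudioFile.CreateWithURL (
     MonoTouch.CoreFoundation.CFUrl.FromFile ("recording.caf"),
     AudioFileType.CAF,
     dstFormat,
     AudioFileFlags.EraseFlags);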
        void prepareExtAudioFile()
        {
            // Opening Audio File
            _extAudioFile = ExtAudioFile.OpenUrl(_url);

            // Getting file data format
            _srcFormat = _extAudioFile.FileDataFormat;

            // Set the output format's channel count to match the input format
            _dstFormat = AudioUnitUtils.AUCanonicalASBD(_sampleRate, _srcFormat.ChannelsPerFrame);

            // set the reading (client) format to the audio unit canonical format
            _extAudioFile.ClientDataFormat = _dstFormat;

            // get the total frame count
            _totalFrames = _extAudioFile.FileLengthFrames;

            // Allocate the AudioBufferList
            _buffer = new MutableAudioBufferList(_srcFormat.ChannelsPerFrame, (int) (sizeof(uint) * _totalFrames));
            _numberOfChannels = _srcFormat.ChannelsPerFrame;

            // Reading all frame into the buffer
            _extAudioFile.Read((int) _totalFrames, _buffer);
        }
		bool DoConvertFile (CFUrl sourceURL, NSUrl destinationURL, AudioFormatType outputFormat, double outputSampleRate)
		{
			AudioStreamBasicDescription dstFormat = new AudioStreamBasicDescription ();

			// in this sample we should never be on the main thread here
			Debug.Assert (!NSThread.IsMain);

			// transition thread state to State::Running before continuing
			AppDelegate.ThreadStateSetRunning ();
			
			Debug.WriteLine ("DoConvertFile");

			// get the source file
			var sourceFile = AudioFile.Open (sourceURL, AudioFilePermission.Read);
			
			// get the source data format
			var srcFormat = (AudioStreamBasicDescription)sourceFile.DataFormat;

			// setup the output file format
			dstFormat.SampleRate = (outputSampleRate == 0 ? srcFormat.SampleRate : outputSampleRate); // set sample rate
			if (outputFormat == AudioFormatType.LinearPCM) {
				// if the output format is PCM, create a 16-bit int PCM file format description as an example
				dstFormat.Format = outputFormat;
				dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
				dstFormat.BitsPerChannel = 16;
				dstFormat.BytesPerPacket = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
				dstFormat.FramesPerPacket = 1;
				dstFormat.FormatFlags = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
			} else {
				// compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
				dstFormat.Format = outputFormat;
				dstFormat.ChannelsPerFrame = (outputFormat == AudioFormatType.iLBC ? 1 : srcFormat.ChannelsPerFrame); // for iLBC num channels must be 1
				
				// use AudioFormat API to fill out the rest of the description
				var fie = AudioStreamBasicDescription.GetFormatInfo (ref dstFormat);
				if (fie != AudioFormatError.None) {
					Debug.Print ("Cannot create destination format {0:x}", fie);

					AppDelegate.ThreadStateSetDone ();
					return false;
				}
			}

			// create the AudioConverter
			AudioConverterError ce;
			var converter = AudioConverter.Create (srcFormat, dstFormat, out ce);
			Debug.Assert (ce == AudioConverterError.None);

			converter.InputData += EncoderDataProc;

			// if the source has a cookie, get it and set it on the Audio Converter
			ReadCookie (sourceFile, converter);

			// get the actual formats back from the Audio Converter
			srcFormat = converter.CurrentInputStreamDescription;
			dstFormat = converter.CurrentOutputStreamDescription;

			// if encoding to AAC set the bitrate to 192k which is a nice value for this demo
			// kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
			if (dstFormat.Format == AudioFormatType.MPEG4AAC) {
				uint outputBitRate = 192000; // 192k

				// ignore errors as setting may be invalid depending on format specifics such as samplerate
				try {
					converter.EncodeBitRate = outputBitRate;
				} catch {
				}

				// get it back and print it out
				outputBitRate = converter.EncodeBitRate;
				Debug.Print ("AAC Encode Bitrate: {0}", outputBitRate);
			}

			// can the Audio Converter resume conversion after an interruption?
			// this property may be queried at any time after construction of the Audio Converter after setting its output format
			// there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
			// construction time since it means less code to execute during or after interruption time
			bool canResumeFromInterruption;
			try {
				canResumeFromInterruption = converter.CanResumeFromInterruption;
				Debug.Print ("Audio Converter {0} continue after interruption!", canResumeFromInterruption ? "CAN" : "CANNOT");
			} catch (Exception e) {
				// if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
				// then the codec being used is not a hardware codec so we're not concerned about codec state
				// we are always going to be able to resume conversion after an interruption

				canResumeFromInterruption = false;
				Debug.Print ("CanResumeFromInterruption: {0}", e.Message);
			}
			
			// create the destination file 
			var destinationFile = AudioFile.Create (destinationURL, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags);

			// set up source buffers and data proc info struct
			afio = new AudioFileIO (32768);
			afio.SourceFile = sourceFile;
			afio.SrcFormat = srcFormat;

			if (srcFormat.BytesPerPacket == 0) {
				// if the source format is VBR, we need to get the maximum packet size
				// use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
				// in the file (without actually scanning the whole file to find the largest packet,
				// as may happen with kAudioFilePropertyMaximumPacketSize)
				afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

				// how many packets can we read for our buffer size?
				afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
				
				// allocate memory for the PacketDescription structures describing the layout of each packet
				afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
			} else {
				// CBR source format
				afio.SrcSizePerPacket = srcFormat.BytesPerPacket;
				afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
			}

			// set up output buffers
			int outputSizePerPacket = dstFormat.BytesPerPacket; // this will be non-zero if the format is CBR
			const int theOutputBufSize = 32768;
			var outputBuffer = Marshal.AllocHGlobal (theOutputBufSize);
			AudioStreamPacketDescription[] outputPacketDescriptions = null;

			if (outputSizePerPacket == 0) {
				// if the destination format is VBR, we need to get max size per packet from the converter
				outputSizePerPacket = (int)converter.MaximumOutputPacketSize;

				// allocate memory for the PacketDescription structures describing the layout of each packet
				outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
			}
			int numOutputPackets = theOutputBufSize / outputSizePerPacket;
			
			// if the destination format has a cookie, get it and set it on the output file
			WriteCookie (converter, destinationFile);
			
			// write destination channel layout
			if (srcFormat.ChannelsPerFrame > 2) {
				WriteDestinationChannelLayout (converter, sourceFile, destinationFile);
			}

			long totalOutputFrames = 0; // used for debugging
			long outputFilePos = 0;
			AudioBuffers fillBufList = new AudioBuffers (1);
			bool error = false;

			// loop to convert data
			Debug.WriteLine ("Converting...");
			while (true) {
				// set up output buffer list
				fillBufList [0] = new AudioBuffer () {
					NumberChannels = dstFormat.ChannelsPerFrame,
					DataByteSize = theOutputBufSize,
					Data = outputBuffer
				};

				// this will block if we're interrupted
				var wasInterrupted = AppDelegate.ThreadStatePausedCheck();
				
				if (wasInterrupted && !canResumeFromInterruption) {
					// this is our interruption termination condition
					// an interruption has occurred but the Audio Converter cannot continue
					Debug.WriteLine ("Cannot resume from interruption");
					error = true;
					break;
				}

				// convert data
				int ioOutputDataPackets = numOutputPackets;
				var fe = converter.FillComplexBuffer (ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
				// if interrupted in the process of the conversion call, we must handle the error appropriately
				if (fe != AudioConverterError.None) {
					Debug.Print ("FillComplexBuffer: {0}", fe);
					error = true;
					break;
				}

				if (ioOutputDataPackets == 0) {
					// this is the EOF condition
					break;
				}

				// write to output file
				var inNumBytes = fillBufList [0].DataByteSize;

				var we = destinationFile.WritePackets (false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
				if (we != 0) {
					Debug.Print ("WritePackets: {0}", we);
					error = true;
					break;
				}

				// advance output file packet position
				outputFilePos += ioOutputDataPackets;
					
				if (dstFormat.FramesPerPacket != 0) { 
					// the format has constant frames per packet
					totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
				} else {
					// variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
					for (var i = 0; i < ioOutputDataPackets; ++i)
						totalOutputFrames += outputPacketDescriptions [i].VariableFramesInPacket;
				}

			}

			Marshal.FreeHGlobal (outputBuffer);

			if (!error) {
				// write out any of the leading and trailing frames for compressed formats only
				if (dstFormat.BitsPerChannel == 0) {
					// our total output frame count should agree with the valid-frame info written to the packet table below
					Debug.Print ("Total number of output frames counted: {0}", totalOutputFrames); 
					WritePacketTableInfo (converter, destinationFile);
				}
					
				// write the cookie again - sometimes codecs will update cookies at the end of a conversion
				WriteCookie (converter, destinationFile);
			}

			converter.Dispose ();
			destinationFile.Dispose ();
			sourceFile.Dispose ();

			// transition thread state to State.Done before continuing
			AppDelegate.ThreadStateSetDone ();

			return !error;
		}
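converter.InputData is wired to EncoderDataProc above, but the handler is not shown in this listing. A rough sketch of the usual shape of such a callback, assuming afio also carries a source-buffer pointer (SrcBuffer) and a running packet position (SrcFilePos), neither of which appears in this excerpt, and using the AudioFile.ReadPacketData overload that returns packet descriptions:

		// Sketch only: feed source packets to the AudioConverter on demand.
		AudioConverterError EncoderDataProc (ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
		{
			// don't hand out more packets than one read buffer holds
			int maxPackets = afio.SrcBufferSize / afio.SrcSizePerPacket;
			if (numberDataPackets > maxPackets)
				numberDataPackets = maxPackets;

			// read packets from the source file into the (assumed) afio.SrcBuffer
			int outNumBytes = afio.SrcBufferSize;
			afio.PacketDescriptions = afio.SourceFile.ReadPacketData (false, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer, ref outNumBytes);

			// advance the (assumed) source packet position
			afio.SrcFilePos += numberDataPackets;

			// hand the data to the converter
			data.SetData (0, afio.SrcBuffer, outNumBytes);

			// pass along packet descriptions for VBR sources
			if (dataPacketDescription != null)
				dataPacketDescription = afio.PacketDescriptions;

			return AudioConverterError.None;
		}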
Example #59
        private void Init()
        {
            var audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate = this.SampleRate,
                Format = AudioFormatType.LinearPCM,
                FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
                FramesPerPacket = 1,
                ChannelsPerFrame = 1,
                BitsPerChannel = this.BitsPerSample,
                BytesPerPacket = 2,
                BytesPerFrame = 2,
                Reserved = 0
            };

            audioQueue = new InputAudioQueue(audioFormat);
            audioQueue.InputCompleted += QueueInputCompleted;

            var bufferByteSize = this.bufferSize * audioFormat.BytesPerPacket;

            IntPtr bufferPtr;
            for (var index = 0; index < 3; index++)
            {
                audioQueue.AllocateBufferWithPacketDescriptors(bufferByteSize, this.bufferSize, out bufferPtr);
                audioQueue.EnqueueBuffer(bufferPtr, bufferByteSize, null);
            }
        }
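The Init above wires QueueInputCompleted, which is not part of this listing. A minimal sketch of a typical handler, assuming the captured bytes just need to be copied out before the buffer is handed back to the queue (the consumer of the data is left open):

        // Sketch only: typical InputCompleted handler shape.
        unsafe void QueueInputCompleted (object sender, InputCompletedEventArgs e)
        {
            var buffer = (AudioQueueBuffer*) e.IntPtrBuffer;

            // copy the captured 16-bit PCM out of the unmanaged queue buffer
            var data = new byte [(int) buffer->AudioDataByteSize];
            System.Runtime.InteropServices.Marshal.Copy (buffer->AudioData, data, 0, data.Length);
            // ... hand 'data' to whatever consumes the recording ...

            // re-enqueue the buffer so the queue keeps capturing (same byte size as in Init: bufferSize packets x 2 bytes)
            audioQueue.EnqueueBuffer (e.IntPtrBuffer, this.bufferSize * 2, null);
        }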
        void prepareAudioUnit()
        {
            // Getting the RemoteIO AudioComponent
            _component = AudioComponent.FindComponent(AudioTypeOutput.Remote);
           
            // Getting the AudioUnit instance
            _audioUnit = AudioUnit.CreateInstance(_component);

            // setting AudioStreamBasicDescription
            int AudioUnitSampleTypeSize;
            if (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR)
            {
                AudioUnitSampleTypeSize = sizeof(float);
            }
            else
            {
                AudioUnitSampleTypeSize = sizeof(int);
            }
            AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate = _sampleRate,
                Format = AudioFormatType.LinearPCM,
                //kAudioFormatFlagsAudioUnitCanonical = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved | (kAudioUnitSampleFractionBits << kLinearPCMFormatFlagsSampleFractionShift),
                FormatFlags = (AudioFormatFlags)((int)AudioFormatFlags.IsSignedInteger | (int)AudioFormatFlags.IsPacked | (int)AudioFormatFlags.IsNonInterleaved | (int)(kAudioUnitSampleFractionBits << (int)AudioFormatFlags.LinearPCMSampleFractionShift)),
                ChannelsPerFrame = 2,
                BytesPerPacket = AudioUnitSampleTypeSize,
                BytesPerFrame = AudioUnitSampleTypeSize,
                FramesPerPacket = 1,
                BitsPerChannel = 8 * AudioUnitSampleTypeSize,
                Reserved = 0
            };
            _audioUnit.SetAudioFormat(audioFormat, AudioUnitScopeType.Input, 0);            

            // setting callback
            if (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR)
                _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs>(simulator_callback);
            else
                _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs>(device_callback);
        }