bool createAUGraph ()
{
    AUGraphError result = 0;
    int samplerNode, ioNode;

    var cd = new AudioComponentDescription () {
        ComponentManufacturer = AudioComponentManufacturerType.Apple,
        ComponentFlags = 0,
        ComponentFlagsMask = 0
    };

    processingGraph = new AUGraph ();

    cd.ComponentType = AudioComponentType.MusicDevice;
    cd.ComponentSubType = (int)AudioTypeMusicDevice.Sampler; //0x73616d70;
    samplerNode = processingGraph.AddNode (cd);

    cd.ComponentType = AudioComponentType.Output;
    cd.ComponentSubType = (int)AudioTypeOutput.Remote; //0x72696f63;
    ioNode = processingGraph.AddNode (cd);

    processingGraph.Open ();

    result = processingGraph.ConnnectNodeInput (samplerNode, 0, ioNode, 0);
    if (result != AUGraphError.OK)
        throw new Exception ("Unable to connect the sampler node to the output node. Error code: " + result);

    samplerUnit = processingGraph.GetNodeInfo (samplerNode);
    ioUnit = processingGraph.GetNodeInfo (ioNode);

    return true;
}
static int renderCallback (IntPtr inRefCon, ref AudioUnit.AudioUnitRenderActionFlags _ioActionFlags,
    ref AudioTimeStamp _inTimeStamp, uint _inBusNumber, uint _inNumberFrames, AudioBufferList _ioData)
{
    // getting the managed AUGraph instance back from the unmanaged context pointer
    var handler = GCHandle.FromIntPtr (inRefCon);
    var inst = (AUGraph)handler.Target;

    // invoke the event handler with the render arguments
    if (inst.RenderCallback != null) {
        var args = new AudioGraphEventArgs (_ioActionFlags, _inTimeStamp, _inBusNumber, _inNumberFrames, _ioData);
        inst.RenderCallback (inst, args);
    }

    return 0; // noerror
}
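// renderCallback above recovers the managed instance from the unmanaged inRefCon pointer with
// GCHandle.FromIntPtr; the matching setup on the managed side is roughly the following sketch
// (names are illustrative, not from the original sample):
GCHandle handle = GCHandle.Alloc (this);          // pin the managed object so the native callback can find it again
IntPtr inRefCon = GCHandle.ToIntPtr (handle);     // round-trips through GCHandle.FromIntPtr inside renderCallback
// ... pass inRefCon (together with renderCallback) when registering the render callback/notify ...
// handle.Free ();                                // release once the callback has been unregistered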
bool CreateAUGraph ()
{
    processingGraph = new AUGraph ();

    int samplerNode, ioNode;

    var musicSampler = new AudioComponentDescription () {
        ComponentManufacturer = AudioComponentManufacturerType.Apple,
        ComponentType = AudioComponentType.MusicDevice,
        ComponentSubType = (int)AudioTypeMusicDevice.Sampler
    };
    samplerNode = processingGraph.AddNode (musicSampler);

    var remoteOutput = new AudioComponentDescription () {
        ComponentManufacturer = AudioComponentManufacturerType.Apple,
        ComponentType = AudioComponentType.Output,
        ComponentSubType = (int)AudioTypeOutput.Remote
    };
    ioNode = processingGraph.AddNode (remoteOutput);

    processingGraph.Open ();

    processingGraph.ConnnectNodeInput (
        sourceNode: samplerNode,
        sourceOutputNumber: 0,
        destNode: ioNode,
        destInputNumber: 0);

    samplerUnit = processingGraph.GetNodeInfo (samplerNode);
    ioUnit = processingGraph.GetNodeInfo (ioNode);

    return true;
}
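// Assuming the fields used by CreateAUGraph above, a caller still has to initialize and start the
// graph before the sampler produces audio; a minimal sketch:
if (CreateAUGraph ()) {
    // Initialize validates the graph topology and allocates resources; Start begins rendering.
    if (processingGraph.Initialize () != AUGraphError.OK)
        throw new Exception ("Unable to initialize the audio processing graph.");
    if (processingGraph.Start () != AUGraphError.OK)
        throw new Exception ("Unable to start the audio processing graph.");
}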
public AudioGraphEventArgs (AudioUnit.AudioUnitRenderActionFlags _ioActionFlags,
    MonoTouch.AudioToolbox.AudioTimeStamp _inTimeStamp, uint _inBusNumber, uint _inNumberFrames,
    AudioBufferList _ioData)
    : base (_ioActionFlags, _inTimeStamp, _inBusNumber, _inNumberFrames, _ioData)
{
}
public AudioUnitEventArgs (AudioUnit.AudioUnitRenderActionFlags _ioActionFlags,
    MonoTouch.AudioToolbox.AudioTimeStamp _inTimeStamp, uint _inBusNumber, uint _inNumberFrames,
    AudioBufferList _ioData)
{
    ActionFlags = _ioActionFlags;
    TimeStamp = _inTimeStamp;
    BusNumber = _inBusNumber;
    NumberFrames = _inNumberFrames;
    Data = _ioData;
}
void prepareAudioUnit ()
{
    // AudioSession
    AudioSession.Initialize ();
    AudioSession.SetActive (true);
    AudioSession.Category = AudioSessionCategory.PlayAndRecord;
    AudioSession.PreferredHardwareIOBufferDuration = 0.01f;

    // Getting the RemoteIO AudioComponent
    AudioComponent _component = AudioComponent.FindComponent (AudioTypeOutput.Remote);

    // Getting the AudioUnit
    _audioUnit = AudioUnit.CreateInstance (_component);

    // turning on the microphone
    _audioUnit.SetEnableIO (true,
        AudioUnitScopeType.Input,
        1 // Remote Input
    );

    // setting the AudioStreamBasicDescription
    int AudioUnitSampleTypeSize = (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR) ? sizeof(float) : sizeof(uint);
    AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription () {
        SampleRate = _sampleRate,
        Format = AudioFormatType.LinearPCM,
        //kAudioFormatFlagsAudioUnitCanonical = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved | (kAudioUnitSampleFractionBits << kLinearPCMFormatFlagsSampleFractionShift),
        FormatFlags = (AudioFormatFlags)((int)AudioFormatFlags.IsSignedInteger
            | (int)AudioFormatFlags.IsPacked
            | (int)AudioFormatFlags.IsNonInterleaved
            | (int)(kAudioUnitSampleFractionBits << (int)AudioFormatFlags.LinearPCMSampleFractionShift)),
        ChannelsPerFrame = 2,
        BytesPerPacket = AudioUnitSampleTypeSize,
        BytesPerFrame = AudioUnitSampleTypeSize,
        FramesPerPacket = 1,
        BitsPerChannel = 8 * AudioUnitSampleTypeSize,
        Reserved = 0
    };
    _audioUnit.SetAudioFormat (audioFormat,
        AudioUnitScopeType.Input,
        0 // Remote output
    );
    _audioUnit.SetAudioFormat (audioFormat,
        AudioUnitScopeType.Output,
        1 // Remote input
    );

    // setting the render callback (the canonical sample type differs between simulator and device)
    if (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR)
        _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs> (simulator_callback);
    else
        _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs> (device_callback);

    // initialize
    _audioUnit.Initialize ();
}
public IOSAudioProcessor ()
{
    var inputComponent = AudioComponent.FindNextComponent (
        null,
        new AudioComponentDescription {
            ComponentFlags = 0,
            ComponentFlagsMask = 0,
            ComponentManufacturer = AudioComponentManufacturerType.Apple,
            ComponentSubType = (int)AudioTypeOutput.Remote,
            ComponentType = AudioComponentType.Output
        });

    recorder = inputComponent.CreateAudioUnit ();
    recorder.SetEnableIO (true, AudioUnitScopeType.Input, inputBus);
    recorder.SetEnableIO (false, AudioUnitScopeType.Output, outputBus);

    var audioFormat = new AudioStreamBasicDescription {
        SampleRate = StudentDemo.Globals.SAMPLERATE,
        Format = AudioFormatType.LinearPCM,
        FormatFlags = AudioFormatFlags.IsSignedInteger | AudioFormatFlags.IsPacked,
        FramesPerPacket = 1,
        ChannelsPerFrame = 1,
        BitsPerChannel = 16,
        BytesPerPacket = 2,
        BytesPerFrame = 2
    };

    recorder.SetAudioFormat (audioFormat, AudioUnitScopeType.Output, inputBus);
    recorder.SetAudioFormat (audioFormat, AudioUnitScopeType.Input, outputBus);

    recorder.SetInputCallback (AudioInputCallBack, AudioUnitScopeType.Global, inputBus);

    // TODO: Disable buffers (requires interop)
    aBuffer = new AudioBuffer {
        NumberChannels = 1,
        DataByteSize = 512 * 2,
        Data = System.Runtime.InteropServices.Marshal.AllocHGlobal (512 * 2)
    };
}
static int device_renderCallback (IntPtr inRefCon, ref AudioUnit.AudioUnitRenderActionFlags _ioActionFlags,
    ref AudioTimeStamp _inTimeStamp, uint _inBusNumber, uint _inNumberFrames, AudioBufferList _ioData)
{
    System.Diagnostics.Debug.WriteLine ("o");

    var handler = GCHandle.FromIntPtr (inRefCon);
    var inst = (RemoteOutput)handler.Target;
    var waveDef = inst._waveDef[_inBusNumber];

    double dphai = 2 * Math.PI * waveDef.frequency / waveDef.sampleRate;
    double phase = waveDef.phase;

    // Getting pointers to the buffers to be filled
    IntPtr outL = _ioData.mBuffers[0].mData;
    IntPtr outR = _ioData.mBuffers[1].mData;

    // Filling in a sine waveform.
    // AudioUnitSampleType differs between the simulator (float32) and a real device (int32).
    unsafe {
        var outLPtr = (int*)outL.ToPointer ();
        var outRPtr = (int*)outR.ToPointer ();
        for (int i = 0; i < _inNumberFrames; i++) {
            int sample = (int)(Math.Sin (phase) * int.MaxValue / 128); // signal waveform format is fixed-point (8.24)
            *outLPtr++ = sample;
            *outRPtr++ = sample;
            phase += dphai;
        }
    }
    waveDef.phase = phase % (2 * Math.PI);
    return 0;
}
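// The fixed-point math above only applies on a device; on the simulator the canonical sample type is
// 32-bit float, so the matching simulator callback would write floats in the [-1, 1] range instead.
// A hedged sketch mirroring the structure above (not taken from the original sample):
static int simulator_renderCallback (IntPtr inRefCon, ref AudioUnit.AudioUnitRenderActionFlags _ioActionFlags,
    ref AudioTimeStamp _inTimeStamp, uint _inBusNumber, uint _inNumberFrames, AudioBufferList _ioData)
{
    var handler = GCHandle.FromIntPtr (inRefCon);
    var inst = (RemoteOutput)handler.Target;
    var waveDef = inst._waveDef[_inBusNumber];

    double dphai = 2 * Math.PI * waveDef.frequency / waveDef.sampleRate;
    double phase = waveDef.phase;

    unsafe {
        var outLPtr = (float*)_ioData.mBuffers[0].mData.ToPointer ();
        var outRPtr = (float*)_ioData.mBuffers[1].mData.ToPointer ();
        for (int i = 0; i < _inNumberFrames; i++) {
            // simulator canonical samples are float32 in [-1, 1]
            float sample = (float)Math.Sin (phase);
            *outLPtr++ = sample;
            *outRPtr++ = sample;
            phase += dphai;
        }
    }
    waveDef.phase = phase % (2 * Math.PI);
    return 0;
}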
void StreamPropertyListenerProc (object sender, PropertyFoundEventArgs args)
{
    if (args.Property == AudioFileStreamProperty.DataFormat) {
        dataFormat = audioFileStream.DataFormat;
        return;
    }

    if (args.Property != AudioFileStreamProperty.ReadyToProducePackets)
        return;

    if (audioQueue != null) {
        // TODO: Dispose
        throw new NotImplementedException ();
    }

    audioQueue = new OutputAudioQueue (dataFormat);
    audioQueue.OutputCompleted += HandleOutputCompleted;

    AudioQueueStatus status;
    aqTap = audioQueue.CreateProcessingTap (TapProc, AudioQueueProcessingTapFlags.PreEffects, out status);
    if (status != AudioQueueStatus.Ok)
        throw new ApplicationException ("Could not create AQ tap");

    // create an AUGraph to process in the tap; it needs to convert from tapFormat to the effect format and back
    /* note: this is invalidname's recipe for doing an in-place effect when a format conversion is needed
       before and after the effect, usually because effects want floats and everything else in iOS Core Audio
       works with ints (or, in rare cases, fixed-point).
       The graph looks like this:
       [render-callback] -> [converter] -> [effect] -> [converter] -> [generic-output]
       Prior to calling AudioUnitRender() on generic-output, the client copies the incoming ioData to a pointer
       that render-callback knows about, and NULLs the ioData provided to AudioUnitRender(). The NULL tells
       generic-output to pull from its upstream units (i.e., the AUGraph), and copying off the ioData pointer
       allows the render-callback to provide it to the front of the stream. In some locales, this kind of
       shell game is described as "batshit crazy", but it seems to work pretty well in practice. */
    auGraph = new AUGraph ();
    auGraph.Open ();

    var effectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.NewTimePitch));
    effectUnit = auGraph.GetNodeInfo (effectNode);

    var convertToEffectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.AU));
    var convertToEffectUnit = auGraph.GetNodeInfo (convertToEffectNode);

    var convertFromEffectNode = auGraph.AddNode (AudioComponentDescription.CreateConverter (AudioTypeConverter.AU));
    var convertFromEffectUnit = auGraph.GetNodeInfo (convertFromEffectNode);

    var genericOutputNode = auGraph.AddNode (AudioComponentDescription.CreateOutput (AudioTypeOutput.Generic));
    genericOutputUnit = auGraph.GetNodeInfo (genericOutputNode);

    // set the format conversions throughout the graph
    var effectFormat = effectUnit.GetAudioFormat (AudioUnitScopeType.Output);
    var tapFormat = aqTap.ProcessingFormat;

    convertToEffectUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Input);
    convertToEffectUnit.SetAudioFormat (effectFormat, AudioUnitScopeType.Output);

    convertFromEffectUnit.SetAudioFormat (effectFormat, AudioUnitScopeType.Input);
    convertFromEffectUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Output);

    genericOutputUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Input);
    genericOutputUnit.SetAudioFormat (tapFormat, AudioUnitScopeType.Output);

    // set maximum frames per slice higher (4096) so we don't get kAudioUnitErr_TooManyFramesToProcess
    const uint maxFramesPerSlice = 4096;
    if (convertToEffectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
        throw new ApplicationException ();
    if (effectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
        throw new ApplicationException ();
    if (convertFromEffectUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
        throw new ApplicationException ();
    if (genericOutputUnit.SetMaximumFramesPerSlice (maxFramesPerSlice, AudioUnitScopeType.Global) != AudioUnitStatus.OK)
        throw new ApplicationException ();

    // connect the nodes
    auGraph.ConnnectNodeInput (convertToEffectNode, 0, effectNode, 0);
    auGraph.ConnnectNodeInput (effectNode, 0, convertFromEffectNode, 0);
    auGraph.ConnnectNodeInput (convertFromEffectNode, 0, genericOutputNode, 0);

    // set up the callback into the first convert unit
    if (convertToEffectUnit.SetRenderCallback (ConvertInputRenderCallback, AudioUnitScopeType.Global) != AudioUnitStatus.NoError)
        throw new ApplicationException ();

    var res = auGraph.Initialize ();
    if (res != AUGraphError.OK)
        throw new ApplicationException ();
}
void prepareAudioUnit ()
{
    // creating an AudioComponentDescription of the RemoteIO AudioUnit
    AudioComponentDescription cd = new AudioComponentDescription () {
        componentType = AudioComponentDescription.AudioComponentType.kAudioUnitType_Output,
        componentSubType = AudioComponentDescription.AudioComponentSubType.kAudioUnitSubType_RemoteIO,
        componentManufacturer = AudioComponentDescription.AudioComponentManufacturerType.kAudioUnitManufacturer_Apple,
        componentFlags = 0,
        componentFlagsMask = 0
    };

    // Getting the AudioComponent using the audio component description
    _audioComponent = AudioComponent.FindComponent (cd);

    // creating an audio unit instance
    _audioUnit = AudioUnit.CreateInstance (_audioComponent);

    // setting audio format
    _audioUnit.SetAudioFormat (_dstFormat,
        AudioUnit.AudioUnitScopeType.kAudioUnitScope_Input,
        0 // Remote Output
    );

    // setting callback method
    _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs> (_audioUnit_RenderCallback);

    _audioUnit.Initialize ();
}
// P/Invoke into AudioToolbox (the [DllImport] attribute is assumed here; the original snippet omitted it)
[DllImport (MonoTouch.Constants.AudioToolboxLibrary)]
static extern int AUGraphAddRenderNotify (IntPtr inGraph, AudioUnit.AURenderCallback inCallback, IntPtr inRefCon);
void prepareAudioUnit ()
{
    // Getting the RemoteIO AudioComponent
    AudioComponent _component = AudioComponent.FindComponent (AudioTypeOutput.Remote);

    // Getting the AudioUnit
    _audioUnit = AudioUnit.CreateInstance (_component);

    // setting the AudioStreamBasicDescription
    int AudioUnitSampleTypeSize;
    if (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR) {
        AudioUnitSampleTypeSize = sizeof(float);
    } else {
        AudioUnitSampleTypeSize = sizeof(int);
    }
    AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription () {
        SampleRate = _sampleRate,
        Format = AudioFormatType.LinearPCM,
        //kAudioFormatFlagsAudioUnitCanonical = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved | (kAudioUnitSampleFractionBits << kLinearPCMFormatFlagsSampleFractionShift),
        FormatFlags = (AudioFormatFlags)((int)AudioFormatFlags.IsSignedInteger
            | (int)AudioFormatFlags.IsPacked
            | (int)AudioFormatFlags.IsNonInterleaved
            | (int)(kAudioUnitSampleFractionBits << (int)AudioFormatFlags.LinearPCMSampleFractionShift)),
        ChannelsPerFrame = 2,
        BytesPerPacket = AudioUnitSampleTypeSize,
        BytesPerFrame = AudioUnitSampleTypeSize,
        FramesPerPacket = 1,
        BitsPerChannel = 8 * AudioUnitSampleTypeSize,
        Reserved = 0
    };
    _audioUnit.SetAudioFormat (audioFormat, AudioUnitScopeType.Input, 0);

    // setting the render callback (the canonical sample type differs between simulator and device)
    if (MonoTouch.ObjCRuntime.Runtime.Arch == MonoTouch.ObjCRuntime.Arch.SIMULATOR)
        _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs> (simulator_callback);
    else
        _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs> (device_callback);
}
void prepareAudioUnit ()
{
    // Getting the Remote output AudioComponent
    _audioComponent = AudioComponent.FindComponent (AudioTypeOutput.Remote);

    // creating an audio unit instance
    _audioUnit = AudioUnit.CreateInstance (_audioComponent);

    // setting audio format
    _audioUnit.SetAudioFormat (_dstFormat,
        AudioUnitScopeType.Input,
        0 // Remote Output
    );

    // setting callback method
    _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs> (_audioUnit_RenderCallback);

    _audioUnit.Initialize ();
}
public AudioVoice (AudioEngine engine, SoundEffectInstance effectInstance, WaveFormat desiredFormat)
{
    if (engine == null) throw new ArgumentNullException ("engine");
    if (desiredFormat == null) throw new ArgumentNullException ("desiredFormat");

    audioEngine = engine;
    soundEffectInstance = effectInstance;
    waveFormat = desiredFormat;
    BusIndexMixer = uint.MaxValue;

    if (desiredFormat.BitsPerSample != 16)
        throw new AudioSystemInternalException ("Invalid Audio Format. " + desiredFormat.BitsPerSample + " bits by sample is not supported.");

    lock (StaticMembersLock) {
        if (nbOfInstances == 0) {
            // Create the Audio Graph
            audioGraph = new AUGraph ();

            // Open the graph (does not initialize it yet)
            audioGraph.Open ();

            // Create the AudioComponentDescription corresponding to the IO Remote output and MultiChannelMixer
            var remoteInOutComponentDesc = AudioComponentDescription.CreateOutput (AudioTypeOutput.Remote);
            var mixerMultiChannelComponentDesc = AudioComponentDescription.CreateMixer (AudioTypeMixer.MultiChannel);
            var mixer3DComponentDesc = AudioComponentDescription.CreateMixer (AudioTypeMixer.Spacial);

            // Add the Audio Unit nodes to the AudioGraph
            var outputUnitNodeId = audioGraph.AddNode (remoteInOutComponentDesc);
            var idChannelMixerNode = audioGraph.AddNode (mixerMultiChannelComponentDesc);
            var id3DMixerNode = audioGraph.AddNode (mixer3DComponentDesc);

            // Connect the nodes together
            CheckGraphError (audioGraph.ConnnectNodeInput (idChannelMixerNode, 0, outputUnitNodeId, 0), "Connection of the graph node failed.");
            CheckGraphError (audioGraph.ConnnectNodeInput (id3DMixerNode, 0, idChannelMixerNode, MaxNumberOfTracks), "Connection of the graph node failed.");

            // Get the MixerUnit objects
            unitChannelMixer = audioGraph.GetNodeInfo (idChannelMixerNode);
            unit3DMixer = audioGraph.GetNodeInfo (id3DMixerNode);

            // Set the mixers' output formats (the stream format is propagated along the linked input during the graph initialization)
            var desiredSampleRate = (engine.AudioSampleRate != 0) ? engine.AudioSampleRate : AudioUnitOutputSampleRate;
            unit3DMixer.SetAudioFormat (CreateLinear16BitsPcm (2, desiredSampleRate), AudioUnitScopeType.Output);
            unitChannelMixer.SetAudioFormat (CreateLinear16BitsPcm (2, desiredSampleRate), AudioUnitScopeType.Output);

            // set the element count to the max number of possible tracks before initializing the audio graph
            CheckUnitStatus (unitChannelMixer.SetElementCount (AudioUnitScopeType.Input, MaxNumberOfTracks + 1), string.Format ("Failed to set element count on ChannelMixer [{0}]", MaxNumberOfTracks + 1)); // +1 for the 3DMixer output
            CheckUnitStatus (unit3DMixer.SetElementCount (AudioUnitScopeType.Input, MaxNumberOfTracks), string.Format ("Failed to set element count on 3DMixer [{0}]", MaxNumberOfTracks));

            // set a null render callback on the channel and 3D mixer input buses
            for (uint i = 0; i < MaxNumberOfTracks; i++) {
                CheckUnitStatus ((AudioUnitStatus)SetInputRenderCallbackToNull (unit3DMixer.Handle, i), "Failed to set the render callback");
                CheckUnitStatus ((AudioUnitStatus)SetInputRenderCallbackToNull (unitChannelMixer.Handle, i), "Failed to set the render callback");
            }

            // Initialize the graph (validation of the topology)
            CheckGraphError (audioGraph.Initialize (), "The audio graph initialization failed.");

            // Start audio rendering
            CheckGraphError (audioGraph.Start (), "Audio Graph could not start.");

            // disable all the input buses at the beginning
            for (uint i = 0; i < MaxNumberOfTracks; i++) {
                CheckUnitStatus (unitChannelMixer.SetParameter (AudioUnitParameterType.MultiChannelMixerEnable, 0f, AudioUnitScopeType.Input, i), "Failed to enable/disable the ChannelMixerInput.");
                CheckUnitStatus (unit3DMixer.SetParameter ((AudioUnitParameterType)_3DMixerParametersIds.Enable, 0f, AudioUnitScopeType.Input, i), "Failed to enable/disable the 3DMixerInput.");
            }

            // At initialization all UnitElements are available.
            availableMixerBusIndices = new Queue<uint> ();
            for (uint i = 0; i < MaxNumberOfTracks; i++)
                availableMixerBusIndices.Enqueue (i);
        }
        ++nbOfInstances;

        // Create an AudioDataRendererInfo for the sounds.
        pAudioDataRendererInfo = (AudioDataRendererInfo*)Utilities.AllocateClearedMemory (sizeof(AudioDataRendererInfo));
        pAudioDataRendererInfo->HandleChannelMixer = unitChannelMixer.Handle;
        pAudioDataRendererInfo->Handle3DMixer = unit3DMixer.Handle;
    }
}
void prepareAudioUnit ()
{
    // Creating an AudioComponentDescription instance for the RemoteIO Audio Unit
    var cd = new AudioComponentDescription () {
        componentType = AudioComponentDescription.AudioComponentType.kAudioUnitType_Output,
        componentSubType = AudioComponentDescription.AudioComponentSubType.kAudioUnitSubType_RemoteIO,
        componentManufacturer = AudioComponentDescription.AudioComponentManufacturerType.kAudioUnitManufacturer_Apple,
        componentFlags = 0,
        componentFlagsMask = 0
    };

    // Getting the AudioComponent from the description
    _component = AudioComponent.FindComponent (cd);

    // Getting the AudioUnit
    _audioUnit = AudioUnit.CreateInstance (_component);

    // turning on the microphone
    _audioUnit.SetEnableIO (true,
        AudioUnit.AudioUnitScopeType.kAudioUnitScope_Input,
        1 // Remote Input
    );

    // setting the AudioStreamBasicDescription
    var audioFormat = AudioUnitUtils.AUCanonicalASBD (44100.0, 2);
    _audioUnit.SetAudioFormat (audioFormat,
        AudioUnit.AudioUnitScopeType.kAudioUnitScope_Input,
        0 // Remote output
    );
    _audioUnit.SetAudioFormat (audioFormat,
        AudioUnit.AudioUnitScopeType.kAudioUnitScope_Output,
        1 // Remote input
    );

    // setting callback
    _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs> (_audioUnit_RenderCallback);

    // initialize
    _audioUnit.Initialize ();
}
public void AUGraphSetNodeInputCallback (int inDestNode, uint inDestInputNumber, AudioUnit.AURenderCallbackStrct inInputCallback)
{
    int err = AUGraphSetNodeInputCallback (_auGraph, inDestNode, inDestInputNumber, inInputCallback);
    if (err != 0)
        throw new ArgumentException (String.Format ("Error code: {0}", err));
}
void prepareAudioUnit ()
{
    // AudioSession
    AudioSession.Initialize ();
    AudioSession.SetActive (true);
    AudioSession.Category = AudioSessionCategory.PlayAndRecord;
    AudioSession.PreferredHardwareIOBufferDuration = 0.005f;

    // Getting the Remote output AudioComponent
    _audioComponent = AudioComponent.FindComponent (AudioTypeOutput.Remote);

    // creating an audio unit instance
    _audioUnit = AudioUnit.CreateInstance (_audioComponent);

    // turning on the microphone
    _audioUnit.SetEnableIO (true,
        AudioUnitScopeType.Input,
        1 // Remote Input
    );

    // setting audio format
    _audioUnit.SetAudioFormat (_dstFormat,
        AudioUnitScopeType.Input,
        0 // Remote Output
    );
    _audioUnit.SetAudioFormat (
        AudioUnitUtils.AUCanonicalASBD (_sampleRate, 2),
        AudioUnitScopeType.Output,
        1 // Remote input
    );

    // setting callback method
    _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs> (_audioUnit_RenderCallback);

    _audioUnit.Initialize ();
    _audioUnit.Start ();
}
void prepareAudioUnit ()
{
    // AudioSession
    AudioSession.Initialize ();
    AudioSession.SetActive (true);
    AudioSession.Category = AudioSessionCategory.PlayAndRecord;
    AudioSession.PreferredHardwareIOBufferDuration = 0.005f;

    // creating an AudioComponentDescription of the RemoteIO AudioUnit
    AudioComponentDescription cd = new AudioComponentDescription () {
        componentType = AudioComponentDescription.AudioComponentType.kAudioUnitType_Output,
        componentSubType = AudioComponentDescription.AudioComponentSubType.kAudioUnitSubType_RemoteIO,
        componentManufacturer = AudioComponentDescription.AudioComponentManufacturerType.kAudioUnitManufacturer_Apple,
        componentFlags = 0,
        componentFlagsMask = 0
    };

    // Getting the AudioComponent using the audio component description
    _audioComponent = AudioComponent.FindComponent (cd);

    // creating an audio unit instance
    _audioUnit = AudioUnit.CreateInstance (_audioComponent);

    // turning on the microphone
    _audioUnit.SetEnableIO (true,
        AudioUnit.AudioUnitScopeType.kAudioUnitScope_Input,
        1 // Remote Input
    );

    // setting audio format
    _audioUnit.SetAudioFormat (_dstFormat,
        AudioUnit.AudioUnitScopeType.kAudioUnitScope_Input,
        0 // Remote Output
    );
    _audioUnit.SetAudioFormat (
        AudioUnitUtils.AUCanonicalASBD (_sampleRate, 2),
        AudioUnit.AudioUnitScopeType.kAudioUnitScope_Output,
        1 // Remote input
    );

    // setting callback method
    _audioUnit.RenderCallback += new EventHandler<AudioUnitEventArgs> (_audioUnit_RenderCallback);

    _audioUnit.Initialize ();
    _audioUnit.Start ();
}
AudioUnitStatus AudioInputCallBack (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioUnit audioUnit)
{
    // build the packet header: a 4-byte tag, the session id and a timestamp (both written big-endian)
    MemoryStream ms = new MemoryStream ();
    String s = "a000";
    byte[] bufWriter = Encoding.ASCII.GetBytes (s.ToCharArray (), 0, 4);
    ms.Write (bufWriter, 0, 4);

    bufWriter = BitConverter.GetBytes (AudioSessionId);
    if (BitConverter.IsLittleEndian) Array.Reverse (bufWriter);
    ms.Write (bufWriter, 0, 4);

    long time = (long)(DateTime.UtcNow - new DateTime (1970, 1, 1)).TotalMilliseconds;
    //Console.WriteLine ((time - lasttime) + " ms delay");
    lasttime = time;
    bufWriter = BitConverter.GetBytes (time);
    if (BitConverter.IsLittleEndian) Array.Reverse (bufWriter);
    ms.Write (bufWriter, 0, 8);

    // render the captured input (16-bit mono) into a temporary unmanaged buffer
    var buffer = new AudioBuffer () {
        NumberChannels = 1,
        DataByteSize = (int)numberFrames * 2,
        Data = System.Runtime.InteropServices.Marshal.AllocHGlobal ((int)numberFrames * 2)
    };
    var bufferList = new AudioBuffers (1);
    bufferList[0] = buffer;
    var status = audioUnit.Render (ref actionFlags, timeStamp, busNumber, numberFrames, bufferList);

    var send = new byte[buffer.DataByteSize];
    System.Runtime.InteropServices.Marshal.Copy (buffer.Data, send, 0, send.Length);
    ms.Write (send, 0, send.Length);

    Console.Write ("\n Buffer: ");
    foreach (byte b in send)
        Console.Write ("\\x" + b);
    Console.Write ("\n");

    System.Runtime.InteropServices.Marshal.FreeHGlobal (buffer.Data);

    byte[] sendbuf = ms.ToArray ();
    if (sendbuf.Length > 4096)
        throw new Exception ("Packet size too large!");

    // send the packet over UDP without blocking the render thread
    Task tk = Task.Factory.StartNew (() => {
        try {
            var aSender = audioCaller.BeginSend (sendbuf, sendbuf.Length, null, null);
            aSender.AsyncWaitHandle.WaitOne (TimeSpan.FromSeconds (3));
            if (aSender.IsCompleted)
                audioCaller.EndSend (aSender);
        } catch {
        }
    });

    return AudioUnitStatus.OK;
}
void prepareAudioUnit ()
{
    // AudioSession
    AudioSession.Initialize ();
    AudioSession.SetActive (true);
    AudioSession.Category = AudioSessionCategory.PlayAndRecord;
    AudioSession.PreferredHardwareIOBufferDuration = 0.005f;

    // Getting the Remote output AudioComponent
    _audioComponent = AudioComponent.FindComponent (AudioTypeOutput.Remote);

    // creating an audio unit instance
    _audioUnit = new AudioUnit (_audioComponent);

    // turning on the microphone
    _audioUnit.SetEnableIO (true,
        AudioUnitScopeType.Input,
        1 // Remote Input
    );

    // setting audio format
    _audioUnit.SetAudioFormat (_dstFormat,
        AudioUnitScopeType.Input,
        0 // Remote Output
    );

    var format = AudioStreamBasicDescription.CreateLinearPCM (_sampleRate, bitsPerChannel: 32);
    format.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsAudioUnitCanonical;
    _audioUnit.SetAudioFormat (format, AudioUnitScopeType.Output, 1);

    // setting callback method
    _audioUnit.SetRenderCallback (_audioUnit_RenderCallback, AudioUnitScopeType.Global);

    _audioUnit.Initialize ();
    _audioUnit.Start ();
}
private void startTalking (UdpClient audioCaller)
{
    //Stop old recording session

    //Generate new WaveFormat
    //    recorder.WaveFormat = new WaveFormat(16000, 16, 1);
    //    recorder.BufferMilliseconds = 50;
    //    recorder.DataAvailable += SendAudio; //Add event to SendAudio

    //    recorder = new InputAudioQueue (playerFormat);
    //
    //    for (int i = 0; i < BUFFERCOUNT; i++) {
    //        IntPtr aBUff;
    //        //recorder.AllocateBuffer (AUDIOBUFFERSIZE, out aBUff);
    //        byteSize = AUDIOBUFFERSIZE * playerFormat.BytesPerPacket;
    //        recorder.AllocateBufferWithPacketDescriptors (byteSize, AUDIOBUFFERSIZE, out aBUff);
    //        recorder.EnqueueBuffer (aBUff, byteSize, null);
    //        Console.WriteLine ("Buffer allocated, enqueueing");
    //    }

    //New stuffs
    var inputComponent = AudioComponent.FindNextComponent (
        null,
        new AudioComponentDescription {
            ComponentFlags = 0,
            ComponentFlagsMask = 0,
            ComponentManufacturer = AudioComponentManufacturerType.Apple,
            ComponentSubType = (int)AudioTypeOutput.Remote,
            ComponentType = AudioComponentType.Output
        });

    recorder = inputComponent.CreateAudioUnit ();
    recorder.SetEnableIO (true, AudioUnitScopeType.Input, inputBus);
    recorder.SetEnableIO (false, AudioUnitScopeType.Output, outputBus);

    var audioFormat = new AudioStreamBasicDescription {
        SampleRate = Globals.SAMPLERATE,
        Format = AudioFormatType.LinearPCM,
        FormatFlags = AudioFormatFlags.IsSignedInteger | AudioFormatFlags.IsPacked,
        FramesPerPacket = 1,
        ChannelsPerFrame = 1,
        BitsPerChannel = 16,
        BytesPerPacket = 2,
        BytesPerFrame = 2
    };

    recorder.SetAudioFormat (audioFormat, AudioUnitScopeType.Output, inputBus);
    recorder.SetAudioFormat (audioFormat, AudioUnitScopeType.Input, outputBus);

    recorder.SetInputCallback (AudioInputCallBack, AudioUnitScopeType.Global, inputBus);

    // TODO: Disable buffers (requires interop)
    aBuffer = new AudioBuffer {
        NumberChannels = 1,
        DataByteSize = 512 * 2,
        Data = System.Runtime.InteropServices.Marshal.AllocHGlobal (512 * 2)
    };

    isTalking = true;
    //recorder.InputCompleted += SendAudio;
    //recorder.Start ();
    recorder.Initialize ();
    recorder.Start ();
}
void SetupRemoteIO ()
{
    AudioComponentDescription desc = new AudioComponentDescription ();
    desc.ComponentType = AudioComponentType.Output;
    desc.ComponentSubType = 0x72696f63; // 'rioc', the RemoteIO subtype
    desc.ComponentManufacturer = AudioComponentManufacturerType.Apple;
    desc.ComponentFlags = 0;
    desc.ComponentFlagsMask = 0;

    var component = AudioComponent.FindNextComponent (null, desc);
    rioUnit = new AudioUnit (component);
    rioUnit.SetEnableIO (true, AudioUnitScopeType.Input, 1);
    rioUnit.SetRenderCallback (renderDelegate, AudioUnitScopeType.Input, 0);

    audioFormat = new AudioStreamBasicDescription ();
    audioFormat.Format = AudioFormatType.LinearPCM;
    audioFormat.SampleRate = sampleRate;
    audioFormat.ChannelsPerFrame = 2;
    audioFormat.FramesPerPacket = 1;
    audioFormat.BitsPerChannel = 8 * sizeof(int);
    audioFormat.BytesPerPacket = sizeof(int);
    audioFormat.BytesPerFrame = sizeof(int);
    audioFormat.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsAudioUnitCanonical;

    rioUnit.SetAudioFormat (audioFormat, AudioUnitScopeType.Input, 0);
    rioUnit.SetAudioFormat (audioFormat, AudioUnitScopeType.Output, 1);
    rioUnit.Initialize ();
    unitCreated = true;

    FFTBufferManager = new FFTBufferManager (maxFPS, this);
    FFTBufferManager.Setup ();

    rioUnit.Start ();
    unitIsRunning = true;
}
public void InitializeAUGraph ()
{
    Debug.Print ("Initialize");

    LoadFiles ();

    graph = new AUGraph ();

    // create two AudioComponentDescriptions for the AUs we want in the graph

    // output unit
    var outputNode = graph.AddNode (AudioComponentDescription.CreateOutput (AudioTypeOutput.Remote));

    // mixer node
    var mixerNode = graph.AddNode (AudioComponentDescription.CreateMixer (AudioTypeMixer.MultiChannel));

    // connect a node's output to a node's input
    if (graph.ConnnectNodeInput (mixerNode, 0, outputNode, 0) != AUGraphError.OK)
        throw new ApplicationException ();

    // open the graph: the AudioUnits are opened but not initialized (no resource allocation occurs here)
    if (graph.TryOpen () != 0)
        throw new ApplicationException ();

    mixer = graph.GetNodeInfo (mixerNode);

    // set bus count
    const uint numbuses = 2;
    Debug.Print ("Set input bus count {0}", numbuses);
    if (mixer.SetElementCount (AudioUnitScopeType.Input, numbuses) != AudioUnitStatus.OK)
        throw new ApplicationException ();

    AudioStreamBasicDescription desc;
    for (uint i = 0; i < numbuses; ++i) {
        // setup render callback
        if (graph.SetNodeInputCallback (mixerNode, i, HandleRenderDelegate) != AUGraphError.OK)
            throw new ApplicationException ();

        // set input stream format to what we want
        desc = mixer.GetAudioFormat (AudioUnitScopeType.Input, i);
        //desc.ChangeNumberChannels(2, false);
        desc.SampleRate = GraphSampleRate;
        mixer.SetAudioFormat (desc, AudioUnitScopeType.Input, i);
    }

    // set output stream format to what we want
    desc = mixer.GetAudioFormat (AudioUnitScopeType.Output);
    //desc.ChangeNumberChannels(2, false);
    desc.SampleRate = GraphSampleRate;
    mixer.SetAudioFormat (desc, AudioUnitScopeType.Output);

    // now that we've set everything up we can initialize the graph; this will also validate the connections
    if (graph.Initialize () != AUGraphError.OK)
        throw new ApplicationException ();
}
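// After InitializeAUGraph returns, the graph still has to be started before the mixer's render
// callbacks are pulled; a minimal sketch using the same graph field and error style as above:
public void StartGraph ()
{
    // start pulling audio through the initialized graph
    if (graph.Start () != AUGraphError.OK)
        throw new ApplicationException ("Audio graph could not start.");
}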
AudioUnitStatus AudioInputCallBack (AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioUnit audioUnit)
{
    // allocate a temporary buffer, render the captured input into it, and hand the bytes to DataAvailable
    var buffer = new AudioBuffer () {
        NumberChannels = 1,
        DataByteSize = (int)numberFrames * 2,
        Data = System.Runtime.InteropServices.Marshal.AllocHGlobal ((int)numberFrames * 2)
    };
    var bufferList = new AudioBuffers (1);
    bufferList[0] = buffer;

    var status = audioUnit.Render (ref actionFlags, timeStamp, busNumber, numberFrames, bufferList);

    var send = new byte[buffer.DataByteSize];
    System.Runtime.InteropServices.Marshal.Copy (buffer.Data, send, 0, send.Length);

    var handler = DataAvailable;
    if (handler != null)
        handler (this, send);

    Console.Write ("\n Buffer: ");
    foreach (byte b in send)
        Console.Write ("\\x" + b);
    Console.Write ("\n");

    System.Runtime.InteropServices.Marshal.FreeHGlobal (buffer.Data);

    return AudioUnitStatus.OK;
}
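// The callback above publishes each captured buffer through the DataAvailable event; a minimal consumer
// sketch (the handler signature is inferred from the call above, and the MemoryStream sink is only an example):
var processor = new IOSAudioProcessor ();   // constructor shown earlier in this listing
var captured = new System.IO.MemoryStream ();
processor.DataAvailable += (sender, bytes) => {
    // each callback delivers one render cycle of 16-bit mono PCM
    captured.Write (bytes, 0, bytes.Length);
};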
void prepareAudioUnit ()
{
    // Updated to use AVAudioSession instead of the deprecated AudioSession API
    var session = AVAudioSession.SharedInstance ();
    NSError error;
    if (session == null) {
        var alert = new UIAlertView ("Session error", "Unable to create audio session", null, "Cancel");
        alert.Show ();
        alert.Clicked += delegate {
            alert.DismissWithClickedButtonIndex (0, true);
            return;
        };
    }
    session.SetActive (true);
    session.SetCategory (AVAudioSessionCategory.PlayAndRecord);
    session.SetPreferredIOBufferDuration (0.005, out error);

    // Getting the Remote output AudioComponent
    _audioComponent = AudioComponent.FindComponent (AudioTypeOutput.Remote);

    // creating an audio unit instance
    _audioUnit = new AudioUnit (_audioComponent);

    // turning on the microphone
    _audioUnit.SetEnableIO (true,
        AudioUnitScopeType.Input,
        1 // Remote Input
    );

    // setting audio format
    _audioUnit.SetAudioFormat (_dstFormat,
        AudioUnitScopeType.Input,
        0 // Remote Output
    );

    var format = AudioStreamBasicDescription.CreateLinearPCM (_sampleRate, bitsPerChannel: 32);
    format.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsNativeFloat;
    _audioUnit.SetAudioFormat (format, AudioUnitScopeType.Output, 1);

    // setting callback method
    _audioUnit.SetRenderCallback (_audioUnit_RenderCallback, AudioUnitScopeType.Global);

    _audioUnit.Initialize ();
    _audioUnit.Start ();
}
// P/Invoke into AudioToolbox (the [DllImport] attribute is assumed here; the original snippet omitted it)
[DllImport (MonoTouch.Constants.AudioToolboxLibrary)]
static extern int AUGraphSetNodeInputCallback (IntPtr inGraph, Int32 inDestNode, UInt32 inDestInputNumber, AudioUnit.AURenderCallbackStrct inInputCallback);