Example #1
        public IOSAudioProcessor()
        {
            // Locate the RemoteIO audio unit: on iOS it exposes both the
            // microphone (input element, bus 1) and the speaker (output element, bus 0).
            var inputComponent = AudioComponent.FindNextComponent(
                null,
                new AudioComponentDescription
                {
                    ComponentFlags = 0,
                    ComponentFlagsMask = 0,
                    ComponentManufacturer = AudioComponentManufacturerType.Apple,
                    ComponentSubType = (int)AudioTypeOutput.Remote,
                    ComponentType = AudioComponentType.Output
                });

            recorder = inputComponent.CreateAudioUnit();

            // enable capture on the input element and keep the output element disabled
            recorder.SetEnableIO(true, AudioUnitScopeType.Input, inputBus);
            recorder.SetEnableIO(false, AudioUnitScopeType.Output, outputBus);

            var audioFormat = new AudioStreamBasicDescription
                {
                    SampleRate = StudentDemo.Globals.SAMPLERATE,
                    Format = AudioFormatType.LinearPCM,
                    FormatFlags = AudioFormatFlags.IsSignedInteger | AudioFormatFlags.IsPacked,
                    FramesPerPacket = 1,
                    ChannelsPerFrame = 1,
                    BitsPerChannel = 16,
                    BytesPerPacket = 2,
                    BytesPerFrame = 2
                };

            // Apply the format to the output scope of the input bus (what the mic
            // delivers to us) and to the input scope of the output bus (what we
            // would feed the speaker).
            recorder.SetAudioFormat(audioFormat, AudioUnitScopeType.Output, inputBus);
            recorder.SetAudioFormat(audioFormat, AudioUnitScopeType.Input, outputBus);

            // AudioInputCallBack fires whenever the mic has a new batch of samples ready
            recorder.SetInputCallback(AudioInputCallBack, AudioUnitScopeType.Global, inputBus);

            // TODO: Disable buffers (requires interop)
            aBuffer = new AudioBuffer
                {
                    NumberChannels = 1,
                    DataByteSize = 512 * 2,
                    Data = System.Runtime.InteropServices.Marshal.AllocHGlobal(512 * 2)
                };
        }
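
The constructor above assumes several members that the snippet does not show. A minimal sketch of plausible declarations, assuming the usual RemoteIO bus numbering (element 1 = input, element 0 = output); the names come from the code, the types and values are assumptions:

        AudioUnit recorder;       // the RemoteIO unit created in the constructor
        AudioBuffer aBuffer;      // scratch buffer reused by the input callback
        const uint inputBus = 1;  // RemoteIO input element (microphone)
        const uint outputBus = 0; // RemoteIO output element (speaker)
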
Example #2
		unsafe static void RenderAudio (CFUrl sourceUrl, CFUrl destinationUrl)
		{
			AudioStreamBasicDescription dataFormat;
			AudioQueueBuffer *buffer = null;
			long currentPacket = 0;
			int packetsToRead = 0;
			AudioStreamPacketDescription [] packetDescs = null;
			bool flushed = false;
			bool done = false;
			int bufferSize;
			
			using (var audioFile = AudioFile.Open (sourceUrl, AudioFilePermission.Read, (AudioFileType) 0)) {
				dataFormat = audioFile.StreamBasicDescription;
				
				using (var queue = new OutputAudioQueue (dataFormat, CFRunLoop.Current, CFRunLoop.CFRunLoopCommonModes)) {
					queue.OutputCompleted += (sender, e) => 
					{
						HandleOutput (audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
					};
					
					// we need to calculate how many packets we read at a time and how big a buffer we need
					// we base this on the size of the packets in the file and an approximate duration for each buffer
					bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;
					
					// first check to see what the max size of a packet is - if it is bigger
					// than our allocation default size, that needs to become larger
					// adjust buffer size to represent about a second of audio based on this format 
					CalculateBytesForTime (dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);
				
					if (isVBR) {
						packetDescs = new AudioStreamPacketDescription [packetsToRead];
					} else {
						packetDescs = null; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
					}
				
					if (audioFile.MagicCookie.Length != 0)
						queue.MagicCookie = audioFile.MagicCookie;
		
					// allocate the input read buffer
					queue.AllocateBuffer (bufferSize, out buffer);
					
					// prepare the capture format
					var captureFormat = AudioStreamBasicDescription.CreateLinearPCM (dataFormat.SampleRate, (uint) dataFormat.ChannelsPerFrame, 32);
					captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;

					queue.SetOfflineRenderFormat (captureFormat, audioFile.ChannelLayout);
					
					// prepare the target format
					var dstFormat = AudioStreamBasicDescription.CreateLinearPCM (dataFormat.SampleRate, (uint) dataFormat.ChannelsPerFrame);

					using (var captureFile = ExtAudioFile.CreateWithUrl (destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
						captureFile.ClientDataFormat = captureFormat;
						
						int captureBufferSize = bufferSize / 2;
						AudioBuffers captureABL = new AudioBuffers (1);
						
						AudioQueueBuffer *captureBuffer;
						queue.AllocateBuffer (captureBufferSize, out captureBuffer);
						
						captureABL[0] = new AudioBuffer () {
							Data = captureBuffer->AudioData,
							NumberChannels = captureFormat.ChannelsPerFrame
						};

						queue.Start ();

						// prime offline rendering with a zero-frame render at time 0
						double ts = 0;
						queue.RenderOffline (ts, captureBuffer, 0);
						
						HandleOutput (audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
						
						while (true) {
							int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;
							
							queue.RenderOffline (ts, captureBuffer, reqFrames);

							captureABL.SetData (0, captureBuffer->AudioData, (int) captureBuffer->AudioDataByteSize);
							var writeFrames = captureABL[0].DataByteSize / captureFormat.BytesPerFrame;
							
							// Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes", 
							//	ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);
							
							captureFile.WriteAsync ((uint) writeFrames, captureABL);
							
							if (flushed)
								break;
							
							ts += writeFrames;
						}
					
						CFRunLoop.Current.RunInMode (CFRunLoop.CFDefaultRunLoopMode, 1, false);
					}
				}
			}
		}
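
RenderAudio relies on two helpers that are not shown: HandleOutput (which reads the next packets from the source file and enqueues them on the queue) and CalculateBytesForTime. A minimal sketch of the latter, following the buffer-sizing logic from Apple's audio queue samples; the 16 KB/64 KB bounds are assumptions, not values recovered from the original:

		static void CalculateBytesForTime (AudioStreamBasicDescription desc, int maxPacketSize, double seconds, out int bufferSize, out int packetCount)
		{
			const int maxBufferSize = 0x10000; // assumed upper bound (64 KB)
			const int minBufferSize = 0x4000;  // assumed lower bound (16 KB)

			if (desc.FramesPerPacket > 0) {
				// packets map to time, so size the buffer for `seconds` of audio
				double packetsForTime = desc.SampleRate / desc.FramesPerPacket * seconds;
				bufferSize = (int) (packetsForTime * maxPacketSize);
			} else {
				// no fixed frames per packet: fall back to a default size
				bufferSize = Math.Max (maxBufferSize, maxPacketSize);
			}

			// clamp to the bounds, but never below one maximum-size packet
			if (bufferSize > maxBufferSize && bufferSize > maxPacketSize)
				bufferSize = maxBufferSize;
			else if (bufferSize < minBufferSize)
				bufferSize = minBufferSize;

			packetCount = bufferSize / maxPacketSize;
		}
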
Example #3
		bool DoConvertFile (CFUrl sourceURL, NSUrl destinationURL, AudioFormatType outputFormat, double outputSampleRate)
		{
			AudioStreamBasicDescription dstFormat = new AudioStreamBasicDescription ();

			// in this sample we should never be on the main thread here
			Debug.Assert (!NSThread.IsMain);

			// transition thread state to State::Running before continuing
			AppDelegate.ThreadStateSetRunning ();
			
			Debug.WriteLine ("DoConvertFile");

			// get the source file
			var sourceFile = AudioFile.Open (sourceURL, AudioFilePermission.Read);
			
			// get the source data format
			var srcFormat = (AudioStreamBasicDescription)sourceFile.DataFormat;

			// setup the output file format
			dstFormat.SampleRate = (outputSampleRate == 0 ? srcFormat.SampleRate : outputSampleRate); // set sample rate
			if (outputFormat == AudioFormatType.LinearPCM) {
				// if the output format is PCM, create a 16-bit int PCM file format description as an example
				dstFormat.Format = outputFormat;
				dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
				dstFormat.BitsPerChannel = 16;
				dstFormat.BytesPerPacket = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
				dstFormat.FramesPerPacket = 1;
				dstFormat.FormatFlags = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
			} else {
				// compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
				dstFormat.Format = outputFormat;
				dstFormat.ChannelsPerFrame = (outputFormat == AudioFormatType.iLBC ? 1 : srcFormat.ChannelsPerFrame); // for iLBC num channels must be 1
				
				// use AudioFormat API to fill out the rest of the description
				var fie = AudioStreamBasicDescription.GetFormatInfo (ref dstFormat);
				if (fie != AudioFormatError.None) {
					Debug.Print ("Cannot create destination format {0:x}", fie);

					AppDelegate.ThreadStateSetDone ();
					return false;
				}
			}

			// create the AudioConverter
			AudioConverterError ce;
			var converter = AudioConverter.Create (srcFormat, dstFormat, out ce);
			Debug.Assert (ce == AudioConverterError.None);

			converter.InputData += EncoderDataProc;

			// if the source has a cookie, get it and set it on the Audio Converter
			ReadCookie (sourceFile, converter);

			// get the actual formats back from the Audio Converter
			srcFormat = converter.CurrentInputStreamDescription;
			dstFormat = converter.CurrentOutputStreamDescription;

			// if encoding to AAC set the bitrate to 192k which is a nice value for this demo
			// kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
			if (dstFormat.Format == AudioFormatType.MPEG4AAC) {
				uint outputBitRate = 192000; // 192k

				// ignore errors, as the setting may be invalid depending on format specifics such as sample rate
				try {
					converter.EncodeBitRate = outputBitRate;
				} catch {
				}

				// get it back and print it out
				outputBitRate = converter.EncodeBitRate;
				Debug.Print ("AAC Encode Bitrate: {0}", outputBitRate);
			}

			// can the Audio Converter resume conversion after an interruption?
			// this property may be queried at any time after construction of the Audio Converter after setting its output format
			// there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
			// construction time since it means less code to execute during or after interruption time
			bool canResumeFromInterruption;
			try {
				canResumeFromInterruption = converter.CanResumeFromInterruption;
				Debug.Print ("Audio Converter {0} continue after interruption!", canResumeFromInterruption ? "CAN" : "CANNOT");
			} catch (Exception e) {
				// if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
				// then the codec being used is not a hardware codec so we're not concerned about codec state
				// we are always going to be able to resume conversion after an interruption

				canResumeFromInterruption = true; // software codecs can always resume
				Debug.Print ("CanResumeFromInterruption: {0}", e.Message);
			}
			
			// create the destination file 
			var destinationFile = AudioFile.Create (destinationURL, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags);

			// set up source buffers and data proc info struct
			afio = new AudioFileIO (32768);
			afio.SourceFile = sourceFile;
			afio.SrcFormat = srcFormat;

			if (srcFormat.BytesPerPacket == 0) {
				// if the source format is VBR, we need to get the maximum packet size
				// use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
				// in the file (without actually scanning the whole file to find the largest packet,
				// as may happen with kAudioFilePropertyMaximumPacketSize)
				afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

				// how many packets can we read for our buffer size?
				afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
				
				// allocate memory for the PacketDescription structures describing the layout of each packet
				afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
			} else {
				// CBR source format
				afio.SrcSizePerPacket = srcFormat.BytesPerPacket;
				afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
			}

			// set up output buffers
			int outputSizePerPacket = dstFormat.BytesPerPacket; // this will be non-zero if the format is CBR
			const int theOutputBufSize = 32768;
			var outputBuffer = Marshal.AllocHGlobal (theOutputBufSize);
			AudioStreamPacketDescription[] outputPacketDescriptions = null;

			if (outputSizePerPacket == 0) {
				// if the destination format is VBR, we need to get max size per packet from the converter
				outputSizePerPacket = (int)converter.MaximumOutputPacketSize;

				// allocate memory for the PacketDescription structures describing the layout of each packet
				outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
			}
			int numOutputPackets = theOutputBufSize / outputSizePerPacket;
			
			// if the destination format has a cookie, get it and set it on the output file
			WriteCookie (converter, destinationFile);
			
			// write destination channel layout
			if (srcFormat.ChannelsPerFrame > 2) {
				WriteDestinationChannelLayout (converter, sourceFile, destinationFile);
			}

			long totalOutputFrames = 0; // used for debugging
			long outputFilePos = 0;
			AudioBuffers fillBufList = new AudioBuffers (1);
			bool error = false;

			// loop to convert data
			Debug.WriteLine ("Converting...");
			while (true) {
				// set up output buffer list
				fillBufList [0] = new AudioBuffer () {
					NumberChannels = dstFormat.ChannelsPerFrame,
					DataByteSize = theOutputBufSize,
					Data = outputBuffer
				};

				// this will block if we're interrupted
				var wasInterrupted = AppDelegate.ThreadStatePausedCheck();
				
				if (wasInterrupted && !canResumeFromInterruption) {
					// this is our interruption termination condition
					// an interruption has occurred but the Audio Converter cannot continue
					Debug.WriteLine ("Cannot resume from interruption");
					error = true;
					break;
				}

				// convert data
				int ioOutputDataPackets = numOutputPackets;
				var fe = converter.FillComplexBuffer (ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
				// if interrupted in the process of the conversion call, we must handle the error appropriately
				if (fe != AudioConverterError.None) {
					Debug.Print ("FillComplexBuffer: {0}", fe);
					error = true;
					break;
				}

				if (ioOutputDataPackets == 0) {
					// this is the EOF condition
					break;
				}

				// write to output file
				var inNumBytes = fillBufList [0].DataByteSize;

				var we = destinationFile.WritePackets (false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
				if (we != 0) {
					Debug.Print ("WritePackets: {0}", we);
					error = true;
					break;
				}

				// advance output file packet position
				outputFilePos += ioOutputDataPackets;
					
				if (dstFormat.FramesPerPacket != 0) { 
					// the format has constant frames per packet
					totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
				} else {
					// variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
					for (var i = 0; i < ioOutputDataPackets; ++i)
						totalOutputFrames += outputPacketDescriptions [i].VariableFramesInPacket;
				}

			}

			Marshal.FreeHGlobal (outputBuffer);

			if (!error) {
				// write out any of the leading and trailing frames for compressed formats only
				if (dstFormat.BitsPerChannel == 0) {
					// log the total output frame count as a sanity check on the conversion
					Debug.Print ("Total number of output frames counted: {0}", totalOutputFrames);
					WritePacketTableInfo (converter, destinationFile);
				}
					
				// write the cookie again - sometimes codecs will update cookies at the end of a conversion
				WriteCookie (converter, destinationFile);
			}

			converter.Dispose ();
			destinationFile.Dispose ();
			sourceFile.Dispose ();

			// transition thread state to State.Done before continuing
			AppDelegate.ThreadStateSetDone ();

			return !error;
		}
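
DoConvertFile calls several helpers that the snippet does not include (ReadCookie, WriteCookie, WriteDestinationChannelLayout, WritePacketTableInfo, EncoderDataProc). A minimal sketch of the two magic-cookie helpers, assuming the standard pattern of copying the codec's private cookie between file and converter; the method names come from the calls above, the bodies are assumptions:

		static void ReadCookie (AudioFile sourceFile, AudioConverter converter)
		{
			// a magic cookie carries codec-private state (e.g. AAC configuration)
			var cookie = sourceFile.MagicCookie;
			if (cookie != null && cookie.Length != 0)
				converter.DecompressionMagicCookie = cookie;
		}

		static void WriteCookie (AudioConverter converter, AudioFile destinationFile)
		{
			var cookie = converter.CompressionMagicCookie;
			if (cookie != null && cookie.Length != 0)
				destinationFile.MagicCookie = cookie;
		}
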
Example #4
		// load audio data from the demo files into soundBuffer[i].Data, which is used by the render proc
		void LoadFiles ()
		{
			const int FilesCount = 2;

			for (int i = 0; i < FilesCount; i++) {
				Debug.Print ("Loading file #{0}", i);

				using (var file = ExtAudioFile.OpenUrl (sourceURL [i])) {

					var clientFormat = file.FileDataFormat;
					clientFormat.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsAudioUnitCanonical;
					clientFormat.ChannelsPerFrame = 1;
					clientFormat.FramesPerPacket = 1;
					clientFormat.BitsPerChannel = 8 * sizeof (int);
					clientFormat.BytesPerPacket =
						clientFormat.BytesPerFrame = clientFormat.ChannelsPerFrame * sizeof (int);

					// set the client format to be what we want back
					file.ClientDataFormat = clientFormat;

					double rateRatio = GraphSampleRate / clientFormat.SampleRate;

					var numFrames = file.FileLengthFrames;
					numFrames = (uint)(numFrames * rateRatio); // account for any sample rate conversion
					Debug.Print ("Number of Sample Frames after rate conversion (if any): {0}", numFrames);

					// set up our buffer
					soundBuffer[i].TotalFrames = numFrames;

					UInt32 samples = (uint) (numFrames * clientFormat.ChannelsPerFrame);
					var data_size = (int)(sizeof(uint) * samples);
					soundBuffer[i].Data = Marshal.AllocHGlobal (data_size);

					// set up an AudioBufferList to read data into
					var bufList = new AudioBuffers (1);
					bufList [0] = new AudioBuffer {
						NumberChannels = 1,
						Data = soundBuffer [i].Data,
						DataByteSize = data_size
					};

					ExtAudioFileError error;
					file.Read ((uint) numFrames, bufList, out error);
					if (error != ExtAudioFileError.OK)
						throw new ApplicationException ("ExtAudioFile.Read failed: " + error);
				}
			}
		}
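
LoadFiles assumes a soundBuffer array plus a few other members that the snippet does not show. A minimal sketch of plausible declarations, modeled on the Apple mixer samples this code appears to be ported from; only the names used above are taken from the code, the shapes are assumptions:

		struct SoundBuffer
		{
			public IntPtr Data;      // native memory holding the decoded samples
			public long TotalFrames; // number of frames in Data
			public int SampleNum;    // read position, advanced by the render proc
		}

		SoundBuffer[] soundBuffer = new SoundBuffer[2];
		CFUrl[] sourceURL;      // URLs of the two demo files, set elsewhere
		double GraphSampleRate; // output sample rate of the audio graph
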
Example #5
        private void startTalking(UdpClient audioCaller)
        {
            // Stop the old recording session

            // Earlier approach (NAudio-style WaveIn recorder), kept for reference:
            //    recorder.WaveFormat = new WaveFormat(16000, 16, 1);
            //    recorder.BufferMilliseconds = 50;
            //    recorder.DataAvailable += SendAudio; // add event to SendAudio

            // Earlier approach (InputAudioQueue), kept for reference:
            //    recorder = new InputAudioQueue (playerFormat);
            //
            //    for (int i = 0; i < BUFFERCOUNT; i++) {
            //        IntPtr aBUff;
            //        //recorder.AllocateBuffer (AUDIOBUFFERSIZE, out aBUff);
            //        byteSize = AUDIOBUFFERSIZE * playerFormat.BytesPerPacket;
            //        recorder.AllocateBufferWithPacketDescriptors (byteSize, AUDIOBUFFERSIZE, out aBUff);
            //        recorder.EnqueueBuffer (aBUff, byteSize, null);
            //        Console.WriteLine ("Buffer allocated, enqueueing");
            //    }

            // New approach: capture through a RemoteIO audio unit

            var inputComponent = AudioComponent.FindNextComponent(
                null,
                new AudioComponentDescription
                {
                    ComponentFlags = 0,
                    ComponentFlagsMask = 0,
                    ComponentManufacturer = AudioComponentManufacturerType.Apple,
                    ComponentSubType = (int)AudioTypeOutput.Remote,
                    ComponentType = AudioComponentType.Output
                });

            recorder = inputComponent.CreateAudioUnit();
            recorder.SetEnableIO(true, AudioUnitScopeType.Input, inputBus);
            recorder.SetEnableIO(false, AudioUnitScopeType.Output, outputBus);

            var audioFormat = new AudioStreamBasicDescription
                {
                    SampleRate = Globals.SAMPLERATE,
                    Format = AudioFormatType.LinearPCM,
                    FormatFlags = AudioFormatFlags.IsSignedInteger | AudioFormatFlags.IsPacked,
                    FramesPerPacket = 1,
                    ChannelsPerFrame = 1,
                    BitsPerChannel = 16,
                    BytesPerPacket = 2,
                    BytesPerFrame = 2
                };

            recorder.SetAudioFormat(audioFormat, AudioUnitScopeType.Output, inputBus);
            recorder.SetAudioFormat(audioFormat, AudioUnitScopeType.Input, outputBus);

            recorder.SetInputCallback(AudioInputCallBack, AudioUnitScopeType.Global, inputBus);

            // TODO: Disable buffers (requires interop)
            aBuffer = new AudioBuffer
                {
                    NumberChannels = 1,
                    DataByteSize = 512 * 2,
                    Data = System.Runtime.InteropServices.Marshal.AllocHGlobal(512 * 2)
                };
            isTalking = true;
            //recorder.InputCompleted += SendAudio;
            //recorder.Start ();

            recorder.Initialize ();
            recorder.Start ();
        }
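
A matching teardown is not shown in the original. A minimal sketch, assuming the same recorder/aBuffer/isTalking members; stopTalking itself is a hypothetical counterpart:

        private void stopTalking()
        {
            isTalking = false;
            recorder.Stop();
            recorder.Dispose(); // releases the RemoteIO unit
            System.Runtime.InteropServices.Marshal.FreeHGlobal(aBuffer.Data); // free the scratch buffer
        }
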
Example #6
        AudioUnitStatus AudioInputCallBack(AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioUnit audioUnit)
        {
            MemoryStream ms = new MemoryStream();

            String s = "a000";
            byte[] bufWriter = Encoding.ASCII.GetBytes(s.ToCharArray(), 0, 4);
            ms.Write(bufWriter, 0, 4);

            bufWriter = BitConverter.GetBytes(AudioSessionId);
            if (BitConverter.IsLittleEndian) Array.Reverse(bufWriter);
            ms.Write(bufWriter, 0, 4);

            long time = (long) (DateTime.UtcNow - new DateTime(1970, 1, 1)).TotalMilliseconds;

            //Console.WriteLine ((time - lasttime) + " ms delay");
            lasttime = time;
            bufWriter = BitConverter.GetBytes(time);
            if (BitConverter.IsLittleEndian) Array.Reverse(bufWriter);
            ms.Write(bufWriter, 0, 8);

            var buffer = new AudioBuffer()
                {
                    NumberChannels = 1,
                    DataByteSize = (int)numberFrames * 2,
                    Data = System.Runtime.InteropServices.Marshal.AllocHGlobal((int)numberFrames * 2)
                };

            var bufferList = new AudioBuffers(1);
            bufferList[0] = buffer;

            var status = audioUnit.Render(ref actionFlags, timeStamp, busNumber, numberFrames, bufferList);
            if (status != AudioUnitStatus.OK)
            {
                // don't send garbage if the render failed; free the scratch buffer first
                System.Runtime.InteropServices.Marshal.FreeHGlobal(buffer.Data);
                return status;
            }

            var send = new byte[buffer.DataByteSize];
            System.Runtime.InteropServices.Marshal.Copy(buffer.Data, send, 0, send.Length);

            ms.Write (send, 0, send.Length);

            Console.Write("\n Buffer: ");
            foreach (byte b in send)
                Console.Write("\\x" + b);
            Console.Write("\n");

            System.Runtime.InteropServices.Marshal.FreeHGlobal(buffer.Data);

            byte[] sendbuf = ms.ToArray();
            if (sendbuf.Length > 4096) throw new Exception("Packet size too large!");
            Task tk = Task.Factory.StartNew(() =>
                {
                    try
                    {
                        var aSender = audioCaller.BeginSend(sendbuf, sendbuf.Length, null, null);
                        aSender.AsyncWaitHandle.WaitOne(TimeSpan.FromSeconds(3));
                        if (aSender.IsCompleted) audioCaller.EndSend(aSender);
                    }
                    catch
                    {

                    }
                });

            return AudioUnitStatus.OK;
        }
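
Each UDP packet built above carries a 4-byte ASCII tag ("a000"), a 4-byte big-endian session id, an 8-byte big-endian millisecond timestamp, and then the raw 16-bit PCM samples. A minimal sketch of the receiving side under that layout; ParseAudioPacket is a hypothetical helper, not part of the original code:

        static void ParseAudioPacket(byte[] packet)
        {
            string tag = Encoding.ASCII.GetString(packet, 0, 4); // expect "a000"

            var idBytes = new byte[4];
            Array.Copy(packet, 4, idBytes, 0, 4);
            if (BitConverter.IsLittleEndian) Array.Reverse(idBytes);
            int sessionId = BitConverter.ToInt32(idBytes, 0);

            var tsBytes = new byte[8];
            Array.Copy(packet, 8, tsBytes, 0, 8);
            if (BitConverter.IsLittleEndian) Array.Reverse(tsBytes);
            long timestampMs = BitConverter.ToInt64(tsBytes, 0);

            // bytes 16.. are 16-bit mono PCM, ready for the playback path
            int pcmLength = packet.Length - 16;
        }
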
Example #7
        AudioUnitStatus AudioInputCallBack(AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioUnit audioUnit)
        {
            var buffer = new AudioBuffer()
                {
                    NumberChannels = 1,
                    DataByteSize = (int)numberFrames * 2,
                    Data = System.Runtime.InteropServices.Marshal.AllocHGlobal((int)numberFrames * 2)
                };

            var bufferList = new AudioBuffers(1);
            bufferList[0] = buffer;

            var status = audioUnit.Render(ref actionFlags, timeStamp, busNumber, numberFrames, bufferList);
            if (status != AudioUnitStatus.OK)
            {
                // bail out early if the render failed; free the scratch buffer first
                System.Runtime.InteropServices.Marshal.FreeHGlobal(buffer.Data);
                return status;
            }

            var send = new byte[buffer.DataByteSize];
            System.Runtime.InteropServices.Marshal.Copy(buffer.Data, send, 0, send.Length);

            var handler = DataAvailable;
            if (handler != null)
                handler(this, send);

            Console.Write("\n Buffer: ");
            foreach (byte b in send)
                Console.Write("\\x" + b);
            Console.Write("\n");

            System.Runtime.InteropServices.Marshal.FreeHGlobal(buffer.Data);

            return AudioUnitStatus.OK;
        }
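
A minimal sketch of the event this callback raises; the delegate shape is inferred from the handler(this, send) call above, and the subscriber is illustrative only:

        public event EventHandler<byte[]> DataAvailable;

        // example subscriber: forward each captured PCM chunk over UDP
        // processor.DataAvailable += (sender, pcm) => udpClient.Send(pcm, pcm.Length);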