Example #1
        public void CreateLinearPCM()
        {
            var pcm = AudioStreamBasicDescription.CreateLinearPCM();

            Assert.IsNotNull(pcm.FormatName);
            Assert.IsFalse(pcm.IsVariableBitrate);
        }
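
The parameterless overload used above fills every field with the binding's defaults. A minimal sketch (assuming the documented defaults of 44100 Hz, 2 channels, 16 bits per channel, little-endian) that inspects the result:

        public void InspectLinearPCMDefaults()
        {
            var pcm = AudioStreamBasicDescription.CreateLinearPCM();

            Console.WriteLine(pcm.SampleRate);       // 44100 (assumed default)
            Console.WriteLine(pcm.ChannelsPerFrame); // 2 (assumed default)
            Console.WriteLine(pcm.BitsPerChannel);   // 16 (assumed default)
            Console.WriteLine(pcm.Format);           // AudioFormatType.LinearPCM
        }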
Example #2
        public SongPlayer()
        {
            playingNotes = new List<PlayingNote>();
#if __IOS__
            streamDesc = AudioStreamBasicDescription.CreateLinearPCM(PLAYBACK_RATE, 1, 16, false);
#endif
        }
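
PLAYBACK_RATE is declared elsewhere in SongPlayer; a hypothetical declaration consistent with its use as a sample rate:

        // Hypothetical: the sample rate (Hz) SongPlayer plays and records at.
        public const int PLAYBACK_RATE = 44100;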
Example #3
        public void ProcessingTap()
        {
            if (!TestRuntime.CheckSystemAndSDKVersion(6, 0))
            {
                Assert.Inconclusive("AudioQueueProcessingTapNew requires iOS 6");
            }

            var aq = new InputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());
            AudioQueueStatus ret;
            bool             called = false;

            using (var tap = aq.CreateProcessingTap(
                       delegate(AudioQueueProcessingTap audioQueueTap, uint numberOfFrames, ref AudioTimeStamp timeStamp, ref AudioQueueProcessingTapFlags flags, AudioBuffers data) {
                called = true;
                return 33;
            }, AudioQueueProcessingTapFlags.PreEffects, out ret)) {
                Assert.AreEqual(AudioQueueStatus.Ok, ret, "#1");

                unsafe {
                    AudioQueueBuffer *buffer;
                    Assert.AreEqual(AudioQueueStatus.Ok, aq.AllocateBuffer(5000, out buffer), "#2");
                    Assert.AreEqual(AudioQueueStatus.Ok, aq.EnqueueBuffer(buffer), "#3");
                    //Assert.AreEqual (AudioQueueStatus.Ok, aq.Start (), "#4");
                }
            }

            //Assert.That (called, Is.True, "#10");
        }
Example #4
        /// <summary>
        /// Plays a single note. Separate from the rest of the song playing code
        /// </summary>
        public static void PlayNote(Instrument.Note note)
        {
            lock (syncObj)
            {
#if __ANDROID__
                if (playingTrack != null)
                {
                    // Pause() halts playback immediately, whereas Stop() would play out any buffered audio
                    playingTrack.Pause();
                    playingTrack.Release();
                    playingTrack.Dispose();
                }
#endif
#if __IOS__
                if (audioQueue != null)
                {
                    //Pass true to stop immediately
                    audioQueue.Stop(true);
                    audioQueue.Dispose();
                }
#endif

#if __ANDROID__
                playingTrack = new AudioTrack(
                    // Stream type
                    Android.Media.Stream.Music,
                    // Frequency
                    SongPlayer.PLAYBACK_RATE,
                    // Mono or stereo
                    ChannelOut.Mono,
                    // Audio encoding
                    Android.Media.Encoding.Pcm16bit,
                    // Length of the audio clip in bytes
                    (note.data.Length * 2),
                    // Mode. Stream or static.
                    AudioTrackMode.Static);

                playingTrack.Write(note.data, 0, note.data.Length);
                playingTrack.Play();
#endif
#if __IOS__
                audioQueue = new OutputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM(SongPlayer.PLAYBACK_RATE, 1, 16, false));
                unsafe
                {
                    AudioQueueBuffer *buffer;
                    audioQueue.AllocateBuffer(note.data.Length * 2, out buffer);

                    fixed(short *beatData = note.data)
                    {
                        buffer->CopyToAudioData((IntPtr)beatData, note.data.Length * 2);
                    }

                    audioQueue.EnqueueBuffer((IntPtr)buffer, note.data.Length * 2, null);
                }

                audioQueue.Start();
#endif
            }
        }
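
PlayNote relies on static members declared elsewhere in the class; hypothetical declarations consistent with the usage above:

        static readonly object syncObj = new object();
#if __ANDROID__
        static AudioTrack playingTrack;
#endif
#if __IOS__
        static OutputAudioQueue audioQueue;
#endif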
Example #5
        public void Properties()
        {
            var b = new InputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            b.HardwareCodecPolicy = AudioQueueHardwareCodecPolicy.PreferHardware;

            Assert.That(b.HardwareCodecPolicy, Is.EqualTo(AudioQueueHardwareCodecPolicy.PreferHardware), "#1");
        }
Example #6
        public void ChannelAssignments()
        {
            var aq = new OutputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            Assert.AreEqual(AudioQueueStatus.Ok, aq.SetChannelAssignments(
                                new AudioQueueChannelAssignment("11", 0),
                                new AudioQueueChannelAssignment("22", 1)
                                ));
        }
Example #7
        public void Properties()
        {
            TestRuntime.RequestMicrophonePermission();

            var b = new InputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            b.HardwareCodecPolicy = AudioQueueHardwareCodecPolicy.PreferHardware;

            Assert.That(b.HardwareCodecPolicy, Is.EqualTo(AudioQueueHardwareCodecPolicy.PreferHardware), "#1");
        }
Example #8
        /// <summary>
        /// Queues up a file to record to the next time the beat is played
        /// </summary>
        /// <param name="fileName">File name.</param>
        public void QueueFileRecording(string fileName)
        {
            _file = ExtAudioFile.CreateWithUrl(
                new Foundation.NSUrl(fileName, false),
                AudioFileType.WAVE,
                AudioStreamBasicDescription.CreateLinearPCM(),
                AudioFileFlags.EraseFlags,
                out ExtAudioFileError e
                );

            _fileRecordingQueued = true;
        }
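
ExtAudioFile.CreateWithUrl reports failure through the out parameter rather than by throwing, so a sketch of the check this snippet omits:

            // after ExtAudioFile.CreateWithUrl:
            if (e != ExtAudioFileError.OK || _file == null)
                throw new InvalidOperationException("ExtAudioFile.CreateWithUrl failed: " + e);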
Example #9
        public void ChannelAssignments()
        {
            if (!TestRuntime.CheckSystemAndSDKVersion(6, 0))
            {
                Assert.Inconclusive("Requires iOS 6");
            }

            var aq = new OutputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            Assert.AreEqual(AudioQueueStatus.Ok, aq.SetChannelAssignments(
                                new AudioQueueChannelAssignment("11", 0),
                                new AudioQueueChannelAssignment("22", 1)
                                ));
        }
Example #10
        void prepareAudioUnit()
        {
            // AVAudioSession replaces the deprecated AudioSession API
            var     session = AVAudioSession.SharedInstance();
            NSError error;

            if (session == null)
            {
                var alert = new UIAlertView("Session error", "Unable to create audio session", null, "Cancel");
                alert.Clicked += delegate
                {
                    alert.DismissWithClickedButtonIndex(0, true);
                };
                alert.Show();
                return; // without a session there is nothing to configure
            }
            session.SetActive(true);
            session.SetCategory(AVAudioSessionCategory.PlayAndRecord);
            session.SetPreferredIOBufferDuration(0.005, out error);

            // Getting AudioComponent Remote output
            _audioComponent = AudioComponent.FindComponent(AudioTypeOutput.Remote);

            // creating an audio unit instance
            _audioUnit = new AudioUnit(_audioComponent);

            // turning on microphone
            _audioUnit.SetEnableIO(true,
                                   AudioUnitScopeType.Input,
                                   1 // Remote Input
                                   );

            // setting audio format
            _audioUnit.SetAudioFormat(_dstFormat,
                                      AudioUnitScopeType.Input,
                                      0 // Remote Output
                                      );

            var format = AudioStreamBasicDescription.CreateLinearPCM(_sampleRate, bitsPerChannel: 32);

            format.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsNativeFloat;
            _audioUnit.SetAudioFormat(format, AudioUnitScopeType.Output, 1);

            // setting callback method
            _audioUnit.SetRenderCallback(_audioUnit_RenderCallback, AudioUnitScopeType.Global);

            _audioUnit.Initialize();
            _audioUnit.Start();
        }
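
_audioUnit_RenderCallback is defined elsewhere; a minimal sketch of a compatible callback (assuming the 32-bit float format configured above) that renders silence:

        unsafe AudioUnitStatus _audioUnit_RenderCallback(AudioUnitRenderActionFlags actionFlags,
                                                         AudioTimeStamp timeStamp,
                                                         uint busNumber,
                                                         uint numberFrames,
                                                         AudioBuffers data)
        {
            // write samples into each buffer; zeroing them yields silence
            for (int i = 0; i < data.Count; i++)
            {
                var samples = (float*)data[i].Data;
                int count = data[i].DataByteSize / sizeof(float);
                for (int n = 0; n < count; n++)
                    samples[n] = 0f;
            }
            return AudioUnitStatus.NoError;
        }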
Example #11
        private void UpdateSampleRates()
        {
            if (_audioUnit != null && !_audioUnit.IsPlaying)
            {
                _audioFormat             = AudioStreamBasicDescription.CreateLinearPCM(_sampleRate, bitsPerChannel: 32);
                _audioFormat.SampleRate  = _sampleRate;
                _audioFormat.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsNativeFloat;

                // setting audio format
                _audioUnit.SetAudioFormat(_audioFormat,
                                          AudioUnitScopeType.Input,
                                          0 // Remote Output
                                          );

                _audioUnit.SetAudioFormat(_audioFormat, AudioUnitScopeType.Output, 1);
            }
        }
Example #12
        public void StartRecord()
        {
            var audioFormat = AudioStreamBasicDescription.CreateLinearPCM();

            inputQueue = new InputAudioQueue(audioFormat);

            inputQueue.InputCompleted += InputQueueInputCompleted;
            var bufferByteSize = 2048 * audioFormat.BytesPerPacket;

            IntPtr bufferPtr;

            for (var index = 0; index < 3; index++)
            {
                inputQueue.AllocateBufferWithPacketDescriptors(bufferByteSize, 2048, out bufferPtr);
                inputQueue.EnqueueBuffer(bufferPtr, bufferByteSize, null);
            }

            inputQueue.Start();
        }
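
InputQueueInputCompleted is the handler wired up above; a minimal sketch that drains the captured bytes and re-enqueues the buffer so recording keeps running (field names follow the snippet above):

        unsafe void InputQueueInputCompleted(object sender, InputCompletedEventArgs e)
        {
            var buffer = (AudioQueueBuffer*)e.IntPtrBuffer;

            // consume buffer->AudioData / buffer->AudioDataByteSize here ...

            // hand the buffer back to the queue so capture continues
            inputQueue.EnqueueBuffer(e.IntPtrBuffer, (int)buffer->AudioDataByteSize, null);
        }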
Example #13
        public unsafe Task PlayOnce(System.IO.Stream stream)
        {
            return Task.Run(() =>
            {
                try
                {
                    int sampleRate = 16000;
                    uint channels = 1;
                    uint bitsPerSample = 16;

                    if (_queue != null)
                    {
                        _queue.BufferCompleted -= Queue_BufferCompleted;
                        _queue.Stop(true);
                    }

                    var format = AudioStreamBasicDescription.CreateLinearPCM(sampleRate, channels, bitsPerSample);
                    _queue = new OutputAudioQueue(format);
                    _queue.BufferCompleted += Queue_BufferCompleted;
                    _queue.Volume = 1;

                    var buffer1 = new byte[stream.Length];
                    int offset = 0, read;
                    while (offset < buffer1.Length && (read = stream.Read(buffer1, offset, buffer1.Length - offset)) > 0)
                        offset += read; // Stream.Read may return fewer bytes than requested
                    _queue.AllocateBuffer(buffer1.Length, out AudioQueueBuffer* buffer);

                    GCHandle pinned = GCHandle.Alloc(buffer1, GCHandleType.Pinned);
                    buffer->CopyToAudioData(pinned.AddrOfPinnedObject(), buffer1.Length);
                    buffer->AudioDataByteSize = (uint)buffer1.Length;
                    pinned.Free(); // CopyToAudioData copies the bytes, so the pin can be released here

                    _queue.EnqueueBuffer(buffer, null);
                    _queue.Start();
                }
                catch (Exception)
                {
                    // playback failures are deliberately swallowed; log here if needed
                }
            });
        }
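
Queue_BufferCompleted is referenced above; a minimal sketch that releases each queue buffer once the queue has finished playing it:

        void Queue_BufferCompleted(object sender, BufferCompletedEventArgs e)
        {
            // the queue is done with this buffer; release it
            _queue.FreeBuffer(e.IntPtrBuffer);
        }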
Example #14
        void InitAudioQueue()
        {
            // create our audio queue & configure buffers
            var audioFormat = AudioStreamBasicDescription.CreateLinearPCM(SampleRate, (uint)ChannelCount, (uint)BitsPerSample);

            audioQueue = new InputAudioQueue(audioFormat);
            audioQueue.InputCompleted += QueueInputCompleted;

            // calculate our buffer size and make sure it's not too big
            var bufferByteSize = (int)(TargetMeasurementTime / 1000F /*ms to sec*/ * SampleRate * audioFormat.BytesPerPacket);

            bufferByteSize = bufferByteSize < MaxBufferSize ? bufferByteSize : MaxBufferSize;

            for (var index = 0; index < CountAudioBuffers; index++)
            {
                var bufferPtr = IntPtr.Zero;

                BufferOperation(() => audioQueue.AllocateBuffer(bufferByteSize, out bufferPtr), () =>
                {
                    BufferOperation(() => audioQueue.EnqueueBuffer(bufferPtr, bufferByteSize, null), () => Debug.WriteLine("AudioQueue buffer enqueued :: {0} of {1}", index + 1, CountAudioBuffers));
                });
            }
        }
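
BufferOperation is a helper defined elsewhere; a hypothetical implementation consistent with the calls above:

        // Hypothetical helper: run an AudioQueue call, continue only on success.
        void BufferOperation(Func<AudioQueueStatus> bufferFn, Action onSuccess)
        {
            var status = bufferFn();
            if (status == AudioQueueStatus.Ok)
                onSuccess();
            else
                Debug.WriteLine("AudioQueue operation failed :: {0}", status);
        }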
Example #15
        public void ChannelAssignments()
        {
            var aq = new OutputAudioQueue(AudioStreamBasicDescription.CreateLinearPCM());

            var route   = global::AVFoundation.AVAudioSession.SharedInstance().CurrentRoute;
            var outputs = route.Outputs;

            if (outputs.Length > 0)
            {
                var port        = outputs [0];
                var assignments = new List<AudioQueueChannelAssignment>();
                var id          = port.UID;
                for (int i = 0; i < aq.AudioStreamDescription.ChannelsPerFrame; i++)
                {
                    assignments.Add(new AudioQueueChannelAssignment(id, (uint)i));
                }
                Assert.AreEqual(AudioQueueStatus.Ok, aq.SetChannelAssignments(assignments.ToArray()));
            }
            else
            {
                Assert.Ignore("No outputs in the current route ({0})", route.Description);
            }
        }
Example #16
        void prepareAudioUnit()
        {
            // AudioSession
            AudioSession.Initialize();
            AudioSession.SetActive(true);
            AudioSession.Category = AudioSessionCategory.PlayAndRecord;
            AudioSession.PreferredHardwareIOBufferDuration = 0.005f;

            // Getting AudioComponent Remote output
            _audioComponent = AudioComponent.FindComponent(AudioTypeOutput.Remote);

            // creating an audio unit instance
            _audioUnit = new AudioUnit(_audioComponent);

            // turning on microphone
            _audioUnit.SetEnableIO(true,
                                   AudioUnitScopeType.Input,
                                   1 // Remote Input
                                   );

            // setting audio format
            _audioUnit.SetAudioFormat(_dstFormat,
                                      AudioUnitScopeType.Input,
                                      0 // Remote Output
                                      );

            var format = AudioStreamBasicDescription.CreateLinearPCM(_sampleRate, bitsPerChannel: 32);

            format.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsAudioUnitCanonical;
            _audioUnit.SetAudioFormat(format, AudioUnitScopeType.Output, 1);

            // setting callback method
            _audioUnit.SetRenderCallback(_audioUnit_RenderCallback, AudioUnitScopeType.Global);

            _audioUnit.Initialize();
            _audioUnit.Start();
        }
Example #17
        void prepareExtAudioFile()
        {
            // Opening Audio File
            _extAudioFile = ExtAudioFile.OpenUrl(_url);

            // Getting file data format
            _srcFormat = _extAudioFile.FileDataFormat;

            // Set the output format's channel count to match the input format
            _dstFormat              = AudioStreamBasicDescription.CreateLinearPCM(channelsPerFrame: (uint)_srcFormat.ChannelsPerFrame, bitsPerChannel: 32);
            _dstFormat.FormatFlags |= AudioFormatFlags.IsNonInterleaved;

            // setting the reading format to the audio unit canonical format
            _extAudioFile.ClientDataFormat = _dstFormat;

            // getting total frames
            _totalFrames = _extAudioFile.FileLengthFrames;

            // Allocating AudioBufferList
            _buffer = new AudioBuffers(_srcFormat.ChannelsPerFrame);
            for (int i = 0; i < _buffer.Count; ++i)
            {
                int size = (int)(sizeof(uint) * _totalFrames);
                _buffer.SetData(i, Marshal.AllocHGlobal(size), size);
            }
            _numberOfChannels = _srcFormat.ChannelsPerFrame;

            // Reading all frame into the buffer
            ExtAudioFileError status;

            _extAudioFile.Read((uint)_totalFrames, _buffer, out status);
            if (status != ExtAudioFileError.OK)
            {
                throw new ApplicationException();
            }
        }
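
The AudioBuffers allocated above wrap unmanaged memory from Marshal.AllocHGlobal; a sketch of the matching cleanup:

        void DisposeBuffers()
        {
            for (int i = 0; i < _buffer.Count; ++i)
                Marshal.FreeHGlobal(_buffer[i].Data);
            _buffer.Dispose();
        }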
Example #18
        unsafe static void RenderAudio(CFUrl sourceUrl, CFUrl destinationUrl)
        {
            AudioStreamBasicDescription dataFormat;
            AudioQueueBuffer *          buffer = null;
            long currentPacket = 0;
            int  packetsToRead = 0;

            AudioStreamPacketDescription[] packetDescs = null;
            bool flushed = false;
            bool done    = false;
            int  bufferSize;

            using (var audioFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read, (AudioFileType)0)) {
                dataFormat = audioFile.StreamBasicDescription;

                using (var queue = new OutputAudioQueue(dataFormat, CFRunLoop.Current, CFRunLoop.ModeCommon)) {
                    queue.BufferCompleted += (sender, e) => {
                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
                    };

                    // we need to calculate how many packets we read at a time and how big a buffer we need
                    // we base this on the size of the packets in the file and an approximate duration for each buffer
                    bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;

                    // first check to see what the max size of a packet is - if it is bigger
                    // than our allocation default size, that needs to become larger
                    // adjust buffer size to represent about a second of audio based on this format
                    CalculateBytesForTime(dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);

                    if (isVBR)
                    {
                        packetDescs = new AudioStreamPacketDescription [packetsToRead];
                    }
                    else
                    {
                        packetDescs = null;                         // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
                    }

                    if (audioFile.MagicCookie.Length != 0)
                    {
                        queue.MagicCookie = audioFile.MagicCookie;
                    }

                    // allocate the input read buffer
                    queue.AllocateBuffer(bufferSize, out buffer);

                    // prepare the capture format
                    var captureFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame, 32);
                    captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;

                    queue.SetOfflineRenderFormat(captureFormat, audioFile.ChannelLayout);

                    // prepare the target format
                    var dstFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame);

                    using (var captureFile = ExtAudioFile.CreateWithUrl(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
                        captureFile.ClientDataFormat = captureFormat;

                        int          captureBufferSize = bufferSize / 2;
                        AudioBuffers captureABL        = new AudioBuffers(1);

                        AudioQueueBuffer *captureBuffer;
                        queue.AllocateBuffer(captureBufferSize, out captureBuffer);

                        captureABL [0] = new AudioBuffer()
                        {
                            Data           = captureBuffer->AudioData,
                            NumberChannels = captureFormat.ChannelsPerFrame
                        };

                        queue.Start();

                        double ts = 0;
                        queue.RenderOffline(ts, captureBuffer, 0);

                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);

                        while (true)
                        {
                            int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;

                            queue.RenderOffline(ts, captureBuffer, reqFrames);

                            captureABL.SetData(0, captureBuffer->AudioData, (int)captureBuffer->AudioDataByteSize);
                            var writeFrames = captureABL [0].DataByteSize / captureFormat.BytesPerFrame;

                            // Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes",
                            // ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);

                            captureFile.WriteAsync((uint)writeFrames, captureABL);

                            if (flushed)
                            {
                                break;
                            }

                            ts += writeFrames;
                        }

                        CFRunLoop.Current.RunInMode(CFRunLoop.ModeDefault, 1, false);
                    }
                }
            }
        }
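
CalculateBytesForTime is a helper from the same sample; a sketch modeled on Apple's AudioQueue sample code that sizes the buffer for roughly `seconds` of audio:

        static void CalculateBytesForTime(AudioStreamBasicDescription desc, int maxPacketSize, double seconds, out int bufferSize, out int packetCount)
        {
            const int maxBufferSize = 0x10000; // 64 KB
            const int minBufferSize = 0x4000;  // 16 KB

            if (desc.FramesPerPacket > 0)
                bufferSize = (int)(desc.SampleRate / desc.FramesPerPacket * seconds) * maxPacketSize;
            else
                bufferSize = Math.Max(maxBufferSize, maxPacketSize); // no frame count available: use a generous default

            bufferSize = Math.Min(maxBufferSize, Math.Max(minBufferSize, bufferSize));
            packetCount = bufferSize / maxPacketSize;
        }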
Example #19
        /// <summary>
        /// Renders the given number of seconds to the given wav file
        /// </summary>
        /// <param name="fileName">File name.</param>
        /// <param name="seconds">Seconds.</param>
        public void RenderToFile(string fileName, double seconds)
        {
            long samples = (long)(seconds * Metronome.SampleRate);

            var inputStream = MixerNode.GetAudioFormat(AudioUnitScopeType.Output);

            var outputStream = AudioStreamBasicDescription.CreateLinearPCM(44100, 2);

            AudioConverter converter = AudioConverter.Create(inputStream, outputStream);

            var file = ExtAudioFile.CreateWithUrl(
                new Foundation.NSUrl(fileName, false),
                AudioFileType.WAVE,
                outputStream,
                AudioFileFlags.EraseFlags,
                out ExtAudioFileError e
                );

            long samplesRead = 0;

            // initialize the buffers
            var buffers = new AudioBuffers(2);

            buffers[0] = new AudioBuffer()
            {
                DataByteSize   = BufferSize * 4,
                NumberChannels = 1,
                Data           = Marshal.AllocHGlobal(sizeof(float) * BufferSize)
            };
            buffers[1] = new AudioBuffer()
            {
                DataByteSize   = BufferSize * 4,
                NumberChannels = 1,
                Data           = Marshal.AllocHGlobal(sizeof(float) * BufferSize)
            };

            var convBuffers = new AudioBuffers(1);

            convBuffers[0] = new AudioBuffer()
            {
                DataByteSize   = BufferSize * 4,
                NumberChannels = 2,
                Data           = Marshal.AllocHGlobal(sizeof(float) * BufferSize)
            };

            while (samples > 0)
            {
                int numSamples = (int)(Math.Min(BufferSize, samples));

                // get samples from the mixer
                Render((uint)numSamples, buffers, samplesRead);

                // convert to the file's format
                converter.ConvertComplexBuffer(numSamples, buffers, convBuffers);

                // write samples to the file
                var error = file.Write((uint)numSamples, convBuffers);
                if (error != ExtAudioFileError.OK)
                {
                    throw new ApplicationException();
                }

                samples     -= BufferSize;
                samplesRead += numSamples;
            }

            buffers.Dispose();
            convBuffers.Dispose();
            converter.Dispose();
            file.Dispose();
        }
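
BufferSize and Render are members of the same class; hypothetical declarations consistent with the calls above:

        // Hypothetical: frames rendered per pass.
        const int BufferSize = 512;

        // Render(uint frames, AudioBuffers buffers, long offset) is assumed to pull
        // `frames` frames from the mixer graph into `buffers`, starting at frame `offset`.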
Example #20
        public Mixer()
        {
            BuildAUGraph();

            _converter = AudioConverter.Create(MixerNode.GetAudioFormat(AudioUnitScopeType.Output), AudioStreamBasicDescription.CreateLinearPCM());

            Metronome.Instance.TempoChanged += TempoChanged;

            _countOff = new PitchStream(StreamInfoProvider.GetDefault(), null);
            _countOff.IntervalLoop = new SampleIntervalLoop(_countOff, new double[] { 1 });
            _countOff.AddFrequency("A4");
        }
Example #21
        public void InitAudio()
        {
            var     session = AVAudioSession.SharedInstance();
            NSError error;

            if (session == null)
            {
                var alert = new UIAlertView("Session error", "Unable to create audio session", null, "Cancel");
                alert.Clicked += delegate
                {
                    alert.DismissWithClickedButtonIndex(0, true);
                };
                alert.Show();
                return; // without a session there is nothing to configure
            }
            session.SetActive(false);
            session.SetCategory(AVAudioSessionCategory.Playback, AVAudioSessionCategoryOptions.AllowBluetooth | AVAudioSessionCategoryOptions.DefaultToSpeaker | AVAudioSessionCategoryOptions.DuckOthers); // Needed so we can listen to remote events

            notification = AVAudioSession.Notifications.ObserveInterruption((sender, args) => {
                /* Handle the audio interruption here */

                if (args.InterruptionType == AVAudioSessionInterruptionType.Began)
                {
                    if (_audioUnit != null && _audioUnit.IsPlaying)
                    {
                        _audioUnit.Stop();
                    }
                }

                System.Diagnostics.Debug.WriteLine("Notification: {0}", args.Notification);

                System.Diagnostics.Debug.WriteLine("InterruptionType: {0}", args.InterruptionType);
                System.Diagnostics.Debug.WriteLine("Option: {0}", args.Option);
            });

            var opts = session.CategoryOptions;

            session.SetPreferredIOBufferDuration(0.01, out error);

            session.SetActive(true);

            _audioFormat = AudioStreamBasicDescription.CreateLinearPCM(_sampleRate, bitsPerChannel: 32);

            _audioFormat.FormatFlags |= AudioFormatFlags.IsNonInterleaved | AudioFormatFlags.IsFloat;

            _audioComponent = AudioComponent.FindComponent(AudioTypeOutput.Remote);

            // creating an audio unit instance
            _audioUnit = new AudioUnit.AudioUnit(_audioComponent);



            // setting audio format
            _audioUnit.SetAudioFormat(_audioFormat,
                                      AudioUnitScopeType.Input,
                                      0 // Remote Output
                                      );

            //_audioFormat.FormatFlags = AudioStreamBasicDescription.AudioFormatFlagsNativeFloat;

            _audioUnit.SetAudioFormat(_audioFormat, AudioUnitScopeType.Output, 1);
            // setting callback method
            _audioUnit.SetRenderCallback(_audioUnit_RenderCallback, AudioUnitScopeType.Global);

            _audioUnit.Initialize();
            _audioUnit.Stop();
        }