        void prepareAudioQueue()
        {
            _audioFile = AudioFile.Open(_url, AudioFilePermission.Read, AudioFileType.AIFF);

            // Getting AudioStreamBasicDescription
            var audioFormat = _audioFile.StreamBasicDescription;

            // Creating an audio output queue object instance
            _audioQueue = new OutputAudioQueue(audioFormat);
            _audioQueue.OutputCompleted += _audioQueue_OutputCompleted;

            // Getting packet size
            int maxPacketSize = _audioFile.MaximumPacketSize;

            _startingPacketCount = 0;

            _numPacketsToRead = 1024;
            _bufferByteSize   = _numPacketsToRead * maxPacketSize;

            // allocate the buffers and prime them with the first packets
            IntPtr bufferPtr;

            for (int index = 0; index < 3; index++)
            {
                _audioQueue.AllocateBuffer(_bufferByteSize, out bufferPtr);
                outputCallback(bufferPtr);
            }
            _isPrepared = true;
        }
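The OutputCompleted handler and the outputCallback used to prime the buffers are not shown above. A minimal sketch of what they could look like, assuming AudioFile.ReadPacketData fills each queue buffer directly and that _isPrepared, _startingPacketCount and the other fields are the ones used above:

        void _audioQueue_OutputCompleted(object sender, OutputCompletedEventArgs e)
        {
            // a buffer finished playing; refill it and put it back in the queue
            outputCallback(e.IntPtrBuffer);
        }

        unsafe void outputCallback(IntPtr bufferPtr)
        {
            var buffer = (AudioQueueBuffer*)bufferPtr;

            int bytesRead   = _bufferByteSize;
            int packetsRead = _numPacketsToRead;

            // read the next run of packets straight into the queue buffer
            var descriptions = _audioFile.ReadPacketData(false, _startingPacketCount, ref packetsRead, buffer->AudioData, ref bytesRead);

            if (packetsRead > 0)
            {
                buffer->AudioDataByteSize = (uint)bytesRead;
                _audioQueue.EnqueueBuffer(bufferPtr, bytesRead, descriptions);
                _startingPacketCount += packetsRead;
            }
            else
            {
                // end of file - stop asynchronously so the queued buffers drain
                _audioQueue.Stop(false);
            }
        }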
Example #2
        public Control GetView(RageLib.FileSystem.Common.File file)
        {
            var data = file.GetData();

            var ms        = new MemoryStream(data);
            var audioFile = new AudioFile();

            try
            {
                audioFile.Open(ms);
            }
            catch
            {
                ms.Close();

                throw;
            }

            var view       = new AudioView();
            var controller = new AudioViewController(view);

            controller.AudioFile = audioFile;

            return(view);
        }
Example #3
        internal SoundEffect(string assetName, bool isMusic)
        {
            // use of CFUrl.FromFile is necessary in case assetName contains spaces (which must be url-encoded)
            audioFile = AudioFile.Open(CFUrl.FromFile(assetName), AudioFilePermission.Read, 0);

            if (audioFile == null)
            {
                throw new Content.ContentLoadException("Could not open sound effect " + assetName);
            }

            description = audioFile.StreamBasicDescription;
            DeriveBufferSize(0.5);
            isVBR = (description.BytesPerPacket == 0 || description.FramesPerPacket == 0);

            if (!isMusic)
            {
                firstInstance = new SoundEffectInstance(this, false);
            }
        }
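DeriveBufferSize is not shown; a sketch following Apple's Audio Queue Services guide, assuming a bufferByteSize field (hypothetical here) to hold the result:

        void DeriveBufferSize(double seconds)
        {
            const int maxBufferSize = 0x50000;             // cap reads at 320 KB
            const int minBufferSize = 0x4000;              // but read at least 16 KB
            int maxPacketSize = audioFile.MaximumPacketSize;

            if (description.FramesPerPacket != 0)
            {
                // packets have a fixed duration, so size the buffer for 'seconds' of audio
                double packetsForTime = description.SampleRate / description.FramesPerPacket * seconds;
                bufferByteSize = (int)(packetsForTime * maxPacketSize);
            }
            else
            {
                // no predictable packet duration - fall back to the larger bound
                bufferByteSize = Math.Max(maxBufferSize, maxPacketSize);
            }

            // clamp between the two bounds
            bufferByteSize = Math.Max(Math.Min(bufferByteSize, maxBufferSize), minBufferSize);
        }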
Example #4
        public void ReadTest()
        {
            var path = Path.GetFullPath(Path.Combine("AudioToolbox", "1.caf"));
            var af   = AudioFile.Open(CFUrl.FromFile(path), AudioFilePermission.Read, AudioFileType.CAF);

            int  len;
            long current = 0;
            long size    = 1024;

            byte [] buffer = new byte [size];
            while ((len = af.Read(current, buffer, 0, buffer.Length, false)) != -1)
            {
                current += len;
            }

            var full_len = new FileInfo(path).Length;
            int header   = 4096;             // this test file's audio data starts after a 4096 byte header

            Assert.That(header + current == full_len, "#1");
        }
Example #6
        public static bool Convert(string input, string output, AudioFormatType targetFormat, AudioFileType containerType, Microsoft.Xna.Framework.Content.Pipeline.Audio.ConversionQuality quality)
        {
            CFUrl           source       = CFUrl.FromFile(input);
            CFUrl           dest         = CFUrl.FromFile(output);
            var             dstFormat    = new AudioStreamBasicDescription();
            var             sourceFile   = AudioFile.Open(source, AudioFilePermission.Read);
            AudioFormatType outputFormat = targetFormat;
            // get the source data format
            var srcFormat        = (AudioStreamBasicDescription)sourceFile.DataFormat;
            var outputSampleRate = 0;

            switch (quality)
            {
            case Microsoft.Xna.Framework.Content.Pipeline.Audio.ConversionQuality.Low:
                outputSampleRate = (int)Math.Max(8000, srcFormat.SampleRate / 2);
                break;

            default:
                outputSampleRate = (int)Math.Max(8000, srcFormat.SampleRate);
                break;
            }

            dstFormat.SampleRate = (outputSampleRate == 0 ? srcFormat.SampleRate : outputSampleRate);             // set sample rate
            if (outputFormat == AudioFormatType.LinearPCM)
            {
                // if the output format is PCM, create a 16-bit int PCM file format description as an example
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
                dstFormat.BitsPerChannel   = 16;
                dstFormat.BytesPerPacket   = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
                dstFormat.FramesPerPacket  = 1;
                dstFormat.FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
            }
            else
            {
                // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = (outputFormat == AudioFormatType.iLBC ? 1 : srcFormat.ChannelsPerFrame);                 // for iLBC num channels must be 1

                // use AudioFormat API to fill out the rest of the description
                var fie = AudioStreamBasicDescription.GetFormatInfo(ref dstFormat);
                if (fie != AudioFormatError.None)
                {
                    return(false);
                }
            }

            var converter = AudioConverter.Create(srcFormat, dstFormat);

            converter.InputData += HandleInputData;

            // if the source has a cookie, get it and set it on the Audio Converter
            ReadCookie(sourceFile, converter);

            // get the actual formats back from the Audio Converter
            srcFormat = converter.CurrentInputStreamDescription;
            dstFormat = converter.CurrentOutputStreamDescription;

            // if encoding to AAC set the bitrate to 192k which is a nice value for this demo
            // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
            if (dstFormat.Format == AudioFormatType.MPEG4AAC)
            {
                uint outputBitRate = 192000;                 // 192k

                // ignore errors as setting may be invalid depending on format specifics such as samplerate
                try {
                    converter.EncodeBitRate = outputBitRate;
                } catch {
                }

                // get it back and print it out
                outputBitRate = converter.EncodeBitRate;
            }

            // create the destination file
            var destinationFile = AudioFile.Create(dest, containerType, dstFormat, AudioFileFlags.EraseFlags);

            // set up source buffers and data proc info struct
            afio            = new AudioFileIO(32768);
            afio.SourceFile = sourceFile;
            afio.SrcFormat  = srcFormat;

            if (srcFormat.BytesPerPacket == 0)
            {
                // if the source format is VBR, we need to get the maximum packet size
                // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
                // in the file (without actually scanning the whole file to find the largest packet,
                // as may happen with kAudioFilePropertyMaximumPacketSize)
                afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

                // how many packets can we read for our buffer size?
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }
            else
            {
                // CBR source format
                afio.SrcSizePerPacket  = srcFormat.BytesPerPacket;
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }

            // set up output buffers
            int       outputSizePerPacket = dstFormat.BytesPerPacket;       // this will be non-zero if the format is CBR
            const int theOutputBufSize    = 32768;
            var       outputBuffer        = Marshal.AllocHGlobal(theOutputBufSize);

            AudioStreamPacketDescription[] outputPacketDescriptions = null;

            if (outputSizePerPacket == 0)
            {
                // if the destination format is VBR, we need to get max size per packet from the converter
                outputSizePerPacket = (int)converter.MaximumOutputPacketSize;
            }
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
            int numOutputPackets = theOutputBufSize / outputSizePerPacket;

            // if the destination format has a cookie, get it and set it on the output file
            WriteCookie(converter, destinationFile);

            // write destination channel layout
            if (srcFormat.ChannelsPerFrame > 2)
            {
                WriteDestinationChannelLayout(converter, sourceFile, destinationFile);
            }

            long         totalOutputFrames = 0;     // used for debugging
            long         outputFilePos     = 0;
            AudioBuffers fillBufList       = new AudioBuffers(1);
            bool         error             = false;

            // loop to convert data
            while (true)
            {
                // set up output buffer list
                fillBufList [0] = new AudioBuffer()
                {
                    NumberChannels = dstFormat.ChannelsPerFrame,
                    DataByteSize   = theOutputBufSize,
                    Data           = outputBuffer
                };

                // convert data
                int ioOutputDataPackets = numOutputPackets;
                var fe = converter.FillComplexBuffer(ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
                // if interrupted in the process of the conversion call, we must handle the error appropriately
                if (fe != AudioConverterError.None)
                {
                    error = true;
                    break;
                }

                if (ioOutputDataPackets == 0)
                {
                    // this is the EOF condition
                    break;
                }

                // write to output file
                var inNumBytes = fillBufList [0].DataByteSize;

                var we = destinationFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
                if (we != 0)
                {
                    error = true;
                    break;
                }

                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.FramesPerPacket != 0)
                {
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
                }
                else
                {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (var i = 0; i < ioOutputDataPackets; ++i)
                    {
                        totalOutputFrames += outputPacketDescriptions [i].VariableFramesInPacket;
                    }
                }
            }

            Marshal.FreeHGlobal(outputBuffer);

            if (!error)
            {
                // write out any of the leading and trailing frames for compressed formats only
                if (dstFormat.BitsPerChannel == 0)
                {
                    // the output frame count should agree with the packet table info written below
                    WritePacketTableInfo(converter, destinationFile);
                }

                // write the cookie again - sometimes codecs will update cookies at the end of a conversion
                WriteCookie(converter, destinationFile);
            }

            converter.Dispose();
            destinationFile.Dispose();
            sourceFile.Dispose();

            return(!error);
        }
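HandleInputData is the AudioConverter input callback that feeds source packets to FillComplexBuffer; EncoderDataProc in the later examples has the same shape. A sketch, assuming AudioFileIO also carries a SrcBuffer pointer and a SrcFilePos packet cursor (both hypothetical names here):

        static AudioConverterError HandleInputData(ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
        {
            // don't ask for more packets than one source buffer holds
            if (numberDataPackets > afio.NumPacketsPerRead)
                numberDataPackets = afio.NumPacketsPerRead;

            // read the next packets from the source file into the scratch buffer
            int outNumBytes = afio.SrcBufferSize;
            afio.PacketDescriptions = afio.SourceFile.ReadPacketData(false, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer, ref outNumBytes);

            // advance the input file packet position
            afio.SrcFilePos += numberDataPackets;

            // hand the data (and, for VBR sources, the packet descriptions) to the converter
            data.SetData(0, afio.SrcBuffer, outNumBytes);
            if (dataPacketDescription != null)
                dataPacketDescription = afio.PacketDescriptions;

            return AudioConverterError.None;
        }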
Example #7
        protected INativeObject GetINativeInstance(Type t)
        {
            var ctor = t.GetConstructor(Type.EmptyTypes);

            if ((ctor != null) && !ctor.IsAbstract)
            {
                return(ctor.Invoke(null) as INativeObject);
            }

            if (!NativeObjectInterfaceType.IsAssignableFrom(t))
            {
                throw new ArgumentException("t");
            }
            switch (t.Name)
            {
            case "CFAllocator":
                return(CFAllocator.SystemDefault);

            case "CFBundle":
                var bundles = CFBundle.GetAll();
                if (bundles.Length > 0)
                {
                    return(bundles [0]);
                }
                else
                {
                    throw new InvalidOperationException(string.Format("Could not create the new instance for type {0}.", t.Name));
                }

            case "CFNotificationCenter":
                return(CFNotificationCenter.Darwin);

            case "CFReadStream":
            case "CFStream":
                CFReadStream  readStream;
                CFWriteStream writeStream;
                CFStream.CreatePairWithSocketToHost("www.google.com", 80, out readStream, out writeStream);
                return(readStream);

            case "CFWriteStream":
                CFStream.CreatePairWithSocketToHost("www.google.com", 80, out readStream, out writeStream);
                return(writeStream);

            case "CFUrl":
                return(CFUrl.FromFile("/etc"));

            case "CFPropertyList":
                return(CFPropertyList.FromData(NSData.FromString("<string>data</string>")).PropertyList);

            case "DispatchData":
                return(DispatchData.FromByteBuffer(new byte [] { 1, 2, 3, 4 }));

            case "AudioFile":
                var path = Path.GetFullPath("1.caf");
                var af   = AudioFile.Open(CFUrl.FromFile(path), AudioFilePermission.Read, AudioFileType.CAF);
                return(af);

            case "CFHTTPMessage":
                return(CFHTTPMessage.CreateEmpty(false));

            case "CFMutableString":
                return(new CFMutableString("xamarin"));

            case "CGBitmapContext":
                byte[] data = new byte [400];
                using (CGColorSpace space = CGColorSpace.CreateDeviceRGB()) {
                    return(new CGBitmapContext(data, 10, 10, 8, 40, space, CGBitmapFlags.PremultipliedLast));
                }

            case "CGContextPDF":
                var filename = Environment.GetFolderPath(Environment.SpecialFolder.CommonDocuments) + "/t.pdf";
                using (var url = new NSUrl(filename))
                    return(new CGContextPDF(url));

            case "CGColorConversionInfo":
                var cci = new GColorConversionInfoTriple()
                {
                    Space     = CGColorSpace.CreateGenericRgb(),
                    Intent    = CGColorRenderingIntent.Default,
                    Transform = CGColorConversionInfoTransformType.ApplySpace
                };
                return(new CGColorConversionInfo((NSDictionary)null, cci, cci, cci));

            case "CGDataConsumer":
                using (NSMutableData destData = new NSMutableData()) {
                    return(new CGDataConsumer(destData));
                }

            case "CGDataProvider":
                filename = "xamarin1.png";
                return(new CGDataProvider(filename));

            case "CGFont":
                return(CGFont.CreateWithFontName("Courier New"));

            case "CGPattern":
                return(new CGPattern(
                           new RectangleF(0, 0, 16, 16),
                           CGAffineTransform.MakeIdentity(),
                           16, 16,
                           CGPatternTiling.NoDistortion,
                           true,
                           (cgc) => {}));

            case "CMBufferQueue":
                return(CMBufferQueue.CreateUnsorted(2));

            case "CTFont":
                CTFontDescriptorAttributes fda = new CTFontDescriptorAttributes()
                {
                    FamilyName = "Courier",
                    StyleName  = "Bold",
                    Size       = 16.0f
                };
                using (var fd = new CTFontDescriptor(fda))
                    return(new CTFont(fd, 10));

            case "CTFontCollection":
                return(new CTFontCollection(new CTFontCollectionOptions()));

            case "CTFontDescriptor":
                fda = new CTFontDescriptorAttributes();
                return(new CTFontDescriptor(fda));

            case "CTTextTab":
                return(new CTTextTab(CTTextAlignment.Left, 2));

            case "CTTypesetter":
                return(new CTTypesetter(new NSAttributedString("Hello, world",
                                                               new CTStringAttributes()
                {
                    ForegroundColorFromContext = true,
                    Font = new CTFont("ArialMT", 24)
                })));

            case "CTFrame":
                var framesetter = new CTFramesetter(new NSAttributedString("Hello, world",
                                                                           new CTStringAttributes()
                {
                    ForegroundColorFromContext = true,
                    Font = new CTFont("ArialMT", 24)
                }));
                var bPath = UIBezierPath.FromRect(new RectangleF(0, 0, 3, 3));
                return(framesetter.GetFrame(new NSRange(0, 0), bPath.CGPath, null));

            case "CTFramesetter":
                return(new CTFramesetter(new NSAttributedString("Hello, world",
                                                                new CTStringAttributes()
                {
                    ForegroundColorFromContext = true,
                    Font = new CTFont("ArialMT", 24)
                })));

            case "CTGlyphInfo":
                return(new CTGlyphInfo("copyright", new CTFont("ArialMT", 24), "Foo"));

            case "CTLine":
                return(new CTLine(new NSAttributedString("Hello, world",
                                                         new CTStringAttributes()
                {
                    ForegroundColorFromContext = true,
                    Font = new CTFont("ArialMT", 24)
                })));

            case "CGImageDestination":
                var storage = new NSMutableData();
                return(CGImageDestination.Create(new CGDataConsumer(storage), "public.png", 1));

            case "CGImageMetadataTag":
                using (NSString name = new NSString("tagName"))
                    using (var value = new NSString("value"))
                        return(new CGImageMetadataTag(CGImageMetadataTagNamespaces.Exif, CGImageMetadataTagPrefixes.Exif, name, CGImageMetadataType.Default, value));

            case "CGImageSource":
                filename = "xamarin1.png";
                return(CGImageSource.FromUrl(NSUrl.FromFilename(filename)));

            case "SecPolicy":
                return(SecPolicy.CreateSslPolicy(false, null));

            case "SecIdentity":
                using (var options = NSDictionary.FromObjectAndKey(new NSString("farscape"), SecImportExport.Passphrase)) {
                    NSDictionary[] array;
                    var            result = SecImportExport.ImportPkcs12(farscape_pfx, options, out array);
                    if (result != SecStatusCode.Success)
                    {
                        throw new InvalidOperationException(string.Format("Could not create the new instance for type {0} due to {1}.", t.Name, result));
                    }
                    return(new SecIdentity(array [0].LowlevelObjectForKey(SecImportExport.Identity.Handle)));
                }

            case "SecTrust":
                X509Certificate x = new X509Certificate(mail_google_com);
                using (var policy = SecPolicy.CreateSslPolicy(true, "mail.google.com"))
                    return(new SecTrust(x, policy));

            case "SslContext":
                return(new SslContext(SslProtocolSide.Client, SslConnectionType.Stream));

            case "UIFontFeature":
                return(new UIFontFeature(CTFontFeatureNumberSpacing.Selector.ProportionalNumbers));

            case "NetworkReachability":
                return(new NetworkReachability(IPAddress.Loopback, null));

            case "VTCompressionSession":
            case "VTSession":
                return(VTCompressionSession.Create(1024, 768, CMVideoCodecType.H264, (sourceFrame, status, flags, buffer) => { }, null, (CVPixelBufferAttributes)null));

            case "VTFrameSilo":
                return(VTFrameSilo.Create());

            case "VTMultiPassStorage":
                return(VTMultiPassStorage.Create());

            case "CFString":
                return(new CFString("test"));

            case "DispatchBlock":
                return(new DispatchBlock(() => { }));

            case "DispatchQueue":
                return(new DispatchQueue("com.example.subsystem.taskXYZ"));

            case "DispatchGroup":
                return(DispatchGroup.Create());

            case "CGColorSpace":
                return(CGColorSpace.CreateDeviceCmyk());

            case "CGGradient":
                CGColor[] cArray = { UIColor.Black.CGColor, UIColor.Clear.CGColor, UIColor.Blue.CGColor };
                return(new CGGradient(null, cArray));

            case "CGImage":
                filename = "xamarin1.png";
                using (var dp = new CGDataProvider(filename))
                    return(CGImage.FromPNG(dp, null, false, CGColorRenderingIntent.Default));

            case "CGColor":
                return(UIColor.Black.CGColor);

            case "CMClock":
                return(CMClock.HostTimeClock);

            case "CMTimebase":
                return(new CMTimebase(CMClock.HostTimeClock));

            case "CVPixelBufferPool":
                return(new CVPixelBufferPool(
                           new CVPixelBufferPoolSettings(),
                           new CVPixelBufferAttributes(CVPixelFormatType.CV24RGB, 100, 50)
                           ));

            case "SecCertificate":
                using (var cdata = NSData.FromArray(mail_google_com))
                    return(new SecCertificate(cdata));

            case "SecCertificate2":
                using (var cdata = NSData.FromArray(mail_google_com))
                    return(new SecCertificate2(new SecCertificate(cdata)));

            case "SecTrust2":
                X509Certificate x2 = new X509Certificate(mail_google_com);
                using (var policy = SecPolicy.CreateSslPolicy(true, "mail.google.com"))
                    return(new SecTrust2(new SecTrust(x2, policy)));

            case "SecIdentity2":
                using (var options = NSDictionary.FromObjectAndKey(new NSString("farscape"), SecImportExport.Passphrase)) {
                    NSDictionary[] array;
                    var            result = SecImportExport.ImportPkcs12(farscape_pfx, options, out array);
                    if (result != SecStatusCode.Success)
                    {
                        throw new InvalidOperationException(string.Format("Could not create the new instance for type {0} due to {1}.", t.Name, result));
                    }
                    return(new SecIdentity2(new SecIdentity(array [0].LowlevelObjectForKey(SecImportExport.Identity.Handle))));
                }

            case "SecKey":
                SecKey private_key;
                SecKey public_key;
                using (var record = new SecRecord(SecKind.Key)) {
                    record.KeyType       = SecKeyType.RSA;
                    record.KeySizeInBits = 512;                     // it's not a performance test :)
                    SecKey.GenerateKeyPair(record.ToDictionary(), out public_key, out private_key);
                    return(private_key);
                }

            case "SecAccessControl":
                return(new SecAccessControl(SecAccessible.WhenPasscodeSetThisDeviceOnly));

            default:
                throw new InvalidOperationException(string.Format("Could not create the new instance for type {0}.", t.Name));
            }
        }
Example #8
        bool DoConvertFile(CFUrl sourceURL, NSUrl destinationURL, AudioFormatType outputFormat, double outputSampleRate)
        {
            AudioStreamBasicDescription dstFormat = new AudioStreamBasicDescription();

            // in this sample we should never be on the main thread here
            Debug.Assert(!NSThread.IsMain);

            // transition thread state to State::Running before continuing
            AppDelegate.ThreadStateSetRunning();

            Debug.WriteLine("DoConvertFile");

            // get the source file
            var sourceFile = AudioFile.Open(sourceURL, AudioFilePermission.Read);

            // get the source data format
            var srcFormat = (AudioStreamBasicDescription)sourceFile.DataFormat;

            // setup the output file format
            dstFormat.SampleRate = (outputSampleRate == 0 ? srcFormat.SampleRate : outputSampleRate);             // set sample rate
            if (outputFormat == AudioFormatType.LinearPCM)
            {
                // if the output format is PCM, create a 16-bit int PCM file format description as an example
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
                dstFormat.BitsPerChannel   = 16;
                dstFormat.BytesPerPacket   = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
                dstFormat.FramesPerPacket  = 1;
                dstFormat.FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
            }
            else
            {
                // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = (outputFormat == AudioFormatType.iLBC ? 1 : srcFormat.ChannelsPerFrame);                 // for iLBC num channels must be 1

                // use AudioFormat API to fill out the rest of the description
                var fie = AudioStreamBasicDescription.GetFormatInfo(ref dstFormat);
                if (fie != AudioFormatError.None)
                {
                    Debug.Print("Cannot create destination format {0:x}", fie);

                    AppDelegate.ThreadStateSetDone();
                    return(false);
                }
            }

            // create the AudioConverter
            AudioConverterError ce;
            var converter = AudioConverter.Create(srcFormat, dstFormat, out ce);

            Debug.Assert(ce == AudioConverterError.None);

            converter.InputData += EncoderDataProc;

            // if the source has a cookie, get it and set it on the Audio Converter
            ReadCookie(sourceFile, converter);

            // get the actual formats back from the Audio Converter
            srcFormat = converter.CurrentInputStreamDescription;
            dstFormat = converter.CurrentOutputStreamDescription;

            // if encoding to AAC set the bitrate to 192k which is a nice value for this demo
            // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
            if (dstFormat.Format == AudioFormatType.MPEG4AAC)
            {
                uint outputBitRate = 192000;                 // 192k

                // ignore errors as setting may be invalid depending on format specifics such as samplerate
                try {
                    converter.EncodeBitRate = outputBitRate;
                } catch {
                }

                // get it back and print it out
                outputBitRate = converter.EncodeBitRate;
                Debug.Print("AAC Encode Bitrate: {0}", outputBitRate);
            }

            // can the Audio Converter resume conversion after an interruption?
            // this property may be queried at any time after construction of the Audio Converter after setting its output format
            // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
            // construction time since it means less code to execute during or after interruption time
            bool canResumeFromInterruption;

            try {
                canResumeFromInterruption = converter.CanResumeFromInterruption;
                Debug.Print("Audio Converter {0} continue after interruption!", canResumeFromInterruption ? "CAN" : "CANNOT");
            } catch (Exception e) {
                // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
                // then the codec being used is not a hardware codec so we're not concerned about codec state
                // we are always going to be able to resume conversion after an interruption

                canResumeFromInterruption = false;
                Debug.Print("CanResumeFromInterruption: {0}", e.Message);
            }

            // create the destination file
            var destinationFile = AudioFile.Create(destinationURL, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags);

            // set up source buffers and data proc info struct
            afio            = new AudioFileIO(32768);
            afio.SourceFile = sourceFile;
            afio.SrcFormat  = srcFormat;

            if (srcFormat.BytesPerPacket == 0)
            {
                // if the source format is VBR, we need to get the maximum packet size
                // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
                // in the file (without actually scanning the whole file to find the largest packet,
                // as may happen with kAudioFilePropertyMaximumPacketSize)
                afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

                // how many packets can we read for our buffer size?
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }
            else
            {
                // CBR source format
                afio.SrcSizePerPacket  = srcFormat.BytesPerPacket;
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
            }

            // set up output buffers
            int       outputSizePerPacket = dstFormat.BytesPerPacket;       // this will be non-zero if the format is CBR
            const int theOutputBufSize    = 32768;
            var       outputBuffer        = Marshal.AllocHGlobal(theOutputBufSize);

            AudioStreamPacketDescription[] outputPacketDescriptions = null;

            if (outputSizePerPacket == 0)
            {
                // if the destination format is VBR, we need to get max size per packet from the converter
                outputSizePerPacket = (int)converter.MaximumOutputPacketSize;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
            }
            int numOutputPackets = theOutputBufSize / outputSizePerPacket;

            // if the destination format has a cookie, get it and set it on the output file
            WriteCookie(converter, destinationFile);

            // write destination channel layout
            if (srcFormat.ChannelsPerFrame > 2)
            {
                WriteDestinationChannelLayout(converter, sourceFile, destinationFile);
            }

            long         totalOutputFrames = 0;     // used for debugging
            long         outputFilePos     = 0;
            AudioBuffers fillBufList       = new AudioBuffers(1);
            bool         error             = false;

            // loop to convert data
            Debug.WriteLine("Converting...");
            while (true)
            {
                // set up output buffer list
                fillBufList [0] = new AudioBuffer()
                {
                    NumberChannels = dstFormat.ChannelsPerFrame,
                    DataByteSize   = theOutputBufSize,
                    Data           = outputBuffer
                };

                // this will block if we're interrupted
                var wasInterrupted = AppDelegate.ThreadStatePausedCheck();

                if (wasInterrupted && !canResumeFromInterruption)
                {
                    // this is our interruption termination condition
                    // an interruption has occured but the Audio Converter cannot continue
                    Debug.WriteLine("Cannot resume from interruption");
                    error = true;
                    break;
                }

                // convert data
                int ioOutputDataPackets = numOutputPackets;
                var fe = converter.FillComplexBuffer(ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
                // if interrupted in the process of the conversion call, we must handle the error appropriately
                if (fe != AudioConverterError.None)
                {
                    Debug.Print("FillComplexBuffer: {0}", fe);
                    error = true;
                    break;
                }

                if (ioOutputDataPackets == 0)
                {
                    // this is the EOF condition
                    break;
                }

                // write to output file
                var inNumBytes = fillBufList [0].DataByteSize;

                var we = destinationFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
                if (we != 0)
                {
                    Debug.Print("WritePackets: {0}", we);
                    error = true;
                    break;
                }

                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.FramesPerPacket != 0)
                {
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
                }
                else
                {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (var i = 0; i < ioOutputDataPackets; ++i)
                    {
                        totalOutputFrames += outputPacketDescriptions [i].VariableFramesInPacket;
                    }
                }
            }

            Marshal.FreeHGlobal(outputBuffer);

            if (!error)
            {
                // write out any of the leading and trailing frames for compressed formats only
                if (dstFormat.BitsPerChannel == 0)
                {
                    // the output frame count should agree with the packet table info written below
                    Debug.Print("Total number of output frames counted: {0}", totalOutputFrames);
                    WritePacketTableInfo(converter, destinationFile);
                }

                // write the cookie again - sometimes codecs will update cookies at the end of a conversion
                WriteCookie(converter, destinationFile);
            }

            converter.Dispose();
            destinationFile.Dispose();
            sourceFile.Dispose();

            // transition thread state to State.Done before continuing
            AppDelegate.ThreadStateSetDone();

            return(!error);
        }
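ReadCookie and WriteCookie, used by both converter examples, transfer the codec's magic cookie between file and converter. A minimal sketch built on the MagicCookie properties already used elsewhere in these examples:

        static void ReadCookie(AudioFile sourceFile, AudioConverter converter)
        {
            // a missing cookie is fine - many formats don't use one
            var cookie = sourceFile.MagicCookie;
            if (cookie != null && cookie.Length != 0)
                converter.DecompressionMagicCookie = cookie;
        }

        static void WriteCookie(AudioConverter converter, AudioFile destinationFile)
        {
            var cookie = converter.CompressionMagicCookie;
            if (cookie != null && cookie.Length != 0)
                destinationFile.MagicCookie = cookie;
        }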
Example #9
 static void UpdateFormatInfo(UILabel label, CFUrl fileURL)
 {
     UpdateFormatInfo(label, AudioFile.Open(fileURL, AudioFilePermission.Read), fileURL.FileSystemPath);
 }
Example #10
 static void UpdateFormatInfo(UILabel label, NSUrl fileURL)
 {
     UpdateFormatInfo(label, AudioFile.Open(fileURL, AudioFilePermission.Read), System.IO.Path.GetFileName(fileURL.Path));
 }
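Both wrappers funnel into a three-argument overload that is not shown; a plausible sketch (the format string is illustrative, not the original):

 static void UpdateFormatInfo(UILabel label, AudioFile audioFile, string fileName)
 {
     using (audioFile) {
         var asbd = audioFile.StreamBasicDescription;
         label.Text = string.Format("{0} {1} {2:F0} Hz ({3} ch.)", fileName, asbd.Format, asbd.SampleRate, asbd.ChannelsPerFrame);
     }
 }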
Example #11
        unsafe static void RenderAudio(CFUrl sourceUrl, CFUrl destinationUrl)
        {
            AudioStreamBasicDescription dataFormat;
            AudioQueueBuffer *          buffer = null;
            long currentPacket = 0;
            int  packetsToRead = 0;

            AudioStreamPacketDescription[] packetDescs = null;
            bool flushed = false;
            bool done    = false;
            int  bufferSize;

            using (var audioFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read, (AudioFileType)0)) {
                dataFormat = audioFile.StreamBasicDescription;

                using (var queue = new OutputAudioQueue(dataFormat, CFRunLoop.Current, CFRunLoop.ModeCommon)) {
                    queue.BufferCompleted += (sender, e) => {
                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);
                    };

                    // we need to calculate how many packets we read at a time and how big a buffer we need
                    // we base this on the size of the packets in the file and an approximate duration for each buffer
                    bool isVBR = dataFormat.BytesPerPacket == 0 || dataFormat.FramesPerPacket == 0;

                    // first check to see what the max size of a packet is - if it is bigger
                    // than our allocation default size, that needs to become larger
                    // adjust buffer size to represent about a second of audio based on this format
                    CalculateBytesForTime(dataFormat, audioFile.MaximumPacketSize, 1.0, out bufferSize, out packetsToRead);

                    if (isVBR)
                    {
                        packetDescs = new AudioStreamPacketDescription [packetsToRead];
                    }
                    else
                    {
                        packetDescs = null;                         // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
                    }

                    if (audioFile.MagicCookie.Length != 0)
                    {
                        queue.MagicCookie = audioFile.MagicCookie;
                    }

                    // allocate the input read buffer
                    queue.AllocateBuffer(bufferSize, out buffer);

                    // prepare the capture format
                    var captureFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame, 32);
                    captureFormat.BytesPerFrame = captureFormat.BytesPerPacket = dataFormat.ChannelsPerFrame * 4;

                    queue.SetOfflineRenderFormat(captureFormat, audioFile.ChannelLayout);

                    // prepare the target format
                    var dstFormat = AudioStreamBasicDescription.CreateLinearPCM(dataFormat.SampleRate, (uint)dataFormat.ChannelsPerFrame);

                    using (var captureFile = ExtAudioFile.CreateWithUrl(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags)) {
                        captureFile.ClientDataFormat = captureFormat;

                        int          captureBufferSize = bufferSize / 2;
                        AudioBuffers captureABL        = new AudioBuffers(1);

                        AudioQueueBuffer *captureBuffer;
                        queue.AllocateBuffer(captureBufferSize, out captureBuffer);

                        captureABL [0] = new AudioBuffer()
                        {
                            Data           = captureBuffer->AudioData,
                            NumberChannels = captureFormat.ChannelsPerFrame
                        };

                        queue.Start();

                        double ts = 0;
                        queue.RenderOffline(ts, captureBuffer, 0);

                        HandleOutput(audioFile, queue, buffer, ref packetsToRead, ref currentPacket, ref done, ref flushed, ref packetDescs);

                        while (true)
                        {
                            int reqFrames = captureBufferSize / captureFormat.BytesPerFrame;

                            queue.RenderOffline(ts, captureBuffer, reqFrames);

                            captureABL.SetData(0, captureBuffer->AudioData, (int)captureBuffer->AudioDataByteSize);
                            var writeFrames = captureABL [0].DataByteSize / captureFormat.BytesPerFrame;

                            // Console.WriteLine ("ts: {0} AudioQueueOfflineRender: req {1} frames / {2} bytes, got {3} frames / {4} bytes",
                            // ts, reqFrames, captureBufferSize, writeFrames, captureABL.Buffers [0].DataByteSize);

                            captureFile.WriteAsync((uint)writeFrames, captureABL);

                            if (flushed)
                            {
                                break;
                            }

                            ts += writeFrames;
                        }

                        CFRunLoop.Current.RunInMode(CFRunLoop.ModeDefault, 1, false);
                    }
                }
            }
        }
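CalculateBytesForTime sizes the read buffer for roughly the requested duration of audio; a sketch with the same clamping logic as DeriveBufferSize above (the 16 KB/64 KB bounds are assumptions):

        static void CalculateBytesForTime(AudioStreamBasicDescription desc, int maxPacketSize, double seconds, out int bufferSize, out int packetCount)
        {
            const int maxBufferSize = 0x10000;             // cap at 64 KB
            const int minBufferSize = 0x4000;              // floor at 16 KB

            if (desc.FramesPerPacket > 0)
            {
                // packets have a fixed duration, so size the buffer for 'seconds' of audio
                bufferSize = (int)(desc.SampleRate / desc.FramesPerPacket * seconds * maxPacketSize);
            }
            else
            {
                // no predictable packet duration, so just pick a reasonable default
                bufferSize = Math.Max(maxBufferSize, maxPacketSize);
            }

            if (bufferSize > maxBufferSize && bufferSize > maxPacketSize)
                bufferSize = maxBufferSize;
            else if (bufferSize < minBufferSize)
                bufferSize = minBufferSize;

            packetCount = bufferSize / maxPacketSize;
        }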
Example #12
        void Convert(string sourceFilePath, string destinationFilePath, AudioFormatType outputFormatType, int? sampleRate = null)
        {
            var destinationUrl = NSUrl.FromFilename(destinationFilePath);
            var sourceUrl      = NSUrl.FromFilename(sourceFilePath);

            // get the source file
            var name = Path.GetFileNameWithoutExtension(destinationFilePath);

            using var sourceFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read);

            var srcFormat = (AudioStreamBasicDescription)sourceFile.DataFormat;
            var dstFormat = new AudioStreamBasicDescription();

            // setup the output file format
            dstFormat.SampleRate = sampleRate ?? srcFormat.SampleRate;
            if (outputFormatType == AudioFormatType.LinearPCM)
            {
                // if the output format is PCM, create a 16-bit int PCM file format description
                dstFormat.Format           = AudioFormatType.LinearPCM;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
                dstFormat.BitsPerChannel   = 16;
                dstFormat.BytesPerPacket   = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
                dstFormat.FramesPerPacket  = 1;
                dstFormat.FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
            }
            else
            {
                // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
                dstFormat.Format           = outputFormatType;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;

                // use AudioFormat API to fill out the rest of the description
                var afe = AudioStreamBasicDescription.GetFormatInfo(ref dstFormat);
                Assert.AreEqual(AudioFormatError.None, afe, $"GetFormatInfo: {name}");
            }

            // create the AudioConverter
            using var converter = AudioConverter.Create(srcFormat, dstFormat, out var ce);
            Assert.AreEqual(AudioConverterError.None, ce, $"AudioConverterCreate: {name}");

            // set up source buffers and data proc info struct
            var afio = new AudioFileIO(32 * 1024);              // 32Kb

            converter.InputData += (ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription [] dataPacketDescription) => {
                return(EncoderDataProc(afio, ref numberDataPackets, data, ref dataPacketDescription));
            };

            // Some audio formats have a magic cookie associated with them which is required to decompress audio data
            // When converting audio data you must check to see if the format of the data has a magic cookie
            // If the audio data format has a magic cookie associated with it, you must add this information to an Audio Converter
            // using AudioConverterSetProperty and kAudioConverterDecompressionMagicCookie to appropriately decompress the data
            // http://developer.apple.com/mac/library/qa/qa2001/qa1318.html
            var cookie = sourceFile.MagicCookie;

            // if there is an error here, then the format doesn't have a cookie - this is perfectly fine as some formats do not
            if (cookie?.Length > 0)
            {
                converter.DecompressionMagicCookie = cookie;
            }

            // get the actual formats back from the Audio Converter
            srcFormat = converter.CurrentInputStreamDescription;
            dstFormat = converter.CurrentOutputStreamDescription;

            // create the destination file
            using var destinationFile = AudioFile.Create(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags);

            // set up source buffers and data proc info struct
            afio.SourceFile = sourceFile;
            afio.SrcFormat  = srcFormat;

            if (srcFormat.BytesPerPacket == 0)
            {
                // if the source format is VBR, we need to get the maximum packet size
                // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
                // in the file (without actually scanning the whole file to find the largest packet,
                // as may happen with kAudioFilePropertyMaximumPacketSize)
                afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

                // how many packets can we read for our buffer size?
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }
            else
            {
                // CBR source format
                afio.SrcSizePerPacket  = srcFormat.BytesPerPacket;
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
            }

            // set up output buffers
            int       outputSizePerPacket = dstFormat.BytesPerPacket; // this will be non-zero if the format is CBR
            const int theOutputBufSize    = 32 * 1024;                // 32Kb
            var       outputBuffer        = Marshal.AllocHGlobal(theOutputBufSize);

            AudioStreamPacketDescription [] outputPacketDescriptions = null;

            if (outputSizePerPacket == 0)
            {
                // if the destination format is VBR, we need to get max size per packet from the converter
                outputSizePerPacket = (int)converter.MaximumOutputPacketSize;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
            }
            int numOutputPackets = theOutputBufSize / outputSizePerPacket;

            // if the destination format has a cookie, get it and set it on the output file
            WriteCookie(converter, destinationFile);

            long         totalOutputFrames = 0;     // used for debugging
            long         outputFilePos     = 0;
            AudioBuffers fillBufList       = new AudioBuffers(1);

            // loop to convert data
            while (true)
            {
                // set up output buffer list
                fillBufList [0] = new AudioBuffer()
                {
                    NumberChannels = dstFormat.ChannelsPerFrame,
                    DataByteSize   = theOutputBufSize,
                    Data           = outputBuffer
                };

                // convert data
                int ioOutputDataPackets = numOutputPackets;
                var fe = converter.FillComplexBuffer(ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
                // if interrupted in the process of the conversion call, we must handle the error appropriately
                Assert.AreEqual(AudioConverterError.None, fe, $"FillComplexBuffer: {name}");

                if (ioOutputDataPackets == 0)
                {
                    // this is the EOF condition
                    break;
                }

                // write to output file
                var inNumBytes = fillBufList [0].DataByteSize;

                var we = destinationFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
                Assert.AreEqual(AudioFileError.Success, we, $"WritePackets: {name}");

                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                // the format has constant frames per packet
                totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
            }

            Marshal.FreeHGlobal(outputBuffer);

            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.BitsPerChannel == 0)
            {
                WritePacketTableInfo(converter, destinationFile);
            }

            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFile);
        }
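WritePacketTableInfo, called in the compressed-output path after conversion, records the encoder's priming and remainder frames in the destination file's packet table. A sketch, assuming the converter's PrimeInfo and the file's nullable PacketTableInfo properties:

        static void WritePacketTableInfo(AudioConverter converter, AudioFile destinationFile)
        {
            if (!destinationFile.IsPropertyWritable(AudioFileProperty.PacketTableInfo))
                return;

            // the converter reports how many leading/trailing frames the codec added
            AudioConverterPrimeInfo primeInfo = converter.PrimeInfo;

            var pti = destinationFile.PacketTableInfo;
            if (pti == null)
                return;

            // total frames = valid + priming + remainder; rebalance using the converter's numbers
            var info = pti.Value;
            long totalFrames = info.ValidFrames + info.PrimingFrames + info.RemainderFrames;

            info.PrimingFrames   = primeInfo.LeadingFrames;
            info.RemainderFrames = primeInfo.TrailingFrames;
            info.ValidFrames     = totalFrames - info.PrimingFrames - info.RemainderFrames;

            destinationFile.PacketTableInfo = info;
        }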