Example #1
        unsafe static AudioFormatType[] GetFormats(AudioFormatProperty prop)
        {
            int size;

            if (AudioFormatPropertyNative.AudioFormatGetPropertyInfo(prop, 0, IntPtr.Zero, out size) != 0)
            {
                return(null);
            }

            var elementSize = sizeof(AudioFormatType);
            var data        = new AudioFormatType[size / elementSize];

            fixed(AudioFormatType *ptr = data)
            {
                var res = AudioFormatPropertyNative.AudioFormatGetProperty(prop, 0, IntPtr.Zero, ref size, (IntPtr)ptr);

                if (res != 0)
                {
                    return(null);
                }

                Array.Resize(ref data, size / elementSize);                     // size was updated in place by the native call
                return(data);
            }
        }
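A minimal usage sketch for the helper above; AudioFormatProperty.EncodeFormatIDs is an assumed member name used for illustration (the page itself only shows AvailableEncodeSampleRates and Encoders being passed):

        // Hypothetical caller: list every format the system can encode to.
        var encodable = GetFormats(AudioFormatProperty.EncodeFormatIDs);        // member name assumed
        if (encodable != null)
        {
            foreach (var format in encodable)
            {
                Console.WriteLine(format);
            }
        }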
        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="model">Language model to use; must be a valid culture name</param>
        /// <param name="sampleRate">Audio sample rate, in Hz</param>
        /// <param name="audioFormatType">Audio format type; File must be paired with a File encoding</param>
        /// <param name="audioFormatEncoding">Audio format encoding; File must be paired with the File format type</param>
        public SmRtApiConfigBase(string model,
                                 int sampleRate,
                                 AudioFormatType audioFormatType,
                                 AudioFormatEncoding audioFormatEncoding)
        {
            // the File format type and the File encoding must be used together (or not at all)
            if ((audioFormatType == AudioFormatType.File) != (audioFormatEncoding == AudioFormatEncoding.File))
            {
                throw new ArgumentException("audioFormatType and audioFormatEncoding must both be File");
            }

            try
            {
                var unused = new CultureInfo(model);
            }
            catch (CultureNotFoundException ex)
            {
                throw new ArgumentException($"Invalid language code {model}", ex);
            }

            Model               = model;
            SampleRate          = sampleRate;
            AudioFormat         = audioFormatType;
            AudioFormatEncoding = audioFormatEncoding;
            BlockSize           = 2048;
        }
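A hedged construction sketch; the enum members AudioFormatType.Raw and AudioFormatEncoding.PcmS16Le are assumptions inferred from the {type: "raw", encoding: "pcm_s16le"} wire format quoted in Example #9:

        // Sketch only: the enum member names below are inferred, not confirmed by this page.
        var config = new SmRtApiConfig("en", 16000, AudioFormatType.Raw, AudioFormatEncoding.PcmS16Le);

        // Mixing File with a non-File counterpart throws ArgumentException:
        // new SmRtApiConfig("en", 16000, AudioFormatType.File, AudioFormatEncoding.PcmS16Le);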
Example #3
        public static string GetAudioFormat(AudioFormatType Format)
        {
            switch (Format)
            {
            case AudioFormatType.aac:
                return("aac");

            case AudioFormatType.flac:
                return("flac");

            case AudioFormatType.m4a:
                return("m4a");

            case AudioFormatType.mp3:
                return("mp3");

            case AudioFormatType.opus:
                return("opus");

            case AudioFormatType.vorbis:
                return("vorbis");

            case AudioFormatType.wav:
                return("wav");

            default:
                return("best");
            }
        }
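A usage sketch; the "best" fallback suggests this feeds a youtube-dl style --audio-format argument, though the caller is not shown on this page:

        var fmt = GetAudioFormat(AudioFormatType.mp3);      // "mp3"
        var any = GetAudioFormat((AudioFormatType)(-1));    // unknown values fall through to "best"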
Example #4
        public override void ViewDidLoad()
        {
            base.ViewDidLoad();

            UIImage greenImage = new UIImage("green_button.png").StretchableImage(12, 0);
            UIImage redImage   = new UIImage("red_button.png").StretchableImage(12, 0);

            startButton.SetBackgroundImage(greenImage, UIControlState.Normal);
            startButton.SetBackgroundImage(redImage, UIControlState.Disabled);

            // default output format
            // sample rate of 0 indicates source file sample rate
            outputFormat = AudioFormatType.AppleLossless;
            sampleRate   = 0;

            // can we encode to AAC?
            if (IsAACHardwareEncoderAvailable())
            {
                outputFormatSelector.SetEnabled(true, 0);
            }
            else
            {
                // even though not enabled in IB, this segment will still be enabled
                // if not specifically turned off here which we'll assume is a bug
                outputFormatSelector.SetEnabled(false, 0);
            }

            sourceURL = CFUrl.FromFile("sourcePCM.aif");
            var paths = NSSearchPath.GetDirectories(NSSearchPathDirectory.DocumentDirectory, NSSearchPathDomain.User);

            destinationFilePath = paths[0] + "/output.caf";
            destinationURL      = NSUrl.FromFilename(destinationFilePath);

            UpdateFormatInfo(fileInfo, sourceURL);
        }
        unsafe static T[]? GetAvailable <T> (AudioFormatProperty prop, AudioFormatType format)
        {
            uint size;

            if (AudioFormatPropertyNative.AudioFormatGetPropertyInfo(prop, sizeof(AudioFormatType), ref format, out size) != 0)
            {
                return(null);
            }

            var data         = new T[size / Marshal.SizeOf(typeof(T))];
            var array_handle = GCHandle.Alloc(data, GCHandleType.Pinned);              // This requires a pinned GCHandle, since it's not possible to use unsafe code to get the address of a generic object.

            try {
                var ptr = array_handle.AddrOfPinnedObject();
                var res = AudioFormatPropertyNative.AudioFormatGetProperty(prop, sizeof(AudioFormatType), ref format, ref size, ptr);
                if (res != 0)
                {
                    return(null);
                }

                Array.Resize(ref data, (int)size / Marshal.SizeOf(typeof(T)));
                return(data);
            } finally {
                array_handle.Free();
            }
        }
        partial void segmentedControllerValueChanged(UISegmentedControl sender)
        {
            switch (sender.Tag)
            {
                case 0:
                    switch (sender.SelectedSegment)
                    {
                        case 0:
                            outputFormat = AudioFormatType.MPEG4AAC;
                            outputSampleRateSelector.SetEnabled(true, 0);
                            outputSampleRateSelector.SetEnabled(true, 1);
                            outputSampleRateSelector.SetEnabled(true, 2);
                            outputSampleRateSelector.SetEnabled(true, 3);
                            break;
                        case 1:
                            outputFormat = AudioFormatType.AppleIMA4;
                            outputSampleRateSelector.SetEnabled(true, 0);
                            outputSampleRateSelector.SetEnabled(true, 1);
                            outputSampleRateSelector.SetEnabled(true, 2);
                            outputSampleRateSelector.SetEnabled(true, 3);
                            break;
                        case 2:
                            // iLBC sample rate is 8K
                            outputFormat = AudioFormatType.iLBC;
                            sampleRate = 8000.0;
                            outputSampleRateSelector.SelectedSegment = 2;
                            outputSampleRateSelector.SetEnabled(false, 0);
                            outputSampleRateSelector.SetEnabled(false, 1);
                            outputSampleRateSelector.SetEnabled(true, 2);
                            outputSampleRateSelector.SetEnabled(false, 3);
                            break;
                        case 3:
                            outputFormat = AudioFormatType.AppleLossless;
                            outputSampleRateSelector.SetEnabled(true, 0);
                            outputSampleRateSelector.SetEnabled(true, 1);
                            outputSampleRateSelector.SetEnabled(true, 2);
                            outputSampleRateSelector.SetEnabled(true, 3);
                            break;
                    }
                    break;
                case 1:
                    switch (sender.SelectedSegment)
                    {
                        case 0:
                            sampleRate = 44100.0;
                            break;
                        case 1:
                            sampleRate = 22050.0;
                            break;
                        case 2:
                            sampleRate = 8000.0;
                            break;
                        case 3:
                            sampleRate = 0;
                            break;
                    }
                    break;
            }
        }
Example #8
        public static WavParser GetParser(AudioFormatType type)
        {
            switch (type)
            {
            case AudioFormatType.Pcm: return(new PcmParser());

            case AudioFormatType.DviAdpcm: return(new DviAdpcmParser());

            default: throw new NotSupportedException("Invalid or unknown .wav compression format!");
            }
        }
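A usage sketch; the declaring class is not shown on this page, so the bare call is illustrative:

        var parser = GetParser(AudioFormatType.Pcm);        // returns a PcmParser
        // GetParser((AudioFormatType)0x99);                // would throw NotSupportedException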
Example #9
 //   `AudioFormatSubMessage: {type: "raw", encoding: "pcm_s16le", sample_rate: 44100}`
 /// <summary>
 /// Constructor; for File input the encoding and sample rate fields are omitted
 /// </summary>
 /// <param name="audioFormatType">Audio format type</param>
 /// <param name="encoding">Audio encoding, e.g. pcm_s16le</param>
 /// <param name="sampleRate">in Hz</param>
 public AudioFormatSubMessage(AudioFormatType audioFormatType, AudioFormatEncoding encoding, int sampleRate)
 {
     type = audioFormatType.ToApiString();
     if (audioFormatType == AudioFormatType.File)
     {
         this.encoding = null;
         sample_rate   = 0;
         return;
     }
     this.encoding = encoding.ToApiString();
     sample_rate   = sampleRate;
 }
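A sketch of the resulting wire shape, following the sample message quoted in the comment above; the serializer is not shown on this page and is assumed to map the lowercase field names directly:

     var raw = new AudioFormatSubMessage(audioFormatType, encoding, 44100);
     // => {"type": "raw", "encoding": "pcm_s16le", "sample_rate": 44100}

     var file = new AudioFormatSubMessage(AudioFormatType.File, encoding, 44100);
     // => encoding is null and sample_rate is 0 for File input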
        public static AudioFormatType[] GetAvailableFormats(AudioFileType fileType)
        {
            uint size;

            if (AudioFileGetGlobalInfoSize(AudioFileGlobalProperty.AvailableFormatIDs, sizeof(AudioFileType), ref fileType, out size) != 0)
            {
                return(null);
            }

            var data = new AudioFormatType[size / sizeof(AudioFormatType)];

            fixed(AudioFormatType *ptr = data)
            {
                var res = AudioFileGetGlobalInfo(AudioFileGlobalProperty.AvailableFormatIDs, sizeof(AudioFormatType), ref fileType, ref size, ptr);

                if (res != 0)
                {
                    return(null);
                }

                return(data);
            }
        }
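A usage sketch listing the format IDs a CAF container accepts (AudioFileType.CAF also appears in Example #14):

        var available = GetAvailableFormats(AudioFileType.CAF);
        if (available != null)
        {
            foreach (var format in available)
            {
                Console.WriteLine(format);
            }
        }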
Example #11
        // T must be unmanaged so the array can be pinned with fixed and measured with sizeof(T)
        unsafe static T[] GetAvailable <T> (AudioFormatProperty prop, AudioFormatType format) where T : unmanaged
        {
            uint size;

            if (AudioFormatPropertyNative.AudioFormatGetPropertyInfo(prop, sizeof(AudioFormatType), ref format, out size) != 0)
            {
                return(null);
            }

            var data = new T[size / sizeof(T)];

            fixed(T *ptr = data)
            {
                var res = AudioFormatPropertyNative.AudioFormatGetProperty(prop, sizeof(AudioFormatType), ref format, ref size, (IntPtr)ptr);

                if (res != 0)
                {
                    return(null);
                }

                Array.Resize(ref data, (int)size / sizeof(T));
                return(data);
            }
        }
		extern static int AudioFileGetGlobalInfo (AudioFileGlobalProperty propertyID, uint size, ref AudioFileType inSpecifier, ref uint ioDataSize, AudioFormatType* outPropertyData);
Example #14
        bool DoConvertFile(CFUrl sourceURL, NSUrl destinationURL, AudioFormatType outputFormat, double outputSampleRate)
        {
            AudioStreamBasicDescription dstFormat = new AudioStreamBasicDescription();

            // in this sample we should never be on the main thread here
            Debug.Assert(!NSThread.IsMain);

            // transition thread state to State::Running before continuing
            AppDelegate.ThreadStateSetRunning();

            Debug.WriteLine("DoConvertFile");

            // get the source file
            var sourceFile = AudioFile.Open(sourceURL, AudioFilePermission.Read);

            // get the source data format
            var srcFormat = (AudioStreamBasicDescription)sourceFile.DataFormat;

            // setup the output file format
            dstFormat.SampleRate = (outputSampleRate == 0 ? srcFormat.SampleRate : outputSampleRate);             // set sample rate
            if (outputFormat == AudioFormatType.LinearPCM)
            {
                // if the output format is PCM, create a 16-bit int PCM file format description as an example
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
                dstFormat.BitsPerChannel   = 16;
                dstFormat.BytesPerPacket   = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
                dstFormat.FramesPerPacket  = 1;
                dstFormat.FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
            }
            else
            {
                // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = (outputFormat == AudioFormatType.iLBC ? 1 : srcFormat.ChannelsPerFrame);                 // for iLBC num channels must be 1

                // use AudioFormat API to fill out the rest of the description
                var fie = AudioStreamBasicDescription.GetFormatInfo(ref dstFormat);
                if (fie != AudioFormatError.None)
                {
                    Debug.Print("Cannot create destination format {0:x}", fie);

                    AppDelegate.ThreadStateSetDone();
                    return(false);
                }
            }

            // create the AudioConverter
            AudioConverterError ce;
            var converter = AudioConverter.Create(srcFormat, dstFormat, out ce);

            Debug.Assert(ce == AudioConverterError.None);

            converter.InputData += EncoderDataProc;

            // if the source has a cookie, get it and set it on the Audio Converter
            ReadCookie(sourceFile, converter);

            // get the actual formats back from the Audio Converter
            srcFormat = converter.CurrentInputStreamDescription;
            dstFormat = converter.CurrentOutputStreamDescription;

            // if encoding to AAC set the bitrate to 192k which is a nice value for this demo
            // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
            if (dstFormat.Format == AudioFormatType.MPEG4AAC)
            {
                uint outputBitRate = 192000;                 // 192k

                // ignore errors as setting may be invalid depending on format specifics such as samplerate
                try {
                    converter.EncodeBitRate = outputBitRate;
                } catch {
                }

                // get it back and print it out
                outputBitRate = converter.EncodeBitRate;
                Debug.Print("AAC Encode Bitrate: {0}", outputBitRate);
            }

            // can the Audio Converter resume conversion after an interruption?
            // this property may be queried at any time after construction of the Audio Converter after setting its output format
            // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
            // construction time since it means less code to execute during or after interruption time
            bool canResumeFromInterruption;

            try {
                canResumeFromInterruption = converter.CanResumeFromInterruption;
                Debug.Print("Audio Converter {0} continue after interruption!", canResumeFromInterruption ? "CAN" : "CANNOT");
            } catch (Exception e) {
                // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
                // then the codec being used is not a hardware codec so we're not concerned about codec state
                // we are always going to be able to resume conversion after an interruption

                canResumeFromInterruption = false;
                Debug.Print("CanResumeFromInterruption: {0}", e.Message);
            }

            // create the destination file
            var destinationFile = AudioFile.Create(destinationURL, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags);

            // set up source buffers and data proc info struct
            afio            = new AudioFileIO(32768);
            afio.SourceFile = sourceFile;
            afio.SrcFormat  = srcFormat;

            if (srcFormat.BytesPerPacket == 0)
            {
                // if the source format is VBR, we need to get the maximum packet size
                // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
                // in the file (without actually scanning the whole file to find the largest packet,
                // as may happen with kAudioFilePropertyMaximumPacketSize)
                afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

                // how many packets can we read for our buffer size?
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }
            else
            {
                // CBR source format
                afio.SrcSizePerPacket  = srcFormat.BytesPerPacket;
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
            }

            // set up output buffers
            int       outputSizePerPacket = dstFormat.BytesPerPacket;       // this will be non-zero if the format is CBR
            const int theOutputBufSize    = 32768;
            var       outputBuffer        = Marshal.AllocHGlobal(theOutputBufSize);

            AudioStreamPacketDescription[] outputPacketDescriptions = null;

            if (outputSizePerPacket == 0)
            {
                // if the destination format is VBR, we need to get max size per packet from the converter
                outputSizePerPacket = (int)converter.MaximumOutputPacketSize;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
            }
            int numOutputPackets = theOutputBufSize / outputSizePerPacket;

            // if the destination format has a cookie, get it and set it on the output file
            WriteCookie(converter, destinationFile);

            // write destination channel layout
            if (srcFormat.ChannelsPerFrame > 2)
            {
                WriteDestinationChannelLayout(converter, sourceFile, destinationFile);
            }

            long         totalOutputFrames = 0;     // used for debugging
            long         outputFilePos     = 0;
            AudioBuffers fillBufList       = new AudioBuffers(1);
            bool         error             = false;

            // loop to convert data
            Debug.WriteLine("Converting...");
            while (true)
            {
                // set up output buffer list
                fillBufList [0] = new AudioBuffer()
                {
                    NumberChannels = dstFormat.ChannelsPerFrame,
                    DataByteSize   = theOutputBufSize,
                    Data           = outputBuffer
                };

                // this will block if we're interrupted
                var wasInterrupted = AppDelegate.ThreadStatePausedCheck();

                if (wasInterrupted && !canResumeFromInterruption)
                {
                    // this is our interruption termination condition
                    // an interruption has occurred but the Audio Converter cannot continue
                    Debug.WriteLine("Cannot resume from interruption");
                    error = true;
                    break;
                }

                // convert data
                int ioOutputDataPackets = numOutputPackets;
                var fe = converter.FillComplexBuffer(ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
                // if interrupted in the process of the conversion call, we must handle the error appropriately
                if (fe != AudioConverterError.None)
                {
                    Debug.Print("FillComplexBuffer: {0}", fe);
                    error = true;
                    break;
                }

                if (ioOutputDataPackets == 0)
                {
                    // this is the EOF condition
                    break;
                }

                // write to output file
                var inNumBytes = fillBufList [0].DataByteSize;

                var we = destinationFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
                if (we != 0)
                {
                    Debug.Print("WritePackets: {0}", we);
                    error = true;
                    break;
                }

                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.FramesPerPacket != 0)
                {
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
                }
                else
                {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (var i = 0; i < ioOutputDataPackets; ++i)
                    {
                        totalOutputFrames += outputPacketDescriptions [i].VariableFramesInPacket;
                    }
                }
            }

            Marshal.FreeHGlobal(outputBuffer);

            if (!error)
            {
                // write out any of the leading and trailing frames for compressed formats only
                if (dstFormat.BitsPerChannel == 0)
                {
                    // our output frame count should jibe with our input frame count
                    Debug.Print("Total number of output frames counted: {0}", totalOutputFrames);
                    WritePacketTableInfo(converter, destinationFile);
                }

                // write the cookie again - sometimes codecs will update cookies at the end of a conversion
                WriteCookie(converter, destinationFile);
            }

            converter.Dispose();
            destinationFile.Dispose();
            sourceFile.Dispose();

            // transition thread state to State.Done before continuing
            AppDelegate.ThreadStateSetDone();

            return(!error);
        }
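A hedged dispatch sketch: DoConvertFile asserts it is not on the main thread, so a caller would hand it to a background thread, reusing the fields set up in ViewDidLoad above:

        System.Threading.Tasks.Task.Run(() =>
        {
            var ok = DoConvertFile(sourceURL, destinationURL, outputFormat, sampleRate);
            Debug.Print("Conversion {0}", ok ? "succeeded" : "failed");
        });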
Example #15
 //   `AudioFormatSubMessage: {type: "raw", encoding: "pcm_s16le", sample_rate: 44100}`
 /// <summary>
 /// Constructor
 /// </summary>
 /// <param name="audioFormatType">Audio format type</param>
 /// <param name="encoding">Audio encoding, e.g. pcm_s16le</param>
 /// <param name="sampleRate">in Hz</param>
 public AudioFormatSubMessage(AudioFormatType audioFormatType, AudioFormatEncoding encoding, int sampleRate)
 {
     type          = audioFormatType.ToApiString();
     this.encoding = encoding.ToApiString();
     sample_rate   = sampleRate;
 }
Example #16
        void Convert(string sourceFilePath, string destinationFilePath, AudioFormatType outputFormatType, int? sampleRate = null)
        {
            var destinationUrl = NSUrl.FromFilename(destinationFilePath);
            var sourceUrl      = NSUrl.FromFilename(sourceFilePath);

            // get the source file
            var name = Path.GetFileNameWithoutExtension(destinationFilePath);

            using var sourceFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read);

            var srcFormat = (AudioStreamBasicDescription)sourceFile.DataFormat;
            var dstFormat = new AudioStreamBasicDescription();

            // setup the output file format
            dstFormat.SampleRate = sampleRate ?? srcFormat.SampleRate;
            if (outputFormatType == AudioFormatType.LinearPCM)
            {
                // if the output format is PCM, create a 16-bit int PCM file format
                dstFormat.Format           = AudioFormatType.LinearPCM;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
                dstFormat.BitsPerChannel   = 16;
                dstFormat.BytesPerPacket   = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
                dstFormat.FramesPerPacket  = 1;
                dstFormat.FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
            }
            else
            {
                // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
                dstFormat.Format           = outputFormatType;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;

                // use AudioFormat API to fill out the rest of the description
                var afe = AudioStreamBasicDescription.GetFormatInfo(ref dstFormat);
                Assert.AreEqual(AudioFormatError.None, afe, $"GetFormatInfo: {name}");
            }

            // create the AudioConverter
            using var converter = AudioConverter.Create(srcFormat, dstFormat, out var ce);
            Assert.AreEqual(AudioConverterError.None, ce, $"AudioConverterCreate: {name}");

            // set up source buffers and data proc info struct
            var afio = new AudioFileIO(32 * 1024);              // 32Kb

            converter.InputData += (ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription [] dataPacketDescription) => {
                return(EncoderDataProc(afio, ref numberDataPackets, data, ref dataPacketDescription));
            };

            // Some audio formats have a magic cookie associated with them which is required to decompress audio data
            // When converting audio data you must check to see if the format of the data has a magic cookie
            // If the audio data format has a magic cookie associated with it, you must add this information to an Audio Converter
            // using AudioConverterSetProperty and kAudioConverterDecompressionMagicCookie to appropriately decompress the data
            // http://developer.apple.com/mac/library/qa/qa2001/qa1318.html
            var cookie = sourceFile.MagicCookie;

            // if there is an error here, then the format doesn't have a cookie - this is perfectly fine as some formats do not
            if (cookie?.Length > 0)
            {
                converter.DecompressionMagicCookie = cookie;
            }

            // get the actual formats back from the Audio Converter
            srcFormat = converter.CurrentInputStreamDescription;
            dstFormat = converter.CurrentOutputStreamDescription;

            // create the destination file
            using var destinationFile = AudioFile.Create(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags);

            // set up source buffers and data proc info struct
            afio.SourceFile = sourceFile;
            afio.SrcFormat  = srcFormat;

            if (srcFormat.BytesPerPacket == 0)
            {
                // if the source format is VBR, we need to get the maximum packet size
                // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
                // in the file (without actually scanning the whole file to find the largest packet,
                // as may happen with kAudioFilePropertyMaximumPacketSize)
                afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

                // how many packets can we read for our buffer size?
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }
            else
            {
                // CBR source format
                afio.SrcSizePerPacket  = srcFormat.BytesPerPacket;
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
            }

            // set up output buffers
            int       outputSizePerPacket = dstFormat.BytesPerPacket; // this will be non-zero if the format is CBR
            const int theOutputBufSize    = 32 * 1024;                // 32Kb
            var       outputBuffer        = Marshal.AllocHGlobal(theOutputBufSize);

            AudioStreamPacketDescription [] outputPacketDescriptions = null;

            if (outputSizePerPacket == 0)
            {
                // if the destination format is VBR, we need to get max size per packet from the converter
                outputSizePerPacket = (int)converter.MaximumOutputPacketSize;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
            }
            int numOutputPackets = theOutputBufSize / outputSizePerPacket;

            // if the destination format has a cookie, get it and set it on the output file
            WriteCookie(converter, destinationFile);

            long         totalOutputFrames = 0;     // used for debugging
            long         outputFilePos     = 0;
            AudioBuffers fillBufList       = new AudioBuffers(1);

            // loop to convert data
            while (true)
            {
                // set up output buffer list
                fillBufList [0] = new AudioBuffer()
                {
                    NumberChannels = dstFormat.ChannelsPerFrame,
                    DataByteSize   = theOutputBufSize,
                    Data           = outputBuffer
                };

                // convert data
                int ioOutputDataPackets = numOutputPackets;
                var fe = converter.FillComplexBuffer(ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
                // if interrupted in the process of the conversion call, we must handle the error appropriately
                Assert.AreEqual(AudioConverterError.None, fe, $"FillComplexBuffer: {name}");

                if (ioOutputDataPackets == 0)
                {
                    // this is the EOF condition
                    break;
                }

                // write to output file
                var inNumBytes = fillBufList [0].DataByteSize;

                var we = destinationFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
                Assert.AreEqual(AudioFileError.Success, we, $"WritePackets: {name}");

                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                // the format has constant frames per packet
                totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
            }

            Marshal.FreeHGlobal(outputBuffer);

            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.BitsPerChannel == 0)
            {
                WritePacketTableInfo(converter, destinationFile);
            }

            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFile);
        }
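A hedged invocation sketch for the test helper above; the file paths are placeholders:

        Convert("source.caf", "aac.caf", AudioFormatType.MPEG4AAC, sampleRate: 44100);
        Convert("source.caf", "lossless.caf", AudioFormatType.AppleLossless);   // null sampleRate keeps the source rate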
        public static AudioStreamBasicDescription[] GetAvailableStreamDescriptions(AudioFileType fileType, AudioFormatType formatType)
        {
            AudioFileTypeAndFormatID input;

            input.FileType   = fileType;
            input.FormatType = formatType;

            uint size;

            if (AudioFileGetGlobalInfoSize(AudioFileGlobalProperty.AvailableStreamDescriptionsForFormat, (uint)sizeof(AudioFileTypeAndFormatID), ref input, out size) != 0)
            {
                return(null);
            }

            var data = new AudioStreamBasicDescription[size / sizeof(AudioStreamBasicDescription)];

            fixed(AudioStreamBasicDescription *ptr = data)
            {
                var res = AudioFileGetGlobalInfo(AudioFileGlobalProperty.AvailableStreamDescriptionsForFormat, (uint)sizeof(AudioFileTypeAndFormatID), ref input, ref size, ptr);

                if (res != 0)
                {
                    return(null);
                }

                return(data);
            }
        }
Example #19
        public Format(JObject node)
        {
            switch ((string)node["videoFormatType"])
            {
            case "SD":
                videoFormatType = VideoFormatType.SD;
                break;

            case "HD":
                videoFormatType = VideoFormatType.HD;
                break;
            }
            coverArtSmallUri = (string)node["images"][0]["uri"];
            coverArtLargeUri = (string)node["images"][1]["uri"];

            foreach (JObject offer in node["offers"])
            {
                switch ((string)offer["offerType"])
                {
                case "SUBSCRIPTION":
                    subscriptionOffer = new SubscriptionOffer(offer);
                    break;

                case "PURCHASE":
                    purchaseOffer = new PurchaseOffer(offer, videoFormatType == VideoFormatType.HD);
                    break;

                case "RENTAL":
                    rentalOffer = new RentalOffer(offer, videoFormatType == VideoFormatType.HD);
                    break;

                case "SEASON_PURCHASE":
                    seasonPurchaseOffer = new SeasonPurchaseOffer(offer);
                    break;

                case "SEASON_RENTAL":
                    seasonRentalOffer = new SeasonRentalOffer(offer);
                    break;

                case "TV_PASS":
                    tvPassOffer = new TvPassOffer(offer);
                    break;
                }
            }
            videoAspectRatio = (float)node["videoAspectRatio"];
            foreach (string audioFormat in node["audioFormatTypes"])
            {
                switch (audioFormat)
                {
                //case "STEREO":
                //    audioFormatType = AudioFormatType.Stereo;
                //    break;
                case "AC_3_5_1":
                    audioFormatType = AudioFormatType.AC3;
                    break;
                }
            }
            hasEncode              = node["hasEncode"] != null ? (bool)node["hasEncode"] : false;
            hasTrailerEncode       = node["hasTrailerEncode"] != null ? (bool)node["hasTrailerEncode"] : false;
            hasMobileEncode        = node["hasMobileEncode"] != null ? (bool)node["hasMobileEncode"] : false;
            hasMobileTrailerEncode = node["hasMobileTrailerEncode"] != null ? (bool)node["hasMobileTrailerEncode"] : false;
        }
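A construction sketch using Newtonsoft.Json.Linq; the JSON shape mirrors the keys read by the constructor above and the values are placeholders:

        var node = JObject.Parse(@"{
            ""videoFormatType"": ""HD"",
            ""images"": [ { ""uri"": ""small.jpg"" }, { ""uri"": ""large.jpg"" } ],
            ""offers"": [],
            ""videoAspectRatio"": 1.78,
            ""audioFormatTypes"": [ ""AC_3_5_1"" ]
        }");
        var format = new Format(node);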
Example #21
 public AudioStreamBasicDescription(AudioFormatType formatType)
     : this()
 {
     Format = formatType;
 }
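A usage sketch: seed the description with just the format, then let Core Audio fill in the remaining fields, as Example #14 does via GetFormatInfo:

     var aac = new AudioStreamBasicDescription(AudioFormatType.MPEG4AAC)
     {
         SampleRate       = 44100,
         ChannelsPerFrame = 2
     };
     var err = AudioStreamBasicDescription.GetFormatInfo(ref aac);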
Example #22
 public static AudioValueRange[] GetAvailableEncodeSampleRates(AudioFormatType format)
 {
     return(GetAvailable <AudioValueRange> (AudioFormatProperty.AvailableEncodeSampleRates, format));
 }
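A usage sketch; AudioValueRange's Minimum/Maximum members are assumed from the Core Audio struct of the same name:

     var rates = GetAvailableEncodeSampleRates(AudioFormatType.MPEG4AAC);
     if (rates != null)
     {
         foreach (var r in rates)
         {
             Console.WriteLine("{0}-{1} Hz", r.Minimum, r.Maximum);     // member names assumed
         }
     }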
Example #23
 public AudioClassDescription(AudioCodecComponentType type, AudioFormatType subType, AudioCodecManufacturer manufacturer)
 {
     Type         = type;
     SubType      = subType;
     Manufacturer = manufacturer;
 }
Example #27
        public static unsafe extern AudioFormatError AudioFormatGetProperty(AudioFormatProperty propertyID, int inSpecifierSize, ref AudioFormatType inSpecifier,
                                                                            ref uint ioDataSize, IntPtr outPropertyData);
Example #29
        public static extern AudioFormatError AudioFormatGetPropertyInfo(AudioFormatProperty propertyID, int inSpecifierSize, ref AudioFormatType inSpecifier,
                                                                         out uint outPropertyDataSize);
Example #30
 public static AudioClassDescription[] GetEncoders(AudioFormatType format)
 {
     return(GetAvailable <AudioClassDescription> (AudioFormatProperty.Encoders, format));
 }
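A usage sketch enumerating encoder components for AAC; Type, SubType and Manufacturer are the fields populated by the AudioClassDescription constructor in Example #23:

     var encoders = GetEncoders(AudioFormatType.MPEG4AAC);
     if (encoders != null)
     {
         foreach (var e in encoders)
         {
             Console.WriteLine("{0}/{1}/{2}", e.Type, e.SubType, e.Manufacturer);
         }
     }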
 public SmRtApiConfig(string model, int sampleRate, AudioFormatType audioFormatType, AudioFormatEncoding audioFormatEncoding) : base(model, sampleRate, audioFormatType, audioFormatEncoding)
 {
 }
Example #32
        public static bool Convert(string input, string output, AudioFormatType targetFormat, AudioFileType containerType, Microsoft.Xna.Framework.Content.Pipeline.Audio.ConversionQuality quality)
        {
            CFUrl           source       = CFUrl.FromFile(input);
            CFUrl           dest         = CFUrl.FromFile(output);
            var             dstFormat    = new AudioStreamBasicDescription();
            var             sourceFile   = AudioFile.Open(source, AudioFilePermission.Read);
            AudioFormatType outputFormat = targetFormat;
            // get the source data format
            var srcFormat        = (AudioStreamBasicDescription)sourceFile.DataFormat;
            var outputSampleRate = 0;

            switch (quality)
            {
            case Microsoft.Xna.Framework.Content.Pipeline.Audio.ConversionQuality.Low:
                outputSampleRate = (int)Math.Max(8000, srcFormat.SampleRate / 2);
                break;

            default:
                outputSampleRate = (int)Math.Max(8000, srcFormat.SampleRate);
                break;
            }

            dstFormat.SampleRate = (outputSampleRate == 0 ? srcFormat.SampleRate : outputSampleRate);             // set sample rate
            if (outputFormat == AudioFormatType.LinearPCM)
            {
                // if the output format is PCM, create a 16-bit int PCM file format description as an example
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
                dstFormat.BitsPerChannel   = 16;
                dstFormat.BytesPerPacket   = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
                dstFormat.FramesPerPacket  = 1;
                dstFormat.FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
            }
            else
            {
                // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = (outputFormat == AudioFormatType.iLBC ? 1 : srcFormat.ChannelsPerFrame);                 // for iLBC num channels must be 1

                // use AudioFormat API to fill out the rest of the description
                var fie = AudioStreamBasicDescription.GetFormatInfo(ref dstFormat);
                if (fie != AudioFormatError.None)
                {
                    return(false);
                }
            }

            var converter = AudioConverter.Create(srcFormat, dstFormat);

            converter.InputData += HandleInputData;

            // if the source has a cookie, get it and set it on the Audio Converter
            ReadCookie(sourceFile, converter);

            // get the actual formats back from the Audio Converter
            srcFormat = converter.CurrentInputStreamDescription;
            dstFormat = converter.CurrentOutputStreamDescription;

            // if encoding to AAC set the bitrate to 192k which is a nice value for this demo
            // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
            if (dstFormat.Format == AudioFormatType.MPEG4AAC)
            {
                uint outputBitRate = 192000;                 // 192k

                // ignore errors as setting may be invalid depending on format specifics such as samplerate
                try {
                    converter.EncodeBitRate = outputBitRate;
                } catch {
                }

                // get it back and print it out
                outputBitRate = converter.EncodeBitRate;
            }

            // create the destination file
            var destinationFile = AudioFile.Create(dest, containerType, dstFormat, AudioFileFlags.EraseFlags);

            // set up source buffers and data proc info struct
            afio            = new AudioFileIO(32768);
            afio.SourceFile = sourceFile;
            afio.SrcFormat  = srcFormat;

            if (srcFormat.BytesPerPacket == 0)
            {
                // if the source format is VBR, we need to get the maximum packet size
                // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
                // in the file (without actually scanning the whole file to find the largest packet,
                // as may happen with kAudioFilePropertyMaximumPacketSize)
                afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

                // how many packets can we read for our buffer size?
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }
            else
            {
                // CBR source format
                afio.SrcSizePerPacket  = srcFormat.BytesPerPacket;
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }

            // set up output buffers
            int       outputSizePerPacket = dstFormat.BytesPerPacket;       // this will be non-zero if the format is CBR
            const int theOutputBufSize    = 32768;
            var       outputBuffer        = Marshal.AllocHGlobal(theOutputBufSize);

            AudioStreamPacketDescription[] outputPacketDescriptions = null;

            if (outputSizePerPacket == 0)
            {
                // if the destination format is VBR, we need to get max size per packet from the converter
                outputSizePerPacket = (int)converter.MaximumOutputPacketSize;
            }
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
            int numOutputPackets = theOutputBufSize / outputSizePerPacket;

            // if the destination format has a cookie, get it and set it on the output file
            WriteCookie(converter, destinationFile);

            // write destination channel layout
            if (srcFormat.ChannelsPerFrame > 2)
            {
                WriteDestinationChannelLayout(converter, sourceFile, destinationFile);
            }

            long         totalOutputFrames = 0;     // used for debugging
            long         outputFilePos     = 0;
            AudioBuffers fillBufList       = new AudioBuffers(1);
            bool         error             = false;

            // loop to convert data
            while (true)
            {
                // set up output buffer list
                fillBufList [0] = new AudioBuffer()
                {
                    NumberChannels = dstFormat.ChannelsPerFrame,
                    DataByteSize   = theOutputBufSize,
                    Data           = outputBuffer
                };

                // convert data
                int ioOutputDataPackets = numOutputPackets;
                var fe = converter.FillComplexBuffer(ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
                // if interrupted in the process of the conversion call, we must handle the error appropriately
                if (fe != AudioConverterError.None)
                {
                    error = true;
                    break;
                }

                if (ioOutputDataPackets == 0)
                {
                    // this is the EOF condition
                    break;
                }

                // write to output file
                var inNumBytes = fillBufList [0].DataByteSize;

                var we = destinationFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
                if (we != 0)
                {
                    error = true;
                    break;
                }

                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.FramesPerPacket != 0)
                {
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
                }
                else
                {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (var i = 0; i < ioOutputDataPackets; ++i)
                    {
                        totalOutputFrames += outputPacketDescriptions [i].VariableFramesInPacket;
                    }
                }
            }

            Marshal.FreeHGlobal(outputBuffer);

            if (!error)
            {
                // write out any of the leading and trailing frames for compressed formats only
                if (dstFormat.BitsPerChannel == 0)
                {
                    // the total output frame count should agree with the priming and remainder frames recorded in the packet table
                    WritePacketTableInfo(converter, destinationFile);
                }

                // write the cookie again - sometimes codecs will update cookies at the end of a conversion
                WriteCookie(converter, destinationFile);
            }

            converter.Dispose();
            destinationFile.Dispose();
            sourceFile.Dispose();

            return(!error);
        }
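Both examples register HandleInputData as the converter's pull callback and share an AudioFileIO state object, but neither is shown in the snippets. Below is a minimal sketch of what they plausibly look like, reconstructed from the fields the examples touch; the SrcBuffer and SrcFilePos names, the ReadPacketData overload, and the namespace line are assumptions, not code from the original projects.

		using System;
		using System.Runtime.InteropServices;
		using AudioToolbox; // MonoTouch.AudioToolbox on classic Xamarin

		// Shared conversion state; fields beyond those used above have assumed names.
		class AudioFileIO
		{
			public AudioFileIO (int bufferSize)
			{
				SrcBufferSize = bufferSize;
				SrcBuffer = Marshal.AllocHGlobal (bufferSize); // unmanaged read buffer
			}

			public AudioFile SourceFile;                               // opened source audio file
			public AudioStreamBasicDescription SrcFormat;              // source data format
			public int SrcBufferSize;                                  // read buffer size in bytes
			public IntPtr SrcBuffer;                                   // read buffer (assumed name)
			public int SrcSizePerPacket;                               // bytes per source packet (upper bound for VBR)
			public int NumPacketsPerRead;                              // packets that fit in one buffer
			public long SrcFilePos;                                    // next packet index to read (assumed name)
			public AudioStreamPacketDescription[] PacketDescriptions;  // descriptions of the packets last read
		}

		// Called by the AudioConverter whenever it needs more source packets.
		static AudioConverterError HandleInputData (ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
		{
			// never hand the converter more packets than one buffer holds
			if (numberDataPackets > afio.NumPacketsPerRead)
				numberDataPackets = afio.NumPacketsPerRead;

			// read the next run of packets from the source file (assumed ReadPacketData overload)
			int outNumBytes = afio.SrcBufferSize;
			afio.PacketDescriptions = afio.SourceFile.ReadPacketData (false, afio.SrcFilePos, ref numberDataPackets, afio.SrcBuffer, ref outNumBytes);

			// advance the source packet position
			afio.SrcFilePos += numberDataPackets;

			// point the converter's buffer list at the freshly read bytes
			data [0] = new AudioBuffer () {
				NumberChannels = afio.SrcFormat.ChannelsPerFrame,
				DataByteSize = outNumBytes,
				Data = afio.SrcBuffer
			};

			// VBR sources must also pass their packet descriptions through
			if (dataPacketDescription != null)
				dataPacketDescription = afio.PacketDescriptions;

			return AudioConverterError.None;
		}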
Example #33
		public static bool Convert(string input, string output, AudioFormatType targetFormat, AudioFileType containerType, Microsoft.Xna.Framework.Content.Pipeline.Audio.ConversionQuality quality) {
			CFUrl source = CFUrl.FromFile (input);
			CFUrl dest = CFUrl.FromFile (output);
			var dstFormat = new AudioStreamBasicDescription ();
			var sourceFile = AudioFile.Open (source, AudioFilePermission.Read);
			AudioFormatType outputFormat = targetFormat;
			// get the source data format
			var srcFormat = (AudioStreamBasicDescription)sourceFile.DataFormat;
			var outputSampleRate = 0;
			switch (quality)
			{
			case Microsoft.Xna.Framework.Content.Pipeline.Audio.ConversionQuality.Low:
				outputSampleRate = (int)Math.Max (8000, srcFormat.SampleRate / 2);
				break;
			default:
				outputSampleRate = (int)Math.Max (8000, srcFormat.SampleRate);
				break;
			}

			dstFormat.SampleRate = (outputSampleRate == 0 ? srcFormat.SampleRate : outputSampleRate); // set sample rate
			if (outputFormat == AudioFormatType.LinearPCM) {
				// if the output format is PCM, create a 16-bit int PCM file format description as an example
				dstFormat.Format = outputFormat;
				dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
				dstFormat.BitsPerChannel = 16;
				dstFormat.BytesPerPacket = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
				dstFormat.FramesPerPacket = 1;
				dstFormat.FormatFlags = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
			} else {
				// compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
				dstFormat.Format = outputFormat;
				dstFormat.ChannelsPerFrame = (outputFormat == AudioFormatType.iLBC ? 1 : srcFormat.ChannelsPerFrame); // for iLBC num channels must be 1

				// use AudioFormat API to fill out the rest of the description
				var fie = AudioStreamBasicDescription.GetFormatInfo (ref dstFormat);
				if (fie != AudioFormatError.None) {
					return false;
				}
			}

			var converter = AudioConverter.Create (srcFormat, dstFormat);
			converter.InputData += HandleInputData;

			// if the source has a cookie, get it and set it on the Audio Converter
			ReadCookie (sourceFile, converter);

			// get the actual formats back from the Audio Converter
			srcFormat = converter.CurrentInputStreamDescription;
			dstFormat = converter.CurrentOutputStreamDescription;

			// if encoding to AAC set the bitrate to 192k which is a nice value for this demo
			// kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
			if (dstFormat.Format == AudioFormatType.MPEG4AAC) {
				uint outputBitRate = 192000; // 192k

				// ignore errors, as the setting may be invalid depending on format specifics such as the sample rate
				try {
					converter.EncodeBitRate = outputBitRate;
				} catch {
				}

				// read the value back; the converter may have adjusted or clamped it
				outputBitRate = converter.EncodeBitRate;
			}

			// create the destination file 
			var destinationFile = AudioFile.Create (dest, containerType, dstFormat, AudioFileFlags.EraseFlags);

			// set up source buffers and data proc info struct
			afio = new AudioFileIO (32768);
			afio.SourceFile = sourceFile;
			afio.SrcFormat = srcFormat;

			if (srcFormat.BytesPerPacket == 0) {
				// if the source format is VBR, we need to get the maximum packet size
				// use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
				// in the file (without actually scanning the whole file to find the largest packet,
				// as may happen with kAudioFilePropertyMaximumPacketSize)
				afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

				// how many packets can we read for our buffer size?
				afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;

				// allocate memory for the PacketDescription structures describing the layout of each packet
				afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
			} else {
				// CBR source format
				afio.SrcSizePerPacket = srcFormat.BytesPerPacket;
				afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
				// allocate memory for the PacketDescription structures describing the layout of each packet
				afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
			}

			// set up output buffers
			int outputSizePerPacket = dstFormat.BytesPerPacket; // this will be non-zero if the format is CBR
			const int theOutputBufSize = 32768;
			var outputBuffer = Marshal.AllocHGlobal (theOutputBufSize);
			AudioStreamPacketDescription[] outputPacketDescriptions = null;

			if (outputSizePerPacket == 0) {
				// if the destination format is VBR, we need to get max size per packet from the converter
				outputSizePerPacket = (int)converter.MaximumOutputPacketSize;
			}
			// allocate memory for the PacketDescription structures describing the layout of each packet
			outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
			int numOutputPackets = theOutputBufSize / outputSizePerPacket;

			// if the destination format has a cookie, get it and set it on the output file
			WriteCookie (converter, destinationFile);

			// write destination channel layout
			if (srcFormat.ChannelsPerFrame > 2) {
				WriteDestinationChannelLayout (converter, sourceFile, destinationFile);
			}

			long totalOutputFrames = 0; // used for debugging
			long outputFilePos = 0;
			AudioBuffers fillBufList = new AudioBuffers (1);
			bool error = false;

			// loop to convert data
			while (true) {
				// set up output buffer list
				fillBufList [0] = new AudioBuffer () {
					NumberChannels = dstFormat.ChannelsPerFrame,
					DataByteSize = theOutputBufSize,
					Data = outputBuffer
				};

				// convert data
				int ioOutputDataPackets = numOutputPackets;
				var fe = converter.FillComplexBuffer (ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
				// if interrupted in the process of the conversion call, we must handle the error appropriately
				if (fe != AudioConverterError.None) {
					error = true;
					break;
				}

				if (ioOutputDataPackets == 0) {
					// this is the EOF condition
					break;
				}

				// write to output file
				var inNumBytes = fillBufList [0].DataByteSize;

				var we = destinationFile.WritePackets (false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
				if (we != 0) {
					error = true;
					break;
				}

				// advance output file packet position
				outputFilePos += ioOutputDataPackets;

				if (dstFormat.FramesPerPacket != 0) { 
					// the format has constant frames per packet
					totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
				} else {
					// variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
					for (var i = 0; i < ioOutputDataPackets; ++i)
						totalOutputFrames += outputPacketDescriptions [i].VariableFramesInPacket;
				}

			}

			Marshal.FreeHGlobal (outputBuffer);

			if (!error) {
				// write out any of the leading and trailing frames for compressed formats only
				if (dstFormat.BitsPerChannel == 0) {
					// the total output frame count should agree with the priming and remainder frames recorded in the packet table
					WritePacketTableInfo (converter, destinationFile);
				}

				// write the cookie again - sometimes codecs will update cookies at the end of a conversion
				WriteCookie (converter, destinationFile);
			}

			converter.Dispose ();
			destinationFile.Dispose ();
			sourceFile.Dispose ();

			return !error;
		}
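The ReadCookie and WriteCookie helpers called in both examples are likewise not shown. A minimal sketch, assuming the Xamarin.iOS MagicCookie property on AudioFile and the compression/decompression cookie properties on AudioConverter:

		// Copy the source file's magic cookie (codec private data) to the converter, if it has one.
		static void ReadCookie (AudioFile sourceFile, AudioConverter converter)
		{
			var cookie = sourceFile.MagicCookie;

			// many formats carry no cookie at all, so an empty result is fine
			if (cookie != null && cookie.Length != 0)
				converter.DecompressionMagicCookie = cookie;
		}

		// Copy the converter's output cookie to the destination file, if it produced one.
		static void WriteCookie (AudioConverter converter, AudioFile destinationFile)
		{
			var cookie = converter.CompressionMagicCookie;

			if (cookie != null && cookie.Length != 0)
				destinationFile.MagicCookie = cookie;
		}

A hypothetical call site for Example #33 (file names are illustrative):

		bool ok = Convert ("song.wav", "song.m4a", AudioFormatType.MPEG4AAC, AudioFileType.M4A,
			Microsoft.Xna.Framework.Content.Pipeline.Audio.ConversionQuality.Best);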