Example #1
        void PrepareAudioQueue(MonoTouch.CoreFoundation.CFUrl url)
        {
            AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
            {
                SampleRate       = _samplingRate,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsBigEndian | AudioFormatFlags.LinearPCMIsPacked,
                FramesPerPacket  = 1,
                ChannelsPerFrame = 1,  // mono (1 channel)
                BitsPerChannel   = 16, // 16-bit
                BytesPerPacket   = 2,
                BytesPerFrame    = 2,
                Reserved         = 0
            };

            _audioFile = AudioFile.Create(url, AudioFileType.AIFF, audioFormat, AudioFileFlags.EraseFlags);

            _queue = new InputAudioQueue(audioFormat);
            _queue.InputCompleted += new EventHandler <InputCompletedEventArgs>(_queue_InputCompleted);

            _startingPacketCount = 0;
            _numPacketsToWrite   = 1024;
            _bufferByteSize      = (int)(_numPacketsToWrite * audioFormat.BytesPerPacket);

            // prepare the queue buffers
            IntPtr bufferPtr;

            for (int index = 0; index < 3; index++)
            {
                //_queue.AllocateBuffer(_bufferByteSize, out bufferPtr);
                _queue.AllocateBufferWithPacketDescriptors(_bufferByteSize, _numPacketsToWrite, out bufferPtr);
                _queue.EnqueueBuffer(bufferPtr, _bufferByteSize, null);
            }
        }
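
The method above only prepares the queue; starting and stopping the capture happens elsewhere in the class. A minimal driver sketch, assuming the same fields and two hypothetical method names:

        // Hypothetical driver (not part of the original sample): start and stop recording
        // with the queue prepared above.
        void StartRecording(string path)
        {
            PrepareAudioQueue(MonoTouch.CoreFoundation.CFUrl.FromFile(path));
            _queue.Start();       // begin capturing into the three enqueued buffers
        }

        void StopRecording()
        {
            _queue.Stop(true);    // stop immediately
            _queue.Dispose();
            _audioFile.Dispose(); // close the AIFF file written by the InputCompleted handler
        }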
Example #2
        private void LoadAudioFileDetails(object pathObject)
        {
            string path = (string)pathObject;

            IAudioFile audioFile = AudioFile.Create(path, false);
            decimal    bitrate   = audioFile.Bitrate; // force bitrate calculation

            DescriptiveLameTagReader lameTagReader = new DescriptiveLameTagReader(path);

            Invoke(new Action <IAudioFile, DescriptiveLameTagReader>(SetAudioFileDetails), audioFile, lameTagReader);
        }
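
Because the method takes a single object parameter it matches the WaitCallback signature, so a plausible (assumed, not shown here) way to run it off the UI thread is via the thread pool:

            // Hypothetical caller: load the details on a worker thread; the Invoke() call above
            // marshals the results back to the UI thread through SetAudioFileDetails.
            System.Threading.ThreadPool.QueueUserWorkItem(LoadAudioFileDetails, @"C:\Music\song.mp3");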
Example #3
        private void OnLoadFile(string fileName)
        {
            _id3v2 = new ID3v2Tag(fileName);
            IAudioFile audioFile = AudioFile.Create(fileName, true);
            DescriptiveLameTagReader lameTagReader = new DescriptiveLameTagReader(fileName);

            _fullFileName = fileName;

            FileName     = Path.GetFileName(fileName);
            Artist       = _id3v2.Artist;
            Title        = _id3v2.Title;
            Album        = _id3v2.Album;
            Genre        = _id3v2.Genre;
            Year         = _id3v2.Year;
            Track        = _id3v2.TrackNumber;
            ID3v2Version = _id3v2.Header.TagVersion;

            if (_id3v2.PictureList == null || _id3v2.PictureList.Count == 0)
            {
                PictureCollection = new ObservableCollection <Picture>();
            }
            else
            {
                var pictureCollection = new ObservableCollection <Picture>();
                foreach (var apic in _id3v2.PictureList)
                {
                    pictureCollection.Add(new Picture(apic));
                }
                PictureCollection = pictureCollection;
            }

            Comment = null;
            if (_id3v2.CommentsList != null)
            {
                foreach (var item in _id3v2.CommentsList)
                {
                    if (item.Description != "iTunNORM")
                    {
                        Comment = item.Value;
                        break;
                    }
                }
            }

            PlayLength    = audioFile.TotalSeconds;
            Bitrate       = audioFile.Bitrate;
            EncoderPreset = string.Format("{0} {1}", lameTagReader.LameTagInfoEncoder, lameTagReader.UsePresetGuess == UsePresetGuess.NotNeeded ? lameTagReader.Preset : lameTagReader.PresetGuess);

            CanSave = true;
        }
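
A natural counterpart is writing the edited fields back to the file; a minimal sketch, assuming a save handler and using the ID3v2Tag.Save(fileName) call that Example #6 shows in its comments:

        // Hypothetical save counterpart (not in the original): push the edited properties back into the tag.
        private void OnSaveFile()
        {
            _id3v2.Artist      = Artist;
            _id3v2.Title       = Title;
            _id3v2.Album       = Album;
            _id3v2.Genre       = Genre;
            _id3v2.Year        = Year;
            _id3v2.TrackNumber = Track;
            _id3v2.Save(_fullFileName);
            CanSave = false;
        }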
Example #4
 public static void Setup()
 {
     Ruby.Initialize();
     RPG.Create();
     Table.Create();
     Tone.Create();
     Map.Create();
     AudioFile.Create();
     Event.Create();
     Page.Create();
     EventCommand.Create();
     Condition.Create();
     MoveRoute.Create();
     MoveCommand.Create();
     Graphic.Create();
     MapInfo.Create();
     Tileset.Create();
 }
Example #5
        public static bool Convert(string input, string output, AudioFormatType targetFormat, AudioFileType containerType, Microsoft.Xna.Framework.Content.Pipeline.Audio.ConversionQuality quality)
        {
            CFUrl           source       = CFUrl.FromFile(input);
            CFUrl           dest         = CFUrl.FromFile(output);
            var             dstFormat    = new AudioStreamBasicDescription();
            var             sourceFile   = AudioFile.Open(source, AudioFilePermission.Read);
            AudioFormatType outputFormat = targetFormat;
            // get the source data format
            var srcFormat        = (AudioStreamBasicDescription)sourceFile.DataFormat;
            var outputSampleRate = 0;

            switch (quality)
            {
            case Microsoft.Xna.Framework.Content.Pipeline.Audio.ConversionQuality.Low:
                outputSampleRate = (int)Math.Max(8000, srcFormat.SampleRate / 2);
                break;

            default:
                outputSampleRate = (int)Math.Max(8000, srcFormat.SampleRate);
                break;
            }

            dstFormat.SampleRate = (outputSampleRate == 0 ? srcFormat.SampleRate : outputSampleRate);             // set sample rate
            if (outputFormat == AudioFormatType.LinearPCM)
            {
                // if the output format is PCM, create a 16-bit int PCM file format description as an example
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
                dstFormat.BitsPerChannel   = 16;
                dstFormat.BytesPerPacket   = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
                dstFormat.FramesPerPacket  = 1;
                dstFormat.FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
            }
            else
            {
                // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = (outputFormat == AudioFormatType.iLBC ? 1 : srcFormat.ChannelsPerFrame);                 // for iLBC num channels must be 1

                // use AudioFormat API to fill out the rest of the description
                var fie = AudioStreamBasicDescription.GetFormatInfo(ref dstFormat);
                if (fie != AudioFormatError.None)
                {
                    return(false);
                }
            }

            var converter = AudioConverter.Create(srcFormat, dstFormat);

            converter.InputData += HandleInputData;

            // if the source has a cookie, get it and set it on the Audio Converter
            ReadCookie(sourceFile, converter);

            // get the actual formats back from the Audio Converter
            srcFormat = converter.CurrentInputStreamDescription;
            dstFormat = converter.CurrentOutputStreamDescription;

            // if encoding to AAC set the bitrate to 192k which is a nice value for this demo
            // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
            if (dstFormat.Format == AudioFormatType.MPEG4AAC)
            {
                uint outputBitRate = 192000;                 // 192k

                // ignore errors as setting may be invalid depending on format specifics such as samplerate
                try {
                    converter.EncodeBitRate = outputBitRate;
                } catch {
                }

                // get it back and print it out
                outputBitRate = converter.EncodeBitRate;
            }

            // create the destination file
            var destinationFile = AudioFile.Create(dest, containerType, dstFormat, AudioFileFlags.EraseFlags);

            // set up source buffers and data proc info struct
            afio            = new AudioFileIO(32768);
            afio.SourceFile = sourceFile;
            afio.SrcFormat  = srcFormat;

            if (srcFormat.BytesPerPacket == 0)
            {
                // if the source format is VBR, we need to get the maximum packet size
                // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
                // in the file (without actually scanning the whole file to find the largest packet,
                // as may happen with kAudioFilePropertyMaximumPacketSize)
                afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

                // how many packets can we read for our buffer size?
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }
            else
            {
                // CBR source format
                afio.SrcSizePerPacket  = srcFormat.BytesPerPacket;
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }

            // set up output buffers
            int       outputSizePerPacket = dstFormat.BytesPerPacket;       // this will be non-zero if the format is CBR
            const int theOutputBufSize    = 32768;
            var       outputBuffer        = Marshal.AllocHGlobal(theOutputBufSize);

            AudioStreamPacketDescription[] outputPacketDescriptions = null;

            if (outputSizePerPacket == 0)
            {
                // if the destination format is VBR, we need to get max size per packet from the converter
                outputSizePerPacket = (int)converter.MaximumOutputPacketSize;
            }
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
            int numOutputPackets = theOutputBufSize / outputSizePerPacket;

            // if the destination format has a cookie, get it and set it on the output file
            WriteCookie(converter, destinationFile);

            // write destination channel layout
            if (srcFormat.ChannelsPerFrame > 2)
            {
                WriteDestinationChannelLayout(converter, sourceFile, destinationFile);
            }

            long         totalOutputFrames = 0;     // used for debugging
            long         outputFilePos     = 0;
            AudioBuffers fillBufList       = new AudioBuffers(1);
            bool         error             = false;

            // loop to convert data
            while (true)
            {
                // set up output buffer list
                fillBufList [0] = new AudioBuffer()
                {
                    NumberChannels = dstFormat.ChannelsPerFrame,
                    DataByteSize   = theOutputBufSize,
                    Data           = outputBuffer
                };

                // convert data
                int ioOutputDataPackets = numOutputPackets;
                var fe = converter.FillComplexBuffer(ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
                // if interrupted in the process of the conversion call, we must handle the error appropriately
                if (fe != AudioConverterError.None)
                {
                    error = true;
                    break;
                }

                if (ioOutputDataPackets == 0)
                {
                    // this is the EOF condition
                    break;
                }

                // write to output file
                var inNumBytes = fillBufList [0].DataByteSize;

                var we = destinationFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
                if (we != 0)
                {
                    error = true;
                    break;
                }

                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.FramesPerPacket != 0)
                {
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
                }
                else
                {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (var i = 0; i < ioOutputDataPackets; ++i)
                    {
                        totalOutputFrames += outputPacketDescriptions [i].VariableFramesInPacket;
                    }
                }
            }

            Marshal.FreeHGlobal(outputBuffer);

            if (!error)
            {
                // write out any of the leading and trailing frames for compressed formats only
                if (dstFormat.BitsPerChannel == 0)
                {
                    // our output frame count should jive with the packet table info written below
                    WritePacketTableInfo(converter, destinationFile);
                }

                // write the cookie again - sometimes codecs will update cookies at the end of a conversion
                WriteCookie(converter, destinationFile);
            }

            converter.Dispose();
            destinationFile.Dispose();
            sourceFile.Dispose();

            return(!error);
        }
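
A hedged usage sketch for this converter; the containing class name (AudioConverterTool) and file names are assumptions, the enums come from the signature above:

            // Hypothetical call site: convert a WAV file to AAC in an M4A container.
            bool ok = AudioConverterTool.Convert(
                "song.wav",
                "song.m4a",
                AudioFormatType.MPEG4AAC,
                AudioFileType.M4A,
                Microsoft.Xna.Framework.Content.Pipeline.Audio.ConversionQuality.Best);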
Example #6
        static void Main(string[] args)
        {
            string fileName = GetFileName(args);

            if (fileName == null)
            {
                return;
            }

            Console.WriteLine();
            Console.WriteLine(string.Format("File: {0}", fileName));
            Console.WriteLine();

            IAudioFile audioFile = AudioFile.Create(fileName, true);

            Console.WriteLine("Audio Info");
            Console.WriteLine();
            Console.WriteLine(string.Format("Type:      {0}", EnumUtils.GetDescription(audioFile.FileType)));
            Console.WriteLine(string.Format("Length:    {0}:{1:00}", (int)audioFile.TotalSeconds / 60, (int)audioFile.TotalSeconds % 60));
            Console.WriteLine(string.Format("Bitrate:   {0:#,0} kbps", (int)audioFile.Bitrate));
            Console.WriteLine(string.Format("Frequency: {0:#,0} Hz", audioFile.Frequency));
            Console.WriteLine(string.Format("Channels:  {0}", audioFile.Channels));
            Console.WriteLine();

            if (ID3v2Tag.DoesTagExist(fileName))
            {
                IID3v2Tag id3v2 = new ID3v2Tag(fileName);

                Console.WriteLine(EnumUtils.GetDescription(id3v2.Header.TagVersion));
                Console.WriteLine();

                Console.WriteLine(string.Format("Artist:    {0}", id3v2.Artist));
                Console.WriteLine(string.Format("Title:     {0}", id3v2.Title));
                Console.WriteLine(string.Format("Album:     {0}", id3v2.Album));
                Console.WriteLine(string.Format("Year:      {0}", id3v2.Year));
                Console.WriteLine(string.Format("Track:     {0}", id3v2.TrackNumber));
                Console.WriteLine(string.Format("Genre:     {0}", id3v2.Genre));
                Console.WriteLine(string.Format("Pictures:  {0}", id3v2.PictureList.Count));
                Console.WriteLine(string.Format("Comments:  {0}", id3v2.CommentsList.Count));
                Console.WriteLine();

                // Example of saving an ID3v2 tag
                //
                // id3v2.Title = "New song title";
                // id3v2.Save(fileName);
            }

            if (ID3v1Tag.DoesTagExist(fileName))
            {
                IID3v1Tag id3v1 = new ID3v1Tag(fileName);

                Console.WriteLine(EnumUtils.GetDescription(id3v1.TagVersion));
                Console.WriteLine();

                Console.WriteLine(string.Format("Artist:    {0}", id3v1.Artist));
                Console.WriteLine(string.Format("Title:     {0}", id3v1.Title));
                Console.WriteLine(string.Format("Album:     {0}", id3v1.Album));
                Console.WriteLine(string.Format("Year:      {0}", id3v1.Year));
                Console.WriteLine(string.Format("Comment:   {0}", id3v1.Comment));
                Console.WriteLine(string.Format("Track:     {0}", id3v1.TrackNumber));
                Console.WriteLine(string.Format("Genre:     {0}", GenreHelper.GenreByIndex[id3v1.GenreIndex]));
                Console.WriteLine();

                // Example of saving an ID3v1 tag
                //
                // id3v1.Title = "New song title";
                // id3v1.Save(fileName);
            }

            if (audioFile.FileType == AudioFileType.Flac)
            {
                VorbisComment vorbis = new VorbisComment(fileName);

                Console.WriteLine("Vorbis Comment");
                Console.WriteLine();

                Console.WriteLine(string.Format("Artist:    {0}", vorbis.Artist));
                Console.WriteLine(string.Format("Title:     {0}", vorbis.Title));
                Console.WriteLine(string.Format("Album:     {0}", vorbis.Album));
                Console.WriteLine(string.Format("Year:      {0}", vorbis.Year));
                Console.WriteLine(string.Format("Comment:   {0}", vorbis.Comment));
                Console.WriteLine(string.Format("Track:     {0}", vorbis.TrackNumber));
                Console.WriteLine(string.Format("Genre:     {0}", vorbis.Genre));
                Console.WriteLine(string.Format("Vendor:    {0}", vorbis.Vendor));
                Console.WriteLine();

                // Example of saving a Vorbis Comment
                //
                // vorbis.Title = "New song title";
                // vorbis.Save(fileName);
            }
        }
Example #7
        bool DoConvertFile(CFUrl sourceURL, NSUrl destinationURL, AudioFormatType outputFormat, double outputSampleRate)
        {
            AudioStreamBasicDescription dstFormat = new AudioStreamBasicDescription();

            // in this sample we should never be on the main thread here
            Debug.Assert(!NSThread.IsMain);

            // transition thread state to State::Running before continuing
            AppDelegate.ThreadStateSetRunning();

            Debug.WriteLine("DoConvertFile");

            // get the source file
            var sourceFile = AudioFile.Open(sourceURL, AudioFilePermission.Read);

            // get the source data format
            var srcFormat = (AudioStreamBasicDescription)sourceFile.DataFormat;

            // setup the output file format
            dstFormat.SampleRate = (outputSampleRate == 0 ? srcFormat.SampleRate : outputSampleRate);             // set sample rate
            if (outputFormat == AudioFormatType.LinearPCM)
            {
                // if the output format is PCM, create a 16-bit int PCM file format description as an example
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
                dstFormat.BitsPerChannel   = 16;
                dstFormat.BytesPerPacket   = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
                dstFormat.FramesPerPacket  = 1;
                dstFormat.FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
            }
            else
            {
                // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
                dstFormat.Format           = outputFormat;
                dstFormat.ChannelsPerFrame = (outputFormat == AudioFormatType.iLBC ? 1 : srcFormat.ChannelsPerFrame);                 // for iLBC num channels must be 1

                // use AudioFormat API to fill out the rest of the description
                var fie = AudioStreamBasicDescription.GetFormatInfo(ref dstFormat);
                if (fie != AudioFormatError.None)
                {
                    Debug.Print("Cannot create destination format {0:x}", fie);

                    AppDelegate.ThreadStateSetDone();
                    return(false);
                }
            }

            // create the AudioConverter
            AudioConverterError ce;
            var converter = AudioConverter.Create(srcFormat, dstFormat, out ce);

            Debug.Assert(ce == AudioConverterError.None);

            converter.InputData += EncoderDataProc;

            // if the source has a cookie, get it and set it on the Audio Converter
            ReadCookie(sourceFile, converter);

            // get the actual formats back from the Audio Converter
            srcFormat = converter.CurrentInputStreamDescription;
            dstFormat = converter.CurrentOutputStreamDescription;

            // if encoding to AAC set the bitrate to 192k which is a nice value for this demo
            // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
            if (dstFormat.Format == AudioFormatType.MPEG4AAC)
            {
                uint outputBitRate = 192000;                 // 192k

                // ignore errors as setting may be invalid depending on format specifics such as samplerate
                try {
                    converter.EncodeBitRate = outputBitRate;
                } catch {
                }

                // get it back and print it out
                outputBitRate = converter.EncodeBitRate;
                Debug.Print("AAC Encode Bitrate: {0}", outputBitRate);
            }

            // can the Audio Converter resume conversion after an interruption?
            // this property may be queried at any time after construction of the Audio Converter after setting its output format
            // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
            // construction time since it means less code to execute during or after interruption time
            bool canResumeFromInterruption;

            try {
                canResumeFromInterruption = converter.CanResumeFromInterruption;
                Debug.Print("Audio Converter {0} continue after interruption!", canResumeFromInterruption ? "CAN" : "CANNOT");
            } catch (Exception e) {
                // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
                // then the codec being used is not a hardware codec so we're not concerned about codec state
                // we are always going to be able to resume conversion after an interruption

                canResumeFromInterruption = false;
                Debug.Print("CanResumeFromInterruption: {0}", e.Message);
            }

            // create the destination file
            var destinationFile = AudioFile.Create(destinationURL, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags);

            // set up source buffers and data proc info struct
            afio            = new AudioFileIO(32768);
            afio.SourceFile = sourceFile;
            afio.SrcFormat  = srcFormat;

            if (srcFormat.BytesPerPacket == 0)
            {
                // if the source format is VBR, we need to get the maximum packet size
                // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
                // in the file (without actually scanning the whole file to find the largest packet,
                // as may happen with kAudioFilePropertyMaximumPacketSize)
                afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

                // how many packets can we read for our buffer size?
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }
            else
            {
                // CBR source format
                afio.SrcSizePerPacket  = srcFormat.BytesPerPacket;
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
            }

            // set up output buffers
            int       outputSizePerPacket = dstFormat.BytesPerPacket;       // this will be non-zero if the format is CBR
            const int theOutputBufSize    = 32768;
            var       outputBuffer        = Marshal.AllocHGlobal(theOutputBufSize);

            AudioStreamPacketDescription[] outputPacketDescriptions = null;

            if (outputSizePerPacket == 0)
            {
                // if the destination format is VBR, we need to get max size per packet from the converter
                outputSizePerPacket = (int)converter.MaximumOutputPacketSize;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
            }
            int numOutputPackets = theOutputBufSize / outputSizePerPacket;

            // if the destination format has a cookie, get it and set it on the output file
            WriteCookie(converter, destinationFile);

            // write destination channel layout
            if (srcFormat.ChannelsPerFrame > 2)
            {
                WriteDestinationChannelLayout(converter, sourceFile, destinationFile);
            }

            long         totalOutputFrames = 0;     // used for debugging
            long         outputFilePos     = 0;
            AudioBuffers fillBufList       = new AudioBuffers(1);
            bool         error             = false;

            // loop to convert data
            Debug.WriteLine("Converting...");
            while (true)
            {
                // set up output buffer list
                fillBufList [0] = new AudioBuffer()
                {
                    NumberChannels = dstFormat.ChannelsPerFrame,
                    DataByteSize   = theOutputBufSize,
                    Data           = outputBuffer
                };

                // this will block if we're interrupted
                var wasInterrupted = AppDelegate.ThreadStatePausedCheck();

                if (wasInterrupted && !canResumeFromInterruption)
                {
                    // this is our interruption termination condition
                    // an interruption has occurred but the Audio Converter cannot continue
                    Debug.WriteLine("Cannot resume from interruption");
                    error = true;
                    break;
                }

                // convert data
                int ioOutputDataPackets = numOutputPackets;
                var fe = converter.FillComplexBuffer(ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
                // if interrupted in the process of the conversion call, we must handle the error appropriately
                if (fe != AudioConverterError.None)
                {
                    Debug.Print("FillComplexBuffer: {0}", fe);
                    error = true;
                    break;
                }

                if (ioOutputDataPackets == 0)
                {
                    // this is the EOF condition
                    break;
                }

                // write to output file
                var inNumBytes = fillBufList [0].DataByteSize;

                var we = destinationFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
                if (we != 0)
                {
                    Debug.Print("WritePackets: {0}", we);
                    error = true;
                    break;
                }

                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.FramesPerPacket != 0)
                {
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
                }
                else
                {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (var i = 0; i < ioOutputDataPackets; ++i)
                    {
                        totalOutputFrames += outputPacketDescriptions [i].VariableFramesInPacket;
                    }
                }
            }

            Marshal.FreeHGlobal(outputBuffer);

            if (!error)
            {
                // write out any of the leading and trailing frames for compressed formats only
                if (dstFormat.BitsPerChannel == 0)
                {
                    // our output frame count should jive with the packet table info written below
                    Debug.Print("Total number of output frames counted: {0}", totalOutputFrames);
                    WritePacketTableInfo(converter, destinationFile);
                }

                // write the cookie again - sometimes codecs will update cookies at the end of a conversion
                WriteCookie(converter, destinationFile);
            }

            converter.Dispose();
            destinationFile.Dispose();
            sourceFile.Dispose();

            // transition thread state to State.Done before continuing
            AppDelegate.ThreadStateSetDone();

            return(!error);
        }
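
A possible call site, assuming the sample's background worker thread; passing 0 keeps the source sample rate, as handled at the top of the method:

            // Hypothetical invocation (paths are placeholders).
            bool converted = DoConvertFile(
                CFUrl.FromFile("/tmp/source.caf"),
                NSUrl.FromFilename("/tmp/output-aac.caf"),
                AudioFormatType.MPEG4AAC,
                0);   // 0 = keep the source sample rate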
Example #8
        /// <summary>
        /// Loads the track details from the tags in the associated mp3
        /// </summary>
        /// <param name="track">The track to load the details of.</param>
        /// <param name="updateLength">if set to <c>true</c> [update length].</param>
        public static void LoadTrack(Track track, bool updateLength = true)
        {
            if (!File.Exists(track.Filename))
            {
                return;
            }
            if (!track.Filename.ToLower().EndsWith(".mp3"))
            {
                return;
            }

            DebugHelper.WriteLine("Library - LoadTrack - " + track.Description);

            GuessTrackDetailsFromFileName(track);

            var dateLastModified = GetTrackLastModified(track.Filename);

            track.LastModified = dateLastModified;

            if (ID3v2Tag.DoesTagExist(track.Filename))
            {
                var tags = new ID3v2Tag(track.Filename);

                if (!string.IsNullOrEmpty(tags.Artist))
                {
                    track.Artist = tags.Artist.Trim();
                }
                if (!string.IsNullOrEmpty(tags.Artist))
                {
                    track.AlbumArtist = tags.Artist.Trim();
                }
                if (!string.IsNullOrEmpty(tags.Title))
                {
                    track.Title = tags.Title.Trim();
                }
                if (!string.IsNullOrEmpty(tags.Album))
                {
                    track.Album = tags.Album.Trim();
                }
                if (!string.IsNullOrEmpty(tags.Genre))
                {
                    track.Genre = tags.Genre.Trim();
                }
                if (!string.IsNullOrEmpty(tags.InitialKey))
                {
                    var tagKey = tags.InitialKey.Trim();
                    track.Key = KeyHelper.ParseKey(tagKey);
                }

                LoadArtistAndAlbumArtist(track);

                if (tags.LengthMilliseconds.HasValue)
                {
                    track.Length = (decimal)tags.LengthMilliseconds / 1000M;
                }

                decimal bpm;
                if (decimal.TryParse(tags.BPM, out bpm))
                {
                    track.Bpm = bpm;
                }

                track.Bpm      = BpmHelper.NormaliseBpm(track.Bpm);
                track.EndBpm   = track.Bpm;
                track.StartBpm = track.Bpm;
                track.Bpm      = BpmHelper.GetAdjustedBpmAverage(track.StartBpm, track.EndBpm);

                int trackNumber;
                var trackNumberTag = (tags.TrackNumber + "/").Split('/')[0].Trim();
                if (int.TryParse(trackNumberTag, out trackNumber))
                {
                    track.TrackNumber = trackNumber;
                }

                if (GenreCode.IsGenreCode(track.Genre))
                {
                    track.Genre = GenreCode.GetGenre(track.Genre);
                }
                if (track.Artist == "")
                {
                    track.Artist = NoValue;
                }
                if (track.AlbumArtist == "")
                {
                    track.AlbumArtist = NoValue;
                }
                if (track.Title == "")
                {
                    track.Title = NoValue;
                }
                if (track.Album == "")
                {
                    track.Album = NoValue;
                }
                if (track.Genre == "")
                {
                    track.Genre = NoValue;
                }
            }

            track.OriginalDescription = track.Description;
            track.FullLength          = track.Length;


            var audioFile = AudioFile.Create(track.Filename, true);

            track.Bitrate = audioFile.Bitrate;
            track.Length  = audioFile.TotalSeconds;

            if (updateLength)
            {
                UpdateLength(track);
            }

            //UpdateKey(track);

            track.Bpm = BpmHelper.GetAdjustedBpmAverage(track.StartBpm, track.EndBpm);

            track.OriginalDescription = track.Description;

            if (track.EndBpm == 0 || track.EndBpm == 100)
            {
                track.EndBpm = track.Bpm;
            }
            if (track.StartBpm == 0 || track.StartBpm == 100)
            {
                track.StartBpm = track.Bpm;
            }
        }
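
A hedged usage sketch; how Track instances are constructed is an assumption here:

            // Hypothetical usage: populate a track from its mp3 tags, then inspect the result.
            var track = new Track { Filename = @"C:\Music\song.mp3" };
            LoadTrack(track, updateLength: false);
            DebugHelper.WriteLine("Loaded - " + track.Artist + " - " + track.Title + " (" + track.Bpm + " BPM)");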
Example #9
        private OperationResult <AudioMetaData> MetaDataForFileFromIdSharp(string fileName)
        {
            var sw = new Stopwatch();

            sw.Start();
            AudioMetaData result    = new AudioMetaData();
            var           isSuccess = false;

            try
            {
                result.Filename = fileName;
                IAudioFile audioFile = AudioFile.Create(fileName, true);
                if (ID3v2Tag.DoesTagExist(fileName))
                {
                    IID3v2Tag id3v2 = new ID3v2Tag(fileName);
                    result.Release         = id3v2.Album;
                    result.Artist          = id3v2.AlbumArtist ?? id3v2.Artist;
                    result.ArtistRaw       = id3v2.AlbumArtist ?? id3v2.Artist;
                    result.Genres          = id3v2.Genre?.Split(new char[] { ',', '\\' });
                    result.TrackArtist     = id3v2.OriginalArtist ?? id3v2.Artist ?? id3v2.AlbumArtist;
                    result.TrackArtistRaw  = id3v2.OriginalArtist;
                    result.AudioBitrate    = (int?)audioFile.Bitrate;
                    result.AudioChannels   = audioFile.Channels;
                    result.AudioSampleRate = (int)audioFile.Frequency; // sample rate comes from Frequency, not Bitrate
                    result.Disk            = ID3TagsHelper.ParseDiscNumber(id3v2.DiscNumber);
                    result.DiskSubTitle    = id3v2.SetSubtitle;
                    result.Images          = id3v2.PictureList?.Select(x => new AudioMetaDataImage
                    {
                        Data        = x.PictureData,
                        Description = x.Description,
                        MimeType    = x.MimeType,
                        Type        = (AudioMetaDataImageType)x.PictureType
                    }).ToArray();
                    result.Time              = audioFile.TotalSeconds > 0 ? ((decimal?)audioFile.TotalSeconds).ToTimeSpan() : null;
                    result.Title             = id3v2.Title.ToTitleCase(false);
                    result.TrackNumber       = ID3TagsHelper.ParseTrackNumber(id3v2.TrackNumber);
                    result.TotalTrackNumbers = ID3TagsHelper.ParseTotalTrackNumber(id3v2.TrackNumber);
                    var year = id3v2.Year ?? id3v2.RecordingTimestamp ?? id3v2.ReleaseTimestamp ?? id3v2.OriginalReleaseTimestamp;
                    result.Year = ID3TagsHelper.ParseYear(year);
                    isSuccess   = result.IsValid;
                }

                if (!isSuccess)
                {
                    if (ID3v1Tag.DoesTagExist(fileName))
                    {
                        IID3v1Tag id3v1 = new ID3v1Tag(fileName);
                        result.Release         = id3v1.Album;
                        result.Artist          = id3v1.Artist;
                        result.ArtistRaw       = id3v1.Artist;
                        result.AudioBitrate    = (int?)audioFile.Bitrate;
                        result.AudioChannels   = audioFile.Channels;
                        result.AudioSampleRate = (int)audioFile.Frequency; // sample rate comes from Frequency, not Bitrate
                        result.Time            = audioFile.TotalSeconds > 0 ? ((decimal?)audioFile.TotalSeconds).ToTimeSpan() : null;
                        result.Title           = id3v1.Title.ToTitleCase(false);
                        result.TrackNumber     = SafeParser.ToNumber <short?>(id3v1.TrackNumber);
                        var date = SafeParser.ToDateTime(id3v1.Year);
                        result.Year = date?.Year ?? SafeParser.ToNumber <int?>(id3v1.Year);
                        isSuccess   = result.IsValid;
                    }
                }
            }
            catch (Exception ex)
            {
                this.Logger.LogError(ex, "MetaDataForFileFromIdSharp Filename [" + fileName + "] Error [" + ex.Serialize() + "]");
            }
            sw.Stop();
            return(new OperationResult <AudioMetaData>
            {
                IsSuccess = isSuccess,
                OperationTime = sw.ElapsedMilliseconds,
                Data = result
            });
        }
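
A hedged caller sketch, assumed to live in the same class so the Logger field is available:

            // Hypothetical caller: read the metadata and log the outcome.
            var metaResult = MetaDataForFileFromIdSharp(@"C:\Music\song.mp3");
            if (metaResult.IsSuccess)
            {
                this.Logger.LogInformation("Read {Artist} - {Title} in {Elapsed} ms",
                    metaResult.Data.Artist, metaResult.Data.Title, metaResult.OperationTime);
            }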
Example #10
        public AlbumTrack(string path)
        {
            if (string.IsNullOrWhiteSpace(path))
            {
                throw new ArgumentNullException("path");
            }

            _path = path;

            if (ID3v2Tag.DoesTagExist(path))
            {
                ID3v2Tag id3v2 = new ID3v2Tag(path);
                DiscNumber  = id3v2.DiscNumber;
                TrackNumber = id3v2.TrackNumber;
                Artist      = id3v2.Artist;
                Title       = id3v2.Title;
                ReleaseDate = id3v2.Year;
                Album       = id3v2.Album;
                Genre       = id3v2.Genre;
                if (id3v2.PictureList != null && id3v2.PictureList.Count == 1)
                {
                    Picture = id3v2.PictureList[0];
                }
            }

            if (ID3v1Tag.DoesTagExist(path))
            {
                ID3v1Tag id3v1 = new ID3v1Tag(path);
                if (string.IsNullOrWhiteSpace(TrackNumber))
                {
                    TrackNumber = string.Format("{0}", id3v1.TrackNumber);
                }
                if (string.IsNullOrWhiteSpace(Artist))
                {
                    Artist = id3v1.Artist;
                }
                if (string.IsNullOrWhiteSpace(Title))
                {
                    Title = id3v1.Title;
                }
                if (string.IsNullOrWhiteSpace(ReleaseDate))
                {
                    ReleaseDate = id3v1.Year;
                }
                if (string.IsNullOrWhiteSpace(Album))
                {
                    Album = id3v1.Album;
                }
                if (string.IsNullOrWhiteSpace(Genre))
                {
                    Genre = GenreHelper.GenreByIndex[id3v1.GenreIndex];
                }
            }

            IAudioFile audioFile = AudioFile.Create(_path, throwExceptionIfUnknown: true);

            Bitrate      = audioFile.Bitrate;
            TotalSeconds = audioFile.TotalSeconds;

            // TODO: APE, Lyrics3

            // TODO: When no tags, try to guess from path and file names
            // TODO: Parse Tracks for TotalTracks
            // TODO: Parse Disc for TotalDiscs

            // Parse track # from TrackNumber including total tracks
            if (!string.IsNullOrWhiteSpace(TrackNumber))
            {
                if (TrackNumber.Contains('/'))
                {
                    TrackNumber = TrackNumber.Split(new[] { '/' }, StringSplitOptions.RemoveEmptyEntries)[0];
                    // TODO: Set total tracks?
                }
            }

            // Parse disc # from DiscNumber including total discs
            if (!string.IsNullOrWhiteSpace(DiscNumber))
            {
                if (DiscNumber.Contains('/'))
                {
                    DiscNumber = DiscNumber.Split(new[] { '/' }, StringSplitOptions.RemoveEmptyEntries)[0];
                    // TODO: Set total discs?
                }
            }
            else
            {
                DiscNumber = "1";
            }
        }
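
Constructing one of these and reading the merged ID3v2/ID3v1 values might look like the following; the path is a placeholder:

            // Hypothetical usage: build a track from a file and print the parsed fields.
            var albumTrack = new AlbumTrack(@"C:\Music\01 - Song.mp3");
            Console.WriteLine(string.Format("Disc {0}, Track {1}: {2} - {3} ({4:#,0} kbps, {5:0.#}s)",
                albumTrack.DiscNumber, albumTrack.TrackNumber, albumTrack.Artist,
                albumTrack.Title, albumTrack.Bitrate, albumTrack.TotalSeconds));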
Example #11
        void Convert(string sourceFilePath, string destinationFilePath, AudioFormatType outputFormatType, int? sampleRate = null)
        {
            var destinationUrl = NSUrl.FromFilename(destinationFilePath);
            var sourceUrl      = NSUrl.FromFilename(sourceFilePath);

            // get the source file
            var name = Path.GetFileNameWithoutExtension(destinationFilePath);

            using var sourceFile = AudioFile.Open(sourceUrl, AudioFilePermission.Read);

            var srcFormat = (AudioStreamBasicDescription)sourceFile.DataFormat;
            var dstFormat = new AudioStreamBasicDescription();

            // setup the output file format
            dstFormat.SampleRate = sampleRate ?? srcFormat.SampleRate;
            if (outputFormatType == AudioFormatType.LinearPCM)
            {
                // if the output format is PCM, create a 16-bit int PCM file format
                dstFormat.Format           = AudioFormatType.LinearPCM;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;
                dstFormat.BitsPerChannel   = 16;
                dstFormat.BytesPerPacket   = dstFormat.BytesPerFrame = 2 * dstFormat.ChannelsPerFrame;
                dstFormat.FramesPerPacket  = 1;
                dstFormat.FormatFlags      = AudioFormatFlags.LinearPCMIsPacked | AudioFormatFlags.LinearPCMIsSignedInteger;
            }
            else
            {
                // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
                dstFormat.Format           = outputFormatType;
                dstFormat.ChannelsPerFrame = srcFormat.ChannelsPerFrame;                 // note: for iLBC the channel count would need to be forced to 1

                // use AudioFormat API to fill out the rest of the description
                var afe = AudioStreamBasicDescription.GetFormatInfo(ref dstFormat);
                Assert.AreEqual(AudioFormatError.None, afe, $"GetFormatInfo: {name}");
            }

            // create the AudioConverter
            using var converter = AudioConverter.Create(srcFormat, dstFormat, out var ce);
            Assert.AreEqual(AudioConverterError.None, ce, $"AudioConverterCreate: {name}");

            // set up source buffers and data proc info struct
            var afio = new AudioFileIO(32 * 1024);              // 32Kb

            converter.InputData += (ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription [] dataPacketDescription) => {
                return(EncoderDataProc(afio, ref numberDataPackets, data, ref dataPacketDescription));
            };

            // Some audio formats have a magic cookie associated with them which is required to decompress audio data
            // When converting audio data you must check to see if the format of the data has a magic cookie
            // If the audio data format has a magic cookie associated with it, you must add this information to an Audio Converter
            // using AudioConverterSetProperty and kAudioConverterDecompressionMagicCookie to appropriately decompress the data
            // http://developer.apple.com/mac/library/qa/qa2001/qa1318.html
            var cookie = sourceFile.MagicCookie;

            // if there is an error here, then the format doesn't have a cookie - this is perfectly fine as some formats do not
            if (cookie?.Length > 0)
            {
                converter.DecompressionMagicCookie = cookie;
            }

            // get the actual formats back from the Audio Converter
            srcFormat = converter.CurrentInputStreamDescription;
            dstFormat = converter.CurrentOutputStreamDescription;

            // create the destination file
            using var destinationFile = AudioFile.Create(destinationUrl, AudioFileType.CAF, dstFormat, AudioFileFlags.EraseFlags);

            // set up source buffers and data proc info struct
            afio.SourceFile = sourceFile;
            afio.SrcFormat  = srcFormat;

            if (srcFormat.BytesPerPacket == 0)
            {
                // if the source format is VBR, we need to get the maximum packet size
                // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
                // in the file (without actually scanning the whole file to find the largest packet,
                // as may happen with kAudioFilePropertyMaximumPacketSize)
                afio.SrcSizePerPacket = sourceFile.PacketSizeUpperBound;

                // how many packets can we read for our buffer size?
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                afio.PacketDescriptions = new AudioStreamPacketDescription [afio.NumPacketsPerRead];
            }
            else
            {
                // CBR source format
                afio.SrcSizePerPacket  = srcFormat.BytesPerPacket;
                afio.NumPacketsPerRead = afio.SrcBufferSize / afio.SrcSizePerPacket;
            }

            // set up output buffers
            int       outputSizePerPacket = dstFormat.BytesPerPacket; // this will be non-zero if the format is CBR
            const int theOutputBufSize    = 32 * 1024;                // 32Kb
            var       outputBuffer        = Marshal.AllocHGlobal(theOutputBufSize);

            AudioStreamPacketDescription [] outputPacketDescriptions = null;

            if (outputSizePerPacket == 0)
            {
                // if the destination format is VBR, we need to get max size per packet from the converter
                outputSizePerPacket = (int)converter.MaximumOutputPacketSize;

                // allocate memory for the PacketDescription structures describing the layout of each packet
                outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
            }
            int numOutputPackets = theOutputBufSize / outputSizePerPacket;

            // if the destination format has a cookie, get it and set it on the output file
            WriteCookie(converter, destinationFile);

            long         totalOutputFrames = 0;     // used for debugging
            long         outputFilePos     = 0;
            AudioBuffers fillBufList       = new AudioBuffers(1);

            // loop to convert data
            while (true)
            {
                // set up output buffer list
                fillBufList [0] = new AudioBuffer()
                {
                    NumberChannels = dstFormat.ChannelsPerFrame,
                    DataByteSize   = theOutputBufSize,
                    Data           = outputBuffer
                };

                // convert data
                int ioOutputDataPackets = numOutputPackets;
                var fe = converter.FillComplexBuffer(ref ioOutputDataPackets, fillBufList, outputPacketDescriptions);
                // if interrupted in the process of the conversion call, we must handle the error appropriately
                Assert.AreEqual(AudioConverterError.None, fe, $"FillComplexBuffer: {name}");

                if (ioOutputDataPackets == 0)
                {
                    // this is the EOF condition
                    break;
                }

                // write to output file
                var inNumBytes = fillBufList [0].DataByteSize;

                var we = destinationFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePos, ref ioOutputDataPackets, outputBuffer);
                Assert.AreEqual(AudioFileError.Success, we, $"WritePackets: {name}");

                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                // the format has constant frames per packet
                totalOutputFrames += (ioOutputDataPackets * dstFormat.FramesPerPacket);
            }

            Marshal.FreeHGlobal(outputBuffer);

            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.BitsPerChannel == 0)
            {
                WritePacketTableInfo(converter, destinationFile);
            }

            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFile);
        }
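
Two hedged invocations of this test helper; the resource path and output locations are placeholders:

            // Hypothetical invocations: keep the source sample rate for Apple Lossless, downsample for AAC.
            Convert("Resources/Hand.wav", Path.Combine(Path.GetTempPath(), "Hand-alac.caf"), AudioFormatType.AppleLossless);
            Convert("Resources/Hand.wav", Path.Combine(Path.GetTempPath(), "Hand-aac.caf"), AudioFormatType.MPEG4AAC, 22050);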