Example #1
        /// <summary>
        /// Loads the audio stream from the given byte array. If the AudioFileStream does not return an Ok status
        /// then a ContentLoadException is thrown.
        /// </summary>
        /// <param name="audiodata">The full byte array of the audio stream.</param>

        void LoadAudioStream(byte[] audiodata)
        {
            AudioFileStream afs = new AudioFileStream(AudioFileType.WAVE);

            //long pac = afs.DataPacketCount;
            AudioFileStreamStatus status = afs.ParseBytes(audiodata, false);
            AudioStreamBasicDescription asbd = afs.StreamBasicDescription;

            Rate = (float)asbd.SampleRate;
            Size = (int)afs.DataByteCount;

            if (asbd.ChannelsPerFrame == 1)
            {
                Format = asbd.BitsPerChannel == 8 ? ALFormat.Mono8 : ALFormat.Mono16;
            }
            else
            {
                Format = asbd.BitsPerChannel == 8 ? ALFormat.Stereo8 : ALFormat.Stereo16;
            }

            byte [] d = new byte[afs.DataByteCount];
            Array.Copy(audiodata, afs.DataOffset, d, 0, afs.DataByteCount);

            _data = d;

            var _dblDuration = (Size / ((asbd.BitsPerChannel / 8) * ((asbd.ChannelsPerFrame == 0) ? 1 : asbd.ChannelsPerFrame))) / asbd.SampleRate;

            _duration = TimeSpan.FromSeconds(_dblDuration);

            afs.Close();
            if (status != AudioFileStreamStatus.Ok)
            {
                throw new Content.ContentLoadException("Could not load audio data. The status code was " + status);
            }
        }
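The duration math above is plain PCM arithmetic: data bytes divided by the byte rate (bytes per sample × channels × sample rate). A minimal standalone sketch, with all values illustrative:

        // Sketch of the duration formula used above (values are illustrative):
        // seconds = dataBytes / (bytesPerSample * channels) / sampleRate
        int    dataBytes      = 441000;    // e.g. afs.DataByteCount
        int    bitsPerChannel = 16;        // asbd.BitsPerChannel
        int    channels       = 2;         // asbd.ChannelsPerFrame
        double sampleRate     = 44100.0;   // asbd.SampleRate

        double seconds = dataBytes / ((bitsPerChannel / 8.0) * channels) / sampleRate;
        TimeSpan duration = TimeSpan.FromSeconds(seconds);   // 2.5 seconds here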
Example #2
        public static ExtAudioFile GetExtAudioFile(NSUrl url, out AudioStreamBasicDescription audioDescription)
        {
            // Note: we cannot pass an NSUrl where a CFUrl is expected, so the following does not work:
            //ExtAudioFile ext = ExtAudioFile.OpenUrl(url);

            // Basic Descriptions
            AudioStreamBasicDescription fileFormat;
            AudioStreamBasicDescription outputFormat;

            // So now we create a CFUrl
            CFUrl curl = CFUrl.FromFile(url.Path);

            // Open the file
            ExtAudioFile ext = ExtAudioFile.OpenUrl(curl);

            // Get the audio format
            fileFormat = ext.FileDataFormat;

            // We don't know how to handle sounds with more than 2 channels (i.e. beyond stereo).
            // Remember that OpenAL sound effects must be mono to be spatialized anyway.
            if (fileFormat.ChannelsPerFrame > 2)
            {
#if DEBUG
                Console.WriteLine("Unsupported Format: Channel count [0] is greater than stereo.", fileFormat.ChannelsPerFrame);
#endif
                audioDescription = new AudioStreamBasicDescription();
                return(null);
            }

            // The output format must be linear PCM because that's the only type OpenAL knows how to deal with.
            // Set the client format to 16 bit signed integer (native-endian) data because that is the most
            // optimal format on iPhone/iPod Touch hardware.
            // Maintain the channel count and sample rate of the original source format.
            outputFormat                  = new AudioStreamBasicDescription();  // Create our output format description to be converted to
            outputFormat.SampleRate       = fileFormat.SampleRate;              // Preserve the original sample rate
            outputFormat.ChannelsPerFrame = fileFormat.ChannelsPerFrame;        // Preserve the original number of channels
            outputFormat.Format           = AudioFormatType.LinearPCM;          // We want Linear PCM

            // IsBigEndian is causing some problems with distorted sounds on MacOSX
//			outputFormat.FormatFlags = AudioFormatFlags.IsBigEndian
//							| AudioFormatFlags.IsPacked
//							| AudioFormatFlags.IsSignedInteger;

            outputFormat.FormatFlags = AudioFormatFlags.IsPacked
                                       | AudioFormatFlags.IsSignedInteger;
            outputFormat.FramesPerPacket = 1;                                 // We know for linear PCM, the definition is 1 frame per packet
            outputFormat.BitsPerChannel  = 16;                                // We know we want 16-bit
            outputFormat.BytesPerPacket  = 2 * outputFormat.ChannelsPerFrame; // We know we are using 16-bit, so 2-bytes per channel per frame
            outputFormat.BytesPerFrame   = 2 * outputFormat.ChannelsPerFrame; // For PCM, since 1 frame is 1 packet, it is the same as mBytesPerPacket

            // Set the desired client (output) data format
            ext.ClientDataFormat = outputFormat;

            // Copy the output format to the audio description that was passed in so the
            // info will be returned to the user.
            audioDescription = outputFormat;

            return(ext);
        }
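A possible call site for the helper above; the asset path is hypothetical and error handling is minimal:

        // Usage sketch (the .caf path is hypothetical):
        AudioStreamBasicDescription desc;
        using (NSUrl url = NSUrl.FromFilename("Content/explosion.caf"))
        {
            ExtAudioFile ext = GetExtAudioFile(url, out desc);
            if (ext != null)
            {
                Console.WriteLine("Client format: {0} Hz, {1} channel(s), {2}-bit",
                                  desc.SampleRate, desc.ChannelsPerFrame, desc.BitsPerChannel);
                ext.Dispose();
            }
        }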
Example #3
        void HandlePacketDecoded(object sender, PacketReceivedEventArgs e)
        {
            AudioFileStream afs = (AudioFileStream)sender;

            byte[] audioData = new byte[e.Bytes];
            Marshal.Copy(e.InputData, audioData, 0, e.Bytes);
            //Console.WriteLine ("Packet decoded ");
            AudioStreamBasicDescription asbd = afs.StreamBasicDescription;

            Rate = (float)asbd.SampleRate;
            Size = e.Bytes;

            if (asbd.ChannelsPerFrame == 1)
            {
                if (asbd.BitsPerChannel == 8)
                {
                    Format = ALFormat.Mono8;
                }
                else if (asbd.BitsPerChannel == 0)                 // This shouldn't happen. Hacking around bad data for now.
                {
                    //TODO: Remove this when sound's been fixed on iOS and other devices.
                    Format = ALFormat.Mono16;
                    Debug.WriteLine("Warning, bad decoded audio packet in SoundEffect.HandlePacketDecoded. Squelching sound.");
                    _duration = TimeSpan.Zero;
                    _data     = audioData;
                    return;
                }
                else
                {
                    Format = ALFormat.Mono16;
                }
            }
            else
            {
                if (asbd.BitsPerChannel == 8)
                {
                    Format = ALFormat.Stereo8;
                }
                else
                {
                    Format = ALFormat.Stereo16;
                }
            }
            _data = audioData;


            var _dblDuration = (e.Bytes / ((asbd.BitsPerChannel / 8) * asbd.ChannelsPerFrame)) / asbd.SampleRate;

            _duration = TimeSpan.FromSeconds(_dblDuration);
//			Console.WriteLine ("From Data: " + _name + " - " + Format + " = " + Rate + " / " + Size + " -- "  + Duration);
//			Console.WriteLine("Duration: " + _dblDuration
//			                        + " / size: " + e.Bytes
//			                        + " bits: " + asbd.BitsPerChannel
//			                        + " channels: " + asbd.ChannelsPerFrame
//			                        + " rate: " + asbd.SampleRate);
        }
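The handler above only runs if it is subscribed to an AudioFileStream's PacketDecoded event before parsing begins; a wiring sketch, where fileBytes is a hypothetical byte array of compressed audio:

        // Wiring sketch (fileBytes is hypothetical compressed audio data):
        var afs = new AudioFileStream(AudioFileType.MP3);
        afs.PacketDecoded += HandlePacketDecoded;
        afs.ParseBytes(fileBytes, false);
        afs.Close();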
Example #4
        void HandlePacketDecoded(object sender, PacketReceivedEventArgs e)
        {
            AudioFileStream afs = (AudioFileStream)sender;

            byte[] audioData = new byte[e.Bytes];
            Marshal.Copy(e.InputData, audioData, 0, e.Bytes);
            //Console.WriteLine ("Packet decoded ");
            AudioStreamBasicDescription asbd = afs.StreamBasicDescription;

            Rate = (float)asbd.SampleRate;
            Size = e.Bytes;

            if (asbd.ChannelsPerFrame == 1)
            {
                if (asbd.BitsPerChannel == 8)
                {
                    Format = ALFormat.Mono8;
                }
                else
                {
                    Format = ALFormat.Mono16;
                }
            }
            else
            {
                if (asbd.BitsPerChannel == 8)
                {
                    Format = ALFormat.Stereo8;
                }
                else
                {
                    Format = ALFormat.Stereo16;
                }
            }
            _data = audioData;

            var _dblDuration = (e.Bytes / ((asbd.BitsPerChannel / 8) * asbd.ChannelsPerFrame)) / asbd.SampleRate;

            _duration = TimeSpan.FromSeconds(_dblDuration);
//			Console.WriteLine ("From Data: " + _name + " - " + Format + " = " + Rate + " / " + Size + " -- "  + Duration);
//			Console.WriteLine("Duration: " + _dblDuration
//			                        + " / size: " + e.Bytes
//			                        + " bits: " + asbd.BitsPerChannel
//			                        + " channels: " + asbd.ChannelsPerFrame
//			                        + " rate: " + asbd.SampleRate);
        }
Example #5
        internal SoundEffect(string assetName, bool isMusic)
        {
            // use of CFUrl.FromFile is necessary in case assetName contains spaces (which must be url-encoded)
            audioFile = AudioFile.Open(CFUrl.FromFile(assetName), AudioFilePermission.Read, 0);

            if (audioFile == null)
            {
                throw new Content.ContentLoadException("Could not open sound effect " + assetName);
            }

            description = audioFile.StreamBasicDescription;
            DeriveBufferSize(0.5);
            isVBR = (description.BytesPerPacket == 0 || description.FramesPerPacket == 0);

            if (!isMusic)
            {
                firstInstance = new SoundEffectInstance(this, false);
            }
        }
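The isVBR test follows Core Audio convention: a BytesPerPacket or FramesPerPacket of zero in an AudioStreamBasicDescription means the quantity varies per packet. A quick sketch, assuming the CreateLinearPCM helper available in recent Xamarin bindings:

        // Linear PCM has fixed packet sizes, so the VBR test above is false for it:
        var pcm = AudioStreamBasicDescription.CreateLinearPCM(44100, 2, 16);
        bool pcmIsVbr = (pcm.BytesPerPacket == 0 || pcm.FramesPerPacket == 0);   // false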
Example #6
        public static bool GetDataFromExtAudioFile(ExtAudioFile ext, AudioStreamBasicDescription outputFormat, int maxBufferSize,
                                                   byte[] dataBuffer, out int dataBufferSize, out ALFormat format, out double sampleRate)
        {
            uint bufferSizeInFrames = 0;

            dataBufferSize = 0;
            format         = ALFormat.Mono16;
            sampleRate     = 0;
            /* Compute how many frames will fit into our max buffer size */
            bufferSizeInFrames = (uint)(maxBufferSize / outputFormat.BytesPerFrame);

            if (dataBuffer != null)
            {
                var audioBufferList = new AudioBuffers(maxBufferSize);

                // This is a hack, so if there is a problem speak to kjpou1. -Kenneth
                // The cleanest way is to copy the buffer to the pointer already allocated,
                // but what we are going to do is replace the pointer with our own and restore it later.
                //
                GCHandle meBePinned  = GCHandle.Alloc(dataBuffer, GCHandleType.Pinned);
                IntPtr   meBePointer = meBePinned.AddrOfPinnedObject();

                audioBufferList.SetData(0, meBePointer);

                try {
                    // Read the data into an AudioBufferList.
                    // The return value is the number of frames actually read;
                    // any error is reported through extAudioFileError.
                    ExtAudioFileError extAudioFileError = ExtAudioFileError.OK;
                    uint framesRead = ext.Read(bufferSizeInFrames, audioBufferList, out extAudioFileError);
                    if (extAudioFileError == ExtAudioFileError.OK)
                    {
                        /* Success */
                        /* Note: framesRead == 0 is a legitimate value meaning we are at EOF. */

                        /* ExtAudioFile.Read returns the number of frames actually read.
                         * Need to convert back to bytes.
                         */
                        dataBufferSize = (int)framesRead * outputFormat.BytesPerFrame;

                        // Now we set our format
                        format = outputFormat.ChannelsPerFrame > 1 ? ALFormat.Stereo16 : ALFormat.Mono16;

                        sampleRate = outputFormat.SampleRate;
                    }
                    else
                    {
#if DEBUG
                        Console.WriteLine("ExtAudioFile.Read failed, Error = " + errorStatus);
#endif
                        return(false);
                    }
                } catch (Exception exc) {
#if DEBUG
                    Console.WriteLine("ExtAudioFile.Read failed: " + exc.Message);
#endif
                    return(false);
                } finally {
                    // Don't forget to free our dataBuffer memory pointer that was pinned above
                    meBePinned.Free();
                    // and restore what was allocated to begin with
                    audioBufferList.SetData(0, IntPtr.Zero);
                }
            }
            return(true);
        }
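A sketch chaining this helper with GetExtAudioFile from example #2; the path and buffer size are illustrative:

        // Usage sketch combining examples #2 and #6 (path and size are illustrative):
        AudioStreamBasicDescription desc;
        ExtAudioFile ext = GetExtAudioFile(NSUrl.FromFilename("Content/jump.caf"), out desc);
        if (ext != null)
        {
            const int maxBufferSize = 1024 * 1024;   // up to 1 MB of decoded PCM
            var      buffer = new byte[maxBufferSize];
            int      bytesRead;
            ALFormat format;
            double   rate;

            if (GetDataFromExtAudioFile(ext, desc, maxBufferSize, buffer,
                                        out bytesRead, out format, out rate))
            {
                Console.WriteLine("Read {0} bytes as {1} at {2} Hz", bytesRead, format, rate);
            }
            ext.Dispose();
        }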
Example #7
        private void PlatformLoadAudioStream(Stream s)
        {
#if OPENAL && !(MONOMAC || IOS)
            ALFormat format;
            int      size;
            int      freq;

            var stream = s;
#if ANDROID
            var needsDispose = false;
            try
            {
                // If seek is not supported (usually an indicator of a stream opened into the AssetManager), then copy
                // into a temporary MemoryStream.
                if (!s.CanSeek)
                {
                    needsDispose = true;
                    stream       = new MemoryStream();
                    s.CopyTo(stream);
                    stream.Position = 0;
                }
#endif
            _data = AudioLoader.Load(stream, out format, out size, out freq);
#if ANDROID
            }
            finally
            {
                if (needsDispose)
                {
                    stream.Dispose();
                }
            }
#endif
            Format = format;
            Size   = size;
            Rate   = freq;
#endif

#if MONOMAC || IOS
            var audiodata = new byte[s.Length];
            s.Read(audiodata, 0, (int)s.Length);

            using (AudioFileStream afs = new AudioFileStream(AudioFileType.WAVE))
            {
                afs.ParseBytes(audiodata, false);
                Size = (int)afs.DataByteCount;

                _data = new byte[afs.DataByteCount];
                Array.Copy(audiodata, afs.DataOffset, _data, 0, afs.DataByteCount);

                AudioStreamBasicDescription asbd = afs.DataFormat;
                int channelsPerFrame             = asbd.ChannelsPerFrame;
                int bitsPerChannel = asbd.BitsPerChannel;

                // There is a random chance that properties asbd.ChannelsPerFrame and asbd.BitsPerChannel are invalid because of a bug in Xamarin.iOS
                // See: https://bugzilla.xamarin.com/show_bug.cgi?id=11074 (Failed to get buffer attributes error when playing sounds)
                if (channelsPerFrame <= 0 || bitsPerChannel <= 0)
                {
                    NSError err;
                    using (NSData nsData = NSData.FromArray(audiodata))
                        using (AVAudioPlayer player = AVAudioPlayer.FromData(nsData, out err))
                        {
                            channelsPerFrame = (int)player.NumberOfChannels;
                            bitsPerChannel   = player.SoundSetting.LinearPcmBitDepth.GetValueOrDefault(16);

                            Rate      = (float)player.SoundSetting.SampleRate;
                            _duration = TimeSpan.FromSeconds(player.Duration);
                        }
                }
                else
                {
                    Rate = (float)asbd.SampleRate;
                    double duration = (Size / ((bitsPerChannel / 8) * channelsPerFrame)) / asbd.SampleRate;
                    _duration = TimeSpan.FromSeconds(duration);
                }

                if (channelsPerFrame == 1)
                {
                    Format = (bitsPerChannel == 8) ? ALFormat.Mono8 : ALFormat.Mono16;
                }
                else
                {
                    Format = (bitsPerChannel == 8) ? ALFormat.Stereo8 : ALFormat.Stereo16;
                }
            }
#endif
        }
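One caveat in the MONOMAC/IOS branch above: a single Stream.Read call is not guaranteed to fill the whole array. A defensive sketch; ReadFully is a hypothetical helper, not part of the original code:

        // Hypothetical helper: loop until the array is full, since Stream.Read
        // may legally return fewer bytes than requested.
        static byte[] ReadFully(Stream s)
        {
            var data = new byte[s.Length];
            int offset = 0;
            while (offset < data.Length)
            {
                int read = s.Read(data, offset, data.Length - offset);
                if (read == 0)
                    break;   // unexpected end of stream
                offset += read;
            }
            return data;
        }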
Example #8
        public static bool GetDataFromExtAudioFile(ExtAudioFile ext, AudioStreamBasicDescription outputFormat, int maxBufferSize,
                                                   byte[] dataBuffer, out int dataBufferSize, out ALFormat format, out double sampleRate)
        {
            int errorStatus        = 0;
            int bufferSizeInFrames = 0;

            dataBufferSize = 0;
            format         = ALFormat.Mono16;
            sampleRate     = 0;
            /* Compute how many frames will fit into our max buffer size */
            bufferSizeInFrames = maxBufferSize / outputFormat.BytesPerFrame;

            if (dataBuffer != null)
            {
                MutableAudioBufferList audioBufferList = new MutableAudioBufferList(1, maxBufferSize);

                audioBufferList.Buffers [0].DataByteSize   = maxBufferSize;
                audioBufferList.Buffers [0].NumberChannels = outputFormat.ChannelsPerFrame;



                // This is a hack, so if there is a problem speak to kjpou1. -Kenneth
                // The cleanest way is to copy the buffer to the pointer already allocated,
                // but what we are going to do is replace the pointer with our own and restore it later.
                //
                GCHandle meBePinned  = GCHandle.Alloc(dataBuffer, GCHandleType.Pinned);
                IntPtr   meBePointer = meBePinned.AddrOfPinnedObject();

                // Let's not use Marshal.Copy for now while we test this.  For very large files
                // this might show some stutter in the sound loading.
                //Marshal.Copy(dataBuffer, 0, audioBufferList.Buffers[0].Data, maxBufferSize);
                IntPtr savedDataPtr = audioBufferList.Buffers [0].Data;
                audioBufferList.Buffers [0].Data = meBePointer;


                try {
                    // Read the data into an AudioBufferList.
                    // errorStatus here returns the number of frames actually read.
                    errorStatus = ext.Read(bufferSizeInFrames, audioBufferList);
                    if (errorStatus >= 0)
                    {
                        /* Success */
                        /* Note: errorStatus == 0 is a legitimate value meaning we are at EOF. */

                        /* ExtAudioFile.Read returns the number of frames actually read.
                         * Need to convert back to bytes.
                         */
                        dataBufferSize = errorStatus * outputFormat.BytesPerFrame;

                        // Now we set our format
                        format = outputFormat.ChannelsPerFrame > 1 ? ALFormat.Stereo16 : ALFormat.Mono16;

                        sampleRate = outputFormat.SampleRate;
                    }
                    else
                    {
#if DEBUG
                        Console.WriteLine("ExtAudioFile.Read failed, Error = " + errorStatus);
#endif
                        return(false);
                    }
                } catch (Exception exc) {
#if DEBUG
                    Console.WriteLine("ExtAudioFile.Read failed: " + exc.Message);
#endif
                    return(false);
                } finally {
                    // Don't forget to free our dataBuffer memory pointer that was pinned above
                    meBePinned.Free();
                    // and restore what was allocated to begin with
                    audioBufferList.Buffers[0].Data = savedDataPtr;
                }
            }
            return(true);
        }
Example #9
        private void PlatformLoadAudioStream(Stream s, out TimeSpan duration)
        {
            byte[] buffer;

#if OPENAL && !(MONOMAC || IOS)
            ALFormat format;
            int      size;
            int      freq;

            var stream = s;

            buffer = AudioLoader.Load(stream, out format, out size, out freq);

            Format = format;
            Size   = size;
            Rate   = freq;

            // size is in bytes, so convert the sample rate to a byte rate before
            // dividing (2 bytes per frame for 16-bit mono or 8-bit stereo,
            // 4 bytes for 16-bit stereo).
            var bytesPerSecond = freq;
            if (format == ALFormat.Mono16 || format == ALFormat.Stereo8)
            {
                bytesPerSecond *= 2;
            }
            else if (format == ALFormat.Stereo16)
            {
                bytesPerSecond *= 4;
            }

            duration = TimeSpan.FromSeconds((float)size / bytesPerSecond);
#endif

#if MONOMAC || IOS
            var audiodata = new byte[s.Length];
            s.Read(audiodata, 0, (int)s.Length);

            using (AudioFileStream afs = new AudioFileStream(AudioFileType.WAVE))
            {
                afs.ParseBytes(audiodata, false);
                Size = (int)afs.DataByteCount;

                buffer = new byte[afs.DataByteCount];
                Array.Copy(audiodata, afs.DataOffset, buffer, 0, afs.DataByteCount);

                AudioStreamBasicDescription asbd = afs.DataFormat;
                int channelsPerFrame             = asbd.ChannelsPerFrame;
                int bitsPerChannel = asbd.BitsPerChannel;

                // There is a random chance that properties asbd.ChannelsPerFrame and asbd.BitsPerChannel are invalid because of a bug in Xamarin.iOS
                // See: https://bugzilla.xamarin.com/show_bug.cgi?id=11074 (Failed to get buffer attributes error when playing sounds)
                if (channelsPerFrame <= 0 || bitsPerChannel <= 0)
                {
                    NSError err;
                    using (NSData nsData = NSData.FromArray(audiodata))
                        using (AVAudioPlayer player = AVAudioPlayer.FromData(nsData, out err))
                        {
                            channelsPerFrame = (int)player.NumberOfChannels;
                            bitsPerChannel   = player.SoundSetting.LinearPcmBitDepth.GetValueOrDefault(16);

                            Rate     = (float)player.SoundSetting.SampleRate;
                            duration = TimeSpan.FromSeconds(player.Duration);
                        }
                }
                else
                {
                    Rate = (float)asbd.SampleRate;
                    double durationSec = (Size / ((bitsPerChannel / 8) * channelsPerFrame)) / asbd.SampleRate;
                    duration = TimeSpan.FromSeconds(durationSec);
                }

                if (channelsPerFrame == 1)
                {
                    Format = (bitsPerChannel == 8) ? ALFormat.Mono8 : ALFormat.Mono16;
                }
                else
                {
                    Format = (bitsPerChannel == 8) ? ALFormat.Stereo8 : ALFormat.Stereo16;
                }
            }
#endif
            // bind buffer
            SoundBuffer = new OALSoundBuffer();
            SoundBuffer.BindDataBuffer(buffer, Format, Size, (int)Rate);
        }
Example #10
        private void PlatformLoadAudioStream(Stream s, out TimeSpan duration)
        {
            byte[] buffer;

#if OPENAL && !(MONOMAC || IOS)
            ALFormat format;
            int      size;
            int      freq;

            var stream = s;

            buffer = AudioLoader.Load(stream, out format, out size, out freq);

            Format = format;
            Size   = size;
            Rate   = freq;

            var bytesPerSecond = freq;
            if (format == ALFormat.Mono16 || format == ALFormat.Stereo8)
            {
                bytesPerSecond *= 2;
            }
            else if (format == ALFormat.Stereo16)
            {
                bytesPerSecond *= 4;
            }

            duration = TimeSpan.FromSeconds((float)size / bytesPerSecond);
#endif

#if MONOMAC || IOS
            var audiodata = new byte[s.Length];
            s.Read(audiodata, 0, (int)s.Length);

            using (AudioFileStream afs = new AudioFileStream(AudioFileType.WAVE))
            {
                afs.ParseBytes(audiodata, false);
                Size = (int)afs.DataByteCount;

                buffer = new byte[afs.DataByteCount];
                Array.Copy(audiodata, afs.DataOffset, buffer, 0, afs.DataByteCount);

                AudioStreamBasicDescription asbd = afs.DataFormat;
                int channelsPerFrame             = asbd.ChannelsPerFrame;
                int bitsPerChannel = asbd.BitsPerChannel;

                Rate = (float)asbd.SampleRate;
                double durationSec = (Size / ((bitsPerChannel / 8) * channelsPerFrame)) / asbd.SampleRate;
                duration = TimeSpan.FromSeconds(durationSec);

                if (channelsPerFrame == 1)
                {
                    Format = (bitsPerChannel == 8) ? ALFormat.Mono8 : ALFormat.Mono16;
                }
                else
                {
                    Format = (bitsPerChannel == 8) ? ALFormat.Stereo8 : ALFormat.Stereo16;
                }
            }
#endif
            // bind buffer
            SoundBuffer = new OALSoundBuffer();
            SoundBuffer.BindDataBuffer(buffer, Format, Size, (int)Rate);
        }
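The byte-rate adjustment in the OPENAL branch is just sample width times channel count; a worked check with illustrative numbers:

        // Worked check of the duration math above (numbers are illustrative):
        // Stereo16 = 2 channels * 2 bytes = 4 bytes per sample frame.
        int size = 352800;                                // bytes of PCM data
        int freq = 44100;                                 // sample frames per second
        int bytesPerSecond = freq * 4;                    // Stereo16
        double seconds = (float)size / bytesPerSecond;    // 352800 / 176400 = 2.0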