Example #1
 public FormatInfo(SoundType type, SoundFormat format, int channels, int bits)
 {
     Type     = type;
     Format   = format;
     Channels = channels;
     Bits     = bits;
 }
Example #2
        /// <summary>
        /// Creates a new stream instance using the provided stream as a source.
        /// Will also read the first frame of the MP3 into the internal buffer.
        /// </summary>
        public MP3Stream(Stream sourceStream, int chunkSize)
        {
            IsEOF                 = false;
            _SourceStream         = sourceStream;
            _BitStream            = new Bitstream(new PushbackStream(_SourceStream, chunkSize));
            _Buffer               = new Buffer16BitStereo();
            _Decoder.OutputBuffer = _Buffer;
            // read the first frame. This will fill the initial buffer with data, and get our frequency!
            IsEOF |= !ReadFrame();
            switch (_ChannelCountRep)
            {
            case 1:
                FormatRep = SoundFormat.Pcm16BitMono;
                break;

            case 2:
                FormatRep = SoundFormat.Pcm16BitStereo;
                break;

            default:
                throw new MP3SharpException($"Unhandled channel count rep: {_ChannelCountRep} (allowed values are 1-mono and 2-stereo).");
            }
            if (FormatRep == SoundFormat.Pcm16BitMono)
            {
                _Buffer.DoubleMonoToStereo = true;
            }
        }
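As context for the MP3Stream constructors in this and later examples, a minimal usage sketch (hedged: it assumes MP3Stream derives from System.IO.Stream, as its buffer-filling constructor suggests; the file name is illustrative):

    // Decode an MP3 into raw 16-bit PCM by reading the stream in chunks.
    using (var file = File.OpenRead("song.mp3"))
    using (var mp3 = new MP3Stream(file, 4096))
    {
        var pcm = new byte[4096];
        int read;
        while ((read = mp3.Read(pcm, 0, pcm.Length)) > 0)
        {
            // forward 'read' bytes of PCM to an audio sink here
        }
    }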
Example #3
        private int SetupCallback(void **data, char *format, int *rate, int *channels)
        {
            IntPtr pFormat   = new IntPtr(format);
            string formatStr = Marshal.PtrToStringAnsi(pFormat);

            SoundType sType;

            if (!Enum.TryParse <SoundType>(formatStr, out sType))
            {
                ArgumentException exc = new ArgumentException("Unsupported sound type " + formatStr);
                if (m_excHandler != null)
                {
                    m_excHandler(exc);
                    return(1);
                }
                else
                {
                    throw exc;
                }
            }

            m_format = new SoundFormat(sType, *rate, *channels);
            if (m_formatSetupCB != null)
            {
                m_format = m_formatSetupCB(m_format);
            }

            Marshal.Copy(m_format.Format.ToUtf8(), 0, pFormat, 4);
            *rate     = m_format.Rate;
            *channels = m_format.Channels;

            return(m_format.UseCustomAudioRendering == true ? 0 : 1);
        }
Example #4
        public static int GetChannelCountFromSoundFormat(SoundFormat format)
        {
            switch (format)
            {
            case SoundFormat.Raw:
                return(2);

            case SoundFormat.Mono:
                return(1);

            case SoundFormat.Stereo:
                return(2);

            case SoundFormat.Quad:
                return(4);

            case SoundFormat.Surround:
                return(5);

            case SoundFormat.FiveDot1:
                return(6);

            case SoundFormat.SevenDot1:
                return(8);

            default:
                throw new ArgumentException($"Invalid sound format {format}", nameof(format));
            }
        }
Example #5
File: VLCStream.cs Project: saintgene/iSpy
        private SoundFormat SoundFormatCallback(SoundFormat sf)
        {
            if (!_needsSetup)
            {
                return(sf);
            }
            _needsSetup = false;

            int chan = _realChannels = sf.Channels;

            if (chan > 1)
            {
                chan = 2;//downmix
            }
            _recordingFormat = new WaveFormat(sf.Rate, 16, chan);
            _waveProvider    = new BufferedWaveProvider(RecordingFormat);
            _sampleChannel   = new SampleChannel(_waveProvider);
            _sampleChannel.PreVolumeMeter += SampleChannelPreVolumeMeter;

            if (HasAudioStream == null)
            {
                return(sf);
            }
            HasAudioStream?.Invoke(this, EventArgs.Empty);
            HasAudioStream = null;

            return(sf);
        }
Example #6
        private void view_SoundFormatTagChanged(object sender, EventArgs e)
        {
            SoundFormatTag? viewFormatTag = this.view.SoundFormatTag;

            if (viewFormatTag.HasValue)
            {
                List <SoundFormat> soundFormatList = new List <SoundFormat>();
                foreach (var format in this.formatList)
                {
                    if (format.Tag == viewFormatTag)
                    {
                        soundFormatList.Add(format);
                    }
                }
                SoundFormat    currentFormat       = this.view.SoundFormat;
                SoundFormatTag? preferredTag       = viewFormatTag;
                int?           preferredSampleRate = null;
                int?           preferredChannels   = null;
                if (currentFormat != null)
                {
                    preferredSampleRate = currentFormat.SamplesPerSecond;
                    preferredChannels   = currentFormat.Channels;
                }
                this.view.SoundFormats = soundFormatList.ToArray();
                SoundFormat suggestedFormat = SoundProvider.SuggestFormat(this.view.SoundDevice.Id, viewFormatTag,
                                                                          preferredSampleRate, preferredChannels);
                this.view.SoundFormat = suggestedFormat;
            }
            else
            {
                this.view.SoundFormats = null;
            }
        }
Example #7
        private static int GetOutOfSyncSamples(SoundProvider soundProvider, int sampleIndex, TimeSpan duration,
                                               int samplesRead)
        {
            SoundFormat sourceFormat = soundProvider.SourceFormat;
            SoundFormat audioFormat  = soundProvider.Format;

            // Get the expected number of source samples for the elapsed duration
            double secondsPassed   = (double)duration.Ticks / ticksPerSec;
            int    expectedSamples = (int)(secondsPassed * sourceFormat.SamplesPerSecond);

            // Get sample ratio
            double samplesRatio = (double)sourceFormat.SamplesPerSecond / audioFormat.SamplesPerSecond;

            // Get number of samples in each packet
            int packetSamples = (int)((soundProvider.PacketLength * sourceFormat.SamplesPerSecond) / 1000.0);

            // Get total number of source samples read
            int totalSamplesRead = samplesRead + (int)(sampleIndex * samplesRatio);

            // Check if more than two packets are missing or out of sync
            if ((totalSamplesRead + 2 * packetSamples < expectedSamples) ||
                (totalSamplesRead > 2 * packetSamples + expectedSamples))
            {
                return(expectedSamples - totalSamplesRead);
            }
            // Only a few (or zero) samples are out of sync; ignore for now.
            return(0);
        }
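To put numbers on the two-packet tolerance above: at sourceFormat.SamplesPerSecond = 44,100 and duration = 1 s, expectedSamples is 44,100; if PacketLength is 100 ms (an assumed value), packetSamples = 4,410, so a nonzero correction is returned only once the stream drifts by more than 8,820 samples in either direction.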
Example #8
 /// <summary>
 ///     Creates a new stream instance using the provided stream as a source.
 ///     Will also read the first frame of the MP3 into the internal buffer.
 ///     TODO: allow selecting stereo or mono in the constructor (note that this also requires "implementing" the stereo format).
 ///     UPDATE: (Giperion) I hate that TODO above
 /// </summary>
 public MP3Stream(Stream sourceStream, int chunkSize, bool IsMono)
 {
     IsEOF          = false;
     m_SourceStream = sourceStream;
     pushback       = new PushbackStream(m_SourceStream, chunkSize);
     m_BitStream    = new Bitstream(pushback);
     if (IsMono)
     {
         Decoder.Params ParamsCustom = new Decoder.Params();
         ParamsCustom.OutputChannels = OutputChannels.LEFT;
         m_Decoder         = new Decoder(ParamsCustom);
         m_Buffer          = new Buffer16BitMono();
         FormatRep         = SoundFormat.Pcm16BitMono;
         m_ChannelCountRep = 1;
     }
     else
     {
         m_Buffer          = new Buffer16BitStereo();
         FormatRep         = SoundFormat.Pcm16BitStereo;
         m_ChannelCountRep = 2;
     }
     m_Decoder.OutputBuffer = m_Buffer;
     // read the first frame. This will fill the initial buffer with data, and get our frequency!
     if (!ReadFrame())
     {
         IsEOF = true;
     }
 }
Example #9
            public void SetFileFormat()             // HACK?
            {
                string PathExtension = System.IO.Path.GetExtension(this.FilePath).Substring(1);

                if (string.Equals(PathExtension, "MP3", System.StringComparison.OrdinalIgnoreCase))
                {
                    this.SndFormat = SoundFormat.MP3;
                }
                else if (string.Equals(PathExtension, "OGG", System.StringComparison.OrdinalIgnoreCase))
                {
                    this.SndFormat = SoundFormat.OGG;
                }
                else if (string.Equals(PathExtension, "WAV", System.StringComparison.OrdinalIgnoreCase) || string.Equals(PathExtension, "WAVE", System.StringComparison.OrdinalIgnoreCase))
                {
                    this.SndFormat = SoundFormat.WAV;
                }
                else if (string.Equals(PathExtension, "AIFF", System.StringComparison.OrdinalIgnoreCase) || string.Equals(PathExtension, "AIF", System.StringComparison.OrdinalIgnoreCase))
                {
                    this.SndFormat = SoundFormat.AIFF;
                }
                else
                {
                    throw new System.NotSupportedException();
                }
            }
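The if/else chain above can also be written as a lookup table; a sketch under the assumption that the same SoundFormat members (MP3, OGG, WAV, AIFF) are available and System.Collections.Generic is imported:

    // Hypothetical alternative: one table instead of chained comparisons.
    private static readonly Dictionary<string, SoundFormat> FormatsByExtension =
        new Dictionary<string, SoundFormat>(StringComparer.OrdinalIgnoreCase)
        {
            { "MP3", SoundFormat.MP3 },
            { "OGG", SoundFormat.OGG },
            { "WAV", SoundFormat.WAV }, { "WAVE", SoundFormat.WAV },
            { "AIFF", SoundFormat.AIFF }, { "AIF", SoundFormat.AIFF },
        };

    public void SetFileFormat()
    {
        // TrimStart avoids the exception Substring(1) throws for extensionless paths.
        string ext = System.IO.Path.GetExtension(this.FilePath).TrimStart('.');
        if (!FormatsByExtension.TryGetValue(ext, out SoundFormat fmt))
        {
            throw new System.NotSupportedException(ext);
        }
        this.SndFormat = fmt;
    }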
Example #10
        public XAudio2_StreamingSoundBuffer(XAudio2_Audio audio, Stream input, SoundFormat format)
        {
            mAudio  = audio;
            mInput  = input;
            mFormat = format;

            Initialize();
        }
Example #11
 public void BufferData(int bufferId, SoundFormat format, short[] buffer, int size, int freq)
 {
     if (_context == null)
     {
         return;
     }
     AL.BufferData(bufferId, getFormat(format), buffer, size, freq);
 }
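A hypothetical call site for BufferData (the wrapper instance name `device` is an assumption; AL.GenBuffer is the OpenTK OpenAL binding already used above, and SoundFormat.Mono16 matches the enum mapped in Example #27):

    // Upload one second of 44.1 kHz mono silence into a fresh OpenAL buffer.
    int bufferId = AL.GenBuffer();
    short[] samples = new short[44100];
    device.BufferData(bufferId, SoundFormat.Mono16, samples,
                      samples.Length * sizeof(short), 44100);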
Example #12
 public MP3Player(string manafacturer, string model, decimal price, uint count, int memory,
     SoundFormat[] supportedFormats, Display display, Frequency frequencyRange, int power)
     : base(manafacturer, model, price, count)
 {
     this.Memory = memory;
     this.SupportedFormats = supportedFormats;
     this.Display = display;
     this.FrequencyRange = frequencyRange;
     this.Power = power;
 }
Example #13
 /// <inheritdoc />
 protected Sound(SerializationInfo info, StreamingContext ctxt)
     : base(info, ctxt)
 {
     _filename             = info.GetString("Filename");
     _soundFormat          = (SoundFormat)info.GetValue("SoundFormat", typeof(SoundFormat));
     SoundEffectUsed       = info.GetInt32("SoundEffectUsed");
     SongUsed              = info.GetInt32("SongUsed");
     CompiledAsSoundEffect = info.GetBoolean("CompiledAsSoundEffect");
     CompiledAsSong        = info.GetBoolean("CompiledAsSong");
 }
Example #14
 private static bool FormatEnumCallback(IntPtr had, ref AcmInterop.ACMFORMATDETAILS pafd, UIntPtr dwInstance,
                                        int fdwSupport)
 {
     if (pafd.cbwfx >= Marshal.SizeOf(typeof(MMInterop.WAVEFORMATEX)))
     {
         SoundFormat soundFormat = new SoundFormat(pafd.pwfx);
         callbackFormats.Add(soundFormat);
     }
     return(true);
 }
Example #15
        public void Open()
        {
            if (this.opened)
            {
                throw new InvalidOperationException();
            }
            if (this.inputFormat == null)
            {
                throw new InvalidOperationException("Input format is not specified.");
            }
            if (this.outputFormat == null)
            {
                throw new InvalidOperationException("Output format is not specified.");
            }
            this.pwfxSource   = this.inputFormat.ToPtr();
            this.pwfxDest     = this.outputFormat.ToPtr();
            this.outputFormat = new SoundFormat(this.pwfxDest);

            int mmr = AcmInterop.acmStreamOpen(out this.pStream, IntPtr.Zero, this.pwfxSource, this.pwfxDest,
                                               IntPtr.Zero, IntPtr.Zero, UIntPtr.Zero, 0);

            if (mmr != 0)
            {
                throw new SoundException("acmStreamOpen", mmr);
            }
            int cbSrcLength = (int)((this.bufferLength / 1000.0) * this.inputFormat.AverageBytesPerSecond);

            this.header             = new AcmInterop.ACMSTREAMHEADER();
            this.header.cbStruct    = Marshal.SizeOf(this.header);
            this.header.cbSrcLength = cbSrcLength;
            int suggestedDstLength;

            mmr = AcmInterop.acmStreamSize(this.pStream, cbSrcLength, out suggestedDstLength,
                                           AcmInterop.ACM_STREAMSIZEF_SOURCE);
            try {
                this.header.cbDstLength = suggestedDstLength;
                this.header.pbDst       = Marshal.AllocHGlobal(suggestedDstLength);
                this.header.pbSrc       = Marshal.AllocHGlobal(cbSrcLength);
                this.headerGCHandle     = GCHandle.Alloc(this.header, GCHandleType.Pinned);
                mmr = AcmInterop.acmStreamPrepareHeader(this.pStream, ref this.header, 0);
                if (mmr != 0)
                {
                    throw new SoundException("acmStreamPrepareHeader", mmr);
                }
                this.isStart = true;
                this.opened  = true;
            }
            finally {
                if (!this.opened)
                {
                    this.Close();
                }
            }
        }
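A worked instance of the source-buffer sizing above: with bufferLength = 100 (milliseconds) and 16-bit stereo 44.1 kHz input, AverageBytesPerSecond is 176,400, so cbSrcLength = (100 / 1000.0) * 176400 = 17,640 bytes per conversion.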
Example #16
        /// <summary>
        ///     Creates a new stream instance using the provided stream as a source.
        ///     Will also read the first frame of the MP3 into the internal buffer.
        ///     TODO: allow selecting stereo or mono in the constructor (note that this also requires "implementing" the stereo format).
        /// </summary>
        public MP3Stream(Stream sourceStream, int chunkSize)
        {
            FormatRep              = SoundFormat.Pcm16BitStereo;
            m_SourceStream         = sourceStream;
            m_BitStream            = new Bitstream(new PushbackStream(m_SourceStream, chunkSize));
            m_Buffer               = new Buffer16BitStereo();
            m_Decoder.OutputBuffer = m_Buffer;

            // read the first frame. This will fill the initial buffer with data, and get our frequency!
            ReadFrame();
        }
Example #17
        private SoundFormat OnAudioSetup(SoundFormat arg)
        {
            m_audioFormat = arg;
            var streamInfo = StreamInfo.FromSoundFormat(arg);
            streamInfo.ID = AUDIO_ID;
            m_inputMedia.AddOrUpdateStream(streamInfo, AUDIO_BUFFERS);

            // This sample supports only stereo or 5.1 audio; to use other channel layouts, change TinyAudioProcessor.
            arg.UseCustomAudioRendering = (arg.Channels == 2 || arg.Channels == 6);
            return arg;
        }
Example #18
        private void cmbSoundFormat_Format(object sender, ListControlConvertEventArgs e)
        {
            SoundFormat format = e.Value as SoundFormat;

            if (format != null)
            {
                e.Value = string.Format("{0,4} kbps, {1,7} Hz, {2} channels, {3,2} bit",
                                        (format.AverageBytesPerSecond * 8) / 1000,
                                        (float)format.SamplesPerSecond, format.Channels, format.BitsPerSample);
            }
        }
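For instance, 16-bit stereo 44,100 Hz PCM has AverageBytesPerSecond = 176,400, so the item renders as "1411 kbps,   44100 Hz, 2 channels, 16 bit" (176400 * 8 / 1000 = 1411 under integer division).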
Example #19
        public int AddSound(int index, byte[] data, bool isEnd)
        {
            if (!this.opened)
            {
                throw new InvalidOperationException();
            }
            if (!audio)
            {
                throw new InvalidOperationException();
            }
            if (data == null)
            {
                throw new ArgumentNullException("data");
            }
            if (this.audioEncoder != null)
            {
                SoundFormat sourceFormat = this.audioEncoder.InputFormat;
                int         bufferSize   = (int)(this.audioEncoder.BufferLength * (sourceFormat.AverageBytesPerSecond / 1000.0));
                double      sampleRatio  = (double)this.audioFormat.SamplesPerSecond / sourceFormat.SamplesPerSecond;

                // Encoder buffer size may be less than input data
                // So we have to break input data into separate buffers
                // Get source format, encoder buffer size
                if (data.Length <= bufferSize)
                {
                    // One convert is enough
                    int    sourceSamples    = data.Length / sourceFormat.BlockAlign;
                    int    convertedSamples = (int)(sourceSamples * sampleRatio);
                    byte[] convertedData    = this.audioEncoder.Convert(data, 0, data.Length, isEnd);
                    return(AddSoundPrivate(index, convertedData, convertedSamples));
                }
                else
                {
                    // Break input data into buffers
                    int nBuffers        = (int)Math.Ceiling((double)data.Length / bufferSize);
                    int nSamplesWritten = 0;
                    for (int i = 0; i < nBuffers; i++)
                    {
                        int    thisBufferSize   = Math.Min(bufferSize, data.Length - i * bufferSize);
                        int    sourceSamples    = thisBufferSize / sourceFormat.BlockAlign;
                        int    convertedSamples = (int)(sourceSamples * sampleRatio);
                        bool   isLastConvert    = isEnd && i == nBuffers - 1;
                        byte[] convertedData    = this.audioEncoder.Convert(data, i * bufferSize, thisBufferSize, isLastConvert);
                        nSamplesWritten += this.AddSoundPrivate(index + nSamplesWritten, convertedData, convertedSamples);
                    }
                    return(nSamplesWritten);
                }
            }
            else
            {
                int nSamples = data.Length / this.audioFormat.BlockAlign;
                return(AddSoundPrivate(index, data, nSamples));
            }
        }
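To make the splitting arithmetic concrete: if bufferSize is 17,640 bytes (illustrative) and data.Length is 100,000 bytes, nBuffers = ceil(100000 / 17640) = 6; the first five converts consume 17,640 bytes each and the last one the remaining 11,800 bytes.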
Example #20
        private void view_SoundDeviceChanged(object sender, EventArgs e)
        {
            SoundDevice device = this.view.SoundDevice;

            if (device != null && !string.IsNullOrEmpty(device.Id))
            {
                SoundFormat[] formats = null;
                try {
                    formats = SoundProvider.GetFormats(device.Id, true);
                }
                catch (SoundException) {
                    this.view.SoundDevice               = null;
                    this.view.SoundFormats              = null;
                    this.view.SoundFormatTags           = null;
                    this.view.AllowSelectSoundFormat    = false;
                    this.view.AllowSelectSoundFormatTag = false;
                    return;
                }
                this.view.AllowSelectSoundFormat    = true;
                this.view.AllowSelectSoundFormatTag = true;

                this.formatList = new List <SoundFormat>(formats);
                this.formatList.Sort(SoundFormatComparer);
                List <SoundFormatTag> distinctTags = new List <SoundFormatTag>(formatList.Count);
                foreach (var format in formats)
                {
                    if (!distinctTags.Contains(format.Tag))
                    {
                        distinctTags.Add(format.Tag);
                    }
                }
                SoundFormat currentFormat = this.view.SoundFormat;
                this.view.SoundFormatTags = distinctTags.ToArray();
                SoundFormatTag? preferredTag       = null;
                int?           preferredChannels   = null;
                int?           preferredSampleRate = null;
                if (currentFormat != null)
                {
                    preferredTag        = currentFormat.Tag;
                    preferredSampleRate = currentFormat.SamplesPerSecond;
                    preferredChannels   = currentFormat.Channels;
                }
                SoundFormat suggestedFormat = SoundProvider.SuggestFormat(device.Id, preferredTag, preferredSampleRate,
                                                                          preferredChannels);
                this.view.SoundFormatTag = suggestedFormat.Tag;
                this.view.SoundFormat    = suggestedFormat;
            }
            else
            {
                this.view.AllowSelectSoundFormat    = false;
                this.view.AllowSelectSoundFormatTag = false;
            }
        }
Example #21
 /// <summary>
 ///     Creates a new stream instance using the provided stream as a source.
 ///     Will also read the first frame of the MP3 into the internal buffer.
 ///     TODO: allow selecting stereo or mono in the constructor (note that this also requires "implementing" the stereo format).
 /// </summary>
 public MP3Stream(Stream sourceStream, int chunkSize)
 {
     IsEOF = false;
     FormatRep = SoundFormat.Pcm16BitStereo;
     m_SourceStream = sourceStream;
     m_BitStream = new Bitstream(new PushbackStream(m_SourceStream, chunkSize));
     m_Buffer = new Buffer16BitStereo();
     m_Decoder.OutputBuffer = m_Buffer;
     // read the first frame. This will fill the initial buffer with data, and get our frequency!
     if (!ReadFrame())
         IsEOF = true;
 }
Example #22
 /// <summary>
 /// Adds an outlet port to the node
 /// </summary>
 /// <remarks>
 /// Ports are where signal flow comes into and out of the DSP kernel
 /// </remarks>
 /// <param name="node">DSPNode specifying the node on which the outlet is added</param>
 /// <param name="channelCount">Int specifying the number of channels in the port</param>
 /// <param name="format">SoundFormat specifying the speaker support</param>
 public void AddOutletPort(DSPNode node, int channelCount, SoundFormat format)
 {
     AssertSameGraphAsNode(node);
     QueueCommand(new AddOutletPortCommand
     {
         m_Type         = DSPCommandType.AddOutletPort,
         m_Graph        = m_Graph,
         m_Handle       = m_Handle,
         m_Node         = node.Handle,
         m_ChannelCount = channelCount,
         m_Format       = (int)format,
     });
 }
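A minimal, hypothetical call site, assuming the DSPGraph command-block pattern (graph and node creation elided):

    DSPCommandBlock block = graph.CreateCommandBlock();
    // Give the node a stereo outlet so it can feed a two-channel signal downstream.
    block.AddOutletPort(node, 2, SoundFormat.Stereo);
    block.Complete();    // submit the queued commands atomically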
Example #23
        private SoundFormat SoundFormatCallback(SoundFormat sf)
        {
            var streamInfo = new StreamInfo();

            streamInfo.Category   = StreamCategory.Audio;
            streamInfo.Codec      = sf.SoundType;
            streamInfo.Channels   = sf.Channels;
            streamInfo.Samplerate = sf.Rate;

            m_renderMedia.Initialize(streamInfo);
            m_renderPlayer.Open(m_renderMedia);
            return(sf);
        }
Example #24
 /// <summary>
 ///     Creates a new stream instance using the provided stream as a source.
 ///     Will also read the first frame of the MP3 into the internal buffer.
 ///     TODO: allow selecting stereo or mono in the constructor (note that this also requires "implementing" the stereo format).
 /// </summary>
 public MP3Stream(Stream sourceStream, int chunkSize, SoundFormat outputFormat = SoundFormat.Pcm16BitStereo)
 {
     IsEOF                  = false;
     FormatRep              = outputFormat;
     m_SourceStream         = sourceStream;
     m_BitStream            = new Bitstream(new PushbackStream(m_SourceStream, chunkSize));
     m_Buffer               = new Buffer16Bit((int)outputFormat);
     m_Decoder.OutputBuffer = m_Buffer;
     // read the first frame. This will fill the initial buffer with data, and get our frequency!
     if (!ReadFrame())
     {
         IsEOF = true;
     }
 }
Example #25
 public Settings()
 {
     mapFolder    = dataFolder = classFilter = "";
     permFilter   = "base default standard";
     pluginFolder = ".\\Plugins\\";
     Flags        = SettingsFlags.AutoUpdateCheck | SettingsFlags.BitmapAlpha | SettingsFlags.LoadSpecular | SettingsFlags.SortTags | SettingsFlags.UsePermFilter | SettingsFlags.UseClassFilter;
     ViewerColour = Color.CornflowerBlue;
     Language     = Language.English;
     BitmFormat   = 0;
     ModeFormat   = 0;
     Snd_Format   = 0;
     mapScale     = 100;
     pakScale     = 50;
 }
Example #26
File: VLCStream.cs Project: uzbekdev1/main
        private SoundFormat SoundFormatCallback(SoundFormat sf)
        {
            if (_needsSetup)
            {
                _recordingFormat = new WaveFormat(sf.Rate, 16, sf.Channels);
                _waveProvider    = new BufferedWaveProvider(RecordingFormat);
                _sampleChannel   = new SampleChannel(_waveProvider);
                _sampleChannel.PreVolumeMeter += SampleChannelPreVolumeMeter;

                _needsSetup = false;
            }

            return(sf);
        }
Example #27
        private ALFormat getFormat(SoundFormat format)
        {
            switch (format)
            {
            case SoundFormat.Mono16: return(ALFormat.Mono16);

            case SoundFormat.Mono8: return(ALFormat.Mono8);

            case SoundFormat.Stereo16: return(ALFormat.Stereo16);

            case SoundFormat.Stereo8: return(ALFormat.Stereo8);

            default: throw new NotSupportedException(format.ToString());
            }
        }
Example #28
    public void Initialize(int inputChannelCount, SoundFormat inputFormat, int inputSampleRate, long inputBufferSize)
    {
        m_channels   = inputChannelCount;
        m_format     = inputFormat;
        m_sampleRate = inputSampleRate;
        m_bufferSize = inputBufferSize;

        switch (inputFormat)
        {
        case SoundFormat.Stereo:
        {
            m_graph = DSPGraph.Create(m_format, m_channels, (int)m_bufferSize, m_sampleRate);
            break;
        }
        }
    }
Example #29
        public int AddSilence(int index, int nSamples)
        {
            if (!this.opened)
            {
                throw new InvalidOperationException();
            }
            if (!audio)
            {
                throw new InvalidOperationException();
            }
            SoundFormat sourceFormat = this.audioEncoder != null ? this.audioEncoder.InputFormat : this.audioFormat;

            // TODO: Use smaller buffers instead of one BIG buffer
            byte[] silenceData = new byte[nSamples * sourceFormat.BlockAlign];
            return(AddSound(index, silenceData, false));
        }
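For scale: one second of silence in 16-bit stereo (BlockAlign = 4) at 44,100 samples per second allocates a 176,400-byte zero-filled buffer, which is why the TODO suggests breaking it into smaller buffers.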
Example #30
        public override Stream LoadTagData(Stream stream)
        {
            var MediaInfo = stream.ReadUInt8();                //#1

            this.Codec = (SoundFormat)(MediaInfo >> 4);        //AAC: 10
            this.Rate  = (SoundRate)((MediaInfo & 0x0f) >> 2); //AAC: always 3
            this.Size  = (SoundSize)((MediaInfo & 0x02) >> 1); //AAC: always 1
            this.Type  = (SoundType)(MediaInfo & 0x01);        //AAC: always 1
            //=== Data ===
            var pos = stream.Position;
            var wtf = stream.ReadBytes(1);                                      //#1
            var buf = stream.ReadBytes((int)this.DataSize - 1 - 1);             //#n

            DataStream = new MemoryStream();
            DataStream.Write(buf, 0, buf.Length);
            return(DataStream);
        }
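The decoding above follows the FLV audio tag header layout: the high nibble of the first byte is the codec, bits 3-2 the sample rate, bit 1 the sample size, and bit 0 the channel type. For example, MediaInfo = 0xAF yields Codec = 10 (AAC), Rate = 3, Size = 1, Type = 1, matching the inline comments.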
Example #31
        private int SetupCallback(void **data, char *format, int *rate, int *channels)
        {
            IntPtr    pFormat   = new IntPtr(format);
            string    formatStr = Marshal.PtrToStringAnsi(pFormat);
            SoundType sType     = formatStr.TryParseSoundType();

            m_format = new SoundFormat(sType, *rate, *channels);
            if (m_formatSetupCB != null)
            {
                m_format = m_formatSetupCB(m_format);
            }

            Marshal.Copy(m_format.Format.ToUtf8(), 0, pFormat, 4);
            *rate     = m_format.Rate;
            *channels = m_format.Channels;

            return(m_format.UseCustomAudioRendering == true ? 0 : 1);
        }
Example #32
        /// <summary>
        /// Gets the XNA Pipeline importer for the given sound format.
        /// </summary>
        /// <param name="soundFormat">Format of the sound.</param>
        /// <returns>Returns XNA Pipeline importer.</returns>
        private string GetImporter(SoundFormat soundFormat)
        {
            switch (soundFormat)
            {
            case SoundFormat.Wav:
                return("WavImporter");

            case SoundFormat.Wma:
                return("WmaImporter");

            case SoundFormat.Mp3:
                return("Mp3Importer");

            default:
                Debug.Assert(false);    // unreachable for known formats
                return(null);
            }
        }
Example #33
        public void Open()
        {
            if (this.opened)
            {
                throw new InvalidOperationException();
            }
            if (this.inputFormat == null)
            {
                throw new InvalidOperationException("Input format is not specified.");
            }
            if (this.outputFormat == null)
            {
                throw new InvalidOperationException("Output format is not specified.");
            }
            this.pwfxSource   = this.inputFormat.ToPtr();
            this.pwfxDest     = this.outputFormat.ToPtr();
            this.outputFormat = new SoundFormat(this.pwfxDest);

            int mmr = AcmInterop.acmStreamOpen(out this.pStream, IntPtr.Zero, this.pwfxSource, this.pwfxDest,
                                               IntPtr.Zero, IntPtr.Zero, UIntPtr.Zero, 0);
            if (mmr != 0)
            {
                throw new SoundException("acmStreamOpen", mmr);
            }
            int cbSrcLength = (int)((this.bufferLength / 1000.0) * this.inputFormat.AverageBytesPerSecond);

            this.header             = new AcmInterop.ACMSTREAMHEADER();
            this.header.cbStruct    = Marshal.SizeOf(this.header);
            this.header.cbSrcLength = cbSrcLength;
            int suggestedDstLength;

            mmr = AcmInterop.acmStreamSize(this.pStream, cbSrcLength, out suggestedDstLength,
                                           AcmInterop.ACM_STREAMSIZEF_SOURCE);
            try {
                this.header.cbDstLength = suggestedDstLength;
                this.header.pbDst       = Marshal.AllocHGlobal(suggestedDstLength);
                this.header.pbSrc       = Marshal.AllocHGlobal(cbSrcLength);
                this.headerGCHandle     = GCHandle.Alloc(this.header, GCHandleType.Pinned);
                mmr = AcmInterop.acmStreamPrepareHeader(this.pStream, ref this.header, 0);
                if (mmr != 0)
                {
                    throw new SoundException("acmStreamPrepareHeader", mmr);
                }
                this.isStart = true;
                this.opened  = true;
            }
            finally {
                if (!this.opened)
                {
                    this.Close();
                }
            }
        }
Example #34
 private static extern ErrorCode SetSoftwareFormat(IntPtr system, int samplerate, SoundFormat format, int numoutputchannels, int maxinputchannels, DspResampler resamplemethod);
Example #35
 private static extern ErrorCode GetSoftwareFormat(IntPtr system, ref int samplerate, ref SoundFormat format, ref int numoutputchannels, ref int maxinputchannels, ref DspResampler resamplemethod, ref int bits);
Example #36
        public static AcmConvertionMap GetConvertionMap(SoundFormat[] inputFormats, SoundFormatTag tagFilter)
        {
            // First, we enumerate conversion formats
            AcmConvertionMap initConvertionMap = new AcmConvertionMap();
            int maxFormatSize = GetMaxFormatSize();

            // Enumerate ACM drivers
            foreach (int driverId in GetDriverIds())
            {
                // Open driver
                IntPtr phDriver;
                int mmr = AcmInterop.acmDriverOpen(out phDriver, driverId, 0);
                if (mmr != 0)
                {
                    continue;
                }
                // For each input format, we do enumeration
                foreach (SoundFormat inputFormat in inputFormats)
                {
                    // Fill format details struct
                    AcmInterop.ACMFORMATDETAILS fmtDetails = new AcmInterop.ACMFORMATDETAILS();
                    IntPtr pwfxFormat = inputFormat.ToPtr(maxFormatSize);
                    fmtDetails.cbStruct = Marshal.SizeOf(fmtDetails);
                    fmtDetails.pwfx     = pwfxFormat;
                    fmtDetails.cbwfx    = maxFormatSize;

                    // Enumerate conversion formats
                    callbackFormats = new List<SoundFormat>();
                    IntPtr pwfxInput = inputFormat.ToPtr();
                    mmr = AcmInterop.acmFormatEnum(phDriver, ref fmtDetails, FormatEnumCallback, IntPtr.Zero,
                                                   AcmInterop.ACM_FORMATENUMF_CONVERT);
                    Marshal.FreeHGlobal(pwfxInput);

                    // Add formats to the map (if the enumeration succeeded)
                    if (mmr == 0)
                    {
                        initConvertionMap.Add(inputFormat, callbackFormats);
                    }
                    callbackFormats = null;
                }

                // Close driver
                mmr = AcmInterop.acmDriverClose(phDriver, 0);
            }

            // Now we query ACM to make sure each conversion is supported
            AcmConvertionMap finalConvertionMap = new AcmConvertionMap();
            SoundFormat[]    inputs             = initConvertionMap.GetInputs();
            foreach (SoundFormat inputFormat in inputs)
            {
                IntPtr pwfxSrc = inputFormat.ToPtr();
                foreach (SoundFormat outputFormat in initConvertionMap.GetOutputs(inputFormat))
                {
                    // Filter tags
                    if (tagFilter != SoundFormatTag.UNKNOWN && outputFormat.Tag != tagFilter)
                    {
                        continue;
                    }
                    IntPtr phs;
                    IntPtr pwfxDst = outputFormat.ToPtr();
                    // Open an ACM stream using the query flag
                    int mmr = AcmInterop.acmStreamOpen(out phs, IntPtr.Zero, pwfxSrc, pwfxDst, IntPtr.Zero, IntPtr.Zero,
                                                       UIntPtr.Zero, AcmInterop.ACM_STREAMOPENF_QUERY);
                    Marshal.FreeHGlobal(pwfxDst);

                    // Add format to the final map if the query succeeded
                    if (mmr == 0)
                    {
                        finalConvertionMap.Add(inputFormat, outputFormat);
                    }
                }
                Marshal.FreeHGlobal(pwfxSrc);
            }
            return finalConvertionMap;
        }
Example #37
File: Sound.cs Project: sladen/openbve2
 // constructors
 /// <summary>Creates a new instance of this class.</summary>
 /// <param name="format">The sound format of the raw data.</param>
 /// <param name="bytes">The byte raw data. With 8 bits per sample, values are unsigned from 0 to 255. With 16 bits per sample, values are signed from -32768 to 32767 in little endian byte order. Channels are interleaved in that one sample from each channel is given sequentially before continuing with the next sample.</param>
 public SoundData(SoundFormat format, byte[] bytes)
 {
     this.Format = format;
     this.Bytes = bytes;
 }
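As a concrete instance of the layout described in the remarks: 16-bit stereo data is stored as L0-low, L0-high, R0-low, R0-high, L1-low, L1-high, ..., so each interleaved sample frame occupies four bytes.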
Example #38
File: Sound.cs Project: nathanchere/nFMOD
 private static extern ErrorCode GetFormat(IntPtr sound, ref SoundType type, ref SoundFormat format, ref int channels, ref int bits);
Example #39
 private static bool FormatEnumCallback(IntPtr had, ref AcmInterop.ACMFORMATDETAILS pafd, UIntPtr dwInstance,
                                        int fdwSupport)
 {
     if (pafd.cbwfx >= Marshal.SizeOf(typeof(MMInterop.WAVEFORMATEX)))
     {
         SoundFormat soundFormat = new SoundFormat(pafd.pwfx);
         callbackFormats.Add(soundFormat);
     }
     return true;
 }
Example #40
        /// <summary>
        /// Creates a new stream instance using the provided stream as a source.
        ///
        /// TODO: allow selecting stereo or mono in the constructor (note that
        ///   this also requires "implementing" the stereo format).
        /// </summary>
        public Mp3Stream(Stream sourceStream, int chunkSize)
        {
            FormatRep    = SoundFormat.Pcm16BitStereo;
            SourceStream = sourceStream;
            JZBitStream  = new javazoom.jl.decoder.Bitstream(new javazoom.jl.decoder.BackStream(SourceStream, chunkSize));
            QueueOBuffer = new OBuffer16BitStereo();

            JZDecoder.OutputBuffer = QueueOBuffer;
        }