GetSampleSizeInBytes() public method

Returns the size, in bytes, of the audio buffer required to hold sound data of the specified duration at this instance's sample rate and channel count.

public GetSampleSizeInBytes ( System.TimeSpan duration ) : int
duration System.TimeSpan The duration of audio for which the buffer size is calculated.
return int The required buffer size in bytes.
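The project samples below all follow the same pattern: create a DynamicSoundEffectInstance, size a byte buffer with GetSampleSizeInBytes, fill the buffer with 16-bit PCM data, and resubmit it from the BufferNeeded event. Since the instance consumes 16-bit PCM, the returned size works out to roughly sampleRate × channels × 2 bytes per second of audio. Here is a minimal, self-contained sketch of that pattern; the SineStreamer class, the 440 Hz tone, and the 100 ms buffer duration are illustrative assumptions, not part of the API.

using System;
using Microsoft.Xna.Framework;
using Microsoft.Xna.Framework.Audio;

class SineStreamer
{
    const int SampleRate = 44100;
    const double Frequency = 440.0;   // illustrative tone frequency, not part of the API

    readonly DynamicSoundEffectInstance _instance;
    readonly byte[] _buffer;
    double _phase;

    public SineStreamer()
    {
        _instance = new DynamicSoundEffectInstance(SampleRate, AudioChannels.Mono);

        // Ask the instance how many bytes are needed for 100 ms of audio
        // at its sample rate and channel count (16-bit PCM).
        _buffer = new byte[_instance.GetSampleSizeInBytes(TimeSpan.FromMilliseconds(100))];

        // Refill whenever the instance is running low on queued audio.
        _instance.BufferNeeded += (s, e) => SubmitNextBuffer();
    }

    public void Start()
    {
        // Call periodically (e.g. on a timer) when running outside an XNA Game loop.
        FrameworkDispatcher.Update();
        SubmitNextBuffer();
        _instance.Play();
    }

    void SubmitNextBuffer()
    {
        // 16-bit mono PCM: two bytes per sample, little-endian.
        for (int i = 0; i < _buffer.Length; i += 2)
        {
            short sample = (short)(Math.Sin(_phase) * short.MaxValue);
            _buffer[i] = (byte)(sample & 0xFF);
            _buffer[i + 1] = (byte)(sample >> 8);
            _phase += 2.0 * Math.PI * Frequency / SampleRate;
        }
        _instance.SubmitBuffer(_buffer);
    }
}

The examples that follow apply the same idea in different hosts: a Silverlight dispatcher timer, an Ogg Vorbis decoder, reversible playback, microphone capture, a ZeroMQ subscriber, and wave-file streaming.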
Example #1
        public void PlaySound(Tone tone, TimeSpan duration)
        {
            Deployment.Current.Dispatcher.BeginInvoke(() =>
            {
                // Pump the XNA framework on a timer so audio events keep firing outside a Game loop.
                if (_timer == null)
                {
                    _timer = new DispatcherTimer
                    {
                        Interval = TimeSpan.FromMilliseconds(33)
                    };
                    _timer.Tick += delegate { try { FrameworkDispatcher.Update(); } catch { } };
                }

                if (_timer.IsEnabled) _timer.Stop();

                _timeLeft = duration;

                FrameworkDispatcher.Update();
                _frequency = tone;
                _dynamicSound = new DynamicSoundEffectInstance(SampleRate, AudioChannels.Mono);
                _dynamicSound.BufferNeeded += dynamicSound_BufferNeeded;
                _dynamicSound.Play();

                // Size the working buffer to hold one second of audio at the configured sample rate.
                _bufferSize = _dynamicSound.GetSampleSizeInBytes(TimeSpan.FromSeconds(1));
                _soundBuffer = new byte[_bufferSize];

                _timer.Start();
            });
        }
Example #2
    public OggSong(string oggFile)
    {
        reader = new VorbisReader(oggFile);
        effect = new DynamicSoundEffectInstance(reader.SampleRate, (AudioChannels)reader.Channels);
        // half a second of audio per buffer; one float per 16-bit sample for the decoder
        buffer = new byte[effect.GetSampleSizeInBytes(TimeSpan.FromMilliseconds(500))];
        nvBuffer = new float[buffer.Length / 2];

        // when a buffer is needed, set our handle so the helper thread will read in more data
        effect.BufferNeeded += (s, e) => readNextBuffer();
    }
Example #3
 public SoundReversibleInstance(SoundReversible sound, byte[] audioBytes, int sampleRate, AudioChannels channels, bool inReverse)
 {
     this.sound = sound;
     this.sampleRate = sampleRate;
     this.channels = channels;
     reversed = inReverse;
     baseAudioBytes = audioBytes;
     dynamicSound = NewDynamicSoundEffectInstance();

     // Number of bytes needed to hold BUFFER_CHUNK_SIZE milliseconds of audio in one buffer.
     count = dynamicSound.GetSampleSizeInBytes(TimeSpan.FromMilliseconds(BUFFER_CHUNK_SIZE));
 }
Example #4
        /// <summary>
        /// Constructor.
        /// </summary>
        public XnaAudio()
        {
            // Capture microphone audio in 100 ms chunks; BufferReady fires when each chunk is available
            microphone.BufferDuration = TimeSpan.FromMilliseconds(100);
            microphone.BufferReady += new EventHandler<EventArgs>(microphone_BufferReady);

            // Initialize the dynamic sound effect instance for playback at the microphone's sample rate
            playback = new DynamicSoundEffectInstance(microphone.SampleRate, AudioChannels.Mono);
            playback.BufferNeeded += GetSamples;

            // Size one playback buffer to match the 100 ms capture duration
            sampleSize = playback.GetSampleSizeInBytes(TimeSpan.FromMilliseconds(100));
        }
Example #5
        private static void PlayThings(Context ctx, Node srcNode)
        {
            FrameworkDispatcher.Update();
            using (var aud = new DynamicSoundEffectInstance(48000, AudioChannels.Mono))
            {
                // One playback buffer covers SampleTimeMs milliseconds of 48 kHz mono audio.
                SampleSize = aud.GetSampleSizeInBytes(new TimeSpan(0, 0, 0, 0, SampleTimeMs));

                // Subscribe to everything the source node publishes.
                var sub = ctx.Socket(SocketType.SUB);
                sub.Subscribe(new byte[0]);
                sub.Connect(srcNode.Url);

                mBar.SignalAndWait();

                // Flag a refill whenever the playback instance runs low on queued audio.
                bool bufferNeeded = true;
                aud.BufferNeeded += (_, __) => bufferNeeded = true;

                while (isRunning)
                {
                    if (bufferNeeded)
                    {
                        foreach (var n in mNodes.Where(n => n is GeneratorNode).Cast<GeneratorNode>())
                        {
                            n.SignalForData();
                        }
                        bufferNeeded = false;
                    }

                    // Pull the next audio chunk published by the source node.
                    var bytes = sub.Recv(SampleTimeMs / 2);

                    if (bytes != null)
                    {
                        aud.SubmitBuffer(bytes);
                        if (aud.State == SoundState.Stopped)
                            aud.Play();
                    }

                    FrameworkDispatcher.Update();

                    //running = false;
                }

                aud.Stop();
                sub.Dispose();
            }
        }
Example #6
        /// <summary>
        /// Creates an instance.
        /// While this StreamingWave exists, input is kept open for streaming,
        /// and its Dispose method is called by this object's Dispose method.
        /// </summary>
        /// <param name="input">Stream of the wave file.</param>
        /// <param name="bufferDuration">The length of playback time to buffer.</param>
        public StreamingWave(Stream input, TimeSpan bufferDuration)
        {
            if (input == null) throw new ArgumentNullException("input");
            this.input = input;

            reader = new BinaryReader(input);

            // Read the headers up to just before the data section of the 'data' chunk.
            riffChunk = RiffChunk.ReadFrom(reader);
            formatChunk = WaveFormatChunk.ReadFrom(reader);
            dataChunkHeader = ChunkHeader.ReadFrom(reader);

            // Remember the start position of the 'data' chunk's data section.
            dataOffset = input.Position;

            int sampleRate = (int) formatChunk.SampleRate;
            AudioChannels channels = (AudioChannels) formatChunk.Channels;
            dynamicSound = new DynamicSoundEffectInstance(sampleRate, channels);
            dynamicSound.BufferNeeded += new EventHandler<EventArgs>(OnDynamicSoundBufferNeeded);

            bufferSize = dynamicSound.GetSampleSizeInBytes(bufferDuration);
            buffer = new byte[bufferSize];

            readDataAsyncCaller = new ReadDataAsyncCaller(ReadData);
        }