public IWaveProvider Convert()
{
    // Wrap the captured bytes (48 kHz / 16-bit / mono) and resample them to 44.1 kHz.
    using (Stream source = new MemoryStream(buffer))
    using (var raw = new RawSourceWaveStream(source, new WaveFormat(48000, 16, 1)))
    using (var resampled = new WaveFormatConversionStream(new WaveFormat(44100, 16, 1), raw))
    using (var rendered = new MemoryStream())
    {
        // Render the resampled audio into an in-memory WAV image and remember its size.
        WaveFileWriter.WriteWavFileToStream(rendered, resampled.ToSampleProvider().ToWaveProvider16());
        byte[] wavBytes = rendered.ToArray();
        waveBufferSize = wavBytes.Length;

        // Queue at most BufferLength bytes; any excess is silently dropped.
        // NOTE(review): wavBytes is a complete WAV file, so the RIFF header bytes
        // are queued as if they were samples — confirm this is intended.
        var buffered = new BufferedWaveProvider(new WaveFormat(44100, 1));
        buffered.AddSamples(wavBytes, 0, Math.Min(wavBytes.Length, buffered.BufferLength));

        // Expose the queued 16-bit samples to the caller as IEEE float.
        return new Wave16ToFloatProvider(buffered);
    }
}
public void StartDetect(int inputDevice)
{
    // Marshal onto the dispatcher thread; WaveInEvent callbacks fill the shared
    // buffered provider owned by `sound`.
    this.Dispatcher.BeginInvoke((ThreadStart) delegate()
    {
        waveIn = new WaveInEvent
        {
            DeviceNumber = inputDevice,
            WaveFormat = new WaveFormat(44100, 1)
        };
        waveIn.DataAvailable += sound.WaveIn_DataAvailable;
        sound.bufferedWaveProvider = new BufferedWaveProvider(waveIn.WaveFormat);

        // begin record
        waveIn.StartRecording();

        IWaveProvider floatStream = new Wave16ToFloatProvider(sound.bufferedWaveProvider);
        Pitch detector = new Pitch(floatStream);

        // One-shot detection: read a single 8 KB chunk and report the pitch if any.
        byte[] chunk = new byte[8192];
        floatStream.Read(chunk, 0, chunk.Length);
        freq = detector.Get(chunk);
        if (freq != 0)
        {
            ReturnFreq();
        }
    });
}
void Test()
{
    // Decodes the MP3 into 16-bit PCM and converts it to normalized floats for
    // test.SetData. Assumes interleaved stereo 16-bit frames (4 bytes per frame).
    using (var file = new Mp3FileReader(fileName))   // dispose the reader (was leaked)
    {
        int byteCount = (int)file.Length;
        byte[] pcm = new byte[byteCount];
        int read = file.Read(pcm, 0, byteCount);

        // One float per 16-bit sample actually read (read / 2), not per byte:
        // sizing by `read` left the second half of the clip as appended silence.
        data = new float[read / 2];
        for (int i = 0; i + 3 < read; i += 4)   // guard: never index past the bytes read
        {
            // Left/right samples scaled from short range to [-1, 1).
            data[i / 2] = BitConverter.ToInt16(pcm, i) / 32768f;
            data[(i / 2) + 1] = BitConverter.ToInt16(pcm, i + 2) / 32768f;
        }
        // Removed: an unused Wave16ToFloatProvider and a dead debug counter.
    }
    test.SetData(data, 0);
}
private void processToolStripMenuItem_Click(object sender, EventArgs e)
{
    // Lazily build the conversion chain the first time Process is invoked,
    // then run the processing pass.
    if (Wave == null)
    {
        Stream recorded = Recorder.GetRecordingStream();
        recorded.Position = 0;
        var reader = new WaveFileReader(recorded);
        Wave = new WaveFormatConversionStream(WaveFormat, new Wave32To16Stream(reader));
        WaveToFloat = new Wave16ToFloatProvider(Wave);
    }
    ProcessAudio();
}
public virtual void PlayAudioFilePaused()
{
    // Playing then immediately pausing must not consume any audio:
    // the reader position should still be at the start of the file.
    var wavFile = GetPathToWav();
    // `using` guarantees the reader is closed even when the assert throws
    // (the original Close() call was skipped on assertion failure).
    using (var reader = new WaveFileReader(wavFile))
    {
        var converter = new Wave16ToFloatProvider(reader);
        _jackOut.Init(converter);
        _jackOut.Play();
        _jackOut.Pause();
        Thread.Sleep(100);
        _jackOut.Stop();
        Assert.AreEqual(0, reader.Position);
    }
}
public virtual void PlayAudioFile()
{
    // Plays a short clip and verifies the JACK output produced non-silent samples.
    string wavFile = GetPathToWav();
    // `using` guarantees the reader is closed even when the assert throws
    // (the original Close() preceded the assert, but leaked on earlier failures).
    using (var reader = new WaveFileReader(wavFile))
    {
        var converter = new Wave16ToFloatProvider(reader);
        Analyser analyser = new Analyser();
        // NOTE(review): the handler is never unsubscribed; acceptable for a
        // short-lived test client, but would leak if _client is long-lived.
        _client.ProcessFunc += analyser.AnalyseOutAction;
        _jackOut.Init(converter);
        _jackOut.Play();
        Thread.Sleep(100);
        _jackOut.Stop();
        Assert.AreNotEqual(0, analyser.NotEmptySamples);
    }
}
private void playMoveRotate(Stream sound)
{
    // Globally muted: skip playback entirely.
    if (AudioSettings.VOL == 0)
    {
        return;
    }

    wave = new WaveFileReader(sound);
    var aligned = new BlockAlignReductionStream(wave);
    var floatProvider = new Wave16ToFloatProvider(aligned)
    {
        // Gain scaled down by 200 (so VOL maps well below unity).
        Volume = AudioSettings.VOL / 200F
    };

    outputMoveAndRotate = new DirectSoundOut();
    outputMoveAndRotate.PlaybackStopped += playBackStoppedMoveRotate;
    outputMoveAndRotate.Init(floatProvider);
    outputMoveAndRotate.Play();
}
private void playMusic(Stream music)
{
    // Skip when either the master volume or the music channel is muted.
    if (AudioSettings.VOL == 0 || AudioSettings.MUSIC == 0)
    {
        return;
    }

    wave = new WaveFileReader(music);
    var aligned = new BlockAlignReductionStream(wave);
    // LoopStream makes the track repeat indefinitely.
    var looped = new LoopStream(aligned);
    var floatProvider = new Wave16ToFloatProvider(looped)
    {
        // Gain scaled down by 200 (so VOL maps well below unity).
        Volume = AudioSettings.VOL / 200F
    };

    outputMusic = new DirectSoundOut();
    outputMusic.PlaybackStopped += playBackStoppedMusic;
    outputMusic.Init(floatProvider);
    outputMusic.Play();
}
public static void ApplyAutoTune(string fileToProcess, string tempFile, AutoTuneSettings autotuneSettings)
{
    // Pipeline: 16-bit WAV -> IEEE float -> auto-tune effect -> back to 16-bit WAV.
    using (WaveFileReader reader = new WaveFileReader(fileToProcess))
    {
        IWaveProvider asFloat = new Wave16ToFloatProvider(reader);
        IWaveProvider tuned = new AutoTuneWaveProvider(asFloat, autotuneSettings);
        IWaveProvider asPcm16 = new WaveFloatTo16Provider(tuned);
        using (WaveFileWriter writer = new WaveFileWriter(tempFile, asPcm16.WaveFormat))
        {
            byte[] chunk = new byte[8192];
            while (true)
            {
                int count = asPcm16.Read(chunk, 0, chunk.Length);
                writer.Write(chunk, 0, count);
                // Stop on end-of-stream, or once the output has caught up with
                // the input length (guards against chains that never report EOF).
                if (count == 0 || writer.Length >= reader.Length)
                {
                    break;
                }
            }
        }
    }
}
// Converts the reader's 16-bit PCM content into a float array by widening it
// through Wave16ToFloatProvider (two output bytes per input byte, four bytes
// per float) and viewing the byte buffer as floats.
public float[] convert(MediaFoundationReader x)
{
    int byteCount = (int)x.Length * 2;
    IWaveProvider floatStream = new Wave16ToFloatProvider(x);
    WaveBuffer waveBuffer = new WaveBuffer(byteCount);

    // Size the result from the bytes actually delivered: Read may return fewer
    // than requested (the original ignored the return value and could emit
    // trailing garbage/zeros).
    int bytesRead = floatStream.Read(waveBuffer, 0, byteCount);
    int floatCount = bytesRead / sizeof(float);

    float[] floatBuffer = new float[floatCount];
    for (int i = 0; i < floatCount; i++)
    {
        floatBuffer[i] = waveBuffer.FloatBuffer[i];
    }
    return floatBuffer;
}
/** Return an array with all the music frames */
private static float[] GetRawMp3Frames(string filename)
{
    using (MediaFoundationReader media = new MediaFoundationReader(filename))
    {
        // Widening 16-bit PCM to 32-bit float doubles the byte count.
        int widenedBytes = (int)media.Length * 2;
        int totalFloats = widenedBytes / sizeof(float);

        IWaveProvider floatStream = new Wave16ToFloatProvider(media);
        WaveBuffer scratch = new WaveBuffer(widenedBytes);
        floatStream.Read(scratch, 0, widenedBytes);

        // Only the first half of the converted samples is kept — presumably one
        // channel's worth of an interleaved stream; confirm against callers.
        int kept = totalFloats / 2;
        float[] frames = new float[kept];
        for (int n = 0; n < kept; n++)
        {
            frames[n] = scratch.FloatBuffer[n];
        }
        return frames;
    }
}
public static void ApplyAutoTune(string fileToProcess, string tempFile, AutoTuneSettings autotuneSettings)
{
    // Runs the auto-tune effect over a 16-bit WAV file and writes the result to
    // tempFile: PCM16 -> float -> AutoTuneWaveProvider -> PCM16.
    using (var source = new WaveFileReader(fileToProcess))
    {
        IWaveProvider asFloat = new Wave16ToFloatProvider(source);
        IWaveProvider tuned = new AutoTuneWaveProvider(asFloat, autotuneSettings);
        IWaveProvider asPcm16 = new WaveFloatTo16Provider(tuned);

        using (var destination = new WaveFileWriter(tempFile, asPcm16.WaveFormat))
        {
            // buffer length needs to be a power of 2 for FFT to work nicely
            // however, make the buffer too long and pitches aren't detected fast enough
            // successful buffer sizes: 8192, 4096, 2048, 1024
            // (some pitch detection algorithms need at least 2048)
            var block = new byte[8192];
            int readCount = asPcm16.Read(block, 0, block.Length);
            destination.Write(block, 0, readCount);
            // Keep pumping until the chain runs dry, or the output has caught up
            // with the source length (the chain may never report end-of-stream).
            while (readCount != 0 && destination.Length < source.Length)
            {
                readCount = asPcm16.Read(block, 0, block.Length);
                destination.Write(block, 0, readCount);
            }
        }
    }
}
public void StartDetect(int inputDevice)
{
    // Capture mono 44.1 kHz audio from the chosen device and continuously report
    // the detected pitch until ESC is pressed or the stream ends.
    WaveInEvent waveIn = new WaveInEvent
    {
        DeviceNumber = inputDevice,
        WaveFormat = new WaveFormat(44100, 1)
    };
    waveIn.DataAvailable += WaveIn_DataAvailable;
    bufferedWaveProvider = new BufferedWaveProvider(waveIn.WaveFormat);

    // begin record
    waveIn.StartRecording();

    IWaveProvider floatStream = new Wave16ToFloatProvider(bufferedWaveProvider);
    Pitch detector = new Pitch(floatStream);
    byte[] chunk = new byte[8192];

    Console.WriteLine("Play or sing a note! Press ESC to exit at any time. \n");
    while (true)
    {
        int count = floatStream.Read(chunk, 0, chunk.Length);
        float detected = detector.Get(chunk);
        if (detected != 0)
        {
            Console.WriteLine("Freq: " + detected + " | Note: " + GetNote(detected));
        }
        if (count == 0 || (Console.KeyAvailable && Console.ReadKey(true).Key == ConsoleKey.Escape))
        {
            break;
        }
    }

    // stop recording
    waveIn.StopRecording();
    waveIn.Dispose();
}
/// <summary>
/// Loads an audio file and registers it with the sound driver under the "main" key.
/// Mono sources are widened to stereo; multi-channel sources pass through unchanged.
/// (Whether to also force 44100 Hz here is still an open question.)
/// </summary>
/// <param name="filename">Path of the audio file to load.</param>
public static void MainFileLoad(string filename)
{
    IWaveProvider FloatStereo44100Provider;
    AudioFileReader reader = new AudioFileReader(filename);
    audiofilereader = reader;
    Test.Print("Volume消す");

    IWaveProvider stereo;
    if (reader.WaveFormat.Channels != 1)
    {
        stereo = reader;
    }
    else if (reader.WaveFormat.Encoding == WaveFormatEncoding.IeeeFloat)
    {
        // float mono -> 16-bit -> stereo -> float
        stereo = new Wave16ToFloatProvider(new MonoToStereoProvider16(new WaveFloatTo16Provider(reader)));
        // NOTE(review): conv is constructed but never used afterwards — kept only
        // for behavioral parity (the constructor may have side effects); confirm
        // whether a resample to 44100 Hz stereo was intended here.
        WaveFormatConversionProvider conv = new WaveFormatConversionProvider(new WaveFormat(44100, 2), stereo);
    }
    else if (reader.WaveFormat.Encoding == WaveFormatEncoding.Pcm)
    {
        stereo = new Wave16ToFloatProvider(new MonoToStereoProvider16(reader));
    }
    else
    {
        // Unsupported encoding: nothing is registered.
        return;
    }

    FloatStereo44100Provider = stereo;
    SoundDriver.AddWaveProvider(FloatStereo44100Provider, "main");
}
/// <summary>
/// Load a pre-recorded WAV
/// </summary>
private void openToolStripMenuItem_Click(object sender, EventArgs e)
{
    SetupChart();

    OpenFileDialog open = new OpenFileDialog
    {
        InitialDirectory = @"c:\users\chris\appdata\local\temp",
        Filter = "Wave File (*.wav)|*.wav;"
    };
    if (open.ShowDialog() != DialogResult.OK)
    {
        return;
    }

    // This will almost certainly go south! ;)
    WaveFileReader waveFileReader = new WaveFileReader(open.FileName);
    Wave = new WaveFormatConversionStream(WaveFormat, WaveFormatConversionStream.CreatePcmStream(waveFileReader));
    WaveToFloat = new Wave16ToFloatProvider(Wave);

    // Switch the menu into "loaded file" mode.
    processToolStripMenuItem.Enabled = true;
    clearToolStripMenuItem.Enabled = true;
    startToolStripMenuItem.Enabled = false;
    stopToolStripMenuItem.Enabled = false;
}
// Entry point: hosts a minimal HttpListener server on localhost:5000 exposing
// GET /getPlugins, POST /changePluginsParameters and POST /audioProcessing.
// NOTE(review): each request is handed to Task.Factory.StartNew while the main
// loop blocks on the next GetContext; responses for unmatched routes are never
// written, leaving those connections hanging — confirm intended.
static void Main(string[] args)
{
    PluginUtil pluginUtil = new PluginUtil();
    pluginUtil.OpenPlugin(@"Plugins\RoughRider2.dll");
    pluginUtil.OpenPlugin(@"Plugins\TAL-Reverb-4-64.dll");
    //pluginUtil.ChangeParametersValue();
    //Console.WriteLine(pluginUtil.GetListOfPlugins());
    var url = "http://localhost:5000/";
    var listener = new HttpListener();
    listener.Prefixes.Add(url);
    while (true)
    {
        // NOTE(review): Start() is invoked on every iteration; appears redundant
        // after the first pass — confirm it can move above the loop.
        listener.Start();
        var context = listener.GetContext();
        var request = context.Request;
        if (request.HttpMethod.Equals("GET"))
        {
            if (request.Url.Equals(url + "getPlugins"))
            {
                // Respond asynchronously with the JSON plugin list.
                Task.Factory.StartNew(() =>
                {
                    var response = context.Response;
                    response.AddHeader("Content-Type", "application/json");
                    var responseString = pluginUtil.GetListOfPlugins();
                    var buffer = System.Text.Encoding.UTF8.GetBytes(responseString);
                    response.ContentLength64 = buffer.Length;
                    var output = response.OutputStream;
                    output.Write(buffer, 0, buffer.Length);
                    output.Close();
                });
            }
        }
        else if (request.HttpMethod.Equals("POST"))
        {
            if (request.Url.Equals(url + "changePluginsParameters"))
            {
                // Read the request body and forward it to the plugin utility;
                // reply with an empty 200.
                Task.Factory.StartNew(() =>
                {
                    var response = context.Response;
                    string body;
                    using (var reader = new StreamReader(request.InputStream))
                        body = reader.ReadToEnd();
                    pluginUtil.ChangeParametersValue(body);
                    var buffer = System.Text.Encoding.UTF8.GetBytes("");
                    response.ContentLength64 = buffer.Length;
                    response.StatusCode = 200;
                    var output = response.OutputStream;
                    output.Write(buffer, 0, buffer.Length);
                    output.Close();
                });
            }
            else if (request.Url.Equals(url + "audioProcessing"))
            {
                // Experimental endpoint: decodes a hard-coded MP3 to floats,
                // re-quantizes to 16-bit little-endian PCM, dumps raw PCM to
                // "myFile.wav" (headerless, despite the name), then replies 200.
                Task.Factory.StartNew(() =>
                {
                    //var bytes = File.ReadAllBytes("Audio/Adele - Hello.mp3");
                    var response = context.Response;
                    //string body;
                    //using (var reader = new StreamReader(request.InputStream))
                    //    body = reader.ReadToEnd();
                    //pluginUtil.AudioProcessing(bytes);
                    //var floatArray2 = new float[bytes.Length / sizeof(float)];
                    //Buffer.BlockCopy(bytes, 0, floatArray2, 0, floatArray2.Length);
                    //var mp3 = new Mp3FileReader("Audio/Adele - Hello.mp3");
                    //byte[] buffer = new byte[reader.Length];
                    //int read = reader.Read(buffer, 0, buffer.Length);
                    //short[] sampleBuffer = new short[read / 2];
                    //Buffer.BlockCopy(buffer, 0, sampleBuffer, 0, read);
                    //ISampleProvider audio = new AudioFileReader("Audio/Adele - Hello.mp3");
                    //float[] arr = new float[1024];
                    //audio.Read(arr, 1024, 1);
                    //var byteArray = new byte[floatArray2.Length * sizeof(float)];
                    //Buffer.BlockCopy(floatArray2, 0, byteArray, 0, byteArray.Length);
                    //var floatArray = bytes.Select(b => (float)Convert.ToDouble(b)).ToArray();
                    //var byteArray = floatArray.Select(f => Convert.ToByte(f)).ToArray();
                    float[] floatBuffer;
                    // Widen the MP3's decoded 16-bit PCM to IEEE float via
                    // Wave16ToFloatProvider (two bytes out per byte in).
                    using (MediaFoundationReader media = new MediaFoundationReader("Audio/Eminem - Venom (Official Music Video).mp3"))
                    {
                        int _byteBuffer32_length = (int)media.Length * 2;
                        int _floatBuffer_length = _byteBuffer32_length / sizeof(float);
                        IWaveProvider stream32 = new Wave16ToFloatProvider(media);
                        WaveBuffer _waveBuffer = new WaveBuffer(_byteBuffer32_length);
                        // NOTE(review): the Read return value is ignored; a short
                        // read leaves the tail of floatBuffer as zeros.
                        stream32.Read(_waveBuffer, 0, (int)_byteBuffer32_length);
                        floatBuffer = new float[_floatBuffer_length];
                        for (int i = 0; i < _floatBuffer_length; i++)
                        {
                            floatBuffer[i] = _waveBuffer.FloatBuffer[i];
                        }
                    }
                    // Re-quantize the floats to 16-bit little-endian PCM.
                    var pcm = new byte[floatBuffer.Length * 2];
                    int sampleIndex = 0, pcmIndex = 0;
                    while (sampleIndex < floatBuffer.Length)
                    {
                        var outsample = (short)(floatBuffer[sampleIndex] * short.MaxValue);
                        pcm[pcmIndex] = (byte)(outsample & 0xff);
                        pcm[pcmIndex + 1] = (byte)((outsample >> 8) & 0xff);
                        sampleIndex++;
                        pcmIndex += 2;
                    }
                    Console.WriteLine("Here");
                    //pluginUtil.AudioProcessing(floatBuffer);
                    // NOTE(review): because the SoundPlayer block below is
                    // commented out, the body of this using is the file-write
                    // using that follows; `ms` itself is never used.
                    using (MemoryStream ms = new MemoryStream(pcm))
                    //{
                    //    // Construct the sound player
                    //    SoundPlayer player = new SoundPlayer(ms);
                    //    Console.WriteLine("Playing");
                    //    player.Play();
                    //    Console.WriteLine("End Playing");
                    //}
                    using (FileStream fs = File.Create("myFile.wav"))
                    {
                        fs.Write(pcm, 0, pcm.Length);
                    }
                    //using (var writer = new LameMP3FileWriter(mp3FileName, reader.WaveFormat, bitRate))
                    //    reader.CopyTo(writer);
                    //File.WriteAllBytes("Audio/eqwad.mp3", pcm);
                    //Console.WriteLine(floatBuffer.Length);
                    //pluginUtil.AudioProcessing(floatBuffer);
                    //Console.WriteLine("Bytes = "+new FloatByteMp3().GetBytes("Audio/Adele - Hello.mp3").Length);
                    //Console.WriteLine("Floats = "+new FloatByteMp3().GetFloat("Audio/Adele - Hello.mp3").Length);
                    //if (byteArray.Equals(bytes)) Console.WriteLine(true);
                    //else
                    //{
                    //    Console.WriteLine(false);
                    //}
                    //File.WriteAllBytes("Audio/a10.mp3", bytes);
                    var buffer = System.Text.Encoding.UTF8.GetBytes("");
                    response.ContentLength64 = buffer.Length;
                    response.StatusCode = 200;
                    var output = response.OutputStream;
                    output.Write(buffer, 0, buffer.Length);
                    output.Close();
                });
            }
        }
    }
}
/// <summary>
/// Cast the byte stream into a float buffer and launch the pitch detection,
/// the smooth maker and raise events in case if the standard deviation is too high.
/// </summary>
/// <param name="buffer">Byte buffer corresponding to the raw signal (16-bit PCM, per the volume math below).</param>
/// <param name="bytesRecorded">Number of frames in the buffer. NOTE(review): this parameter is never read — confirm whether it should bound the loops below.</param>
public void pitchComputePeak(byte[] buffer, int bytesRecorded)
{
    // Pump the whole buffer through the Wave16ToFloat -> PitchWaveProvider ->
    // WaveFloatTo16 chain; the reads are only for PitchWaveProvider's side
    // effect (it feeds pitchList) — the 16-bit output is discarded.
    Stream stream = new MemoryStream(buffer);
    var reader = new RawSourceWaveStream(stream, recordingFormat);
    IWaveProvider stream32 = new Wave16ToFloatProvider(reader);
    PitchWaveProvider streamEffect = new PitchWaveProvider(stream32, this);
    IWaveProvider stream16 = new WaveFloatTo16Provider(streamEffect);
    var buffert = new byte[1024];
    int bytesRead;
    do
    {
        bytesRead = stream16.Read(buffert, 0, buffert.Length);
    } while (bytesRead != 0);
    reader.Close();
    stream.Close();

    // Calculate the volume of the voice
    long totalSquare = 0;
    for (int i = 0; i < buffer.Length; i += 2)
    {
        // Reassemble each little-endian 16-bit sample.
        short sample = (short)(buffer[i] | (buffer[i + 1] << 8));
        totalSquare += sample * sample;
    }
    // NOTE(review): this divides by the 1024-byte scratch buffer's length, not
    // by the number of samples in `buffer` — looks like a bug; confirm whether
    // `2 * totalSquare / buffer.Length` was intended.
    long meanSquare = 2 * totalSquare / buffert.Length;
    double rms = Math.Sqrt(meanSquare);
    double volume = rms / 32768.0; // volume is between 0.0 and 1.0
    // I used the average of 8 values to have a better result than just with the volume value
    tabVol[0] = tabVol[1];
    tabVol[1] = tabVol[2];
    tabVol[2] = tabVol[3];
    tabVol[3] = tabVol[4];
    tabVol[4] = tabVol[5];
    tabVol[5] = tabVol[6];
    tabVol[6] = tabVol[7];
    tabVol[7] = volume;
    volume = tabVol.Average();
    // To compare easily the volume value I use an integer
    volume *= 10;
    volume = Convert.ToInt32(volume);

    //wiggle = the WIGGLE_SIZE(= 170) last elenents from pitchList
    // NOTE(review): `i` here is a field, not a local — its value persists
    // across calls and is also incremented at the end of this method.
    if (pitchList.Count > WIGGLE_SIZE)
    {
        for (i = 0; i < WIGGLE_SIZE; i++)
        {
            wiggle[i] = pitchList[pitchList.Count - (WIGGLE_SIZE + 1) + i];
        }
    }

    // Forward only the pitch values added since the previous call.
    int newValues = pitchList.Count - oldPitchListSize;
    oldPitchListSize = pitchList.Count;
    for (i = newValues; i > 0; i--)
    {
        DrawingSheetAvatarViewModel.backgroundXMLVoiceRecordingEventStream?.Invoke(this, pitchList[pitchList.Count - 1 - i]);
    }
    this.PitchSmoothing();

    if (canSendEvent)
    {
        lock (Lock)
        {
            // NOTE(review): sd1 and sd are computed but never used below; only
            // sd2 feeds the sliding window.
            double sd1 = StdDevTShort();
            double sd2 = StdDevLong();
            double sd = StdDev();
            // each time the function is called we shift the values
            tab[0] = tab[1];
            tab[1] = tab[2];
            tab[2] = tab[3];
            tab[3] = tab[4];
            tab[4] = tab[5];
            tab[5] = tab[6];
            tab[6] = tab[7];
            tab[7] = sd2;
            // If the rising is over, permit to don't count a peak as a rising up and down in a row
            if (tab[0] == 0 || tab[tab.Length - 1] == 0 || (tab[0] <= beginningValue + 5 && tab[0] >= beginningValue - 5 && advancementCounter > 2))
            {
                state = 0;
                beginningValue = 0;
                advancementCounter = 0;
            }
            else
            {
                // If the curve is flat
                if (state == 0)
                {
                    // Calculate the wanted index
                    int indMax = localMaximum(tab);
                    int indMin = localMinimum(tab);
                    // If there is a big rising up
                    if (tab[indMax] > tab[0] + peakThreshold)
                    {
                        state = 1;
                        beginningValue = tab[0];
                        advancementCounter = 0;
                    }
                    // Or if there is a big rising down
                    else if (tab[indMin] < tab[0] - peakThreshold)
                    {
                        state = -1;
                        beginningValue = tab[0];
                        advancementCounter = 0;
                    }
                    else
                    {
                        beginningValue = 0;
                        advancementCounter = 0;
                    }
                }
                // If the curve is growing up or down
                else
                {
                    double average = 0.0;
                    // Calculate the average of the seven last value
                    for (int cpt = 1; cpt < tab.Length - 1; cpt++)
                    {
                        average += tab[cpt];
                    }
                    average /= tab.Length - 1;
                    // The curve is flat, so it reinitialize all
                    if (average > tab[0] - 3 && average < tab[0] + 3)
                    {
                        beginningValue = 0;
                        advancementCounter = 0;
                        state = 0;
                    }
                    // The counter advance
                    else
                    {
                        advancementCounter++;
                    }
                }
            }
            // All the feedback files of the different tests
            //using (System.IO.StreamWriter file = new System.IO.StreamWriter(@"C:\Users\Public\TestFolder\test.txt", true))
            //{
            //    file.WriteLine("frame n" + k + " -- state : " + state);
            //}
            //using (System.IO.StreamWriter file = new System.IO.StreamWriter(@"C:\Users\Public\TestFolder\testShort.txt", true))
            //{
            //    file.WriteLine(sd1);
            //}
            //using (System.IO.StreamWriter file = new System.IO.StreamWriter(@"C:\Users\Public\TestFolder\testLong.txt", true))
            //{
            //    file.WriteLine(sd2);
            //}
            //using (System.IO.StreamWriter file = new System.IO.StreamWriter(@"C:\Users\Public\TestFolder\testVol.txt", true))
            //{
            //    file.WriteLine("frame n" + k + " -- state : " + volume);
            //}
            // For testing the efficient of the peak detection we used the boring event to display the feedback.
            // ASu has now changed this to use a peak event
            if (volume >= 2) //volumethreshold)
            {
                if (state == 1) //&& !this.sent)
                {
                    PeakEvent(this, new InstantFeedback("Rising Tone"));
                    //this.sent = true;
                }
                if (state == -1) //&& !this.sent)
                {
                    PeakEvent(this, new InstantFeedback("Falling Tone"));
                    //this.sent = true;
                }
            }
            k++;
            i++;
        }
    }
}
// Game main: loads the track's audio (widening mono sources to stereo), starts
// playback through SoundDriver, wires up the MIDI fader/record inputs, then runs
// the DxLib render loop until ProcessMessage reports shutdown.
// NOTE(review): `reader` is never disposed, and `random`/`dbg` are unused.
public static void DoMain(CdjData cdjdata)
{
    //using NAudio.Wave;
    // Simple playback (create two of these for simultaneous multi-track playback).
    IWaveProvider FloatStereo44100Provider;
    AudioFileReader reader;
    reader = new AudioFileReader(cdjdata.SOUND);
    reader.Volume = 0.1F;
    IWaveProvider stereo;
    if (reader.WaveFormat.Channels == 1)
    {
        if (reader.WaveFormat.Encoding == WaveFormatEncoding.IeeeFloat)
        {
            //NAudio.Wave.SampleProviders.MonoToStereoSampleProvider s = new NAudio.Wave.SampleProviders.MonoToStereoSampleProvider(reader);
            // float mono -> 16-bit -> stereo -> float
            stereo = new Wave16ToFloatProvider(new MonoToStereoProvider16(new WaveFloatTo16Provider(reader)));
            // NOTE(review): conv is constructed but never used afterwards.
            WaveFormatConversionProvider conv = new WaveFormatConversionProvider(new WaveFormat(44100, 2), stereo);
        }
        else if (reader.WaveFormat.Encoding == WaveFormatEncoding.Pcm)
        {
            stereo = new Wave16ToFloatProvider(new MonoToStereoProvider16(reader));
        }
        else
        {
            return;
        }
    }
    else
    {
        stereo = reader;
    }
    // Unify everything to this format in the end (whether to force 44100 Hz
    // is still under consideration).
    FloatStereo44100Provider = stereo;
    SoundDriver.AddWaveProvider(FloatStereo44100Provider, "main");
    SoundDriver.Play();
    //while (waveOut.PlaybackState == PlaybackState.Playing)
    //{
    //    Application.DoEvents();
    //    this.Text = reader.CurrentTime.ToString();
    //}
    // Wait for playback to finish.
    //
    // If the WaveOut instance is disposed before playback finishes,
    // playback stops at that moment.
    //inputAnalogFader inpfader = new inputAnalogFader();
    inputMIDIFader inpfader = new inputMIDIFader(midiinput);
    inputMIDIRecord RecordL = new inputMIDIRecord(CdjData.Left, midiinput);
    inputMIDIRecord RecordR = new inputMIDIRecord(CdjData.Right, midiinput);
    inpfader.Initial();
    SetDrawObjects();
    //GHplaylineL = DX.LoadGraph("playline.png");
    //GHplaylineR = DX.LoadGraph("playline2.png");
    int MovieGraphHandle;
    int MovieGraphHandle1, MovieGraphHandle2;
    MovieGraphHandle1 = DX.LoadGraph("B3_TYPE42.avi");
    MovieGraphHandle2 = DX.LoadGraph("E_Map_TYPE01a.avi");
    // Switch the draw target to the back buffer.
    DX.SetDrawScreen(DX.DX_SCREEN_BACK);
    // Initialize the state used to scroll images left/right.
    now.set_startbpm(0, cdjdata.BPM);
    //now.set_startbpm(DX.GetNowCount(), music.BPM);
    Random random = new Random();
    int dbg = 0;
    // Main loop.
    while (DX.ProcessMessage() != -1)
    {
        // Exit when playback finishes.
        /*
         * if ((DX.GetJoypadInputState(DX.DX_INPUT_KEY_PAD1) & DX.PAD_INPUT_9) != 0)
         * {
         *     waveOut.Dispose();
         *     break;
         * }
         * if (waveOut.PlaybackState == PlaybackState.Stopped)
         * {
         *     waveOut.Dispose();
         *     break;
         * }
         */
        // Clear the screen.
        DX.ClearDrawScreen();
        // Advance game time from the audio playback position.
        now.settime((int)reader.CurrentTime.TotalMilliseconds);
        cdjdata.SetStep(now.judgementlinestep);
        // Movie handling.
        {
            // Choose which movie to show based on the pad's Right button.
            if ((DX.GetJoypadInputState(DX.DX_INPUT_KEY_PAD1) & DX.PAD_INPUT_RIGHT) != 0)
            {
                MovieGraphHandle = MovieGraphHandle1;
            }
            else
            {
                MovieGraphHandle = MovieGraphHandle2;
            }
            // Draw the image.
            // Movie.
            DX.SetDrawBlendMode(DX.DX_BLENDMODE_NOBLEND, 0);
            // Movie upkeep: restart either movie that is not currently playing.
            if (DX.GetMovieStateToGraph(MovieGraphHandle1) != 1)
            {
                DX.SeekMovieToGraph(MovieGraphHandle1, 0);
                DX.PlayMovieToGraph(MovieGraphHandle1);
            }
            if (DX.GetMovieStateToGraph(MovieGraphHandle2) != 1)
            {
                DX.SeekMovieToGraph(MovieGraphHandle2, 0);
                DX.PlayMovieToGraph(MovieGraphHandle2);
            }
        }
        // Draw the cue discs, play lines, cut-in arrows, etc.
        foreach (DiscQueCutData o in cdjdata.lstquedata)
        {
            o.Cutin(inputFader.GetCutInState());
            if (o.ActiveState == EnumActiveState.NEXT)
            {
                if (o.lr == CdjData.Left)
                {
                    o.Queing(RecordL.DeltaAngle);
                }
                else
                {
                    o.Queing(RecordR.DeltaAngle);
                }
            }
        }
        // Fader-based muting (currently disabled).
        if (cdjdata.nowlr == 1 && inputFader.GetFaderState() == EnumFaderState.RIGHT)
        {
            //waveOut.Volume = 0;
        }
        else if (cdjdata.nowlr == -1 && inputFader.GetFaderState() == EnumFaderState.LEFT)
        {
            //waveOut.Volume = 0;
        }
        else
        {
            //waveOut.Volume = DEF_VOLUME;
        }
        RecordL.Update();
        RecordR.Update();
        inpfader.Update();
        midiinput.update(DateTime.Now);
        //inputFader.GetFaderState();
        //----------------------------------------------------------------------------------------
        // Debug overlay.
        DX.DrawString(0, 0, "fader" + inputFader.GetFaderValue(), DX.GetColor(255, 255, 255));
        DX.DrawString(0, 20, "cutin" + inputFader.GetCutInState().ToString(), DX.GetColor(255, 255, 255));
        DX.DrawString(0, 40, "angle" + RecordR.DeltaAngle.ToString(), DX.GetColor(255, 255, 255));
        DX.DrawString(0, 60, "Pos " + midiinput.Pos(-1).ToString(), DX.GetColor(255, 255, 255));
        DX.DrawString(0, 80, "Spd " + midiinput.Speed(-1).ToString(), DX.GetColor(255, 255, 255));
        //----------------------------------------------------------------------------------------
        // Main drawing pass.
        DoDraw(cdjdata);
    }
}
/// <summary>
/// Cast the byte stream into a float buffer and launch the pitch detection,
/// the smooth maker and raise events in case if the standard deviation is too high.
/// </summary>
/// <param name="buffer">Byte buffer corresponding to the raw signal.</param>
/// <param name="bytesRecorded">Number of frames in the buffer. NOTE(review): this parameter is never read.</param>
public void pitchCompute(byte[] buffer, int bytesRecorded)
{
    // Pump the whole buffer through the Wave16ToFloat -> PitchWaveProvider ->
    // WaveFloatTo16 chain; the reads are only for PitchWaveProvider's side
    // effect (pitch detection) — the converted output is discarded.
    Stream stream = new MemoryStream(buffer);
    var reader = new RawSourceWaveStream(stream, recordingFormat);
    IWaveProvider stream32 = new Wave16ToFloatProvider(reader);
    PitchWaveProvider streamEffect = new PitchWaveProvider(stream32, this);
    IWaveProvider stream16 = new WaveFloatTo16Provider(streamEffect);
    var buffert = new byte[1024];
    int bytesRead;
    do
    {
        bytesRead = stream16.Read(buffert, 0, buffert.Length);
    } while (bytesRead != 0);
    reader.Close();
    stream.Close();
    this.PitchSmoothing();

    // Skip feedback entirely while replaying a recorded session.
    if (canSendEvent && !MainWindow.main.audioProvider.replayMode)
    {
        lock (Lock)
        {
            // NOTE(review): sd1 and sd2 are computed but only sd is used below.
            double sd1 = StdDevTShort();
            double sd2 = StdDevLong();
            double sd = StdDev();
            //Console.WriteLine("SD =" + sd + " Threshiold " + Threshold + "sd1 = " + sd1);
            // Low variation + a detected pitch -> start a "boring" episode once.
            if ((sd <= Threshold && sd != 0.0) && pitchList.Last() > 0 && !this.sent)
            {
                BoringEvent(this, new LongFeedback("", true));
                this.sent = true;
            }
            // Variation returned (or no data) -> end the episode.
            if ((sd > Threshold || sd == 0.0) && this.sent)
            {
                BoringEvent(this, new LongFeedback("", false));
                this.sent = false;
            }
            /* There is something wrong with the way the above code works. The tooBoringText displays at the beginning of a boring episode
             * and then disappears after a few seconds. Then it flashes up again at the end of a boring episode for a few seconds.
             * I tried to model the code below on the emotion recognition code which also uses long feedback and responds very quickly
             * to a change in facial expression.
             */
            // Too much variation
            /* if (sd > ThresholdVariation && !this.sent && pitchList.Last() > 0)
             * {
             *     BoringEvent(this, new LongFeedback(tooMuchVariationText, true));
             *     this.sent = true;
             * }
             * if (sd <= ThresholdVariation || sd == 0.0 && this.sent)
             * {
             *     BoringEvent(this, new LongFeedback(tooMuchVariationText, false));
             *     this.sent = false;
             * }
             */
            // NOTE(review): `i` is a field-level counter incremented on every call.
            i++;
        }
    }
}