public void CanStopEngine()
{
    XAudio2 obj = XAudio2.Create();
    obj.StartEngine();
    obj.StopEngine();
}
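The XAudio2.Create() factory above belongs to whatever wrapper this test targets; the SharpDX binding used by most of the snippets below exposes a constructor instead. A minimal sketch of the same start/stop round trip against SharpDX, assuming disposal is the caller's responsibility:

using SharpDX.XAudio2;

public void CanStopEngineSharpDx()
{
    // SharpDX exposes a constructor rather than a Create() factory.
    using (var device = new XAudio2())
    {
        device.StartEngine(); // begin audio processing
        device.StopEngine();  // stop it again; the device stays usable until disposed
    }
}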
static SoundEffect()
{
    // This cannot fail.
    Device = new XAudio2();

    try
    {
        Device.StartEngine();

        // Let windows autodetect number of channels and sample rate.
        MasterVoice = new MasteringVoice(Device, XAudio2.DefaultChannels, XAudio2.DefaultSampleRate);
        MasterVoice.SetVolume(_masterVolume, 0);

        // The autodetected value of MasterVoice.ChannelMask corresponds to the speaker layout.
        Speakers = (Speakers)MasterVoice.ChannelMask;
    }
    catch
    {
        // Release the device and null it as
        // we have no audio support.
        Device.Dispose();
        Device = null;
        MasterVoice = null;
    }
}
public void Resume()
{
    if (m_canPlay)
    {
        m_audioEngine.StartEngine();
    }
}
public WaveManager()
{
    xAudio = new XAudio2();
    var mastering = new MasteringVoice(xAudio);
    mastering.SetVolume(1, 0);
    xAudio.StartEngine();
}
// CONSTRUCTOR
public SoundX(int SampleRate)
{
    this.SampleRate = SampleRate;
    Stopped = true;

    if (anyInitFail)
    {
        return;
    }

    try
    {
        frameBuffer = new FrameBuffer<short>(SampleRate / FRAMES_PER_SECOND * CHANNELS, SampleRate / 20);
        noise = new Noise(SampleRate, MAX_SOUND_AMPLITUDE);

        xaudio = new XAudio2();
        masteringVoice = new MasteringVoice(xaudio, CHANNELS, SampleRate);
        bufferEndEvent = new AutoResetEvent(false);

        var frameSizeBytes = SampleRate / FRAMES_PER_SECOND * CHANNELS * 2;
        for (int i = 0; i < RING_SIZE; i++)
        {
            audioBuffersRing[i] = new AudioBuffer()
            {
                AudioBytes = frameSizeBytes,
                LoopCount = 0,
                Flags = BufferFlags.None,
            };
            memBuffers[i].Size = frameSizeBytes;
            memBuffers[i].Pointer = Utilities.AllocateMemory(memBuffers[i].Size);
        }

        sourceVoice = new SourceVoice(xaudio, new WaveFormat(SampleRate, BITS_PER_SAMPLE, CHANNELS), true);
        xaudio.StartEngine();
        sourceVoice.BufferEnd += (o) => bufferEndEvent?.Set();
        sourceVoice.Start();

        playingTask = Task.Factory.StartNew(Loop, TaskCreationOptions.LongRunning);

        enabled = false;
        on = false;
        mute = false;
        Stopped = false;
    }
    catch (Exception)
    {
        anyInitFail = true;
        enabled = false;
        Stopped = true;
    }
}
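The constructor above hands off to a long-running Loop task that is not shown. A sketch of what such a ring-buffer feed loop typically looks like, reusing the fields that are shown (sourceVoice, memBuffers, audioBuffersRing, bufferEndEvent, Stopped, RING_SIZE); FillFrame is a placeholder for whatever actually produces the samples and is not part of the original:

private void Loop()
{
    int ringIndex = 0;
    while (!Stopped)
    {
        // Keep at most RING_SIZE frames queued; otherwise wait for XAudio2 to finish one.
        while (sourceVoice.State.BuffersQueued >= RING_SIZE && !Stopped)
        {
            bufferEndEvent.WaitOne(1000);
        }

        // Assumed helper: writes the next frame of 16-bit samples into the
        // pre-allocated native memory (frameSizeBytes worth of data).
        FillFrame(memBuffers[ringIndex].Pointer, memBuffers[ringIndex].Size);

        // Point the reusable AudioBuffer at that memory and queue it.
        audioBuffersRing[ringIndex].AudioDataPointer = memBuffers[ringIndex].Pointer;
        sourceVoice.SubmitSourceBuffer(audioBuffersRing[ringIndex], null);

        ringIndex = (ringIndex + 1) % RING_SIZE;
    }
}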
/// <summary>
/// Initializes the XAudio2 device, the mastering voice and the 3D sound world.
/// </summary>
public override void Initialize()
{
    try
    {
        if (Device == null)
        {
            Device = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.DefaultProcessor, XAudio2Version.Version27);
            Device.StartEngine();
        }

        var DeviceFormat = Device.GetDeviceDetails(0).OutputFormat;

        // Just use the default device.
        const int deviceId = 0;

        if (MasterVoice == null)
        {
            // Let windows autodetect number of channels and sample rate.
            MasterVoice = new MasteringVoice(Device, XAudio2.DefaultChannels, XAudio2.DefaultSampleRate, deviceId);
            MasterVoice.SetVolume(_masterVolume, 0);
        }

        // The autodetected value of MasterVoice.ChannelMask corresponds to the speaker layout.
        var deviceDetails = Device.GetDeviceDetails(deviceId);
        Speakers = deviceDetails.OutputFormat.ChannelMask;

        var dev3d = Device3D;

        Log.Debug("Audio devices :");
        for (int devId = 0; devId < Device.DeviceCount; devId++)
        {
            var device = Device.GetDeviceDetails(devId);

            Log.Debug("[{1}] {0}", device.DisplayName, devId);
            Log.Debug("    role : {0}", device.Role);
            Log.Debug("    id   : {0}", device.DeviceID);
        }
    }
    catch (Exception e)
    {
        Log.Error(e.ToString());

        // Release the device and null it as
        // we have no audio support.
        if (Device != null)
        {
            Device.Dispose();
            Device = null;
        }

        MasterVoice = null;
    }

    soundWorld = new SoundWorld(Game);
}
/// <summary>
/// Initializes XAudio.
/// </summary>
internal static void PlatformInitialize()
{
    try
    {
        if (Device == null)
        {
#if !WINDOWS_UAP && DEBUG
            try
            {
                // Fails if the XAudio2 SDK is not installed
                Device = new XAudio2(XAudio2Flags.DebugEngine, ProcessorSpecifier.DefaultProcessor);
                Device.StartEngine();
            }
            catch
#endif
            {
                Device = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.DefaultProcessor);
                Device.StartEngine();
            }
        }

        // Just use the default device.
#if WINDOWS_UAP
        string deviceId = null;
#else
        const int deviceId = 0;
#endif

        if (MasterVoice == null)
        {
            // Let windows autodetect number of channels and sample rate.
            MasterVoice = new MasteringVoice(Device, XAudio2.DefaultChannels, XAudio2.DefaultSampleRate);
        }

        // The autodetected value of MasterVoice.ChannelMask corresponds to the speaker layout.
#if WINDOWS_UAP
        Speakers = (Speakers)MasterVoice.ChannelMask;
#else
        Speakers = Device.Version == XAudio2Version.Version27
            ? Device.GetDeviceDetails(deviceId).OutputFormat.ChannelMask
            : (Speakers)MasterVoice.ChannelMask;
#endif
    }
    catch
    {
        // Release the device and null it as
        // we have no audio support.
        if (Device != null)
        {
            Device.Dispose();
            Device = null;
        }

        MasterVoice = null;
    }
}
/// <summary>
/// Initializes a new instance of the <see cref="SoundService"/> class.
/// </summary>
public SoundService()
{
    cachedBuffers = new Dictionary<string, AudioBufferAndMetaData>();
    lockObject = new object();

    xAudio = new XAudio2();
    masteringVoice = new MasteringVoice(xAudio);
    masteringVoice.SetVolume(1, 0);
    xAudio.StartEngine();
}
private static void InitializeXAudio2()
{
    // This is mandatory when using any of SharpDX.MediaFoundation classes
    MediaManager.Startup();

    // Starts the XAudio2 engine
    xaudio2 = new XAudio2();
    xaudio2.StartEngine();
    masteringVoice = new MasteringVoice(xaudio2);
}
public AudioFx()
{
    _xaudio2 = new XAudio2();
    Task.Run(() =>
    {
        _xaudio2.StartEngine();
        _masteringVoice = new MasteringVoice(_xaudio2);
    });
    _cues = new List<Cue>();
}
static SoundEffect()
{
    Device = new XAudio2();
    Device.StartEngine();

    // Let windows autodetect number of channels and sample rate.
    MasterVoice = new MasteringVoice(Device, XAudio2.DefaultChannels, XAudio2.DefaultSampleRate);
    MasterVoice.SetVolume(_masterVolume, 0);

    // The autodetected value of MasterVoice.ChannelMask corresponds to the speaker layout.
    Speakers = (Speakers)MasterVoice.ChannelMask;
}
public XAudio2Mixer()
{
    _xAudio = new XAudio2();
    _xAudio.StartEngine();
    _masteringVoice = new MasteringVoice(_xAudio);

    _buffers = new XAudioBuffer[NumBuffers];
    for (var i = 0; i < NumBuffers; i++)
    {
        _buffers[i] = new XAudioBuffer(BufferSize);
    }
}
public SoundManager()
{
    try
    {
        xAudio = new XAudio2();
        xAudio.StartEngine();
        new MasteringVoice(xAudio).SetVolume(1);
    }
    catch (Exception ex)
    {
        Program.DumpExceptionTo(ex, "sound_exception.json");
        IsSupported = false;
    }
}
private void Form1_Load(object sender, EventArgs e)
{
    xAudio2 = new XAudio2();
    xAudio2.StartEngine();
    masteringVoice = new MasteringVoice(xAudio2);

    textBox2.Text = "Nagrywanie wylaczone"; // Polish: "Recording disabled"

    MMDeviceEnumerator enumerator = new MMDeviceEnumerator();
    MMDeviceCollection audioDevices = enumerator.EnumerateAudioEndPoints(DataFlow.Render, DeviceState.Active);
    comboBox2.Items.AddRange(audioDevices.ToArray());

    comboBox3.Items.Add("Bez"); // Polish: "None"
    comboBox3.Items.Add("Echo");
    comboBox3.Items.Add("Reverb");
    comboBox3.SelectedIndex = 0;
}
static SoundEffect()
{
    var flags = XAudio2Flags.None;

#if !WINRT && DEBUG
    flags |= XAudio2Flags.DebugEngine;
#endif

    try
    {
        // This cannot fail.
        Device = new XAudio2(flags, ProcessorSpecifier.DefaultProcessor);
        Device.StartEngine();

        // Just use the default device.
#if WINRT
        string deviceId = null;
#else
        const int deviceId = 0;
#endif

        // Let windows autodetect number of channels and sample rate.
        MasterVoice = new MasteringVoice(Device, XAudio2.DefaultChannels, XAudio2.DefaultSampleRate, deviceId);
        MasterVoice.SetVolume(_masterVolume, 0);

        // The autodetected value of MasterVoice.ChannelMask corresponds to the speaker layout.
#if WINRT
        Speakers = (Speakers)MasterVoice.ChannelMask;
#else
        var deviceDetails = Device.GetDeviceDetails(deviceId);
        Speakers = deviceDetails.OutputFormat.ChannelMask;
#endif
    }
    catch
    {
        // Release the device and null it as
        // we have no audio support.
        if (Device != null)
        {
            Device.Dispose();
            Device = null;
        }

        MasterVoice = null;
    }
}
public SoundManager(System.IntPtr handle, short BitsPerSample, short Channels, int SamplesPerSecond)
{
    System.AppDomain.CurrentDomain.AssemblyResolve += new System.ResolveEventHandler(CurrentDomain_AssemblyResolve);

    SlimDX.Multimedia.WaveFormat format = new SlimDX.Multimedia.WaveFormat();
    format.BitsPerSample = BitsPerSample;
    format.Channels = Channels;
    format.SamplesPerSecond = SamplesPerSecond;
    format.BlockAlignment = (short)(format.Channels * format.BitsPerSample / 8);
    format.AverageBytesPerSecond = format.SamplesPerSecond * format.BlockAlignment;
    //format.FormatTag = WaveFormatTag.Pcm;
    format.FormatTag = SlimDX.Multimedia.WaveFormatTag.Pcm;

    device = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.AnyProcessor);
    device.StartEngine();
    masteringVoice = new MasteringVoice(device, Channels, SamplesPerSecond);
    sourceVoice = new SourceVoice(device, format, VoiceFlags.None);

    //FilterParameters fp = new FilterParameters();
    //fp.Frequency = 0.5f;//sourceVoice.FilterParameters.Frequency;
    //fp.OneOverQ = 0.5f;//sourceVoice.FilterParameters.OneOverQ;
    //fp.Type = FilterType.LowPassFilter;
    //sourceVoice.FilterParameters = fp;

    //sourceVoice.BufferEnd += new System.EventHandler<ContextEventArgs>(sourceVoice_BufferEnd);
    // sourceVoice.StreamEnd += new System.EventHandler(sourceVoice_StreamEnd);
    // sourceVoice.BufferStart += new System.EventHandler<ContextEventArgs>(sourceVoice_BufferStart);
    // sourceVoice.VoiceError += new EventHandler<ErrorEventArgs>(sourceVoice_VoiceError);

    sourceVoice.Volume = 0.5f;

    buffer = new AudioBuffer();
    buffer.AudioData = new System.IO.MemoryStream();

    waveFormat = format;
    bytesPerSample = (waveFormat.BitsPerSample / 8) * Channels;

    for (int i = 0; i < BUFFER_COUNT; i++)
    {
        //sampleData[i] = new float[SAMPLE_SIZE * Channels];
        sampleData[i] = new short[SAMPLE_SIZE * Channels];
        bData[i] = new byte[SAMPLE_SIZE * bytesPerSample];
    }

    sourceVoice.SubmitSourceBuffer(buffer);
}
public SoundController(string dir)
{
    soundsDir = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, dir);

    if (!Directory.Exists(soundsDir))
    {
        initialized = false;
        DebugWindow.LogError("Sounds dir not found, continue working without any sound.");
        return;
    }

    xAudio2 = new XAudio2();
    xAudio2.StartEngine();
    masteringVoice = new MasteringVoice(xAudio2);

    /*var reverb = new Reverb(xAudio2);
     * var effectDescriptor = new EffectDescriptor(reverb);
     * masteringVoice.SetEffectChain(effectDescriptor);
     * masteringVoice.EnableEffect(0);*/

    var soundFiles = Directory.GetFiles(soundsDir, "*.wav");
    Sounds = new Dictionary<string, MyWave>(soundFiles.Length);

    /*
     * foreach (var file in soundFiles)
     * {
     *     var fileInfo = new FileInfo(file);
     *     var soundStream = new SoundStream(File.OpenRead(file));
     *     var waveFormat = soundStream.Format;
     *
     *     var buffer = new AudioBuffer()
     *     {
     *         Stream = soundStream.ToDataStream(), AudioBytes = (int) soundStream.Length, Flags = BufferFlags.EndOfStream
     *     };
     *     soundStream.Close();
     *     Sounds[fileInfo.Name.Split('.').First()] = new MyWave()
     *     {
     *         Buffer = buffer, WaveFormat = waveFormat, DecodedPacketsInfo = soundStream.DecodedPacketsInfo
     *     };
     * }
     */

    initialized = true;
}
public AudioFx(BaseGame game)
{
    Game = game;

    Effects = new XAudio2();
    Task.Run(() =>
    {
        Effects.StartEngine();
        _effectsVoice = new MasteringVoice(Effects);
        _effectsVoice.SetVolume(EffectVolume);
        if (!EnableAudio)
        {
            _effectsVoice.SetVolume(0);
        }
    });
    _effectCues = new List<Cue>();

    Music = new XAudio2();
    Task.Run(() =>
    {
        Music.StartEngine();
        _musicVoice = new MasteringVoice(Music);
        _musicVoice.SetVolume(MusicVolume);
        if (!EnableAudio)
        {
            _musicVoice.SetVolume(0);
        }
    });
    _musicCues = new List<Cue>();

    Synth = new XAudio2();
    Task.Run(() =>
    {
        Synth.StartEngine();
        _synthVoice = new MasteringVoice(Synth);
        _synthVoice.SetVolume(SynthVolume);
        if (!EnableAudio)
        {
            _synthVoice.SetVolume(0);
        }
    });
    _musicCues = new List<Cue>(); // Note: this reassigns _musicCues; a separate synth cue list may have been intended.

    _speechSynthesizer = new SpeechSynthesizer();
}
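Both AudioFx constructors above start their engines on fire-and-forget Tasks, so the mastering voices can still be null when the first cue is played. A defensive variant is sketched below; it keeps the background start-up but exposes the Task so callers can await it. The class, field and method names here (AudioFxSafe, InitializationComplete, PlayAsync) are illustrative and not taken from the original snippets.

using System.Threading.Tasks;
using SharpDX.Multimedia;
using SharpDX.XAudio2;

public sealed class AudioFxSafe
{
    private readonly XAudio2 _xaudio2;
    private MasteringVoice _masteringVoice;

    // Callers await this before submitting audio.
    public Task InitializationComplete { get; }

    public AudioFxSafe()
    {
        _xaudio2 = new XAudio2();
        InitializationComplete = Task.Run(() =>
        {
            _xaudio2.StartEngine();
            _masteringVoice = new MasteringVoice(_xaudio2);
        });
    }

    public async Task PlayAsync(AudioBuffer buffer, WaveFormat format)
    {
        await InitializationComplete;           // guarantees the mastering voice exists
        var voice = new SourceVoice(_xaudio2, format);
        voice.SubmitSourceBuffer(buffer, null); // decodedPacketsInfo is null for plain PCM
        voice.Start();
    }
}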
internal static void InitializeSoundEffect()
{
    try
    {
        if (Device == null)
        {
            {
                Device = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.DefaultProcessor);
                Device.StartEngine();
            }
        }

        // Just use the default device.
        //string deviceId = null;

        if (MasterVoice == null)
        {
            // Let windows autodetect number of channels and sample rate.
            MasterVoice = new MasteringVoice(Device, XAudio2.DefaultChannels, XAudio2.DefaultSampleRate);
        }

        // The autodetected value of MasterVoice.ChannelMask corresponds to the speaker layout.
        //Speakers = (Speakers)MasterVoice.ChannelMask;
        Speakers = SharpDX.Multimedia.Speakers.Stereo;
    }
    catch
    {
        // Release the device and null it as
        // we have no audio support.
        if (Device != null)
        {
            Device.Dispose();
            Device = null;
        }

        MasterVoice = null;
    }
}
public BrowserForm()
{
    InitializeComponent();

    var examples = new Folder { Text = "Examples", Path = @"..\..\Examples" };
    treeView.Nodes.Add(examples);

    Audio = new XAudio2();
    Audio.StartEngine();
    Master = new MasteringVoice(Audio);

    var format = new WaveFormat(32000, 16, 1);
    Voice = new SourceVoice(Audio, format, VoiceFlags.None, 4.0f);
    Voice.BufferEnd += Voice_BufferEnd;
    Voice.Start();
}
public void PlayPPM(IntPtr win)
{
    Rate = 192000; // 44100 on cheapo, 96000 on AC97, 192000 on HD Audio;
                   // it's the number of samples that exist for each second of audio
    channels = 2;  // 1 = mono, 2 = stereo

    PPMSamples = (int)(0.0225 * Rate * channels); // 22 or 22.5ms in samples, rounded up
    // no. of bytes per second = channels * rate * bytes in one sample
    microsec = Rate / 10000.0; // 192 = 1ms, 19.2 = 0.1ms or 1mis @ 192khz

    PPMchannels = new Dictionary<int, double>();
    frame = new List<short>();
    Amplitude = 32760;

    /*WaveFile wFile;
      wFile = new WaveFile(channels, 16, Rate);
     */

    //Set channels to neutral except throttle, throttle = zero.
    PPMchannels.Add(1, 10.0); //Throttle
    PPMchannels.Add(2, 50.0); //Ailerons
    PPMchannels.Add(3, 50.0); //Stab
    PPMchannels.Add(4, 50.0); //Rudder
    PPMchannels.Add(5, 50.0);
    PPMchannels.Add(6, 50.0);
    PPMchannels.Add(7, 50.0);
    PPMchannels.Add(8, 50.0);

    byte[] data = GenPPM();

    /*wFile.SetData(data, data.Length);
      wFile.WriteFile(@"C:\Users\kang\Desktop\test.wav");
     */

    ms = new MemoryStream();
    ms.SetLength(0);
    ms.Write(data, 0, data.Length);
    ms.Position = 0;

    wf = new WaveFormat();
    wf.FormatTag = WaveFormatTag.Pcm;
    wf.BitsPerSample = (short)16;
    wf.Channels = channels;
    wf.SamplesPerSecond = Rate;
    wf.BlockAlignment = (short)(wf.Channels * wf.BitsPerSample / 8);
    wf.AverageBytesPerSecond = wf.SamplesPerSecond * wf.BlockAlignment;

    device = new XAudio2();
    device.StartEngine();
    masteringVoice = new MasteringVoice(device);

    srcVoice = new SourceVoice(device, wf);

    buffer = new AudioBuffer();
    buffer.AudioData = ms;
    buffer.AudioBytes = (int)data.Length;
    buffer.Flags = SlimDX.XAudio2.BufferFlags.None;

    srcVoice.BufferStart += new EventHandler<ContextEventArgs>(srcVoice_BufferStart);
    srcVoice.FrequencyRatio = 1;
    srcVoice.SubmitSourceBuffer(buffer);
    srcVoice.Start();
}
public static void Main(string[] args)
{
    xaudio2 = new XAudio2();
    xaudio2.StartEngine();
    var masteringVoice = new MasteringVoice(xaudio2);

    if (!string.IsNullOrEmpty(Properties.Settings.Default.BackgroundMusicPath) &&
        Directory.Exists(Properties.Settings.Default.BackgroundMusicPath))
    {
        var musicFiles = Directory.GetFiles(Properties.Settings.Default.BackgroundMusicPath, "*.wav");
        if (musicFiles.Length > 0)
        {
            backgroundPlayer = new TrackPlayer(xaudio2, musicFiles);
        }
    }

    var listener = new UdpClient(10009);
    listener.BeginReceive(new AsyncCallback(ReceiveCallback), listener);

    effectManager = new EffectManager(xaudio2, 4, Properties.Settings.Default.FXPath);

    // Wait until it's done
    int count = 1;
    while (true)
    {
        Thread.Sleep(10);

        if (Console.KeyAvailable)
        {
            var key = Console.ReadKey();
            if (key.Key == ConsoleKey.Escape)
            {
                break;
            }

            switch (key.Key)
            {
                case ConsoleKey.A:
                    effectManager.Play("Scream.wav");
                    break;

                case ConsoleKey.B:
                    effectManager.Play("Violin screech.wav");
                    break;

                case ConsoleKey.N:
                    if (backgroundPlayer != null)
                    {
                        backgroundPlayer.NextTrack();
                    }
                    break;

                case ConsoleKey.V:
                    if (key.Modifiers.HasFlag(ConsoleModifiers.Shift))
                    {
                        backgroundVolume -= 0.1f;
                    }
                    else
                    {
                        backgroundVolume += 0.1f;
                    }

                    if (backgroundVolume < 0f)
                    {
                        backgroundVolume = 0f;
                    }
                    if (backgroundVolume > 1f)
                    {
                        backgroundVolume = 1f;
                    }
                    break;
            }
        }

        var muteMusic = effectManager.AreAnyPlaying && autoMuteBackground ? 0.2f : 0f;
        if (backgroundPlayer != null)
        {
            backgroundPlayer.Volume = backgroundVolume - muteMusic;
        }

        if (count % 50 == 0)
        {
            Console.Write(".");
            Console.Out.Flush();
        }

        Thread.Sleep(10);
        count++;
    }

    listener.Close();

    if (backgroundPlayer != null)
    {
        backgroundPlayer.Stop();
    }
    if (trackPlayer != null)
    {
        trackPlayer.Stop();
    }

    effectManager.Dispose();
    Thread.Sleep(500);

    masteringVoice.Dispose();
    xaudio2.StopEngine();
    xaudio2.Dispose();
}
/// <summary>
/// Initializes XAudio2 and MasteringVoice. And registers itself as an <see cref="IContentReaderFactory"/>
/// </summary>
/// <exception cref="InvalidOperationException">Is thrown when the IContentManager is not an instance of <see cref="ContentManager"/>.</exception>
/// <exception cref="AudioException">Is thrown when the <see cref="AudioManager"/> instance could not be initialized (either due to unsupported features or missing audio-device).</exception>
public override void Initialize()
{
    base.Initialize();

    contentManager = Content as ContentManager;
    if (contentManager == null)
    {
        throw new InvalidOperationException("Unable to initialize AudioManager. Expecting IContentManager to be an instance of ContentManager");
    }

    try
    {
#if DEBUG && !WIN8METRO && !WP8 && !DIRECTX11_1
        try
        {
            // "XAudio2Flags.DebugEngine" is supported only in XAudio 2.7, but not in newer versions
            // msdn.microsoft.com/en-us/library/windows/desktop/microsoft.directx_sdk.xaudio2.xaudio2create(v=vs.85).aspx
            Device = new XAudio2(XAudio2Flags.DebugEngine, ProcessorSpecifier.DefaultProcessor);
            Device.StartEngine();
        }
        catch (Exception)
#endif
        {
            Device = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.DefaultProcessor);
            Device.StartEngine();
        }
    }
    catch (SharpDXException ex)
    {
        DisposeCore();
        throw new AudioException("Error creating XAudio device.", ex);
    }

#if !W8CORE && !DIRECTX11_1
    if (Device.DeviceCount == 0)
    {
        DisposeCore();
        throw new AudioException("No default audio devices detected.");
    }
#endif

#if W8CORE || DIRECTX11_1
    string deviceId = null;
#else
    const int deviceId = 0;
#endif

    try
    {
        MasteringVoice = new MasteringVoice(Device, XAudio2.DefaultChannels, XAudio2.DefaultSampleRate, deviceId);
    }
    catch (SharpDXException ex)
    {
        DisposeCore();
#if W8CORE
        if (ex.ResultCode == AudioManager.NotFound)
        {
            throw new AudioException("No default audio devices detected.");
        }
        else
#endif
        {
            throw new AudioException("Error creating mastering voice.", ex);
        }
    }

    MasteringVoice.SetVolume(masterVolume);

#if W8CORE || DIRECTX11_1
    Speakers = (Speakers)MasteringVoice.ChannelMask;
#else
    var deviceDetails = Device.GetDeviceDetails(deviceId);
    Speakers = deviceDetails.OutputFormat.ChannelMask;
#endif

    if (IsMasteringLimiterEnabled)
    {
        try
        {
            CreateMasteringLimitier();
        }
        catch (Exception)
        {
            DisposeCore();
            throw;
        }
    }

    if (IsSpatialAudioEnabled)
    {
        try
        {
            x3DAudio = new X3DAudio(Speakers, speedOfSound);
        }
        catch (Exception)
        {
            DisposeCore();
            throw;
        }
    }

    if (IsReverbEffectEnabled)
    {
        try
        {
            CreateReverbSubmixVoice();
        }
        catch (Exception)
        {
            DisposeCore();
            throw;
        }
    }

    contentManager.ReaderFactories.Add(new AudioContentReaderFactory());
}
private void Initialize(string SoundDeviceName, int maxVoicesNbr)
{
    //Default Xaudio2 objects ==========
    _xaudio2 = ToDispose(new XAudio2());

    if (SoundDeviceName == null)
    {
        _deviceDetail = _xaudio2.GetDeviceDetails(0);
    }

    _soundDevices = new List<string>();
    int customDeviceId = 0;

    //Get all sound devices
    for (int i = 0; i < _xaudio2.DeviceCount; i++)
    {
        _soundDevices.Add(_xaudio2.GetDeviceDetails(i).DisplayName);
        if (SoundDeviceName == _xaudio2.GetDeviceDetails(i).DisplayName)
        {
            _deviceDetail = _xaudio2.GetDeviceDetails(i);
            customDeviceId = i;
        }
    }

    logger.Info("s33m3 sound engine started for device : " + _deviceDetail.DisplayName);

    _x3DAudio = new X3DAudio(_deviceDetail.OutputFormat.ChannelMask);

    if (SoundDeviceName == null)
    {
        _masteringVoice = ToDispose(new MasteringVoice(_xaudio2, XAudio2.DefaultChannels, XAudio2.DefaultSampleRate, 0));
    }
    else
    {
        _masteringVoice = ToDispose(new MasteringVoice(_xaudio2, _deviceDetail.OutputFormat.Channels, _deviceDetail.OutputFormat.SampleRate, customDeviceId));
    }

    //Default state values =============
    _maxVoicePoolPerFileType = maxVoicesNbr;
    _soundDataSources = new Dictionary<string, ISoundDataSource>();
    _soundVoices = new Dictionary<int, ISoundVoice[]>();
    _soundProcessingQueue = new List<ISoundVoice>();
    _listener = new Listener();

    //Start Sound voice processing thread
    _syncro = new ManualResetEvent(false);
    _d3dEngine.RunningThreadedWork.Add("SoundEngine");
    _d3dEngine.OnShuttingDown += d3dEngine_OnShuttingDown;
    _thread = new Thread(DataSoundPocessingAsync) { Name = "SoundEngine" };

    //Start the main loop
    _stopThreading = false;
    _thread.Start();

    GeneralSoundVolume = 1.0f;
    GlobalMusicVolume = 1;
    GlobalFXVolume = 1;

    _xaudio2.StartEngine();
}
static void Main(string[] args)
{
    Keys = BassKeys;
    KeyNotes = BassKeyNotes;
    KeyOctaves = BassKeyOctaves;

    var devices = Midi.midiInGetNumDevs();
    var deviceHandle = IntPtr.Zero;
    var deviceCaps = new Midi.MidiInCaps();

    for (var device = 0U; device < devices; device++)
    {
        Midi.midiInOpen(out deviceHandle, device, MidiProc, IntPtr.Zero, Midi.CALLBACK_FUNCTION);
        Midi.midiInGetDevCaps(deviceHandle, ref deviceCaps, (uint)Marshal.SizeOf(deviceCaps));
        Console.WriteLine(deviceCaps.name);
        Midi.midiInStart(deviceHandle);
    }

    var input = new DirectInput();
    var keyboard = new Keyboard(input);
    keyboard.Acquire();

    var audio = new XAudio2();
    audio.StartEngine();

    var master = new MasteringVoice(audio);
    var format = new WaveFormat(44100, 16, 1);
    var source = new SourceVoice(audio, format);

    BufferEnd = new AutoResetEvent(false);
    source.BufferEnd += Source_BufferEnd;
    source.Start();

    var buffers = new AudioBuffer[2];
    var pointers = new DataPointer[buffers.Length];
    for (int buffer = 0; buffer < buffers.Length; buffer++)
    {
        pointers[buffer] = new DataPointer(Utilities.AllocateClearedMemory(1024), 1024);
        buffers[buffer] = new AudioBuffer(pointers[buffer]);
        source.SubmitSourceBuffer(buffers[buffer], null);
    }

    var index = 0;
    var data = new byte[1024];
    var time = 0.0;
    var keyboardState = new KeyboardState();

    while (true)
    {
        BufferEnd.WaitOne();
        keyboard.GetCurrentState(ref keyboardState);

        for (int x = 0; x < data.Length; x += 2)
        {
            var delta = 1.0 / format.SampleRate;
            var value = 0d;
            var count = 0;

            for (var note = 24; note < MidiNotes.Length; note++)
            {
                MidiNotes[note] = false;
            }

            for (var key = 0; key < Keys.Length; key++)
            {
                var noteIndex = 24 + (KeyOctaves[key] * 12) + KeyNotes[key];
                if (keyboardState.IsPressed(Keys[key]))
                {
                    MidiNotes[noteIndex] = true;
                    MidiVelocity[noteIndex] = 1.0f;
                }
            }

            for (var note = 24; note < MidiNotes.Length; note++)
            {
                if (MidiNotes[note])
                {
                    if (NoteVelocity[note] >= 1.0 - (Attack * delta))
                    {
                        NoteVelocity[note] = 1.0f;
                    }
                    else
                    {
                        NoteVelocity[note] += (Attack * delta);
                    }
                }
                else
                {
                    if (NoteVelocity[note] <= (Release * delta))
                    {
                        NoteVelocity[note] = 0.0f;
                    }
                    else
                    {
                        NoteVelocity[note] -= (Release * delta);
                    }
                }
            }

            for (var octave = 0; octave < 8; octave++)
            {
                for (var note = 0; note < 12; note++)
                {
                    var noteIndex = 24 + (octave * 12) + note;
                    if (NoteVelocity[noteIndex] != 0.0)
                    {
                        value += Waves.Sine(time, Notes[note] * MidiOctaves[octave], 0.0) * MidiVelocity[noteIndex] * NoteVelocity[noteIndex];
                        //value += Waves.Square(time, Notes[note] * MidiOctaves[octave], 0.0) * MidiVelocity[noteIndex] * NoteVelocity[noteIndex];
                        //value += Waves.Triangle(time, Notes[note] * MidiOctaves[octave], 0.0) * MidiVelocity[noteIndex] * NoteVelocity[noteIndex];
                        value += Waves.Sawtooth(time, Notes[note] * MidiOctaves[octave], 0.0) * MidiVelocity[noteIndex] * NoteVelocity[noteIndex];
                        count++;
                    }
                }
            }

            var value2 = (short)((value / 10.0) * short.MaxValue);
            data[x] = (byte)(value2 & 0xff);
            data[x + 1] = (byte)(value2 >> 8);
            time += delta;
        }

        pointers[index].CopyFrom(data);
        source.SubmitSourceBuffer(buffers[index], null);

        index++;
        if (index == buffers.Length)
        {
            index = 0;
        }
    }
}
void OpenDevice()
{
    try
    {
        // Device enumeration was removed in Version28, but it is still worth keeping this code path;
        // prefer the XAudio2 enumeration when it is available.
        // See https://docs.microsoft.com/en-us/windows/win32/xaudio2/xaudio2-versions
        _xaudio2 = new XAudio2(XAudio2Version.Version27);

        for (int i = 0; i < _xaudio2.DeviceCount; i++)
        {
            DeviceDetails device = _xaudio2.GetDeviceDetails(i);

            if (device.Role == DeviceRole.GlobalDefaultDevice)
            {
                _dev = device.DeviceID;
                break;
            }
        }

        _xaudio2.Dispose();
    }
    catch
    {
        var enumerator = new MMDeviceEnumerator();
        var device = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
        _dev = device?.ID;
    }

    Debug.Assert(_xaudio2 == null || _xaudio2.IsDisposed); // otherwise this could leak memory

    if (_dev == null)
    {
        // Passing NULL as szDeviceId to CreateMasteringVoice makes XAudio2 pick the global default audio device.
        // Since we already looked the device ID up ourselves, just throw here to avoid any surprises.
        throw new NotSupportedException("没有扬声器"); // "No speakers"
    }

    _xaudio2 = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.DefaultProcessor);

    /*
     * We use XAUDIO2_DEFAULT_CHANNELS instead of _channels. On
     * Xbox360, this means 5.1 output, but on Windows, it means "figure out
     * what the system has." It might be preferable to let XAudio2 blast
     * stereo output to appropriate surround sound configurations
     * instead of clamping to 2 channels, even though we'll configure the
     * Source Voice for whatever number of channels you supply.
     */
    _masteringVoice = new MasteringVoice(_xaudio2, XAUDIO2_DEFAULT_CHANNELS, _sampleRate, _dev);

    _waveFormat = new WaveFormatEx(SDL_AudioFormat.F32, _channels, _sampleRate);

    _sourceVoice = new SourceVoice(_xaudio2
        , _waveFormat
        , VoiceFlags.NoSampleRateConversion | VoiceFlags.NoPitch
        , 1.0f
        //, Callbacks.Instance);
        , true);
    _sourceVoice.BufferEnd += OnBufferEnd;
    _sourceVoice.VoiceError += OnVoiceError;

    _bufferSize = _waveFormat.BlockAlign * _samples;

    //_hidden.handle = GCHandle.Alloc(this);
    //_hidden.device = GCHandle.ToIntPtr(_hidden.handle);
    _hidden.semaphore = new Semaphore(1, 1);

    // We feed a Source, it feeds the Mastering, which feeds the device.
    _hidden.mixlen = _bufferSize;
    _hidden.mixbuf = (byte*)Marshal.AllocHGlobal(2 * _hidden.mixlen);
    _hidden.nextbuf = _hidden.mixbuf;
    Native.SetMemory(_hidden.mixbuf, _waveFormat.Silence, (size_t)(2 * _hidden.mixlen));

    // Pre-allocate buffers
    _hidden.audioBuffersRing = new AudioBuffer[2];
    for (int i = 0; i < _hidden.audioBuffersRing.Length; i++)
    {
        _hidden.audioBuffersRing[i] = new AudioBuffer();
    }

    // Start everything playing!
    _xaudio2.StartEngine();
    _sourceVoice.Start(XAUDIO2_COMMIT_NOW);
}
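Most of the examples above follow the same lifecycle: create the engine, call StartEngine, create a MasteringVoice, then one or more SourceVoices, and release everything in reverse order on shutdown. The following is a minimal, self-contained sketch of that pattern against the SharpDX binding; the class and member names are illustrative and not taken from any snippet above.

using System;
using SharpDX.Multimedia;
using SharpDX.XAudio2;

// Lifecycle sketch: engine -> mastering voice -> source voice, torn down in reverse order.
public sealed class MiniAudioEngine : IDisposable
{
    private readonly XAudio2 _device;
    private readonly MasteringVoice _masteringVoice;
    private readonly SourceVoice _sourceVoice;

    public MiniAudioEngine(int sampleRate = 44100, int channels = 2)
    {
        _device = new XAudio2();
        _device.StartEngine();

        // Let XAudio2 pick the channel count and sample rate of the default output device.
        _masteringVoice = new MasteringVoice(_device);

        // One PCM source voice that feeds the mastering voice.
        _sourceVoice = new SourceVoice(_device, new WaveFormat(sampleRate, 16, channels));
        _sourceVoice.Start();
    }

    public void Submit(AudioBuffer buffer)
    {
        // decodedPacketsInfo is only needed for xWMA data, so pass null for plain PCM.
        _sourceVoice.SubmitSourceBuffer(buffer, null);
    }

    public void Dispose()
    {
        // Release in reverse creation order, mirroring the teardown in the console example above.
        _sourceVoice.Stop();
        _sourceVoice.Dispose();
        _masteringVoice.Dispose();
        _device.StopEngine();
        _device.Dispose();
    }
}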