Example #1
 private static MMDevice GetAudioMultiMediaEndpoint(DataFlow dataFlow, Role role)
 {
     using (var enumerator = new MMDeviceEnumerator())
     {
         return enumerator.GetDefaultAudioEndpoint(dataFlow, role);
     }
 }
 public void CanGetDefaultAudioEndpoint()
 {
     OSUtils.RequireVista();
     MMDeviceEnumerator enumerator = new MMDeviceEnumerator();
     MMDevice defaultAudioEndpoint = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
     Assert.IsNotNull(defaultAudioEndpoint);
 }
Example #3
        public override void Update()
        {
            // TODO: Get from settings
            var fps = 25;

            if (VolumeDisplay == null)
                return;
            if (VolumeDisplay.Ttl < 1)
                return;

            var decreaseAmount = 500 / fps;
            VolumeDisplay.Ttl = VolumeDisplay.Ttl - decreaseAmount;
            if (VolumeDisplay.Ttl < 128)
                VolumeDisplay.Transparancy = (byte) (VolumeDisplay.Transparancy - 20);

            try
            {
                var enumerator = new MMDeviceEnumerator();
                var volumeFloat =
                    enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console)
                        .AudioEndpointVolume.MasterVolumeLevelScalar;
                VolumeDisplay.Volume = (int)(volumeFloat * 100);
            }
            catch (COMException)
            {
                // Ignore transient COM errors when querying the default endpoint
            }
        }
    public static void SwitchMute()
    {
        NAudio.CoreAudioApi.MMDeviceEnumerator MMDE = new NAudio.CoreAudioApi.MMDeviceEnumerator();
        // Get the default render device for the Communications role
        NAudio.CoreAudioApi.MMDevice dev = MMDE.GetDefaultAudioEndpoint(NAudio.CoreAudioApi.DataFlow.Render, NAudio.CoreAudioApi.Role.Communications);

        dev.AudioEndpointVolume.Mute = !dev.AudioEndpointVolume.Mute;
    }
 public MainWindowViewModel()
 {
     synchronizationContext = SynchronizationContext.Current;
     var enumerator = new MMDeviceEnumerator();
     CaptureDevices = enumerator.EnumerateAudioEndPoints(DataFlow.Capture, DeviceState.Active).ToArray();
     var defaultDevice = enumerator.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Console);
     SelectedDevice = CaptureDevices.FirstOrDefault(c => c.ID == defaultDevice.ID);
 }
    public static void VolumeDown(int parDownPercent)
    {
        NAudio.CoreAudioApi.MMDeviceEnumerator MMDE = new NAudio.CoreAudioApi.MMDeviceEnumerator();
        // Get the default render device for the Communications role
        NAudio.CoreAudioApi.MMDevice dev = MMDE.GetDefaultAudioEndpoint(NAudio.CoreAudioApi.DataFlow.Render, NAudio.CoreAudioApi.Role.Communications);

        // NOTE: parDownPercent is not used; VolumeStepDown lowers the volume by one system-defined step
        dev.AudioEndpointVolume.VolumeStepDown();
    }
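VolumeStepDown above ignores the requested percentage (it moves one system volume step). A minimal sketch of a percentage-based alternative built on MasterVolumeLevelScalar, which other snippets on this page already use; the method name is illustrative:
    public static void VolumeDownByPercent(int parDownPercent)
    {
        var MMDE = new NAudio.CoreAudioApi.MMDeviceEnumerator();
        // Get the default render device for the Communications role
        var dev = MMDE.GetDefaultAudioEndpoint(NAudio.CoreAudioApi.DataFlow.Render, NAudio.CoreAudioApi.Role.Communications);

        // MasterVolumeLevelScalar is a linear 0.0 - 1.0 value
        float current = dev.AudioEndpointVolume.MasterVolumeLevelScalar;
        dev.AudioEndpointVolume.MasterVolumeLevelScalar = Math.Max(0f, current - parDownPercent / 100f);
    }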
Example #7
        /// <summary>
        /// Constructor for device panel creation.
        /// </summary>
        public VolumePanel()
        {
            this.devicePanel = true;
            var deviceEnumerator = new MMDeviceEnumerator();
            device = deviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);

            InitializeComponent();
        }
 public void CanGetDefaultAudioEndpoint()
 {
     if (Environment.OSVersion.Version.Major >= 6)
     {
         MMDeviceEnumerator enumerator = new MMDeviceEnumerator();
         MMDevice defaultAudioEndpoint = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
         Assert.IsNotNull(defaultAudioEndpoint);
     }            
 }
Example #9
 static MMDevice GetDefaultAudioEndpoint()
 {
     if (Environment.OSVersion.Version.Major < 6)
     {
         throw new NotSupportedException("WASAPI supported only on Windows Vista and above");
     }
     var enumerator = new MMDeviceEnumerator();
     return enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
 }
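A hedged usage sketch for the helper above: playing a file through the returned endpoint with WasapiOut. The file name and latency are placeholders, and the NAudio.Wave/NAudio.CoreAudioApi using directives are assumed.
 static void PlayThroughDefaultEndpoint()
 {
     var device = GetDefaultAudioEndpoint();
     using (var reader = new AudioFileReader("test.wav"))          // placeholder file name
     using (var output = new WasapiOut(device, AudioClientShareMode.Shared, true, 200))
     {
         output.Init(reader);
         output.Play();
         while (output.PlaybackState == PlaybackState.Playing)
         {
             Thread.Sleep(100);
         }
     }
 }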
Example #10
 public Recorder()
 {
     wavein = new WaveIn();
     wavein.DeviceNumber = 0;
     wavein.WaveFormat = new WaveFormat();
     devEnum = new MMDeviceEnumerator();
     defaultDevice = devEnum.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Multimedia);
     wavein.DataAvailable += Wavein_DataAvailable;
     checkFolders();
 }
    public RealTimeSoundData() {

      var enumerator = new MMDeviceEnumerator();
      var captureDevices = enumerator.EnumerateAudioEndPoints(DataFlow.Capture, DeviceState.Active).ToArray();
      var defaultDevice = enumerator.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Console);
      device = captureDevices.FirstOrDefault(c => c.ID == defaultDevice.ID);
      capture = new WasapiCapture(device);
      context = SynchronizationContext.Current;
      capture.DataAvailable += Capture_DataAvailable;
    }
 public WasapiCaptureViewModel()
 {
     synchronizationContext = SynchronizationContext.Current;
     var enumerator = new MMDeviceEnumerator();
     CaptureDevices = enumerator.EnumerateAudioEndPoints(DataFlow.Capture, DeviceState.Active).ToArray();
     var defaultDevice = enumerator.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Console);
     SelectedDevice = CaptureDevices.FirstOrDefault(c => c.ID == defaultDevice.ID);
     RecordCommand = new DelegateCommand(Record);
     StopCommand = new DelegateCommand(Stop) { IsEnabled = false };
     RecordingsViewModel = new RecordingsViewModel();
 }
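The constructor above only wires the commands; a minimal sketch, assuming WasapiCapture and WaveFileWriter fields plus a placeholder output path, of what the Record delegate might look like:
 private void Record()
 {
     capture = new WasapiCapture(SelectedDevice);
     writer = new WaveFileWriter("recording.wav", capture.WaveFormat);   // placeholder path
     capture.DataAvailable += (s, a) => writer.Write(a.Buffer, 0, a.BytesRecorded);
     capture.RecordingStopped += (s, a) =>
     {
         writer.Dispose();
         capture.Dispose();
         synchronizationContext.Post(_ => RecordCommand.IsEnabled = true, null);
     };
     capture.StartRecording();
     RecordCommand.IsEnabled = false;
     StopCommand.IsEnabled = true;
 }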
Example #13
        public VolumeControl()
        {
            InitializeComponent();
            MMDeviceEnumerator deviceEnumerator = new MMDeviceEnumerator();
            device = deviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
            tbVolume.Value = (int)(Math.Round(device.AudioEndpointVolume.MasterVolumeLevelScalar * 100));

            if (device.AudioEndpointVolume.Mute)
            {
                btnMuteUnmute.Image = Properties.Resources.Mute;
            }
        }
Example #14
 /// <summary>
 ///     Initializes the default audio device
 /// </summary>
 private void InitDevice()
 {
     try
     {
         var devEnum = new MMDeviceEnumerator();
         _audioDevice = devEnum.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
     }
     catch (Exception e)
     {
         MessageBox.Show("Error: " + e.Message, "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
     }
 }
Example #15
        private NAudioEngine()
        {
            sampleAggregator = new SampleAggregator(fftDataSize);

            var deviceEnumerator = new MMDeviceEnumerator();
            var defaultDevice = deviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
            this.capture = new WasapiLoopbackCapture(defaultDevice);
            capture.ShareMode = AudioClientShareMode.Shared;

            capture.DataAvailable += CaptureOnDataAvailable;

            capture.StartRecording();
        }
Example #16
 public static bool IsMuted()
 {
     bool isMuted = false;
     try
     {
         MMDeviceEnumerator DevEnum = new MMDeviceEnumerator();
         MMDevice device = DevEnum.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
         isMuted = device.AudioEndpointVolume.Mute;                
     }
     catch (Exception)
     {
     }
     return isMuted;
 }
Example #17
        private static void LogAudioDevices()
        {
            Console.Out.WriteLine("Looking for audio capture devices:");
            for (int n = 0; n < WaveIn.DeviceCount; n++)
            {
                var dev = WaveIn.GetCapabilities(n);
                Console.Out.WriteLine("  Device: {0}, channels={1}", dev.ProductName, dev.Channels);
            }

            Console.Out.WriteLine("Looking for audio MMD:");
            var mmd = new MMDeviceEnumerator();
            var mddDev = mmd.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
            Console.Out.WriteLine("  [default] {0} {1}", mddDev.ID, mddDev.FriendlyName);
        }
Example #18
 public static int GetVolume()
 {
     int result = 100;
     try
     {
         MMDeviceEnumerator DevEnum = new MMDeviceEnumerator();
         MMDevice device = DevEnum.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
         //device.AudioEndpointVolume.Mute
         result = (int)(device.AudioEndpointVolume.MasterVolumeLevelScalar * 100);
     }
     catch (Exception)
     {
     }
     return result;
 }
Example #19
        protected override void OnInitialized(EventArgs e)
        {
            base.OnInitialized(e);

            ico_trayIcon.TrayMouseDoubleClick += Ico_trayIcon_TrayMouseDoubleClick;

            NAudio.CoreAudioApi.MMDeviceEnumerator enumer = new NAudio.CoreAudioApi.MMDeviceEnumerator();
            dev = enumer.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
            dev.AudioEndpointVolume.OnVolumeNotification += AudioEndpointVolume_OnVolumeNotification;

            VolumeActual = (int)(dev.AudioEndpointVolume.MasterVolumeLevelScalar * 100);
            VolumeMax    = DEFAULT_MAX_VOL;

            slider_maxVol.Value = VolumeMax;
        }
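A hedged sketch of the AudioEndpointVolume_OnVolumeNotification handler wired above; the notification arrives on a worker thread, so the Dispatcher marshaling is an assumption:
        private void AudioEndpointVolume_OnVolumeNotification(AudioVolumeNotificationData data)
        {
            Dispatcher.Invoke(() =>
            {
                VolumeActual = (int)(data.MasterVolume * 100);
                // data.Muted is also available if the UI needs to reflect mute state
            });
        }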
Example #20
 public static void Update()
 {
     if (device == null)
     {
         MMDeviceEnumerator de = new MMDeviceEnumerator();
         device = de.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
     }
     if (ts == null)
     {
         ts = new ThreadStart(getVolume);
     }
     if (!threadRunning)
     {
         thread = new Thread(ts);
         threadRunning = true;
         thread.Start();
     }
 }
Example #21
        private void t_volume_Tick(object sender, EventArgs e)
        {
            var device = new MMDeviceEnumerator();
            var mic = device.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Multimedia);
            var vol = (int)(mic.AudioMeterInformation.MasterPeakValue * 100);

            pb_volume.Value = vol;
            l_volume.Text = vol + "%";

            if (vol >= warnVol && warnVol != 0 && !isActiv)
            {
                switch (Properties.Settings.Default.warnTone)
                {
                    case 0:
                        SystemSounds.Asterisk.Play();
                        break;
                    case 1:
                        SystemSounds.Beep.Play();
                        break;
                    case 2:
                        SystemSounds.Exclamation.Play();
                        break;
                    case 3:
                        SystemSounds.Hand.Play();
                        break;
                    case 4:
                        SystemSounds.Question.Play();
                        break;
                    case 5:
                        var player = new SoundPlayer(Properties.Settings.Default.customTone);
                        player.Play();
                        break;
                }

                BackColor = Properties.Settings.Default.bgColor;
                isActiv = true;
            }
            else if (vol < warnVol)
            {
                BackColor = SystemColors.Control;
                isActiv = false;
            }
        }
Example #22
 void waveIn_DataAvailable(object sender, WaveInEventArgs e)
 {
     float[] volume = new float[8];
     for (int i = 0; i < 8; i++)
     {
         volume[i] = 0f;
         for (int index = e.BytesRecorded * i / 8; index < e.BytesRecorded * (i + 1) / 8; index += 2)
         {
             short sample = (short)((e.Buffer[index + 1] << 8) | e.Buffer[index + 0]);
             float val    = Math.Abs(sample / 32768f);
             if (val > volume[i])
             {
                 volume[i] = val;
             }
         }
     }
     NAudio.CoreAudioApi.MMDeviceEnumerator devEnum       = new NAudio.CoreAudioApi.MMDeviceEnumerator();
     NAudio.CoreAudioApi.MMDevice           defaultDevice = devEnum.GetDefaultAudioEndpoint(NAudio.CoreAudioApi.DataFlow.Render, NAudio.CoreAudioApi.Role.Multimedia);
     // NOTE: the endpoint meter value below is immediately overwritten by the per-segment peak
     VolumeBar.Value = (100 - (defaultDevice.AudioMeterInformation.MasterPeakValue * 100f));
     VolumeBar.Value = (100 - (volume[3] * 100));
     Provider.AddSamples(e.Buffer, 0, e.BytesRecorded);
 }
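A hedged sketch of the capture setup that would feed waveIn_DataAvailable above; the waveIn and Provider fields and the 16-bit mono format are assumptions implied by the handler:
 void StartCapture()
 {
     waveIn = new WaveIn();
     waveIn.WaveFormat = new WaveFormat(44100, 16, 1);   // 16-bit samples, matching the (short) decoding above
     Provider = new BufferedWaveProvider(waveIn.WaveFormat);
     waveIn.DataAvailable += waveIn_DataAvailable;
     waveIn.StartRecording();
 }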
Example #23
        public static void PlaySong(IEnumerable<Track> tracks)
        {
            var enumerator = new MMDeviceEnumerator();
            var defaultDevice = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
            var waveFormat = WaveFormat.CreateIeeeFloatWaveFormat(defaultDevice.AudioClient.MixFormat.SampleRate, 1);

            var wasapiOut = new WasapiOut(AudioClientShareMode.Shared, false, 60);
            MediaBankBase bank = new FenderStratCleanB(waveFormat);
            MediaBankBase bankBass = new RockdaleBassBridge(waveFormat);

            var mixer = new MixingSampleProvider(waveFormat);

            var trackSampleProviders =
                tracks.Select(t => new TrackSampleProvider(t.Patch == MediaPatch.CleanGuitar ? bank : bankBass, t))
                    .ToArray();
            var playedTracks = new List<int>();

            foreach(var track in trackSampleProviders)
            {
                track.OnPhrasePlaying += (sender, args) =>
                {
                    var channel = args.Track.Channel;
                    var phrase = args.Phrase;

                    if(playedTracks.Contains(channel))
                    {
                        AsyncConsole.WriteLine();
                        PrintUtils.PrintContentTable();

                        playedTracks.Clear();
                    }

                    PrintUtils.PrintContent(phrase.Notes != null && phrase.Notes.Length > 0
                        ? string.Join(",", phrase.Notes)
                        : phrase.Command.ToString(), channel);

                    playedTracks.Add(channel);
                };
                mixer.AddMixerInput(track);
            }

            wasapiOut.Init(new VolumeSampleProvider(mixer)
            {
                Volume = 0.7f
            });

            PrintUtils.Init(trackSampleProviders.Length);

            PrintUtils.PrintHeaderOfTable();
            PrintUtils.PrintRowDividerTable();
            PrintUtils.PrintContentTable();

            wasapiOut.Play();

            var resetEvent = new ManualResetEvent(false);

            wasapiOut.PlaybackStopped += (sender, args) =>
            {
                resetEvent.Set();
                if(args.Exception != null)
                {
                    throw args.Exception;
                }
            };

            resetEvent.WaitOne();
            Console.WriteLine();
            PrintUtils.PrintFooterOfTable();
        }
        public void CanGetAudioClockClient()
        {
            OSUtils.RequireVista();
            var enumerator = new MMDeviceEnumerator();

            var captureClient = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console).AudioClient;

            var REFTIMES_PER_MILLISEC = 10000;

            captureClient.Initialize(AudioClientShareMode.Shared, AudioClientStreamFlags.None, 
                REFTIMES_PER_MILLISEC * 100, 0, captureClient.MixFormat, Guid.Empty);

            // Accessing AudioClockClient before Initialize would fail with AUDCLNT_E_NOT_INITIALIZED
            
            var clock = captureClient.AudioClockClient;
            Console.WriteLine("Clock Frequency: {0}",clock.Frequency);
            ulong p;
            ulong qpc;
            clock.GetPosition(out p, out qpc);
            Console.WriteLine("Clock Position: {0}:{1}",p,qpc );
            Console.WriteLine("Adjusted Position: {0}", clock.AdjustedPosition);
            Console.WriteLine("Can Adjust Position: {0}", clock.CanAdjustPosition);
            Console.WriteLine("Characteristics: {0}", clock.Characteristics);
            captureClient.Dispose();
        }
Example #25
        private void InitializeAudioClient()
        {
            var enumerator = new MMDeviceEnumerator();
            var captureDevice = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
            audioClient = captureDevice.AudioClient;

            int recordBufferLength = samplingRate; // one second of samples per channel
            recordBuffer = new float[recordBufferLength * channelCount];

            long requestedDuration = 10000 * period * 2; // in 100-ns units: 10,000 per millisecond, double the period

            audioClient.Initialize(AudioClientShareMode.Shared,
                AudioClientStreamFlags.Loopback,
                requestedDuration,
                0,
                WaveFormat.CreateIeeeFloatWaveFormat(samplingRate, channelCount),
                Guid.Empty);

            capClient = audioClient.AudioCaptureClient;
            audioClient.Start();
        }
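A hedged sketch of a polling loop that drains the loopback client initialized above. Only audioClient, capClient, recordBuffer, channelCount and period come from the snippet; the stop flag and method name are assumptions, and Marshal is from System.Runtime.InteropServices.
        private void CaptureLoop()
        {
            while (!stopRequested)
            {
                int framesInPacket = capClient.GetNextPacketSize();
                while (framesInPacket > 0)
                {
                    AudioClientBufferFlags flags;
                    int frames;
                    IntPtr buffer = capClient.GetBuffer(out frames, out flags);

                    // One float per sample, channelCount samples per frame (IEEE-float format requested above)
                    int samples = Math.Min(frames * channelCount, recordBuffer.Length);
                    Marshal.Copy(buffer, recordBuffer, 0, samples);

                    capClient.ReleaseBuffer(frames);
                    framesInPacket = capClient.GetNextPacketSize();
                }

                Thread.Sleep(period / 2);   // poll at half the buffer period
            }
            audioClient.Stop();
        }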
Example #26
        public SpeechService(ServiceCreationInfo info)
            : base("speech", info)
        {
            mVoice = new SpVoice();

            // Select voice
            string voiceName = null;
            try
            {
                voiceName = info.Configuration.Voice;
            }
            catch (RuntimeBinderException) {}

            if (!string.IsNullOrEmpty(voiceName))
            {
                SpObjectToken voiceToken = null;

                CultureInfo culture = new CultureInfo("en-US");
                foreach (var voice in mVoice.GetVoices())
                {
                    var token = voice as SpObjectToken;
                    if (token == null)
                        continue;

                    if (culture.CompareInfo.IndexOf(token.Id, voiceName, CompareOptions.IgnoreCase) < 0)
                        continue;

                    voiceToken = token;
                }

                if (voiceToken != null)
                    mVoice.Voice = voiceToken;
            }

            // Select output. Why isn't this default any longer?
            var enumerator = new MMDeviceEnumerator();
            MMDevice endpoint = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
            if (endpoint != null)
            {
                foreach (var output in mVoice.GetAudioOutputs())
                {
                    var token = output as SpObjectToken;
                    if (token == null)
                        continue;

                    if (token.Id.IndexOf(endpoint.ID) < 0)
                        continue;

                    mVoice.AudioOutput = token;
                    break;
                }
            }

            mVoiceCommands = new Dictionary<string, DeviceBase.VoiceCommand>();

            mInput = new AudioInput();

            mRecognizer = new SpeechRecognitionEngine();
            mRecognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(OnSpeechRecognized);
            mRecognizer.RecognizerUpdateReached += new EventHandler<RecognizerUpdateReachedEventArgs>(OnUpdateRecognizer);
            mRecognizer.RecognizeCompleted += new EventHandler<RecognizeCompletedEventArgs>(OnRecognizeCompleted);

            var grammar = new Grammar(new GrammarBuilder(new Choices(new string[] { "computer" })));
            mRecognizer.LoadGrammar(grammar);

            var speechFormat = new SpeechAudioFormatInfo(44100, AudioBitsPerSample.Sixteen, AudioChannel.Mono);
            mRecognizer.SetInputToAudioStream(mInput.mStream, speechFormat);

            mRecognizer.RecognizeAsync(RecognizeMode.Multiple);
        }
 /// <summary>
 /// Gets the default audio capture device
 /// </summary>
 /// <returns>The default audio capture device</returns>
 public static MMDevice GetDefaultCaptureDevice()
 {
     MMDeviceEnumerator devices = new MMDeviceEnumerator();
     return devices.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Console);
 }
Example #28
        /// <summary>
        /// Gets the default audio endpoint (note: despite the method name, this requests the render endpoint)
        /// </summary>
        /// <returns>The default multimedia render device</returns>
        public static MMDevice GetDefaultCaptureDevice()
        {
            MMDeviceEnumerator devices = new MMDeviceEnumerator();

            return devices.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
        }
 public void SetAudioVolume(float volume)
 {
     // Instantiate an Enumerator to find audio devices
     MMDE = new MMDeviceEnumerator();
      // Get the default render device for the Console role
     device = MMDE.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
     previousVolume = device.AudioEndpointVolume.MasterVolumeLevelScalar;
     device.AudioEndpointVolume.Mute = false;
     device.AudioEndpointVolume.MasterVolumeLevelScalar = volume;
 }
        private void LoadDevices()
        {
            _deviceEnumerator = _deviceEnumerator ?? new MMDeviceEnumerator();

            var defaultCapture = _deviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Console);
            var defaultRender = _deviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);

            _micDevices = new List<DeviceItem>();
            foreach (var device in _deviceEnumerator.EnumerateAudioEndPoints(DataFlow.Capture, DeviceState.All))
            {
                var item = new DeviceItem
                {
                    Device = device,
                    Default = defaultCapture.ID == device.ID,
                    Index = _micDevices.Count
                };
                _micDevices.Add(item);
            }

            micphone_selector.BeginUpdate();
            _micDevices.Sort((item1, item2) => item1.Default ? -1 : 1);
            micphone_selector.DataSource = _micDevices;
            micphone_selector.EndUpdate();

            _speakerDevices = new List<DeviceItem>();
            foreach (var device in _deviceEnumerator.EnumerateAudioEndPoints(DataFlow.Render, DeviceState.All))
            {
                var item = new DeviceItem
                {
                    Device = device,
                    Default = defaultRender.ID == device.ID,
                    Index = _speakerDevices.Count
                };
                _speakerDevices.Add(item);
            }

            speaker_selector.BeginUpdate();
            _speakerDevices.Sort((item1, item2) => item1.Default ? -1 : 1);
            speaker_selector.DataSource = _speakerDevices;
            speaker_selector.EndUpdate();
        }
Example #31
        public AudioInput()
        {
            var tokenSource = new CancellationTokenSource();
            var token = tokenSource.Token;

            // Kick off our voice task.
            var task = Task.Run(async () =>
            {
                var enumerator = new MMDeviceEnumerator();
                string deviceId = null;

                while (!token.IsCancellationRequested)
                {
                    string newDeviceId = null;
                    try
                    {
                        try
                        {
                            MMDevice endpoint = enumerator.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Console);
                            newDeviceId = endpoint.ID;
                        }
                        catch
                        {}

                        // Default endpoint changed.
                        if (mWaveIn != null && (newDeviceId != deviceId))
                        {
                            deviceId = newDeviceId;

                            // We've switched current default device
                            Log.Info("Input device changed.");

                            mRecording = false;
                            DisposeWaveIn();
                        }

                        deviceId = newDeviceId;

                        // On-demand create mWaveIn
                        if (mWaveIn == null && deviceId != null)
                        {
                            CreateWaveIn();
                        }

                        // Try to start the recording
                        if (mWaveIn != null && !mRecording)
                        {
                            lock (mLock)
                            {
                                try
                                {
                                    mWaveIn.StartRecording();
                                    mRecording = true;
                                    Log.Info("Recording started");
                                }
                                catch (NAudio.MmException exception)
                                {
                                    Log.Debug("NAudio exception when starting recording. Exception: {0}", exception.Message);

                                    // Purge instance to force a recreate next turn.
                                    DisposeWaveIn();
                                }
                            }
                        }
                    }
                    catch (Exception exception)
                    {
                        // Eat it all to prevent the voice task from dying due to random exceptions we haven't noticed yet.
                        Log.Error("Unhandled exception in voice task. Exception: {0}", exception.Message);
                    }

                    // It's enough if we poll once a second.
                    await Task.Delay(1000);
                }
                Log.Info("Finishing voice task");
            });
        }
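A hedged sketch of the CreateWaveIn/DisposeWaveIn helpers referenced in the loop above; field names follow the snippet, while the wave format, the WaveInEvent choice and the stream handling are assumptions:
        private void CreateWaveIn()
        {
            mWaveIn = new WaveInEvent();
            mWaveIn.WaveFormat = new WaveFormat(44100, 16, 1);   // matches the format SpeechService feeds the recognizer
            mWaveIn.DataAvailable += (sender, args) => mStream.Write(args.Buffer, 0, args.BytesRecorded);
        }

        private void DisposeWaveIn()
        {
            if (mWaveIn == null)
                return;

            mWaveIn.StopRecording();
            mWaveIn.Dispose();
            mWaveIn = null;
        }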
 /// <summary>
 /// Gets the default audio endpoint (note: despite the method name, this requests the render endpoint)
 /// </summary>
 /// <returns>The default multimedia render device</returns>
 public static MMDevice GetDefaultCaptureDevice()
 {
     MMDeviceEnumerator devices = new MMDeviceEnumerator();
     return devices.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
 }
Example #33
        private void OnTimerTick(object sender, EventArgs e)
        {
            MMDeviceEnumerator devEnum = new MMDeviceEnumerator();
            MMDevice defaultDevice =
              devEnum.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);

            float level = defaultDevice.AudioMeterInformation.MasterPeakValue;

            // show audio level
            int newValue = (int)(level * 100);

            if (newValue > audioProgress.Maximum)
            {
                audioProgress.Value = audioProgress.Maximum;
            }
            else
            {
                audioProgress.Value = newValue;
            }
            audioLevelText.Text = level.ToString();

            if (level > (float)audioThreshold.Value)
            {
                makeScreenshot();

                // and start screenShot timer
                screensTakenInRow = 0;
                screenshotTimer.Start();
            }
        }
Example #34
 private AudioClient GetAudioClient()
 {
     MMDeviceEnumerator enumerator = new MMDeviceEnumerator();
     MMDevice defaultAudioEndpoint = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
     AudioClient audioClient = defaultAudioEndpoint.AudioClient;
     Assert.IsNotNull(audioClient);
     return audioClient;
 }
 private AudioControl()
 {
     MMDeviceEnumerator enumer = new MMDeviceEnumerator();
     multimediaDevice = enumer.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
 }
Example #36
        /// <summary>
        /// Gets the default audio capture device
        /// </summary>
        /// <returns>The default audio capture device</returns>
        public static MMDevice GetDefaultCaptureDevice()
        {
            var devices = new MMDeviceEnumerator();

            return devices.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Console);
        }
Example #37
 private static void SetVolumeForWIndowsVista78(int value)
 {
     try
     {
         MMDeviceEnumerator DevEnum = new MMDeviceEnumerator();
         MMDevice device = DevEnum.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
         device.AudioEndpointVolume.MasterVolumeLevelScalar = (float)value / 100.0f;
     }
     catch (Exception)
     {
     }
 }