/// <summary>
        /// Gets the audio session specified by an audio session number.
        /// </summary>
        /// <param name="index">The session number. If there are n sessions, the sessions are numbered from 0 to n – 1. To get the number of sessions, call the GetCount method.</param>
        /// <returns>The <see cref="AudioSessionControl"/> of the specified session number.</returns>
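        /// <example>
        ///     A minimal usage sketch (assuming <c>sessionEnumerator</c> is an existing instance of this
        ///     class; variable names are illustrative only):
        ///     <code>
        ///     int sessionCount = sessionEnumerator.GetCount();
        ///     for (int i = 0; i &lt; sessionCount; i++)
        ///     {
        ///         AudioSessionControl session = sessionEnumerator.GetSession(i);
        ///         // work with the session here; release it when it is no longer needed
        ///     }
        ///     </code>
        /// </example>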
        public AudioSessionControl GetSession(int index)
        {
            AudioSessionControl session;

            CoreAudioAPIException.Try(GetSessionNative(index, out session), InterfaceName, "GetSession");
            return session;
        }
        /// <summary>
        /// Retrieves an audio endpoint device that is identified by an endpoint ID string.
        /// </summary>
        /// <param name="id">Endpoint ID. The caller typically obtains this string from the <see cref="MMDevice.DeviceID"/> property or any method of the <see cref="IMMNotificationClient"/>.</param>
        /// <returns><see cref="MMDevice"/> instance for specified device.</returns>
        public MMDevice GetDevice(string id)
        {
            IntPtr ptr;

            CoreAudioAPIException.Try(GetDeviceNative(id, out ptr), InterfaceName, "GetDevice");
            return new MMDevice(ptr);
        }
        /// <summary>
        ///     Retrieves a pointer to the next available space in the rendering endpoint buffer into
        ///     which the caller can write a data packet.
        /// </summary>
        /// <param name="numFramesRequested">
        ///     The number of audio frames in the data packet that the caller plans to write to the requested space in the buffer.
        ///     If the call succeeds, the size of the buffer area pointed to by the return value matches the size specified in
        ///     <paramref name="numFramesRequested" />.
        /// </param>
        /// <returns>
        ///     A pointer variable into which the method writes the starting address of the buffer area into which the caller
        ///     will write the data packet.
        /// </returns>
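        /// <example>
        ///     A minimal rendering sketch (assuming <c>renderClient</c> is an existing instance of this class,
        ///     <c>framesToWrite</c> was computed from the buffer size and current padding of the associated
        ///     audio client, and a matching <c>ReleaseBuffer</c> call is available on this class):
        ///     <code>
        ///     IntPtr buffer = renderClient.GetBuffer(framesToWrite);
        ///     // copy exactly framesToWrite frames of audio data to the returned pointer,
        ///     // then hand the packet back to the audio engine
        ///     renderClient.ReleaseBuffer(framesToWrite, AudioClientBufferFlags.None);
        ///     </code>
        /// </example>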
        public IntPtr GetBuffer(int numFramesRequested)
        {
            IntPtr ptr;

            CoreAudioAPIException.Try(GetBufferNative(numFramesRequested, out ptr), InterfaceName, "GetBuffer");
            return ptr;
        }
        /// <summary>
        ///     Queries the audio endpoint device for its
        ///     hardware-supported functions.
        /// </summary>
        /// <returns>A hardware support mask that indicates the hardware capabilities of the audio endpoint device.</returns>
        public EndpointHardwareSupportFlags QueryHardwareSupport()
        {
            EndpointHardwareSupportFlags res;

            CoreAudioAPIException.Try(QueryHardwareSupportNative(out res), InterfaceName, "QueryHardwareSupport");
            return res;
        }
        /// <summary>
        ///     Gets the peak sample value for the channels in the audio stream.
        /// </summary>
        /// <returns>
        ///     The peak sample value for the audio stream. The peak value is a number in the normalized range from 0.0 to
        ///     1.0.
        /// </returns>
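        /// <example>
        ///     A minimal metering sketch (assuming <c>meter</c> is an existing instance of this class):
        ///     <code>
        ///     float peak = meter.GetPeakValue();
        ///     Console.WriteLine("Current peak: {0:P0}", peak);
        ///     </code>
        /// </example>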
        public float GetPeakValue()
        {
            float peak;

            CoreAudioAPIException.Try(GetPeakValueNative(out peak), InterfaceName, "GetPeakValue");
            return peak;
        }
        /// <summary>
        ///     The GetNextPacketSize method retrieves the number of frames in the next data packet in
        ///     the capture endpoint buffer.
        /// For more information, see <see href="http://msdn.microsoft.com/en-us/library/dd370860(v=vs.85).aspx"/>.
        /// </summary>
        /// <returns>The number of audio frames in the next capture packet.</returns>
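        /// <example>
        ///     A minimal polling sketch (assuming <c>captureClient</c> is an existing instance of this class;
        ///     reading the actual packet data is omitted):
        ///     <code>
        ///     int framesInNextPacket = captureClient.GetNextPacketSize();
        ///     if (framesInNextPacket > 0)
        ///     {
        ///         // a packet is available and can now be read from the capture buffer
        ///     }
        ///     </code>
        /// </example>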
        public int GetNextPacketSize()
        {
            int packetSize;

            CoreAudioAPIException.Try(GetNextPacketSizeNative(out packetSize), InterfaceName, "GetNextPacketSize");
            return packetSize;
        }
        /// <summary>
        ///     Gets the muting state of the audio stream that enters or leaves the
        ///     audio endpoint device.
        /// </summary>
        /// <returns><c>true</c> if the stream is muted; otherwise, <c>false</c>.</returns>
        public bool GetMute()
        {
            NativeBool result;

            CoreAudioAPIException.Try(GetMuteNative(out result), C, "GetMute");
            return result;
        }
        /// <summary>
        ///     Gets the volume level, in decibels, of the specified
        ///     channel in the audio stream that enters or leaves the audio endpoint device.
        /// </summary>
        /// <param name="channel">
        ///     The channel number. If the audio stream contains n channels, the channels are numbered from 0 to
        ///     n–1.
        /// </param>
        /// <returns>
        ///     Volume level in decibels. To get the range of volume levels obtained from this
        ///     method, call the <see cref="GetVolumeRange" /> method.
        /// </returns>
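        /// <example>
        ///     A minimal usage sketch (assuming <c>endpointVolume</c> is an existing instance of this class;
        ///     <see cref="GetChannelCount"/> is used to stay within the valid channel range):
        ///     <code>
        ///     int channels = endpointVolume.GetChannelCount();
        ///     for (int channel = 0; channel &lt; channels; channel++)
        ///     {
        ///         float levelInDecibels = endpointVolume.GetChannelVolumeLevel(channel);
        ///         Console.WriteLine("Channel {0}: {1} dB", channel, levelInDecibels);
        ///     }
        ///     </code>
        /// </example>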
        public float GetChannelVolumeLevel(int channel)
        {
            float result;

            CoreAudioAPIException.Try(GetChannelVolumeLevelNative(channel, out result), C, "GetChannelVolumeLevel");
            return result;
        }
        /// <summary>
        ///     Queries the audio endpoint device for its
        ///     hardware-supported functions.
        /// </summary>
        /// <returns>A hardware support mask that indicates the hardware capabilities of the audio endpoint device.</returns>
        public EndpointHardwareSupportFlags QueryHardwareSupport()
        {
            EndpointHardwareSupportFlags result;

            CoreAudioAPIException.Try(QueryHardwareSupportNative(out result), C, "QueryHardwareSupport");
            return result;
        }
        /// <summary>
        ///     Gets the master volume level of the audio stream
        ///     that enters or leaves the audio endpoint device. The volume level is expressed as a
        ///     normalized, audio-tapered value in the range from 0.0 to 1.0.
        /// </summary>
        /// <returns>
        ///     Volume level. The level is expressed as a normalized value in the range from
        ///     0.0 to 1.0.
        /// </returns>
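        /// <example>
        ///     A minimal usage sketch (assuming <c>endpointVolume</c> is an existing instance of this class):
        ///     <code>
        ///     float masterVolume = endpointVolume.GetMasterVolumeLevelScalar();
        ///     Console.WriteLine("Master volume: {0:P0}", masterVolume);
        ///     </code>
        /// </example>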
        public float GetMasterVolumeLevelScalar()
        {
            float result;

            CoreAudioAPIException.Try(GetMasterVolumeLevelScalarNative(out result), C, "GetMasterVolumeLevelScalar");
            return result;
        }
 /// <summary>
 ///     Gets the peak sample values for all the channels in the
 ///     audio stream.
 ///     <seealso cref="MeteringChannelCount" />
 /// </summary>
 /// <param name="channelCount">
 ///     The channel count. This parameter also specifies the number of elements in the returned
 ///     array. If the specified count does not match the number of channels in the stream, the method returns error code
 ///     <see cref="HResult.E_INVALIDARG" />.
 /// </param>
 /// <returns>
 ///     An array of peak sample values. The array contains one element for each channel in the stream. The peak values
 ///     are numbers in the normalized range from 0.0 to 1.0.
 /// </returns>
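 /// <example>
 ///     A minimal usage sketch (assuming <c>meter</c> is an existing instance of this class):
 ///     <code>
 ///     int channelCount = meter.GetMeteringChannelCount();
 ///     float[] peaks = meter.GetChannelsPeakValues(channelCount);
 ///     for (int i = 0; i &lt; peaks.Length; i++)
 ///     {
 ///         Console.WriteLine("Channel {0} peak: {1:P0}", i, peaks[i]);
 ///     }
 ///     </code>
 /// </example>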
 public float[] GetChannelsPeakValues(int channelCount)
 {
     float[] val;
     CoreAudioAPIException.Try(GetChannelsPeakValuesNative(channelCount, out val), InterfaceName,
                               "GetChannelsPeakValues");
     return val;
 }
        /// <summary>
        ///     Retrieves the stream format that the audio engine uses for its
        ///     internal processing of shared-mode streams.
        /// </summary>
        /// <remarks>
        ///     For more information, see
        ///     <see href="https://msdn.microsoft.com/en-us/library/windows/desktop/dd370872(v=vs.85).aspx" />.
        /// </remarks>
        /// <returns>The mix format that the audio engine uses for its internal processing of shared-mode streams.</returns>
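        /// <example>
        ///     A minimal usage sketch (assuming <c>audioClient</c> is an existing instance of this class):
        ///     <code>
        ///     WaveFormat mixFormat = audioClient.GetMixFormat();
        ///     Console.WriteLine("{0} Hz, {1} channel(s), {2} bits per sample",
        ///         mixFormat.SampleRate, mixFormat.Channels, mixFormat.BitsPerSample);
        ///     </code>
        /// </example>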
        public WaveFormat GetMixFormat()
        {
            WaveFormat waveFormat;

            CoreAudioAPIException.Try(GetMixFormatNative(out waveFormat), InterfaceName, "GetMixFormat");
            return waveFormat;
        }
        /// <summary>
        /// The <see cref="GetCount"/> method retrieves a count of the devices in the device collection.
        /// </summary>
        /// <returns>The number of devices in the device collection.</returns>
        public int GetCount()
        {
            int count = 0;

            CoreAudioAPIException.Try(GetCountNative(out count), "IMMDeviceCollection", "GetCount");
            return count;
        }
        /// <summary>
        ///     Retrieves the maximum latency for the current stream and can
        ///     be called any time after the stream has been initialized.
        /// </summary>
        /// <remarks>
        ///     Rendering clients can use this latency value to compute the minimum amount of data that
        ///     they can write during any single processing pass. To write less than this minimum is to
        ///     risk introducing glitches into the audio stream. For more information, see
        ///     <see href="https://msdn.microsoft.com/en-us/library/windows/desktop/dd370874(v=vs.85).aspx" />.
        /// </remarks>
        /// <returns>A value representing the latency. The time is expressed in 100-nanosecond units.</returns>
        public long GetStreamLatency()
        {
            long latency;

            CoreAudioAPIException.Try(GetStreamLatencyNative(out latency), InterfaceName, "GetStreamLatency");
            return latency;
        }
        /// <summary>
        ///     Retrieves the number of frames of padding in the endpoint
        ///     buffer.
        /// </summary>
        /// <returns>The frame count (the number of audio frames of padding in the buffer).</returns>
        /// <remarks>
        ///     The size of one frame = <c>(number of bits per sample)/8 * (number of channels)</c>
        /// </remarks>
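        /// <example>
        ///     A minimal sketch for computing the free space in a shared-mode rendering buffer
        ///     (assuming <c>audioClient</c> is an existing, initialized instance of this class):
        ///     <code>
        ///     int bufferSize = audioClient.GetBufferSize();
        ///     int padding = audioClient.GetCurrentPadding();
        ///     int framesAvailable = bufferSize - padding;
        ///     </code>
        /// </example>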
        public int GetCurrentPadding()
        {
            int padding;

            CoreAudioAPIException.Try(GetCurrentPaddingNative(out padding), InterfaceName, "GetCurrentPadding");
            return padding;
        }
 /// <summary>
 ///     Initializes the audio stream.
 /// </summary>
 /// <param name="shareMode">
 ///     The sharing mode for the connection. Through this parameter, the client tells the audio engine
 ///     whether it wants to share the audio endpoint device with other clients.
 /// </param>
 /// <param name="streamFlags">Flags to control creation of the stream.</param>
 /// <param name="hnsBufferDuration">
 ///     The buffer capacity as a time value (expressed in 100-nanosecond units). This parameter
 ///     contains the buffer size that the caller requests for the buffer that the audio application will share with the
 ///     audio engine (in shared mode) or with the endpoint device (in exclusive mode). If the call succeeds, the method
 ///     allocates a buffer that is at least this large.
 /// </param>
 /// <param name="hnsPeriodicity">
 ///     The device period. This parameter can be nonzero only in exclusive mode. In shared mode,
 ///     always set this parameter to 0. In exclusive mode, this parameter specifies the requested scheduling period for
 ///     successive buffer accesses by the audio endpoint device. If the requested device period lies outside the range that
 ///     is set by the device's minimum period and the system's maximum period, then the method clamps the period to that
 ///     range. If this parameter is 0, the method sets the device period to its default value. To obtain the default device
 ///     period, call the <see cref="GetDevicePeriodNative" /> method. If the
 ///     <see cref="AudioClientStreamFlags.StreamFlagsEventCallback" /> stream flag is set and
 ///     <see cref="AudioClientShareMode.Exclusive" /> is set as the <paramref name="shareMode" />, then
 ///     <paramref name="hnsPeriodicity" /> must be nonzero and equal to <paramref name="hnsBufferDuration" />.
 /// </param>
 /// <param name="waveFormat">
 ///     The format descriptor. For more information, see
 ///     <see href="https://msdn.microsoft.com/en-us/library/windows/desktop/dd370875(v=vs.85).aspx" />.
 /// </param>
 /// <param name="audioSessionGuid">
 ///     A value that identifies the audio session that the stream belongs to. If the
 ///     <see cref="Guid" /> identifies a session that has been previously opened, the method adds the stream to that
 ///     session. If the GUID does not identify an existing session, the method opens a new session and adds the stream to
 ///     that session. The stream remains a member of the same session for its lifetime. Use <see cref="Guid.Empty" /> to
 ///     use the default session.
 /// </param>
 /// <remarks>
 ///     For more information, see
 ///     <see href="https://msdn.microsoft.com/en-us/library/windows/desktop/dd370875(v=vs.85).aspx" />.
 /// </remarks>
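 /// <example>
 ///     A minimal shared-mode initialization sketch (assuming <c>audioClient</c> is an existing,
 ///     not yet initialized instance of this class; the one-second buffer duration is an arbitrary
 ///     example value, expressed in 100-nanosecond units):
 ///     <code>
 ///     WaveFormat mixFormat = audioClient.GetMixFormat();
 ///     audioClient.Initialize(AudioClientShareMode.Shared, AudioClientStreamFlags.None,
 ///                            10000000, 0, mixFormat, Guid.Empty);
 ///     </code>
 /// </example>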
 public void Initialize(AudioClientShareMode shareMode, AudioClientStreamFlags streamFlags,
                        long hnsBufferDuration, long hnsPeriodicity, WaveFormat waveFormat, Guid audioSessionGuid)
 {
     CoreAudioAPIException.Try(
         InitializeNative(shareMode, streamFlags, hnsBufferDuration, hnsPeriodicity, waveFormat, audioSessionGuid),
         InterfaceName, "Initialize");
 }
        /// <summary>
        ///     Returns the size (maximum capacity) of the endpoint buffer.
        /// </summary>
        /// <returns>The number of audio frames that the buffer can hold.</returns>
        /// <remarks>
        ///     The size of one frame = <c>(number of bits per sample)/8 * (number of channels)</c>
        /// </remarks>
        public int GetBufferSize()
        {
            int bufferSize;

            CoreAudioAPIException.Try(GetBufferSizeNative(out bufferSize), InterfaceName, "GetBufferSize");
            return bufferSize;
        }
        /// <summary>
        ///     Gets the number of channels in the audio stream that enters
        ///     or leaves the audio endpoint device.
        /// </summary>
        /// <returns>The number of channels in the audio stream.</returns>
        public int GetChannelCount()
        {
            int result;

            CoreAudioAPIException.Try(GetChannelCountNative(out result), C, "GetChannelCount");
            return result;
        }
        /// <summary>
        /// The <see cref="ItemAtNative"/> method retrieves a pointer to the specified item in the device collection.
        /// </summary>
        /// <param name="deviceIndex">The device number. If the collection contains n devices, the devices are numbered 0 to n– 1.</param>
        /// <returns>The <see cref="MMDevice"/> object of the specified item in the device collection.</returns>
        public MMDevice ItemAt(int deviceIndex)
        {
            IntPtr device;

            CoreAudioAPIException.Try(ItemAtNative(deviceIndex, out device), "IMMDeviceCollection", "Item");
            return new MMDevice(device);
        }
        /// <summary>
        ///     Accesses additional services from the audio client object.
        /// </summary>
        /// <param name="riid">
        ///     The interface ID for the requested service. For a list of all available values, see
        ///     <see href="https://msdn.microsoft.com/en-us/library/windows/desktop/dd370873(v=vs.85).aspx" />.
        /// </param>
        /// <returns>
        ///     A pointer into which the method writes the address of an instance of the requested interface.
        ///     Through this method, the caller obtains a counted reference to the interface. The caller is responsible for
        ///     releasing the interface, when it is no longer needed, by calling the interface's Release method.
        /// </returns>
        /// <remarks>
        ///     For more information, see
        ///     <see href="https://msdn.microsoft.com/en-us/library/windows/desktop/dd370873(v=vs.85).aspx" />.
        /// </remarks>
        public IntPtr GetService(Guid riid)
        {
            IntPtr ptr;

            CoreAudioAPIException.Try(GetServiceNative(riid, out ptr), InterfaceName, "GetService");
            return ptr;
        }
        /// <summary>
        /// Creates a COM object with the specified interface.
        /// </summary>
        /// <param name="iid">The interface identifier. This parameter is a reference to a GUID that identifies the interface that the caller requests be activated. The caller will use this interface to communicate with the COM object.</param>
        /// <param name="context">The execution context in which the code that manages the newly created object will run. </param>
        /// <param name="activationParams">Use <see cref="IntPtr.Zero"/> as the default value. See http://msdn.microsoft.com/en-us/library/windows/desktop/dd371405%28v=vs.85%29.aspx for more details.</param>
        /// <returns>A pointer variable into which the method writes the address of the interface specified by parameter <paramref name="iid"/>.</returns>
        public IntPtr Activate(Guid iid, CLSCTX context, IntPtr activationParams)
        {
            IntPtr ptr;

            CoreAudioAPIException.Try(ActivateNative(iid, context, activationParams, out ptr), InterfaceName, "Activate");
            return ptr;
        }
        /// <summary>
        /// Generates a collection of audio endpoint devices that meet the specified criteria.
        /// </summary>
        /// <param name="dataFlow">The data-flow direction for the endpoint device.</param>
        /// <param name="stateMask">The state or states of the endpoints that are to be included in the collection.</param>
        /// <returns><see cref="MMDeviceCollection"/> which contains the enumerated devices.</returns>
        public MMDeviceCollection EnumAudioEndpoints(DataFlow dataFlow, DeviceState stateMask)
        {
            IntPtr pcollection;

            CoreAudioAPIException.Try(EnumAudioEndpointsNative(dataFlow, stateMask, out pcollection), InterfaceName,
                                      "EnumAudioEndpoints");
            return new MMDeviceCollection(pcollection);
        }
        /// <summary>
        /// Retrieves a simple audio volume control.
        /// </summary>
        /// <param name="crossProcessSession">Specifies whether the request is for a cross-process session. Set to TRUE if the session is cross-process. Set to FALSE if the session is not cross-process.</param>
        /// <param name="audioSessionGuid">If the GUID does not identify a session that has been previously opened, the call opens a new but empty session. If the value is Guid.Empty, the method assigns the stream to the default session.</param>
        /// <returns><see cref="SimpleAudioVolume"/> instance.</returns>
        public SimpleAudioVolume GetSimpleAudioVolume(Guid audioSessionGuid, bool crossProcessSession)
        {
            SimpleAudioVolume volume;

            CoreAudioAPIException.Try(GetSimpleAudioVolumeNative(audioSessionGuid, crossProcessSession, out volume),
                                      "IAudioSessionManager", "GetSimpleAudioVolume");
            return volume;
        }
        /// <summary>
        /// Retrieves an audio session control.
        /// </summary>
        /// <param name="audioSessionGuid">If the GUID does not identify a session that has been previously opened, the call opens a new but empty session. If the value is Guid.Empty, the method assigns the stream to the default session.</param>
        /// <param name="streamFlags">Specifies the status of the flags for the audio stream.</param>
        /// <returns><see cref="AudioSessionControl"/> instance.</returns>
        public AudioSessionControl GetAudioSessionControl(Guid audioSessionGuid, int streamFlags)
        {
            AudioSessionControl sessionControl;

            CoreAudioAPIException.Try(GetAudioSessionControlNative(audioSessionGuid, streamFlags, out sessionControl),
                                      "IAudioSessionManager", "GetAudioSessionControl");
            return sessionControl;
        }
        /// <summary>
        /// Returns the default audio endpoint for the specified data-flow direction and role.
        /// </summary>
        /// <param name="dataFlow">The data-flow direction for the endpoint device.</param>
        /// <param name="role">The role of the endpoint device.</param>
        /// <returns><see cref="MMDevice"/> instance of the endpoint object for the default audio endpoint device.</returns>
        public MMDevice GetDefaultAudioEndpoint(DataFlow dataFlow, Role role)
        {
            IntPtr ptr;

            CoreAudioAPIException.Try(GetDefaultAudioEndpointNative(dataFlow, role, out ptr), InterfaceName,
                                      "GetDefaultAudioEndpoint");
            return new MMDevice(ptr);
        }
        /// <summary>
        /// Gets the audio session enumerator object.
        /// </summary>
        /// <returns>A session enumerator object that the client can use to enumerate audio sessions on the audio device.</returns>
        /// <remarks>The client is responsible for releasing the returned <see cref="AudioSessionEnumerator"/>.</remarks>
        public AudioSessionEnumerator GetSessionEnumerator()
        {
            AudioSessionEnumerator sessionEnumerator;

            CoreAudioAPIException.Try(GetSessionEnumeratorNative(out sessionEnumerator), InterfaceName,
                                      "GetSessionEnumerator");
            return sessionEnumerator;
        }
        /// <summary>
        ///     Gets the number of channels in the audio stream that
        ///     are monitored by peak meters.
        /// </summary>
        /// <returns>The number of channels.</returns>
        public int GetMeteringChannelCount()
        {
            int channelCount;

            CoreAudioAPIException.Try(GetMeteringChannelCountNative(out channelCount), InterfaceName,
                                      "GetMeteringChannelCount");
            return channelCount;
        }
 /// <summary>
 ///     Deletes the registration of a client's
 ///     notification callback interface that the client registered in a previous call to the
 ///     <see cref="RegisterControlChangeNotify" /> method.
 /// </summary>
 /// <param name="notify">
 ///     The callback instance to unregister. The client passed this same object to the endpoint volume
 ///     object in the previous call to the <see cref="RegisterControlChangeNotify" /> method.
 /// </param>
 public void UnregisterControlChangeNotify(IAudioEndpointVolumeCallback notify)
 {
     if (notify == null)
     {
         throw new ArgumentNullException("notify");
     }
     CoreAudioAPIException.Try(UnregisterControlChangeNotifyNative(notify), C, "UnregisterControlChangeNotify");
 }
        /// <summary>
        /// Retrieves an interface to the device's property store.
        /// </summary>
        /// <param name="storageAccess">The storage-access mode. This parameter specifies whether to open the property store in read mode, write mode, or read/write mode.</param>
        /// <returns><see cref="PropertyStore"/> for the <see cref="MMDevice"/>.</returns>
        public PropertyStore OpenPropertyStore(StorageAccess storageAccess)
        {
            IntPtr propstorePtr;

            CoreAudioAPIException.Try(OpenPropertyStoreNative(storageAccess, out propstorePtr),
                                      "IMMDevice", "OpenPropertyStore");
            return new PropertyStore(propstorePtr);
        }
 /// <summary>
 /// Registers the application to receive a notification when a session is created.
 /// </summary>
 /// <param name="sessionNotification">The application's implementation of the <see cref="IAudioSessionNotification"/> interface.</param>
 /// <remarks>
 /// Use the <see cref="AudioSessionNotification"/> class as the default implementation for the <paramref name="sessionNotification"/> parameter.
 ///
 /// <c>Note:</c> Make sure to call <see cref="RegisterSessionNotification"/> from an MTA thread. Also make sure to enumerate all sessions after calling this method.
 /// </remarks>
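 /// <example>
 ///     A minimal usage sketch (assuming <c>sessionManager</c> is an existing instance of this class and
 ///     <c>notification</c> implements <see cref="IAudioSessionNotification"/>; the dedicated thread makes
 ///     the MTA requirement described above explicit):
 ///     <code>
 ///     var thread = new Thread(() => sessionManager.RegisterSessionNotification(notification));
 ///     thread.SetApartmentState(ApartmentState.MTA);
 ///     thread.Start();
 ///     thread.Join();
 ///     </code>
 /// </example>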
 public void RegisterSessionNotification(IAudioSessionNotification sessionNotification)
 {
     if (Thread.CurrentThread.GetApartmentState() != ApartmentState.MTA)
     {
         throw new InvalidOperationException("RegisterSessionNotification has to be called from an MTA-Thread.");
     }
     CoreAudioAPIException.Try(RegisterSessionNotificationNative(sessionNotification), InterfaceName,
                               "RegisterSessionNotification");
 }