Example #1
    private void initHwEncoder(string codec)
    {
        MediaFormat format = new MediaFormat();

        string mime_type = null;

        switch (codec)
        {
        case "amrnb":
            mime_type = MediaFormat.MimetypeAudioAmrNb;
            format.SetInteger(MediaFormat.KeySampleRate, 8000);
            format.SetInteger(MediaFormat.KeyBitRate, 7950);
            break;

        case "amrwb":
            mime_type = MediaFormat.MimetypeAudioAmrWb;
            format.SetInteger(MediaFormat.KeySampleRate, 16000);
            format.SetInteger(MediaFormat.KeyBitRate, 18250);
            break;
        }

        if (mime_type != null)
        {
            format.SetString(MediaFormat.KeyMime, mime_type);
            format.SetInteger(MediaFormat.KeyChannelCount, 1);
            format.SetInteger(MediaFormat.KeyMaxInputSize, bufferSize);
            format.SetInteger(MediaFormat.KeyLatency, 1);
            format.SetInteger(MediaFormat.KeyPriority, 0);
            audioEncoder = new HwEncoder(mime_type, format, this);
            audioEncoder.start();
        }
    }
Example #2
        /// <summary>
        /// Encodes a signal into the specified file.
        /// </summary>
        ///
        /// <param name="fileName">File name to save the signal to.</param>
        /// <param name="signal">The audio signal that should be saved to disk.</param>
        ///
        public static void EncodeToFile(string fileName, Signal signal)
        {
            string fileExtension = FormatHandlerAttribute.GetNormalizedExtension(fileName);

            IAudioEncoder encoder = FormatEncoderAttribute.GetEncoder(fileExtension, encoderTypes, encoders.Value);

            if (encoder != null)
            {
                // open stream
                using (FileStream stream = new FileStream(fileName, FileMode.Create, FileAccess.Write))
                {
                    // open encoder
                    encoder.Open(stream);

                    // write all audio frames
                    encoder.Encode(signal);

                    encoder.Close();
                }

                return;
            }

            throw new ArgumentException(String.Format("No suitable encoder has been found for the file format {0}. If ", fileExtension) +
                                        "you are trying to encode .wav files, please add a reference to Accord.Audio.DirectSound, and make sure you " +
                                        "are using at least one type from assembly in your code (to make sure the assembly is loaded).", "fileName");
        }
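
A minimal call-site sketch for the method above. The containing static class is not shown in the snippet, so AudioEncoder is used below as a placeholder name, and the Signal instance is assumed to already exist:

    // Hypothetical call site: 'signal' is an existing Accord.Audio Signal instance and
    // 'AudioEncoder' stands in for whatever static class exposes EncodeToFile.
    Signal signal = CaptureOrLoadSignal();   // hypothetical helper that returns a Signal
    AudioEncoder.EncodeToFile(@"C:\temp\output.wav", signal);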
Example #3
    public void stop()
    {
        if (!running)
        {
            return;
        }
        running = false;

        if (audioRecorder != null)
        {
            try
            {
                audioRecorder.StopRecording();
            }
            catch (Exception)
            {
            }
            audioRecorder.Dispose();
            audioRecorder = null;
        }

        if (audioEncoder != null)
        {
            audioEncoder.stop();
            audioEncoder.Dispose();
            audioEncoder = null;
        }

        lock (outputBuffers)
        {
            outputBuffers.Clear();
        }
    }
Example #4
        public GridViewConvert(Main main)
        {
            _main = main;

              InitializeComponent();

              // Listen to Messages
              // Setup message queue for receiving Messages
              IMessageQueue queueMessage = ServiceScope.Get<IMessageBroker>().GetOrCreate("message");
              queueMessage.OnMessageReceive += OnMessageReceive;

              IMessageQueue queueMessageEncoding = ServiceScope.Get<IMessageBroker>().GetOrCreate("encoding");
              queueMessageEncoding.OnMessageReceive += OnMessageReceiveEncoding;

              audioEncoder = ServiceScope.Get<IAudioEncoder>();

              // Load the Settings
              gridColumns = new GridViewColumnsConvert();

              dataGridViewConvert.AutoGenerateColumns = false;
              dataGridViewConvert.DataSource = bindingList;

              // Now set up the columns we want to display
              CreateColumns();

              CreateContextMenu();
        }
Example #5
        public GridViewConvert(Main main)
        {
            _main = main;

            InitializeComponent();

            // Listen to Messages
            // Setup message queue for receiving Messages
            IMessageQueue queueMessage = ServiceScope.Get <IMessageBroker>().GetOrCreate("message");

            queueMessage.OnMessageReceive += OnMessageReceive;

            IMessageQueue queueMessageEncoding = ServiceScope.Get <IMessageBroker>().GetOrCreate("encoding");

            queueMessageEncoding.OnMessageReceive += OnMessageReceiveEncoding;

            audioEncoder = ServiceScope.Get <IAudioEncoder>();

            // Load the Settings
            gridColumns = new GridViewColumnsConvert();

            dataGridViewConvert.AutoGenerateColumns = false;
            dataGridViewConvert.DataSource          = bindingList;

            // Now set up the columns we want to display
            CreateColumns();

            CreateContextMenu();
        }
Example #6
#pragma warning restore CS0067

        public unsafe FFmpegFileSource(string path, bool repeat, IAudioEncoder audioEncoder, bool useVideo = true)
        {
            if (!File.Exists(path))
            {
                if (!Uri.TryCreate(path, UriKind.Absolute, out Uri result))
                {
                    throw new ApplicationException($"Requested path is not a valid file path or not a valid Uri: {path}.");
                }
            }

            if ((audioEncoder != null))
            {
                _FFmpegAudioSource = new FFmpegAudioSource(audioEncoder);
                _FFmpegAudioSource.CreateAudioDecoder(path, null, repeat, false);
                _FFmpegAudioSource.InitialiseDecoder();

                _FFmpegAudioSource.OnAudioSourceEncodedSample += _FFmpegAudioSource_OnAudioSourceEncodedSample;
                _FFmpegAudioSource.OnAudioSourceRawSample     += _FFmpegAudioSource_OnAudioSourceRawSample;
                _FFmpegAudioSource.OnEndOfFile += _FFmpegAudioSource_OnEndOfFile;
            }

            if (useVideo)
            {
                _FFmpegVideoSource = new FFmpegVideoSource();
                _FFmpegVideoSource.CreateVideoDecoder(path, null, repeat, false);
                _FFmpegVideoSource.InitialiseDecoder();

                _FFmpegVideoSource.OnVideoSourceEncodedSample   += _FFmpegVideoSource_OnVideoSourceEncodedSample;
                _FFmpegVideoSource.OnVideoSourceRawSampleFaster += _FFmpegVideoSource_OnVideoSourceRawSampleFaster;
                _FFmpegVideoSource.OnEndOfFile += _FFmpegVideoSource_OnEndOfFile;
            }
        }
Example #7
        public EncodingAudioStreamWrapper(IAviAudioStreamInternal baseStream, IAudioEncoder encoder, bool ownsEncoder)
            : base(baseStream)
        {
            Argument.IsNotNull(encoder, nameof(encoder));

            this.encoder     = encoder;
            this.ownsEncoder = ownsEncoder;
        }
Example #8
        public AudioStreamer(IAudioEncoder encoder, VoiceEndPointInfo endPointInfo, CancellationToken cancellationToken = default)
        {
            Encoder                  = encoder;
            EndPointInfo             = endPointInfo;
            _globalCancellationToken = cancellationToken;

            new Thread(() => AudioLoop().Wait()).Start();
        }
Example #9
        public EncodingAudioStreamWrapper(IAviAudioStreamInternal baseStream, IAudioEncoder encoder, bool ownsEncoder)
            : base(baseStream)
        {
            Contract.Requires(baseStream != null);
            Contract.Requires(encoder != null);

            this.encoder = encoder;
            this.ownsEncoder = ownsEncoder;
        }
Example #10
        public EncodingAudioStreamWrapper(IAviAudioStreamInternal baseStream, IAudioEncoder encoder, bool ownsEncoder)
            : base(baseStream)
        {
            Contract.Requires(baseStream != null);
            Contract.Requires(encoder != null);

            this.encoder     = encoder;
            this.ownsEncoder = ownsEncoder;
        }
Example #11
        /// <summary>
        /// Creates a new basic RTP session that captures and renders audio to/from the default system devices.
        /// </summary>
        /// <param name="audioEncoder">A 3rd party audio encoder that can be used to encode and decode
        /// specific audio codecs.</param>
        /// <param name="externalSource">Optional. An external source to use in combination with the source
        /// provided by this end point. The application will need to signal which source is active.</param>
        /// <param name="disableSource">Set to true to disable the use of the audio source functionality, i.e.
        /// don't capture input from the microphone.</param>
        /// <param name="disableSink">Set to true to disable the use of the audio sink functionality, i.e.
        /// don't playback audio to the speaker.</param>
        public WindowsAudioEndPoint(IAudioEncoder audioEncoder,
                                    int audioOutDeviceIndex = AUDIO_OUTPUTDEVICE_INDEX,
                                    int audioInDeviceIndex  = AUDIO_INPUTDEVICE_INDEX,
                                    bool disableSource      = false,
                                    bool disableSink        = false)
        {
            logger = SIPSorcery.LogFactory.CreateLogger <WindowsAudioEndPoint>();

            _audioEncoder = audioEncoder;

            _disableSource = disableSource;
            _disableSink   = disableSink;

            if (!_disableSink)
            {
                try
                {
                    // Playback device.
                    _waveOutEvent = new WaveOutEvent();
                    _waveOutEvent.DeviceNumber = audioOutDeviceIndex;
                    _waveProvider = new BufferedWaveProvider(_waveFormat);
                    _waveProvider.DiscardOnBufferOverflow = true;
                    _waveOutEvent.Init(_waveProvider);
                }
                catch (Exception excp)
                {
                    logger.LogWarning(0, excp, "WindowsAudioEndPoint failed to initialise playback device.");
                    OnAudioSinkError?.Invoke($"WindowsAudioEndPoint failed to initialise playback device. {excp.Message}");
                }
            }

            if (!_disableSource)
            {
                if (WaveInEvent.DeviceCount > 0)
                {
                    if (WaveInEvent.DeviceCount > audioInDeviceIndex)
                    {
                        _waveInEvent = new WaveInEvent();
                        _waveInEvent.BufferMilliseconds = AUDIO_SAMPLE_PERIOD_MILLISECONDS;
                        _waveInEvent.NumberOfBuffers    = INPUT_BUFFERS;
                        _waveInEvent.DeviceNumber       = audioInDeviceIndex;
                        _waveInEvent.WaveFormat         = _waveFormat;
                        _waveInEvent.DataAvailable     += LocalAudioSampleAvailable;
                    }
                    else
                    {
                        OnAudioSourceError?.Invoke($"The requested audio input device index {audioInDeviceIndex} exceeds the maximum index of {WaveInEvent.DeviceCount - 1}.");
                    }
                }
                else
                {
                    OnAudioSourceError?.Invoke("No audio capture devices are available.");
                }
            }
        }
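
As a rough wiring sketch, an end point like this is normally handed to a VoIP media session. AudioEncoder, ToMediaEndPoints and VoIPMediaSession are the usual SIPSorcery companions, but they are assumptions here rather than part of the snippet:

    // Sketch only: create the Windows audio end point and plug it into a media session.
    var windowsAudio = new WindowsAudioEndPoint(new AudioEncoder());            // assumed IAudioEncoder implementation
    var mediaSession = new VoIPMediaSession(windowsAudio.ToMediaEndPoints());   // assumed SIPSorcery session type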
Example #12
#pragma warning disable CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable.
        public FFmpegAudioSource(IAudioEncoder audioEncoder)
#pragma warning restore CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable.
        {
            if (audioEncoder == null)
            {
                throw new ApplicationException("Audio encoder provided is null");
            }

            _audioFormatManager = new MediaFormatManager <AudioFormat>(_supportedAudioFormats);
            _audioEncoder       = audioEncoder;
        }
Example #13
 public AudioContext(AudioFormat audioFormat, IAudioFilter resampler, IDtxFilter dtxFilter, IAudioTwoWayFilter speechEnhancer, IAudioEncoder encoder)
 {
     AudioFormat            = audioFormat;
     Resampler              = resampler;
     SpeechEnhancementStack = speechEnhancer;
     Encoder        = encoder;
     DtxFilter      = dtxFilter;
     ResampleBuffer = new byte[audioFormat.BytesPerFrame];
     CancelBuffer   = new short[audioFormat.SamplesPerFrame];
     EncodeBuffer   = new short[audioFormat.SamplesPerFrame];
     SendBuffer     = new short[audioFormat.SamplesPerFrame];
 }
Example #14
        /// <summary>Adds new encoding audio stream.</summary>
        /// <param name="encoder">Encoder to be used.</param>
        /// <param name="ownsEncoder">Whether encoder should be disposed with the writer.</param>
        /// <returns>Newly added audio stream.</returns>
        /// <remarks>
        /// <para>
        /// Stream is initialized to be encoded with the specified encoder.
        /// Method <see cref="IAviAudioStream.WriteBlock"/> expects data in the same format as the encoder (see the encoder's docs).
        /// The data is passed to the encoder and the encoded result is written to the stream.
        /// </para>
        /// <para>
        /// The encoder defines the following properties of the stream:
        /// <see cref="IAviAudioStream.ChannelCount"/>, <see cref="IAviAudioStream.SamplesPerSecond"/>,
        /// <see cref="IAviAudioStream.BitsPerSample"/>, <see cref="IAviAudioStream.BytesPerSecond"/>,
        /// <see cref="IAviAudioStream.Granularity"/>, <see cref="IAviAudioStream.Format"/>,
        /// <see cref="IAviAudioStream.FormatSpecificData"/>.
        /// These properties cannot be modified.
        /// </para>
        /// </remarks>
        public IAviAudioStream AddEncodingAudioStream(IAudioEncoder encoder, bool ownsEncoder = true)
        {
            Argument.IsNotNull(encoder, nameof(encoder));

            return(AddStream <IAviAudioStreamInternal>(index =>
            {
                var stream = new AviAudioStream(index, this, 1, 44100, 16);
                var encodingStream = new EncodingAudioStreamWrapper(stream, encoder, ownsEncoder);
                var asyncStream = new AsyncAudioStreamWrapper(encodingStream);
                return asyncStream;
            }));
        }
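
A rough usage sketch for the method above, assuming SharpAvi's AviWriter and its LAME-backed MP3 encoder (both are assumptions, not shown in the snippet):

    // Sketch only: route PCM blocks through an encoding audio stream into an AVI file.
    // A real capture would normally add a video stream as well.
    var writer = new AviWriter("capture.avi") { FramesPerSecond = 30 };
    var mp3Stream = writer.AddEncodingAudioStream(new Mp3AudioEncoderLame(2, 44100, 192));   // channels, sample rate, kbps
    mp3Stream.WriteBlock(pcmBuffer, 0, pcmBuffer.Length);   // pcmBuffer holds PCM in the format the encoder expects
    writer.Close();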
Example #15
        public unsafe FFmpegMicrophoneSource(string path, IAudioEncoder audioEncoder) : base(audioEncoder)
        {
            string inputFormat = RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? "dshow"
                : RuntimeInformation.IsOSPlatform(OSPlatform.Linux) ? "alsa"
                : RuntimeInformation.IsOSPlatform(OSPlatform.OSX) ? "avfoundation"
                : throw new NotSupportedException($"Cannot find adequate input format - OSArchitecture:[{RuntimeInformation.OSArchitecture}] - OSDescription:[{RuntimeInformation.OSDescription}]");

            AVInputFormat *aVInputFormat = ffmpeg.av_find_input_format(inputFormat);

            CreateAudioDecoder(path, aVInputFormat, false, true);

            InitialiseDecoder();
        }
Example #16
        public GridViewRip(Main main)
        {
            _main = main;

            // Setup message queue for receiving Messages
            IMessageQueue queueMessage = ServiceScope.Get <IMessageBroker>().GetOrCreate("message");

            queueMessage.OnMessageReceive += OnMessageReceive;

            InitializeComponent();

            dataGridViewRip.CurrentCellDirtyStateChanged += dataGridViewRip_CurrentCellDirtyStateChanged;

            // Listen to Messages
            IMessageQueue queueMessageEncoding = ServiceScope.Get <IMessageBroker>().GetOrCreate("encoding");

            queueMessageEncoding.OnMessageReceive += OnMessageReceiveEncoding;

            audioEncoder       = ServiceScope.Get <IAudioEncoder>();
            mediaChangeMonitor = ServiceScope.Get <IMediaChangeMonitor>();
            mediaChangeMonitor.MediaInserted += mediaChangeMonitor_MediaInserted;
            mediaChangeMonitor.MediaRemoved  += mediaChangeMonitor_MediaRemoved;

            // Get number of CD Drives found and initialise a BindingList for every drive
            int driveCount = BassCd.BASS_CD_GetDriveCount();

            if (driveCount == 0)
            {
                bindingList.Add(new SortableBindingList <CDTrackDetail>()); // In case of no CD, we want a Dummy List
            }
            _main.RipButtonsEnabled = false;

            for (int i = 0; i < driveCount; i++)
            {
                bindingList.Add(new SortableBindingList <CDTrackDetail>());
                if (BassCd.BASS_CD_IsReady(i))
                {
                    _main.RipButtonsEnabled = true;
                }
            }

            // Prepare the Gridview
            gridColumns = new GridViewColumnsRip();
            dataGridViewRip.AutoGenerateColumns = false;
            dataGridViewRip.DataSource          = bindingList[0];

            // Now set up the columns we want to display
            CreateColumns();

            SetStatusLabel("");
        }
Example #17
        /// <summary>Adds new encoding audio stream.</summary>
        /// <param name="encoder">Encoder to be used.</param>
        /// <param name="ownsEncoder">Whether encoder should be disposed with the writer.</param>
        /// <returns>Newly added audio stream.</returns>
        /// <remarks>
        /// <para>
        /// Stream is initialized to be encoded with the specified encoder.
        /// Method <see cref="IAviAudioStream.WriteBlock"/> expects data in the same format as the encoder (see the encoder's docs).
        /// The data is passed to the encoder and the encoded result is written to the stream.
        /// </para>
        /// <para>
        /// The encoder defines the following properties of the stream:
        /// <see cref="IAviAudioStream.ChannelCount"/>, <see cref="IAviAudioStream.SamplesPerSecond"/>,
        /// <see cref="IAviAudioStream.BitsPerSample"/>, <see cref="IAviAudioStream.BytesPerSecond"/>,
        /// <see cref="IAviAudioStream.Granularity"/>, <see cref="IAviAudioStream.Format"/>,
        /// <see cref="IAviAudioStream.FormatSpecificData"/>.
        /// These properties cannot be modified.
        /// </para>
        /// </remarks>
        public IAviAudioStream AddEncodingAudioStream(IAudioEncoder encoder, bool ownsEncoder = true)
        {
            Contract.Requires(encoder != null);
            Contract.Requires(Streams.Count < 100);
            Contract.Ensures(Contract.Result <IAviAudioStream>() != null);

            return(AddStream <IAviAudioStreamInternal>(index =>
            {
                var stream = new AviAudioStream(index, this, 1, 44100, 16);
                var encodingStream = new EncodingAudioStreamWrapper(stream, encoder, ownsEncoder);
                var asyncStream = new AsyncAudioStreamWrapper(encodingStream);
                return asyncStream;
            }));
        }
Example #18
        public GridViewRip(Main main)
        {
            _main = main;

              // Setup message queue for receiving Messages
              IMessageQueue queueMessage = ServiceScope.Get<IMessageBroker>().GetOrCreate("message");
              queueMessage.OnMessageReceive += OnMessageReceive;

              InitializeComponent();

              dataGridViewRip.CurrentCellDirtyStateChanged += dataGridViewRip_CurrentCellDirtyStateChanged;

              // Listen to Messages
              IMessageQueue queueMessageEncoding = ServiceScope.Get<IMessageBroker>().GetOrCreate("encoding");
              queueMessageEncoding.OnMessageReceive += OnMessageReceiveEncoding;

              audioEncoder = ServiceScope.Get<IAudioEncoder>();
              mediaChangeMonitor = ServiceScope.Get<IMediaChangeMonitor>();
              mediaChangeMonitor.MediaInserted += mediaChangeMonitor_MediaInserted;
              mediaChangeMonitor.MediaRemoved += mediaChangeMonitor_MediaRemoved;

              // Get number of CD Drives found and initialise a BindingList for every drive
              int driveCount = BassCd.BASS_CD_GetDriveCount();
              if (driveCount == 0)
            bindingList.Add(new SortableBindingList<CDTrackDetail>()); // In case of no CD, we want a Dummy List

              _main.RipButtonsEnabled = false;

              for (int i = 0; i < driveCount; i++)
              {
            bindingList.Add(new SortableBindingList<CDTrackDetail>());
            if (BassCd.BASS_CD_IsReady(i))
            {
              _main.RipButtonsEnabled = true;
            }
              }

              // Prepare the Gridview
              gridColumns = new GridViewColumnsRip();
              dataGridViewRip.AutoGenerateColumns = false;
              dataGridViewRip.DataSource = bindingList[0];

              // Now set up the columns we want to display
              CreateColumns();

              SetStatusLabel("");
        }
Example #19
        /// <summary>
        /// Creates a new basic RTP session that captures and renders audio to/from the default system devices.
        /// </summary>
        /// <param name="audioEncoder">An audio encoder that can be used to encode and decode
        /// specific audio codecs.</param>
        /// <param name="externalSource">Optional. An external source to use in combination with the source
        /// provided by this end point. The application will need to signal which source is active.</param>
        /// <param name="disableSource">Set to true to disable the use of the audio source functionality, i.e.
        /// don't capture input from the microphone.</param>
        /// <param name="disableSink">Set to true to disable the use of the audio sink functionality, i.e.
        /// don't playback audio to the speaker.</param>
        public WindowsAudioEndPoint(IAudioEncoder audioEncoder,
                                    int audioOutDeviceIndex = AUDIO_OUTPUTDEVICE_INDEX,
                                    int audioInDeviceIndex  = AUDIO_INPUTDEVICE_INDEX,
                                    bool disableSource      = false,
                                    bool disableSink        = false)
        {
            logger = SIPSorcery.LogFactory.CreateLogger <WindowsAudioEndPoint>();

            _audioFormatManager = new MediaFormatManager <AudioFormat>(audioEncoder.SupportedFormats);
            _audioEncoder       = audioEncoder;

            _audioOutDeviceIndex = audioOutDeviceIndex;
            _disableSource       = disableSource;
            _disableSink         = disableSink;

            if (!_disableSink)
            {
                InitPlaybackDevice(_audioOutDeviceIndex, DefaultAudioPlaybackRate.GetHashCode());
            }

            if (!_disableSource)
            {
                if (WaveInEvent.DeviceCount > 0)
                {
                    if (WaveInEvent.DeviceCount > audioInDeviceIndex)
                    {
                        _waveInEvent = new WaveInEvent();
                        _waveInEvent.BufferMilliseconds = AUDIO_SAMPLE_PERIOD_MILLISECONDS;
                        _waveInEvent.NumberOfBuffers    = INPUT_BUFFERS;
                        _waveInEvent.DeviceNumber       = audioInDeviceIndex;
                        _waveInEvent.WaveFormat         = _waveSourceFormat;
                        _waveInEvent.DataAvailable     += LocalAudioSampleAvailable;
                    }
                    else
                    {
                        logger.LogWarning($"The requested audio input device index {audioInDeviceIndex} exceeds the maximum index of {WaveInEvent.DeviceCount - 1}.");
                        OnAudioSourceError?.Invoke($"The requested audio input device index {audioInDeviceIndex} exceeds the maximum index of {WaveInEvent.DeviceCount - 1}.");
                    }
                }
                else
                {
                    logger.LogWarning("No audio capture devices are available.");
                    OnAudioSourceError?.Invoke("No audio capture devices are available.");
                }
            }
        }
Example #20
        /// <summary>
        /// Creates a new basic RTP session that captures and renders audio to/from the default system devices.
        /// </summary>
        /// <param name="audioEncoder">A 3rd party audio encoder that can be used to encode and decode
        /// specific audio codecs.</param>
        /// <param name="externalSource">Optional. An external source to use in combination with the source
        /// provided by this end point. The application will need to signal which source is active.</param>
        /// <param name="disableSource">Set to true to disable the use of the audio source functionality, i.e.
        /// don't capture input from the microphone.</param>
        /// <param name="disableSink">Set to true to disable the use of the audio sink functionality, i.e.
        /// don't playback audio to the speaker.</param>
        public WindowsAudioEndPoint(IAudioEncoder audioEncoder, IAudioSource externalSource = null, bool disableSource = false, bool disableSink = false)
        {
            _audioEncoder = audioEncoder;

            _disableSource = disableSource;
            _disableSink   = disableSink;

            if (externalSource != null)
            {
                _externalSource = externalSource;

                // Pass the encoded audio sample to the RTP transport. If this class ever supported additional codecs,
                // such as Opus, the idea would be to change to receive raw samples from the external source and then
                // do the custom encoding before handing over to the transport.
                _externalSource.OnAudioSourceEncodedSample += (audioFormat, durationRtpUnits, sample)
                                                              => OnAudioSourceEncodedSample?.Invoke(audioFormat, durationRtpUnits, sample);
            }

            if (!_disableSink)
            {
                // Render device.
                _waveOutEvent = new WaveOutEvent();
                _waveOutEvent.DeviceNumber = AUDIO_OUTPUTDEVICE_INDEX;
                _waveProvider = new BufferedWaveProvider(_waveFormat);
                _waveProvider.DiscardOnBufferOverflow = true;
                _waveOutEvent.Init(_waveProvider);
            }

            if (!_disableSource)
            {
                if (WaveInEvent.DeviceCount > 0)
                {
                    _waveInEvent = new WaveInEvent();
                    _waveInEvent.BufferMilliseconds = AUDIO_SAMPLE_PERIOD_MILLISECONDS;
                    _waveInEvent.NumberOfBuffers    = INPUT_BUFFERS;
                    _waveInEvent.DeviceNumber       = AUDIO_INPUTDEVICE_INDEX;
                    _waveInEvent.WaveFormat         = _waveFormat;
                    _waveInEvent.DataAvailable     += LocalAudioSampleAvailable;
                }
                else
                {
                    throw new ApplicationException("No audio capture devices are available.");
                }
            }
        }
Example #21
        /// <summary>
        /// Creates a new instance of <see cref="EncodedAudioProvider"/>.
        /// </summary>
        /// <param name="AudioProvider">The <see cref="IAudioProvider"/> to wrap.</param>
        /// <param name="AudioEncoder">The <see cref="IAudioEncoder"/> to use.</param>
        public EncodedAudioProvider(IAudioProvider AudioProvider, IAudioEncoder AudioEncoder)
        {
            if (AudioProvider == null)
            {
                throw new ArgumentNullException(nameof(AudioProvider));
            }

            if (AudioEncoder == null)
            {
                throw new ArgumentNullException(nameof(AudioEncoder));
            }

            _audioProvider = AudioProvider;
            _audioEncoder  = AudioEncoder;

            WaveFormat = AudioEncoder.WaveFormat;

            AudioProvider.RecordingStopped += (Sender, Args) => RecordingStopped?.Invoke(Sender, Args);

            AudioProvider.DataAvailable += AudioProviderOnDataAvailable;
        }
Example #22
        public IAudioProvider GetAudioSource(int FrameRate, out WaveFormat Wf)
        {
            Wf = new WaveFormat(44100, 16, Stereo ? 2 : 1);

            IAudioEncoder audioEncoder = SelectedBitRate != 0 && IsLamePresent && Encode ? new Mp3EncoderLame(Wf.Channels, Wf.SampleRate, SelectedBitRate) : null;

            if (SelectedAudioSource is WaveInDevice)
            {
                return(new WaveInProvider(SelectedAudioSource as WaveInDevice, Wf));
            }

            if (!(SelectedAudioSource is MMDevice))
            {
                return(null);
            }

            IAudioProvider audioSource = new LoopbackProvider((MMDevice)SelectedAudioSource);

            Wf = audioSource.WaveFormat;

            return(audioEncoder == null ? audioSource : new EncodedAudioProvider(audioSource, audioEncoder));
        }
Example #23
        public FFmpegFileSource(string path, bool repeat, IAudioEncoder audioEncoder)
        {
            if (!File.Exists(path))
            {
                throw new ApplicationException($"Requested path for FFmpeg file source could not be found {path}.");
            }

            _audioFormatManager = new MediaFormatManager <AudioFormat>(_supportedAudioFormats);
            _videoFormatManager = new MediaFormatManager <VideoFormat>(_supportedVideoFormats);

            _audioEncoder      = audioEncoder;
            _fileSourceDecoder = new FileSourceDecoder(path, repeat);
            _videoEncoder      = new FFmpegVideoEncoder();
            _fileSourceDecoder.OnVideoFrame += FileSourceDecoder_OnVideoFrame;
            _fileSourceDecoder.OnAudioFrame += FileSourceDecoder_OnAudioFrame;
            _fileSourceDecoder.OnEndOfFile  += () =>
            {
                logger.LogDebug($"File source decode complete for {path}.");
                OnEndOfFile?.Invoke();
                _fileSourceDecoder.Dispose();
            };
        }
Example #24
        public AudioContext GetAudioContext()
        {
            var resampler     = new ResampleFilter(rawAudioFormat, transmittedAudioFormat);
            var conferenceDtx = new DtxFilter(transmittedAudioFormat);

            IAudioTwoWayFilter enhancer = null;

            switch (enhancementStack)
            {
            case SpeechEnhancementStack.None:
                enhancer = new NullEchoCancelFilter(mediaConfig.ExpectedAudioLatency, mediaConfig.FilterLength, transmittedAudioFormat, AudioFormat.Default);
                break;

            case SpeechEnhancementStack.Speex:
                enhancer = new SpeexEchoCanceller2(mediaConfig, transmittedAudioFormat, AudioFormat.Default);
                break;

            case SpeechEnhancementStack.WebRtc:
                enhancer = new WebRtcFilter(mediaConfig.ExpectedAudioLatency, mediaConfig.FilterLength, transmittedAudioFormat, AudioFormat.Default, mediaConfig.EnableAec, mediaConfig.EnableDenoise, mediaConfig.EnableAgc);
                break;
            }

            IAudioEncoder encoder = null;

            switch (codecType)
            {
            case AudioCodecType.G711M:
                encoder = new G711MuLawEncoder(transmittedAudioFormat);
                break;

            case AudioCodecType.Speex:
                encoder = new SpeexEncoder(transmittedAudioFormat);
                break;
            }

            var ctx = new AudioContext(transmittedAudioFormat, resampler, conferenceDtx, enhancer, encoder);

            return(ctx);
        }
Example #25
        /// <summary>
        /// Creates a new basic RTP session that captures and renders audio to/from the default system devices.
        /// </summary>
        public PortAudioEndPoint(IAudioEncoder audioEncoder)
        {
            _audioEncoder = audioEncoder;

            var apiType = PortAudioHostApiType.DirectSound;

            if (Environment.OSVersion.Platform == PlatformID.Unix)
            {
                apiType = PortAudioHostApiType.Alsa;
            }

            _portAudioOutputDevice = PortAudioHostApi.SupportedHostApis.Where(x => x.HostApiType == apiType).First().DefaultOutputDevice;

            _outputDevicePump = new PortAudioDevicePump(_portAudioOutputDevice, AUDIO_CHANNEL_COUNT,
                                                        new PortAudioSampleFormat(PortAudioSampleFormat.PortAudioNumberFormat.Signed, AUDIO_BYTES_PER_SAMPLE),
                                                        TimeSpan.FromMilliseconds(SAMPLING_PERIOD_MILLISECONDS), AUDIO_SAMPLING_RATE, ReadAudioDataCalback);

            _portAudioInputDevice = PortAudioHostApi.SupportedHostApis.Where(x => x.HostApiType == apiType).First().DefaultInputDevice;

            _inputDevicePump = new PortAudioDevicePump(_portAudioInputDevice, AUDIO_CHANNEL_COUNT,
                                                       new PortAudioSampleFormat(PortAudioSampleFormat.PortAudioNumberFormat.Signed, AUDIO_BYTES_PER_SAMPLE),
                                                       TimeSpan.FromMilliseconds(SAMPLING_PERIOD_MILLISECONDS), AUDIO_SAMPLING_RATE, WriteDataCallback);
        }
Example #26
    public void stop()
    {
        if (!running)
        {
            return;
        }
        running = false;

        AVAudioSession.SharedInstance().SetActive(false);

        if (audioRecorder != null)
        {
            try
            {
                audioRecorder.InputNode.RemoveTapOnBus(0);
                audioRecorder.Stop();
                audioRecorder.Reset();
            }
            catch (Exception)
            {
            }
            audioRecorder.Dispose();
            audioRecorder = null;
        }

        if (audioEncoder != null)
        {
            audioEncoder.stop();
            audioEncoder.Dispose();
            audioEncoder = null;
        }

        lock (outputBuffers)
        {
            outputBuffers.Clear();
        }
    }
Example #27
        private IEnumerator InitializeMicrophone()
        {
            Debug.Log("Trying to initialize microphone");

            yield return Application.RequestUserAuthorization(UserAuthorization.Microphone);

            if (Application.HasUserAuthorization(UserAuthorization.Microphone))
            {
                Debug.Log("Microphone access was granted");

                foreach( var device in recorder.Devices)
                {
                    Debug.Log(device);
                }

                sampleFrequency = recorder.Start(recorder.Devices.First(), (data,count) =>
                {
                    if ( audioEncoder == null )
                    {
                        audioEncoder    = new SpeexAudioEncoder(false,BandMode.Wide,false,10);
                        audioStream  = new EventStream((buffer,bufferOffset,bufferCount) =>
                        {
                            var playerNetworkBehaviour = GetComponent<PlayerNetworkBehaviour>();
                            if ( playerNetworkBehaviour != null )
                            {
                                playerNetworkBehaviour.SendVoiceInput(buffer,bufferOffset,bufferCount,sampleFrequency);
                            }
                        });

                        audioEncoder.Open(audioStream,1,sampleFrequency);
                    }

                    audioEncoder.Write(data,count);
                });
            }
        }
Example #28
        private IEnumerator InitializeMicrophone()
        {
            Debug.Log("Trying to initialize microphone");

            yield return(Application.RequestUserAuthorization(UserAuthorization.Microphone));

            if (Application.HasUserAuthorization(UserAuthorization.Microphone))
            {
                Debug.Log("Microphone access was granted");

                foreach (var device in recorder.Devices)
                {
                    Debug.Log(device);
                }

                sampleFrequency = recorder.Start(recorder.Devices.First(), (data, count) =>
                {
                    if (audioEncoder == null)
                    {
                        audioEncoder = new SpeexAudioEncoder(false, BandMode.Wide, false, 10);
                        audioStream  = new EventStream((buffer, bufferOffset, bufferCount) =>
                        {
                            var playerNetworkBehaviour = GetComponent <PlayerNetworkBehaviour>();
                            if (playerNetworkBehaviour != null)
                            {
                                playerNetworkBehaviour.SendVoiceInput(buffer, bufferOffset, bufferCount, sampleFrequency);
                            }
                        });

                        audioEncoder.Open(audioStream, 1, sampleFrequency);
                    }

                    audioEncoder.Write(data, count);
                });
            }
        }
Example #29
 /// <summary>
 /// Creates a new instance of <see cref="AudioFileWriter"/>.
 /// </summary>
 public AudioFileWriter(string FileName, IAudioEncoder Encoder)
     : this(new FileStream(FileName, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.Read), Encoder)
 {
 }
Example #30
 /// <summary>
 /// Creates a new instance of <see cref="AudioFileWriter"/>.
 /// </summary>
 public AudioFileWriter(Stream OutStream, IAudioEncoder Encoder)
     : this(OutStream, Encoder.WaveFormat, Encoder.RequiresRiffHeader)
 {
     _encoder = Encoder;
 }
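
A small usage sketch combining this writer with the Mp3EncoderLame encoder seen in Example #22; the shape of the Write call is an assumption:

    // Sketch only: stream raw PCM chunks through the encoder into an MP3 file.
    using (var writer = new AudioFileWriter("voice.mp3", new Mp3EncoderLame(2, 44100, 128)))   // channels, sample rate, kbps
    {
        writer.Write(pcmChunk, 0, pcmChunk.Length);   // assumed Write(byte[], offset, count) shape
    }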
Example #31
 public AudioFileEncoder(IFileReader fileReader, IAudioEncoder encoder)
 {
     this.fileReader = fileReader;
     this.encoder    = encoder;
 }
Example #32
 private void initOpusEncoder()
 {
     audioEncoder = new OpusEncoder(sampleRate, 24000, channels, Concentus.Enums.OpusApplication.OPUS_APPLICATION_RESTRICTED_LOWDELAY, this);
     audioEncoder.start();
 }
Example #33
    public void stop()
    {
        if (!running)
        {
            return;
        }
        running = false;

        if (echoCanceller != null)
        {
            try
            {
                echoCanceller.Release();
                echoCanceller.Dispose();
            }
            catch (Exception)
            {
            }
            echoCanceller = null;
        }

        if (noiseSuppressor != null)
        {
            try
            {
                noiseSuppressor.Release();
                noiseSuppressor.Dispose();
            }
            catch (Exception)
            {
            }
            noiseSuppressor = null;
        }

        if (audioRecorder != null)
        {
            try
            {
                audioRecorder.Stop();
                audioRecorder.Release();
            }
            catch (Exception)
            {
            }
            audioRecorder.Dispose();
            audioRecorder = null;
        }

        if (audioEncoder != null)
        {
            audioEncoder.stop();
            audioEncoder.Dispose();
            audioEncoder = null;
        }

        buffer       = null;
        shortsBuffer = null;
        bufferSize   = 0;
        lock (outputBuffers)
        {
            outputBuffers.Clear();
        }


        AudioManager am = (AudioManager)MainActivity.Instance.GetSystemService(Context.AudioService);

        if (Build.VERSION.SdkInt < BuildVersionCodes.O)
        {
            if (focusListener != null)
            {
#pragma warning disable CS0618 // Type or member is obsolete
                am.AbandonAudioFocus(focusListener);
#pragma warning restore CS0618 // Type or member is obsolete
                focusListener.Dispose();
                focusListener = null;
            }
        }
        else
        {
            if (focusListener != null)
            {
                if (focusRequest != null)
                {
                    am.AbandonAudioFocusRequest(focusRequest);
                    focusRequest.Dispose();
                    focusRequest = null;
                }
                focusListener.Dispose();
                focusListener = null;
            }
        }
    }
Example #34
        public int Encode(MediaAnalyzeInfo mediaAnalyzeInfo, IAudioEncoder audioEncoder, IAudioEncoderSettings settings, Action <string> logAction, Action <string> progressAction, out string outputFileName)
        {
            // Get AviSynth script
            AviSynthScriptService aviSynthScriptService = ServiceFactory.GetService <AviSynthScriptService>();

            // Open the AviSynth script
            AviSynthFileService aviSynthFileService = ServiceFactory.GetService <AviSynthFileService>();

            // Get the AviSynth audio script
            string avsScript = aviSynthScriptService.CreateAviSynthAudioScript(mediaAnalyzeInfo);

            // Try to open the Avs Script
            IAviSynthAudioSourceService audioSourceService = null;

            while (true)
            {
                try
                {
                    using (var avsFile = aviSynthFileService.OpenAviSynthScriptFile(avsScript))
                    {
                        break;
                    }
                }
                catch (Exception)
                {
                    // Check if we already tried again
                    if (audioSourceService != null)
                    {
                        throw;
                    }

                    // In case it fails, try to create audio AviSynth script with the DirectShowSource
                    audioSourceService = ServiceFactory.GetService <AviSynthDirectShowAudioSourceService>();

                    avsScript = aviSynthScriptService.CreateAviSynthAudioScript(mediaAnalyzeInfo, audioSourceService: audioSourceService);

                    continue;
                }
            }

            // Determine the output filename
            outputFileName = $"{mediaAnalyzeInfo.Filename}.reencode.{settings.FileExtension}".GetNewFileName();

            // Open the AviSynth Script to generate the timecodes
            using (var avsFile = aviSynthFileService.OpenAviSynthScriptFile(avsScript))
            {
                // Check for audio existence
                if (avsFile.Clip.AudioSamplesCount == 0)
                {
                    throw new ApplicationException("Can't find audio stream!");
                }

                // Calculate Size in Bytes
                long totalSizeInBytes = avsFile.Clip.AudioSamplesCount * avsFile.Clip.AudioBytesPerSample * avsFile.Clip.AudioChannelsCount;

                // Define format type tag
                // 1 for int, 3 for float
                int formatTypeTag = 1;
                if (avsFile.Clip.AudioSampleType == AvsAudioSampleType.FLOAT)
                {
                    formatTypeTag = 3;
                }

                using (var process = new Process())
                {
                    // Create the ProcessStartInfo object
                    ProcessStartInfo info = new ProcessStartInfo
                    {
                        // Command line arguments, to be passed to encoder
                        // {0} means output file name
                        // {1} means samplerate in Hz
                        // {2} means bits per sample
                        // {3} means channel count
                        // {4} means samplecount
                        // {5} means size in bytes
                        // {6} means format (1 int, 3 float)
                        // {7} means target bitrate
                        Arguments = string.Format(
                            audioEncoder.ExecutableArguments,
                            outputFileName,
                            avsFile.Clip.AudioSampleRate,
                            avsFile.Clip.AudioBitsPerSample,
                            avsFile.Clip.AudioChannelsCount,
                            avsFile.Clip.AudioSamplesCount,
                            totalSizeInBytes,
                            formatTypeTag,
                            mediaAnalyzeInfo.TargetAudioBitrate
                            ),

                        FileName = audioEncoder.EncoderFileName,

                        UseShellExecute        = false,
                        RedirectStandardInput  = true,
                        RedirectStandardOutput = true,
                        RedirectStandardError  = true,
                        CreateNoWindow         = true
                    };

                    process.StartInfo = info;

                    Debug.WriteLine(info.Arguments);

                    // Start the process
                    process.Start();

                    // TODO: Revisit that
                    //process.PriorityClass = m_processPriority;

                    // Read the Standard output character by character
                    Task.Run(() => process.ReadStreamPerCharacter(true, new Action <Process, string>((p, str) => Debug.WriteLine(str))));

                    // Read the Standard error character by character
                    Task.Run(() => process.ReadStreamPerCharacter(false, new Action <Process, string>((p, str) => Debug.WriteLine(str))));

                    try
                    {
                        using (Stream processInputStream = process.StandardInput.BaseStream)
                        {
                            // Check if we need to write WAV Header
                            if (audioEncoder.WriteHeader)
                            {
                                logAction?.Invoke($"Audio encoding: {mediaAnalyzeInfo.Filename} Writing header data to encoder's StdIn...");
                                WriteHeader(audioEncoder.HeaderType, processInputStream, avsFile, totalSizeInBytes, settings.ChannelMask, formatTypeTag);
                            }

                            logAction?.Invoke($"Audio encoding: {mediaAnalyzeInfo.Filename} Writing PCM data to encoder's StdIn...");

                            // Calculate the frame buffer total size
                            int frameBufferTotalSize = MAX_SAMPLES_PER_ONCE * avsFile.Clip.AudioChannelsCount * avsFile.Clip.AudioBitsPerSample / 8;

                            // Allocate the frame buffer
                            byte[] frameBuffer = new byte[frameBufferTotalSize];

                            // Get the handle for the frame buffer
                            GCHandle bufferHandle = GCHandle.Alloc(frameBuffer, GCHandleType.Pinned);

                            try
                            {
                                // Set a current frame sample indicator
                                int currentFrameSample = 0;

                                // Start passing the audio frames to the encoder's standard input stream
                                while (currentFrameSample < avsFile.Clip.AudioSamplesCount)
                                {
                                    // Check for unexpected process exit
                                    if (process != null && process.HasExited)
                                    {
                                        throw new ApplicationException($"Unexpected encoder termination with exit code: {process.ExitCode}");
                                    }

                                    // Calculate how many frame samples to read
                                    int framesSamplesToRead = Math.Min((int)(avsFile.Clip.AudioSamplesCount - currentFrameSample), MAX_SAMPLES_PER_ONCE);

                                    int bytesRead = framesSamplesToRead * avsFile.Clip.AudioBytesPerSample * avsFile.Clip.AudioChannelsCount;

                                    // Read the audio frame samples and copy them to the frame buffer
                                    avsFile.ReadAudioSamples(bufferHandle.AddrOfPinnedObject(), currentFrameSample, framesSamplesToRead);

                                    // Calculate the current progress
                                    double progress = ((double)currentFrameSample / (double)avsFile.Clip.AudioSamplesCount) * 100.0;
                                    progressAction?.Invoke($"Progress {progress:#0.00}%");

                                    // Write the frame samples to the encoder's standard input stream
                                    processInputStream.Write(frameBuffer, 0, bytesRead);
                                    processInputStream.Flush();

                                    // Advance the current frame sample indicator
                                    currentFrameSample += framesSamplesToRead;

                                    // Signal the OS to run other threads in our time slice
                                    Thread.Yield();
                                }
                            }
                            finally
                            {
                                // Free the frame buffer handle
                                bufferHandle.Free();
                            }
                        }

                        if (process != null)
                        {
                            logAction?.Invoke($"Audio encoding: {mediaAnalyzeInfo.Filename} Finalizing encoder");

                            // Wait for the process to exit
                            process.WaitForExit();

                            // Debug write the exit code
                            Debug.WriteLine($"Exit code: {process.ExitCode}");
                        }
                    }
                    finally
                    {
                        // Sanity check for non exited process
                        if (process != null && !process.HasExited)
                        {
                            // Kill the process
                            process.Kill();

                            // Wait for the process to exit
                            process.WaitForExit();

                            // Debug write the exit code
                            Debug.WriteLine($"Exit code: {process.ExitCode}");
                        }
                    }

                    // Return the process exit code
                    return(process?.ExitCode ?? 0);
                }
            }
        }
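
To make the placeholder scheme concrete, the snippet below expands a hypothetical argument template the same way the Encode method above does. The flags and values are invented for illustration and are not taken from the example:

    // Sketch only: how string.Format expands an ExecutableArguments template.
    string template = "--rate {1} --bits {2} --channels {3} --bitrate {7} -o \"{0}\" -";   // invented flags
    string arguments = string.Format(template,
        "output.m4a",   // {0} output file name
        48000,          // {1} sample rate in Hz
        16,             // {2} bits per sample
        2,              // {3} channel count
        1000000L,       // {4} sample count
        4000000L,       // {5} size in bytes
        1,              // {6} format tag (1 = int PCM, 3 = float)
        192);           // {7} target bitrate
    // arguments => --rate 48000 --bits 16 --channels 2 --bitrate 192 -o "output.m4a" -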