コード例 #1
0
ファイル: VoiceSynth.cs プロジェクト: bm98/FS20_HudBar
        // will be called when speaking has finished
        //  stops the graph, restarts the output node and signals the end of the utterance
        private void MediaSourceInputNode_MediaSourceCompleted(MediaSourceAudioInputNode sender, object args)
        {
            if (_audioGraph != null)
            {
                _audioGraph.Stop( );
            }
            if (_deviceOutputNode != null)
            {
                _deviceOutputNode.Start( ); // restart output - needed ??
            }

            EndOfSpeak( );
        }
コード例 #2
0
ファイル: VoiceSynth.cs プロジェクト: bm98/FS20_HudBar
        // Async Speak output of a text
        //  Synthesizes the text to a speech stream, wires it into the AudioGraph as an
        //  input node and starts playback; MediaSourceCompleted will fire EndOfSpeak
        private async Task SpeakAsyncLow(string text)
        {
            _speaking = true; // locks additional calls for Speak until finished talking this bit

            // Speech - bail out (and release the lock via EndOfSpeak) if any required item is missing
            if (!_canSpeak || _synthesizer == null || _audioGraph == null || _deviceOutputNode == null)
            {
                LOG.LogError($"SpeakAsyncLow: Some items do not exist: cannot speak..\n[{_synthesizer}] [{_audioGraph}] [{_deviceOutputNode}]");
                EndOfSpeak( );
                return;
            }

            // Generate a new, independent audio stream from plain text.
            _stream?.Dispose( ); // dispose the previous stream before overwriting it - otherwise it leaks (same pattern as _mediaSource below)
            _stream = await _synthesizer.SynthesizeTextToStreamAsync(text);

            // Must create a MediaSource obj to derive a Stream Consumer InputNode
            _mediaSource?.Dispose( ); // clean old
            _mediaSource = MediaSource.CreateFromStream(_stream, _stream.ContentType);

            if (_mediaSourceInputNode != null)
            {
                // clean old nodes
                _mediaSourceInputNode.MediaSourceCompleted -= MediaSourceInputNode_MediaSourceCompleted; // detach handler
                _mediaSourceInputNode.Dispose( );
                _mediaSourceInputNode = null; // clear the reference - else a failure below leaves a disposed node behind
            }
            // create the InputNode
            var resultMS = await _audioGraph.CreateMediaSourceAudioInputNodeAsync(_mediaSource);

            if (resultMS.Status != MediaSourceAudioInputNodeCreationStatus.Success)
            {
                // Cannot create input node
                LOG.LogError($"SpeakAsyncLow: MediaSourceAudioInputNode creation: {resultMS.Status}\nExtError: {resultMS.ExtendedError}");
                EndOfSpeak( );
                return; // cannot speak
            }
            _mediaSourceInputNode = resultMS.Node;
            // add a handler to stop and signal when finished
            _mediaSourceInputNode.MediaSourceCompleted += MediaSourceInputNode_MediaSourceCompleted;
            _mediaSourceInputNode.AddOutgoingConnection(_deviceOutputNode); // connect the graph
            // Speak it
            _audioGraph.Start( );                                           // Speaks in the current Thread - cannot be waited for in the same thread
        }
コード例 #3
0
        // </SnippetCreateFileOutputNode>



        // <SnippetCreateMediaSourceInputNode>
        // Creates an audio input node from a (possibly adaptive/streaming) content Uri
        // and stores it in mediaSourceInputNode; logs and returns on any creation failure.
        private async Task CreateMediaSourceInputNode(System.Uri contentUri)
        {
            // nothing to attach the node to without a graph
            if (audioGraph == null)
            {
                return;
            }

            var adaptiveResult = await AdaptiveMediaSource.CreateFromUriAsync(contentUri);
            if (adaptiveResult.Status != AdaptiveMediaSourceCreationStatus.Success)
            {
                Debug.WriteLine("Failed to create AdaptiveMediaSource");
                return;
            }

            // wrap the adaptive source in a MediaSource and derive the audio input node from it
            var mediaSource = MediaSource.CreateFromAdaptiveMediaSource(adaptiveResult.MediaSource);
            var nodeResult = await audioGraph.CreateMediaSourceAudioInputNodeAsync(mediaSource);

            if (nodeResult.Status != MediaSourceAudioInputNodeCreationStatus.Success)
            {
                // map the failure code to a diagnostic message
                string message;
                if (nodeResult.Status == MediaSourceAudioInputNodeCreationStatus.FormatNotSupported)
                {
                    message = "The MediaSource uses an unsupported format";
                }
                else if (nodeResult.Status == MediaSourceAudioInputNodeCreationStatus.NetworkError)
                {
                    message = "The MediaSource requires a network connection and a network-related error occurred";
                }
                else
                {
                    // UnknownFailure and any future status values
                    message = "An unknown error occurred while opening the MediaSource";
                }
                Debug.WriteLine(message);
                return;
            }

            mediaSourceInputNode = nodeResult.Node;
        }
コード例 #4
0
 // <SnippetMediaSourceCompleted>
 // Completion handler: halts the graph once the media source has played to the end.
 private void MediaSourceInputNode_MediaSourceCompleted(MediaSourceAudioInputNode sender, object args)
     => audioGraph.Stop();
コード例 #5
0
ファイル: VoiceSynth.cs プロジェクト: bm98/FS20_HudBar
        // Init the AudioGraph
        //  despite the Async methods - this executes synchronously so the init phase
        //  only completes once all items (graph + output node) are available
        private void InitAudioGraph( )
        {
            LOG.Log("InitAudioGraph: Begin");
            if (!_canSpeak)
            {
                LOG.Log("InitAudioGraph: Canceled with _canSpeak = false");
                return; // cannot even try..
            }

            // MUST WAIT UNTIL all items are created, else one may call Speak too early...
            // cleanup existing items
            if (_mediaSourceInputNode != null)
            {
                // detach the completion handler before disposing - same pattern as in SpeakAsyncLow
                _mediaSourceInputNode.MediaSourceCompleted -= MediaSourceInputNode_MediaSourceCompleted;
                if (_deviceOutputNode != null)
                {
                    _mediaSourceInputNode.RemoveOutgoingConnection(_deviceOutputNode);
                }
                _mediaSourceInputNode.Dispose( );
                _mediaSourceInputNode = null;
            }
            if (_deviceOutputNode != null)
            {
                _deviceOutputNode.Dispose( ); _deviceOutputNode = null;
            }
            if (_audioGraph != null)
            {
                _audioGraph.Dispose( ); _audioGraph = null;
            }

            // Create an AudioGraph
            AudioGraphSettings settings = new AudioGraphSettings(_renderCat)
            {
                PrimaryRenderDevice = null, // If PrimaryRenderDevice is null, the default playback device will be used.
            };
            // We await here the execution without providing an async method ...
            // NOTE(review): Wait()/Result on a WinRT async op can deadlock on a UI SynchronizationContext
            //               and wraps failures in AggregateException - presumably this is called off the UI thread; verify at call sites
            var resultAG = WindowsRuntimeSystemExtensions.AsTask(AudioGraph.CreateAsync(settings));

            resultAG.Wait( );
            if (resultAG.Result.Status != AudioGraphCreationStatus.Success)
            {
                LOG.LogError($"InitAudioGraph: Failed to create AudioGraph with RenderCategory: {_renderCat}");
                LOG.LogError($"InitAudioGraph: AudioGraph creation: {resultAG.Result.Status}, TaskStatus: {resultAG.Status}"
                             + $"\nExtError: {resultAG.Result.ExtendedError}");
                _canSpeak = false; // disable speaking entirely - callers check this flag
                return;
            }
            _audioGraph = resultAG.Result.Graph;
            LOG.Log($"InitAudioGraph: AudioGraph: [{_audioGraph.EncodingProperties}]");

            // Create a device output node
            // The output node uses the PrimaryRenderDevice of the audio graph.
            // We await here the execution without providing an async method ...
            var resultDO = WindowsRuntimeSystemExtensions.AsTask(_audioGraph.CreateDeviceOutputNodeAsync());

            resultDO.Wait( );
            if (resultDO.Result.Status != AudioDeviceNodeCreationStatus.Success)
            {
                // Cannot create device output node
                LOG.LogError($"InitAudioGraph: DeviceOutputNode creation: {resultDO.Result.Status}, TaskStatus: {resultDO.Status}"
                             + $"\nExtError: {resultDO.Result.ExtendedError}");
                _canSpeak = false;
                return;
            }
            _deviceOutputNode = resultDO.Result.DeviceOutputNode;
            LOG.Log($"InitAudioGraph: DeviceOutputNode: [{_deviceOutputNode.Device}]");
            LOG.Log($"InitAudioGraph: InitAudioGraph-END");
        }