Example #1
        public void AudioRequestGotoPreviousReturnBismillah(int expSura, int expAya, int currSura, int currAya)
        {
            var request = new AudioRequest(0, new QuranAyah(currSura, currAya), null, 0, AudioDownloadAmount.Page);

            request.GotoPreviousAyah();
            Assert.Equal(new QuranAyah(expSura, expAya), request.CurrentAyah);
        }
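This test takes parameters but shows no attributes, as do Examples #3, #11, and others below; the snippet extractor has most likely stripped an xUnit [Theory] declaration. A minimal sketch of the missing decoration, with purely illustrative data values:

        // Hypothetical decoration; the real InlineData rows are not in the snippet.
        [Theory]
        [InlineData(1, 1, 2, 1)] // expSura, expAya, currSura, currAya (illustrative only)
        public void AudioRequestGotoPreviousReturnBismillah(int expSura, int expAya, int currSura, int currAya)
        {
            // ... body as in Example #1 ...
        }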
Example #2
        public void AudioRequestToStringEqualsConstructor()
        {
            var pattern = "local://0/?amount=Surah&currentAyah=1:2&fromAyah=2:2&currentRepeat=0";
            var request = new AudioRequest(pattern);

            Assert.Equal(pattern, request.ToString());
        }
Example #3
        public void AudioRequestGotoPreviousDoesntReturnBismillahForTawba()
        {
            var request = new AudioRequest(0, new QuranAyah(9, 1), null, 0, AudioDownloadAmount.Page);

            request.GotoPreviousAyah();
            Assert.Equal(new QuranAyah(8, 75), request.CurrentAyah);
        }
Example #4
 /// <summary>
 /// Perform non-streaming speech-recognition: receive results after all audio
 /// has been sent and processed.
 /// </summary>
 /// <param name="initialRequest">
 /// The `initial_request` message provides information to the recognizer
 /// that specifies how to process the request.
 ///
 /// The first `RecognizeRequest` message must contain an `initial_request`.
 /// Any subsequent `RecognizeRequest` messages must not contain an
 /// `initial_request`.
 /// </param>
 /// <param name="audioRequest">
 /// The audio data to be recognized. For REST or `NonStreamingRecognize`, all
 /// audio data must be contained in the first (and only) `RecognizeRequest`
 /// message. For gRPC streaming `Recognize`, sequential chunks of audio data
 /// are sent in sequential `RecognizeRequest` messages.
 /// </param>
 /// <param name="callSettings">If not null, applies overrides to this RPC call.</param>
 /// <returns>The RPC response.</returns>
 public virtual NonStreamingRecognizeResponse NonStreamingRecognize(
     InitialRecognizeRequest initialRequest,
     AudioRequest audioRequest,
     CallSettings callSettings = null)
 {
     throw new NotImplementedException();
 }
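The XML documentation above describes a strict protocol: for the non-streaming call, a single RecognizeRequest wraps both the initial_request and the audio_request (Example #19 below shows the wrapping). A hedged caller sketch follows; the client variable stands for an instance of the generated client, and the property names on InitialRecognizeRequest and AudioRequest (Encoding, SampleRate, LanguageCode, Content) are assumptions drawn from the v1beta speech proto, not verified against the generated C#:

 // Sketch only: property and enum names below are assumptions from the
 // v1beta speech proto and may differ in the generated client.
 var initial = new InitialRecognizeRequest
 {
     Encoding = AudioEncoding.Linear16, // assumed enum member
     SampleRate = 16000,
     LanguageCode = "en-US",
 };
 var audio = new AudioRequest
 {
     Content = ByteString.CopyFrom(File.ReadAllBytes("speech.raw")), // assumed field
 };
 NonStreamingRecognizeResponse response = client.NonStreamingRecognize(initial, audio);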
Example #5
 private void StateFadeinText()
 {
     m_text.alpha = m_state.CurrentStateTimePer;
     if (m_state.IsStateTimeout)
     {
         if (!AudioManager.Instance.IsValidAudioID(m_currentData.VoiceFile))
         {
             Debug.LogError("EndController: Unknow voice audioID '" + m_currentData.VoiceFile + "'!");
             m_state.ChangeState(EState.FADEOUT_DELAY);
             return;
         }
         m_currentAudio = null;
         m_state.ChangeState(EState.ACTIVE);
         m_currentAudioRequest = AudioManager.Instance.RequestByAudioID(m_currentData.VoiceFile, 100, delegate(AudioRequest aReq)
         {
             if (aReq.IsDone && aReq.Controller != null)
             {
                 m_currentAudio = AudioController.Play(m_currentData.VoiceFile);
                 if (m_currentAudio == null)
                 {
                     m_state.ChangeState(EState.FADEOUT_DELAY);
                 }
             }
             else
             {
                 m_state.ChangeState(EState.FADEOUT_DELAY);
             }
         });
     }
 }
Example #6
 private void PlayVoiceOver(String p_voiceAudioID)
 {
     if (m_currentAudioRequest != null && m_currentAudioRequest.IsLoading)
     {
         m_currentAudioRequest.AbortLoad();
     }
     if (m_currentAudioObject != null)
     {
         if (m_currentAudioObject.IsPlaying())
         {
             m_currentAudioObject.Stop();
         }
         AudioManager.Instance.UnloadByAudioID(m_currentAudioObject.audioID);
         m_currentAudioObject = null;
     }
     if (!AudioManager.Instance.IsValidAudioID(p_voiceAudioID))
     {
         Debug.LogError("Dialog: Unknow voice audioID '" + p_voiceAudioID + "'!");
         return;
     }
     m_currentAudioRequest = AudioManager.Instance.RequestByAudioID(p_voiceAudioID, 100, delegate(AudioRequest a)
     {
         if (a.IsDone && a.Controller != null)
         {
             m_currentAudioObject = AudioController.Play(p_voiceAudioID);
             StopAllCoroutines();
             StartCoroutine(UnloadVoiceOver());
         }
     });
 }
Example #7
        public void AudioRequestToStringEqualsConstructorWithAllParameters()
        {
            var pattern = "local://0/?amount=Juz&currentAyah=1:2&fromAyah=1:2&toAyah=2:2&repeat=Page-2-times&currentRepeat=2";
            var request = new AudioRequest(pattern);

            Assert.Equal(pattern, request.ToString());
        }
Example #8
        public IEnumerator TestStream()
        {
            // Current limitation:
            // There is no way to verify that the loaded audio is actually being streamed.

            var request  = new AudioRequest(TestConstants.RemoteMp3Url, true);
            var listener = new TaskListener<IWebRequest>();

            request.Request(listener);

            while (!request.IsFinished)
            {
                Debug.Log("Progress: " + listener.Progress);
                yield return null;
            }

            Assert.IsNotNull(request.Response);

            var clip = request.Response.AudioData;

            Assert.IsNotNull(clip);

            Debug.Log($"Content: {request.Response.ContentLength}, response: {request.Response.BytesLoaded}");
            Assert.LessOrEqual((double)request.Response.BytesLoaded, (double)request.Response.ContentLength);
        }
Example #9
    void playNext()
    {
        currentReq = requests.Dequeue();

        if (currentReq.getLifeLength() > maxRequestLife)
        {
            print("Request expired");
            return;
        }

        switch (currentReq.getType())
        {
        case RequestType.ENEMY_BEHIND_CLIP:
            sendBehindClip();
            break;

        case RequestType.ENEMY_FRONT_CLIP:
            sendFrontClip();
            break;

        case RequestType.MALACODA_CLIP:
            sendMalacodaClip();
            break;

        case RequestType.KEK_CLIP:
            sendKekClip();
            break;
        }
    }
Example #10
        public IEnumerator TestTask()
        {
            var request            = new AudioRequest(TestConstants.RemoteMp3Url, false);
            ITask<AudioClip> task = request;

            Assert.AreEqual(request, task);
            Assert.IsFalse(task.DidRun);
            Assert.IsFalse(task.IsFinished);

            // Receive via callback
            AudioClip clip = null;
            TaskListener<AudioClip> listener = new TaskListener<AudioClip>();

            listener.OnFinished += (value) => clip = value;

            // Request
            task.StartTask(listener);
            Assert.IsFalse(task.IsFinished);
            Assert.IsFalse(request.IsFinished);

            // Wait till finish
            while (!task.IsFinished)
            {
                Debug.Log("Progress: " + request.Progress);
                yield return null;
            }

            Assert.IsTrue(task.DidRun);
            Assert.IsTrue(task.IsFinished);
            Assert.IsTrue(request.IsFinished);
            Assert.IsNotNull(request.Response);
            Assert.IsNotNull(clip);
            Assert.AreEqual(clip, request.Response.AudioData);
            Assert.AreEqual(listener.Value, clip);
        }
Example #11
        public void AudioRequestGotoNextIncrementsAyah(int expSura, int expAya, int currSura, int currAya)
        {
            var request = new AudioRequest(0, new QuranAyah(currSura, currAya), null, 0, AudioDownloadAmount.Page);

            request.GotoNextAyah();
            Assert.Equal(new QuranAyah(expSura, expAya), request.CurrentAyah);
        }
Example #12
        public void AudioRequestGotoNextDoesntRepeat()
        {
            var request = new AudioRequest(0, new QuranAyah(1, 1), new RepeatInfo(RepeatAmount.OneAyah, 2), 2, AudioDownloadAmount.Page);

            request.GotoNextAyah();
            Assert.Equal(new QuranAyah(1, 2), request.CurrentAyah);
        }
Example #13
        public void TestAudioRequestProperties()
        {
            var request = new AudioRequest(0, new QuranAyah(1, 2), new RepeatInfo(RepeatAmount.Juz, 2), 0, AudioDownloadAmount.Page);

            Assert.Equal("Minshawi Murattal (gapless)", request.Reciter.Name);
            Assert.Equal(new QuranAyah(1, 2), request.CurrentAyah);
            Assert.Equal(RepeatAmount.Juz, request.RepeatInfo.RepeatAmount);
        }
Example #14
        public void AudioRequestWorksWithStringConstructor()
        {
            var request = new AudioRequest("local://0/?amount=Surah&fromAyah=1:2");

            Assert.Equal("Minshawi Murattal (gapless)", request.Reciter.Name);
            Assert.Equal(new QuranAyah(1, 2), request.CurrentAyah);
            Assert.Equal(AudioDownloadAmount.Surah, request.AudioDownloadAmount);
        }
Example #15
 public void PlaySound(String audioID)
 {
     if (IsMuted)
     {
         return;
     }
     if (m_loopingSound != null)
     {
         if (m_loopingSound.audio.loop)
         {
             m_loopingSound.audio.loop = false;
         }
         else
         {
             StartCoroutine(StopLoopAfter(m_loopingSound.clipLength - m_loopingSound.audio.time + 0.3f, m_loopingSound));
             m_loopingSound = null;
         }
     }
     if (!AudioManager.Instance.IsValidAudioID(audioID))
     {
         Debug.LogError("AnimatorSoundEffects; PlaySound: AudioID '" + audioID + "' not found!", this);
         return;
     }
     if (AudioManager.Instance.InAudioRange(m_transform.position, audioID))
     {
         if (AudioManager.Instance.IsAudioIDLoaded(audioID))
         {
             AudioObject audioObject = AudioHelper.PlayWithProbabilityOfFirstItem(audioID, transform, 1f, 0f, 0f);
             if (audioObject != null && audioObject.audioItem.Loop != AudioItem.LoopMode.DoNotLoop)
             {
                 m_loopingSound = audioObject;
             }
         }
         else
         {
             if (m_request != null && m_request.IsLoading)
             {
                 if (m_request.CategoryName == AudioManager.Instance.FindCategoryNameByAudioID(audioID))
                 {
                     return;
                 }
                 m_request.AbortLoad();
             }
             m_request = AudioManager.Instance.RequestByAudioID(audioID, 5, delegate(AudioRequest a)
             {
                 if (a.Controller != null)
                 {
                     AudioObject audioObject2 = AudioHelper.PlayWithProbabilityOfFirstItem(audioID, transform, 1f, 0f, 0f);
                     if (audioObject2 != null && audioObject2.audioItem.Loop != AudioItem.LoopMode.DoNotLoop)
                     {
                         m_loopingSound = audioObject2;
                     }
                 }
             });
         }
     }
 }
Example #16
 /// <summary>
 /// Perform non-streaming speech-recognition: receive results after all audio
 /// has been sent and processed.
 /// </summary>
 /// <param name="initialRequest">
 /// The `initial_request` message provides information to the recognizer
 /// that specifies how to process the request.
 ///
 /// The first `RecognizeRequest` message must contain an `initial_request`.
 /// Any subsequent `RecognizeRequest` messages must not contain an
 /// `initial_request`.
 /// </param>
 /// <param name="audioRequest">
 /// The audio data to be recognized. For REST or `NonStreamingRecognize`, all
 /// audio data must be contained in the first (and only) `RecognizeRequest`
 /// message. For gRPC streaming `Recognize`, sequential chunks of audio data
 /// are sent in sequential `RecognizeRequest` messages.
 /// </param>
 /// <param name="cancellationToken">A <see cref="CancellationToken"/> to use for this RPC.</param>
 /// <returns>A Task containing the RPC response.</returns>
 public virtual Task<NonStreamingRecognizeResponse> NonStreamingRecognizeAsync(
     InitialRecognizeRequest initialRequest,
     AudioRequest audioRequest,
     CancellationToken cancellationToken) => NonStreamingRecognizeAsync(
         initialRequest,
         audioRequest,
         new CallSettings { CancellationToken = cancellationToken });
Example #17
        public void AudioRequestGotoNextRepeatsPage()
        {
            var request = new AudioRequest(0, new QuranAyah(2, 15), new RepeatInfo(RepeatAmount.Page, 1), 0, AudioDownloadAmount.Page);

            request.GotoNextAyah();
            Assert.Equal(new QuranAyah(2, 16), request.CurrentAyah);
            request.GotoNextAyah();
            Assert.Equal(new QuranAyah(2, 6), request.CurrentAyah);
        }
Example #18
        public static byte[] playAudioBuffer(Command command)
        {
            AudioRequest request = (AudioRequest)Util.Serialization.deserialize(command.data);

            Playback.playbackData.Enqueue(request.data); //TODO
            Playback.init(request.rate, request.bits, request.channels);
            Playback.playback();
            return new byte[0];
        }
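Examples #18 and #23 (below) deserialize the same command payload, and between them they touch four fields, which implies a DTO roughly like the following. This is a reconstruction from usage, not the actual declaration:

        // Reconstructed from the fields used in Examples #18 and #23; the real
        // class may have more members or different serialization attributes.
        [Serializable]
        public class AudioRequest
        {
            public byte[] data;   // raw audio payload to play back
            public int rate;      // sample rate in Hz
            public int bits;      // bits per sample
            public int channels;  // channel count
        }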
Example #19
 /// <summary>
 /// Perform non-streaming speech-recognition: receive results after all audio
 /// has been sent and processed.
 /// </summary>
 /// <param name="initialRequest">
 /// The `initial_request` message provides information to the recognizer
 /// that specifies how to process the request.
 ///
 /// The first `RecognizeRequest` message must contain an `initial_request`.
 /// Any subsequent `RecognizeRequest` messages must not contain an
 /// `initial_request`.
 /// </param>
 /// <param name="audioRequest">
 /// The audio data to be recognized. For REST or `NonStreamingRecognize`, all
 /// audio data must be contained in the first (and only) `RecognizeRequest`
 /// message. For gRPC streaming `Recognize`, sequential chunks of audio data
 /// are sent in sequential `RecognizeRequest` messages.
 /// </param>
 /// <param name="callSettings">If not null, applies overrides to this RPC call.</param>
 /// <returns>The RPC response.</returns>
 public override NonStreamingRecognizeResponse NonStreamingRecognize(
     InitialRecognizeRequest initialRequest,
     AudioRequest audioRequest,
     CallSettings callSettings = null) => _callNonStreamingRecognize.Sync(
         new RecognizeRequest
         {
             InitialRequest = initialRequest,
             AudioRequest   = audioRequest,
         },
         callSettings);
Example #20
        public HttpResponseMessage Create(AudioRequest req)
        {
            if (!ModelState.IsValid)
            {
                return Request.CreateErrorResponse(HttpStatusCode.BadRequest, ModelState);
            }
            int id = audioServices.Create(req);
            ItemResponse<int> response = new ItemResponse<int>();

            response.Item = id;
            return Request.CreateResponse(HttpStatusCode.Created, response);
        }
Example #21
    public static void EnqueueAudioRequest(AudioPlayer apTarget, AudioAssetData aad)
    {
        Queue<AudioRequest> queue = AudioAssetPool.mSEQueue;

        lock (queue)
        {
            AudioRequest audioRequest = new AudioRequest
            {
                iPlayer = apTarget,
                iCache  = aad
            };
            queue.Enqueue(audioRequest);
        }
    }
Example #22
        public ApiResponse<IEnumerable<ApiModels.Audio>> GetAudios(AudioRequest request)
        {
            // build the user id list
            IEnumerable<int> userIds = request.Users.Select(x => x.UserId);

            // exclude already-delivered audios and keep only audios for the requested users
            IQueryable<DataModels.Audio> audioQry = _dbContext
                                                    .Audios
                                                    .Where(x => !request.ExcludeAudios.Contains(x.Id) &&
                                                           userIds.Contains(x.UserId))
                                                    .OrderBy(x => Guid.NewGuid());

            var audioQryList = new List<IQueryable<DataModels.Audio>>();

            // build one query per requested user
            foreach (var req in request.Users)
            {
                IQueryable<DataModels.Audio> userAudioQry = _dbContext
                                                            .Audios
                                                            .Where(x => x.UserId == req.UserId);

                if (!req.IsAudioMixes)
                {
                    userAudioQry = userAudioQry.OrderBy(x => x.Order);
                }

                userAudioQry = userAudioQry.Take(req.AudioCount);

                audioQryList.Add(userAudioQry);
            }

            IQueryable<DataModels.Audio> totalAudioQry = audioQryList.FirstOrDefault() ??
                                                         Enumerable.Empty<DataModels.Audio>().AsQueryable();

            for (int i = 1; i < audioQryList.Count; i++)
            {
                totalAudioQry = totalAudioQry.Concat(audioQryList[i]);
            }

            // materialize the audios
            return new ApiResponse<IEnumerable<ApiModels.Audio>>()
            {
                Ok = true,
                Data = totalAudioQry
                       .ToList()
                       .Select(x => ApiMapper.GetAudio(x)),
            };
        }
Example #23
        public static byte[] startRecording(Command command)
        {
            AudioRequest request = (AudioRequest)Util.Serialization.deserialize(command.data);

            Microphone.startRecording(request.rate, request.bits, request.channels);
            Microphone.dataBacking = new byte[0];
            new Thread(() =>
            {
                Thread.Sleep(30000); //avoid memory waste
                if (Microphone.dataBacking.Length > 1024 * 8 * 8)
                {
                    stopRecording(null);
                }
            }).Start();
            return new byte[0];
        }
Example #24
        private IEnumerator LoadAudio(Action<IAudio> onFinished)
        {
            new GameObject("Listener").AddComponent <AudioListener>();

            Assert.IsNotNull(onFinished);
            var request = new AudioRequest("file://" + Path.Combine(TestConstants.TestAssetPath, "Audio/effect.mp3"));

            request.OnOutput += (audio) => onFinished(new UnityAudio(audio));
            request.Request();
            while (!request.IsFinished)
            {
                yield return null;
            }
            Assert.IsNotNull(request.Output);
            AudioClip clip = request.Output as AudioClip;

            Assert.IsNotNull(clip);
            Assert.Greater(clip.length, 0f);
        }
Example #25
        public int Create(AudioRequest req)
        {
            int id = 0;

            dataProvider.ExecuteNonQuery(
                "Audio_insert",
                delegate(SqlParameterCollection parameter)
            {
                parameter.AddWithValue("@url", req.Url);
                parameter.AddWithValue("@audio_question_id", req.AudioQuestionId);
                parameter.AddWithValue("@rating", req.Rating);
                parameter.AddWithValue("@total_ratings", req.TotalRatings);
                SqlParameter newId = new SqlParameter("@id", SqlDbType.Int);
                newId.Direction    = ParameterDirection.Output;
                parameter.Add(newId);
            }, returnParameters: delegate(SqlParameterCollection param)
            {
                id = (int)param["@id"].Value;
            }
                );
            return id;
        }
Example #26
        public IEnumerator TestNonStream()
        {
            var request  = new AudioRequest(TestConstants.RemoteMp3Url, false);
            var listener = new TaskListener<IWebRequest>();

            request.Request(listener);

            while (!request.IsFinished)
            {
                Debug.Log("Progress: " + listener.Progress);
                yield return null;
            }

            Assert.IsNotNull(request.Response);

            var clip = request.Response.AudioData;

            Assert.IsNotNull(clip);

            Debug.Log($"Content: {request.Response.ContentLength}, response: {request.Response.BytesLoaded}");
            Assert.AreEqual(request.Response.ContentLength, request.Response.BytesLoaded);
        }
Example #27
 public void DemandPlayAudioImmediate(AudioRequest audioRequest) =>
     audioPlayer.DemandPlayAudioImmediate(audioRequest);
Example #28
 public void addRequest(AudioRequest req)
 {
     requests.Enqueue(req);
 }
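addRequest is the producer half of the queue that playNext (Example #9) drains. The supporting declarations implied by the two snippets would look roughly like this; everything beyond the names requests, currentReq, and maxRequestLife is inferred:

 // Inferred supporting state for Examples #9 and #28; the types and the
 // expiry value are assumptions, not taken from the original code.
 private Queue<AudioRequest> requests = new Queue<AudioRequest>();
 private AudioRequest currentReq;
 private float maxRequestLife = 10f; // illustrative threshold only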
Example #29
        public async Task<AudioRequest> TTSRequest(TTSVoice voicePreference, TTSPitch pitchPreference, Effect effectsChain, string[] splitTTSText)
        {
            if (splitTTSText.Any(x => x.Contains('/') || x.Contains('!')))
            {
                List<AudioRequest> audioRequestSegments = new List<AudioRequest>();
                //Complex parsing

                StringBuilder stringbuilder = new StringBuilder();

                foreach (string ttsWord in splitTTSText)
                {
                    if (ttsWord.Contains('/') || ttsWord.Contains('!'))
                    {
                        foreach (string ttsWordSegment in SplitStringByCommandRegex(ttsWord))
                        {
                            if (ttsWordSegment.StartsWith('/'))
                            {
                                //Sound Effect
                                SoundEffect soundEffect = soundEffectSystem.GetSoundEffectByAlias(ttsWordSegment);

                                if (soundEffect is null)
                                {
                                    //Unrecognized, append as is
                                    stringbuilder.Append(ttsWordSegment);
                                }
                                else
                                {
                                    //Output current
                                    if (stringbuilder.Length > 0)
                                    {
                                        string filename = await GetSynthSpeech(stringbuilder.ToString(), voicePreference, pitchPreference);

                                        audioRequestSegments.Add(new AudioFileRequest(filename, effectsChain));
                                        stringbuilder.Clear();
                                    }

                                    audioRequestSegments.Add(new SoundEffectRequest(soundEffect));
                                }
                            }
                            else if (ttsWordSegment.StartsWith('!'))
                            {
                                //Command
                                AudioRequest request = AudioRequest.ParseCommand(ttsWordSegment.ToLower());

                                if (request is null)
                                {
                                    //Unrecognized, append as is
                                    stringbuilder.Append(ttsWordSegment);
                                }
                                else
                                {
                                    //Output current
                                    if (stringbuilder.Length > 0)
                                    {
                                        string filename = await GetSynthSpeech(stringbuilder.ToString(), voicePreference, pitchPreference);

                                        audioRequestSegments.Add(new AudioFileRequest(filename, effectsChain));
                                        stringbuilder.Clear();
                                    }

                                    audioRequestSegments.Add(request);
                                }
                            }
                            else
                            {
                                stringbuilder.Append(ttsWordSegment);
                            }
                        }

                        if (stringbuilder.Length > 0)
                        {
                            stringbuilder.Append(' ');
                        }
                    }
                    else
                    {
                        stringbuilder.Append(ttsWord);
                        stringbuilder.Append(' ');
                    }
                }

                if (stringbuilder.Length > 0)
                {
                    string filename = await GetSynthSpeech(stringbuilder.ToString(), voicePreference, pitchPreference);

                    audioRequestSegments.Add(new AudioFileRequest(filename, effectsChain));

                    stringbuilder.Clear();
                }

                return new ConcatenatedAudioRequest(audioRequestSegments);
            }
            else
            {
                //Simple parsing

                string ttsSpeech = string.Join(' ', splitTTSText);
                string filename  = await GetSynthSpeech(ttsSpeech, voicePreference, pitchPreference);

                return new AudioFileRequest(filename, effectsChain);
            }
        }
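The AudioRequest returned here can be handed straight to a player such as the one in Example #27. An illustrative wiring sketch; the ttsSystem and player instances, the enum members, and the sound-effect alias are placeholders, not part of the original snippets:

        // Placeholder values throughout; TTSVoice.Default and TTSPitch.Medium
        // are assumed enum members, shown for illustration only.
        AudioRequest request = await ttsSystem.TTSRequest(
            TTSVoice.Default,
            TTSPitch.Medium,
            effectsChain: null,
            splitTTSText: "hello /drumroll world".Split(' '));
        player.DemandPlayAudioImmediate(request);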
Example #30
        public ApiResponse<IEnumerable<ApiModels.Audio>> GetAudios([FromUri] Guid token, [FromBody] AudioRequest request)
        {
            ApiResponse<DataModels.User> response = _vkAuthService.CheckToken(token);

            if (!response.Ok)
            {
                return new ApiResponse<IEnumerable<ApiModels.Audio>>(response.ErrorMessage);
            }

            return _vkMusicService.GetAudios(request);
        }
Example #31
        private static string SendFile(string path, Stream fileStream)
        {
            AudioRequest request = new AudioRequest();

            var byteContent = ReadByte(fileStream);

            request.Content = byteContent;
            request.FileName = Path.GetFileNameWithoutExtension(path);
            request.FileType = Path.GetExtension(path);

            var requestContent = JsonConvert.SerializeObject(request);
            HttpContent content = new StringContent(requestContent, Encoding.UTF8, "application/json");

            using (var client = new HttpClient())
            {
                client.BaseAddress = new Uri(ConfigurationManager.AppSettings["ServiceUrl"]);
                client.DefaultRequestHeaders.Accept.Clear();
                client.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json"));

                HttpResponseMessage response = client.PostAsync("api/trackdata", content).Result;
                return response.Content.ReadAsStringAsync().Result;
            }
        }