Code Example #1
File: Presentation.cs  Project: daisy/obi
        // Create a list of ManagedAudioMedia from the audio file being imported,
        // split by duration unless the duration is 0 or less.
        private List <ManagedAudioMedia> ImportAudioFromFile(string path, double durationMs)
        {
            ManagedAudioMedia media         = ImportAudioFromFile(path);
            double            totalDuration = media.Duration.AsMilliseconds;
            // if duration is 0 or less, just one phrase
            int    phrases         = durationMs <= 0.0 ? 1 : (int)Math.Floor(totalDuration / durationMs);
            double lastPhraseBegin = phrases * durationMs;
            double remaining       = totalDuration - lastPhraseBegin;

            if (remaining < durationMs / 10.0)
            {
                lastPhraseBegin -= durationMs;
            }
            else
            {
                ++phrases;
            }
            List <ManagedAudioMedia> audioMediaList = new List <ManagedAudioMedia>(phrases);

            // Split from the end of the media backwards so each cut leaves the
            // earlier split times valid; Split() truncates `media` at the given
            // time and returns the tail, which becomes the next phrase.
            for (double time = lastPhraseBegin; time > 0.0; time -= durationMs)
            {
                audioMediaList.Insert(0, media.Split(new Time((long)(time * Time.TIME_UNIT))));
            }
            // What remains of the original media is the first phrase.
            audioMediaList.Insert(0, media);
            return(audioMediaList);
        }
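
Below is a minimal, self-contained sketch of the split arithmetic used in ImportAudioFromFile, with plain doubles standing in for the Obi/urakawa media types. The class and method names (PhraseSplitSketch, ComputePhraseBegins) and the sample durations are illustrative only; the rule shown is the one above: a trailing remainder shorter than one tenth of the phrase duration is merged into the previous phrase, otherwise it becomes a phrase of its own.

using System;
using System.Collections.Generic;

static class PhraseSplitSketch
{
    // Returns the begin time (in ms) of every phrase except the first, merging a
    // trailing remainder shorter than a tenth of the phrase duration into the
    // previous phrase, as ImportAudioFromFile does above.
    public static List<double> ComputePhraseBegins(double totalDurationMs, double phraseDurationMs)
    {
        List<double> begins = new List<double>();
        if (phraseDurationMs <= 0.0) return begins;       // no splitting: a single phrase

        int phrases = (int)Math.Floor(totalDurationMs / phraseDurationMs);
        double lastPhraseBegin = phrases * phraseDurationMs;
        double remaining = totalDurationMs - lastPhraseBegin;

        if (remaining < phraseDurationMs / 10.0)
        {
            lastPhraseBegin -= phraseDurationMs;          // absorb the short tail into the previous phrase
        }
        for (double time = lastPhraseBegin; time > 0.0; time -= phraseDurationMs)
        {
            begins.Insert(0, time);
        }
        return begins;
    }

    static void Main()
    {
        // 10.2 s of audio cut into 3 s phrases: split points at 3000, 6000 and 9000 ms,
        // so the last phrase keeps the 1.2 s remainder (1200 ms is not < 300 ms).
        foreach (double t in ComputePhraseBegins(10200, 3000))
        {
            Console.WriteLine(t);
        }
    }
}
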
Code Example #2
        /// <summary>
        /// Detects phrases; accepts timing parameters in milliseconds.
        /// </summary>
        /// <param name="audio">The audio media to split at the detected phrase boundaries.</param>
        /// <param name="threshold">Silence threshold passed to the phrase detector.</param>
        /// <param name="GapLength">Minimum silence gap, in milliseconds.</param>
        /// <param name="before">Time retained before each detected phrase, in milliseconds.</param>
        /// <returns>The list of audio media produced by the splits.</returns>
        public static List <ManagedAudioMedia> Apply(ManagedAudioMedia audio, long threshold, double GapLength, double before)
        {
            AudioLibPCMFormat audioPCMFormat = new AudioLibPCMFormat(audio.AudioMediaData.PCMFormat.Data.NumberOfChannels, audio.AudioMediaData.PCMFormat.Data.SampleRate, audio.AudioMediaData.PCMFormat.Data.BitDepth);
            List <long>       timingList     = AudioLib.PhraseDetection.Apply(audio.AudioMediaData.OpenPcmInputStream(),
                                                                              audioPCMFormat,
                                                                              threshold,
                                                                              (long)GapLength * AudioLibPCMFormat.TIME_UNIT,
                                                                              (long)before * AudioLibPCMFormat.TIME_UNIT);

            List <ManagedAudioMedia> detectedAudioMediaList = new List <ManagedAudioMedia>();

            //Console.WriteLine("returned list count " + timingList.Count);
            if (timingList == null)
            {
                detectedAudioMediaList.Add(audio);
            }
            else
            {
                for (int i = timingList.Count - 1; i >= 0; i--)
                {
                    //Console.WriteLine("splitting " + timingList[i] + " asset time " + audio.Duration.AsLocalUnits);
                    ManagedAudioMedia splitAsset = audio.Split(new Time(Convert.ToInt64(timingList[i])));
                    //ManagedAsset.MediaData.getMediaDataManager().addMediaData(splitAsset.MediaData);
                    detectedAudioMediaList.Insert(0, splitAsset);
                    //MessageBox.Show(Convert.ToDouble(alPhrases[i]).ToString());
                }
                if (RetainSilenceInBeginningOfPhrase && audio.Duration.AsMilliseconds > 200)
                {
                    detectedAudioMediaList.Insert(0, audio);
                }
            }

            return(detectedAudioMediaList);
        }
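
The loop above walks timingList from the end because Split() truncates the receiving media at the given time and returns the tail, so cutting from the last boundary backwards keeps every earlier timing value valid. Below is a minimal sketch of that pattern; the Segment class is a hypothetical stand-in for ManagedAudioMedia, and the times used are illustrative only.

using System;
using System.Collections.Generic;

// Hypothetical stand-in for ManagedAudioMedia: Split(time) truncates this
// segment at the given time and returns the tail as a new segment.
class Segment
{
    public long Begin;
    public long End;
    public Segment(long begin, long end) { Begin = begin; End = end; }

    public Segment Split(long time)
    {
        Segment tail = new Segment(time, End);
        End = time;
        return tail;
    }
}

static class ReverseSplitSketch
{
    static void Main()
    {
        Segment audio = new Segment(0, 10000);                    // 10 s of "audio"
        List<long> marks = new List<long> { 2500, 5200, 8000 };   // detected phrase boundaries

        List<Segment> phrases = new List<Segment>();
        for (int i = marks.Count - 1; i >= 0; i--)
        {
            // Each Split removes only the audio after marks[i], so earlier marks stay valid.
            phrases.Insert(0, audio.Split(marks[i]));
        }
        phrases.Insert(0, audio);                                 // what remains is the first phrase

        foreach (Segment s in phrases)
        {
            Console.WriteLine(s.Begin + " - " + s.End);           // 0-2500, 2500-5200, 5200-8000, 8000-10000
        }
    }
}
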
Code Example #3
        /// <summary>
        /// Stop recording or monitoring.
        /// </summary>
        public void Stop()
        {
            bool wasRecording = mRecorder.CurrentState == AudioLib.AudioRecorder.State.Recording;

            if (wasRecording)
            {
                ApplyPhraseDetectionOnTheFly(null);               //@onTheFly: before stopping, the last chunk of the memory stream is passed into phrase detection
            }
            if (mRecorder.CurrentState == AudioLib.AudioRecorder.State.Monitoring ||
                wasRecording)
            {
                if (wasRecording && mPhraseMarks.Count > 0)
                {
                    FinishedPhrase();
                }
                mRecorder.StopRecording();
                if (wasRecording)
                {
                    // Drop on-the-fly marks that fall within 250 ms of the previous
                    // mark (or of the recording start, for the first mark); after a
                    // removal, i++ cancels the loop decrement so the shifted list is
                    // re-checked at the same index.
                    for (int i = m_PhraseMarksOnTheFly.Count - 2; i >= 0; --i)
                    {
                        if (i != 0 && i < m_PhraseMarksOnTheFly.Count &&
                            (m_PhraseMarksOnTheFly[i] - m_PhraseMarksOnTheFly[i - 1]) <= 250)
                        {
                            m_PhraseMarksOnTheFly.Remove(m_PhraseMarksOnTheFly[i]);
                            i++;
                        }
                        else if (i == 0 && i < m_PhraseMarksOnTheFly.Count &&
                                 m_PhraseMarksOnTheFly[i] <= 250)
                        {
                            m_PhraseMarksOnTheFly.Remove(m_PhraseMarksOnTheFly[i]);
                            i++;
                        }
                    }

                    // Split the session media at the phrase marks, working from the
                    // last mark backwards so earlier marks stay valid.
                    for (int i = mPhraseMarks.Count - 2; i >= 0; --i)
                    {
                        if (mPhraseMarks[i] < mSessionMedia.Duration.AsMilliseconds && mSessionMedia.Duration.AsMilliseconds > 200)
                        {
                            ManagedAudioMedia split = mSessionMedia.Split(new Time(Convert.ToInt64(mPhraseMarks[i] * Time.TIME_UNIT)));
                            mAudioList.Insert(mSessionOffset, split);
                        }
                        else
                        {
                            MessageBox.Show(Localizer.Message("RecordingSession_SplitError"), Localizer.Message("Caption_Warning"));
                        }
                    }
                    // The first asset is what remains of the session asset
                    mAudioList.Insert(mSessionOffset, mSessionMedia);
                }
                mRecordingUpdateTimer.Enabled = false;
            }
        }
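
The first clean-up loop in Stop() drops on-the-fly phrase marks that fall within 250 ms of the previous mark (or of the recording start). The standalone helper below is a sketch of the same 250 ms rule, assuming it is acceptable to filter into a new list rather than mutate the list while iterating as the method above does; the class and method names and the sample values are illustrative only.

using System;
using System.Collections.Generic;

static class PhraseMarkCleanupSketch
{
    // Keep a mark only if it lies more than minGapMs after the last kept mark
    // (the recording start, time 0, serves as the first reference point).
    public static List<double> RemoveCloseMarks(List<double> marks, double minGapMs)
    {
        List<double> cleaned = new List<double>();
        double previous = 0.0;
        foreach (double mark in marks)
        {
            if (mark - previous > minGapMs)
            {
                cleaned.Add(mark);
                previous = mark;
            }
        }
        return cleaned;
    }

    static void Main()
    {
        List<double> marks = new List<double> { 100, 600, 700, 3000 };
        // 100 is within 250 ms of the start and 700 is within 250 ms of 600,
        // so only 600 and 3000 are kept.
        foreach (double m in RemoveCloseMarks(marks, 250))
        {
            Console.WriteLine(m);
        }
    }
}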