Пример #1
0
        /// <summary>
        /// Detects phrases in the given audio; accepts timing parameters in milliseconds.
        /// </summary>
        /// <param name="audio">Audio asset to split into phrases.</param>
        /// <param name="threshold">Amplitude threshold below which samples count as silence.</param>
        /// <param name="GapLength">Minimum silence gap (milliseconds) separating phrases.</param>
        /// <param name="before">Leading time (milliseconds) retained before each detected phrase.</param>
        /// <returns>The detected phrases; the original media alone when nothing was detected.</returns>
        public static List <ManagedAudioMedia> Apply(ManagedAudioMedia audio, long threshold, double GapLength, double before)
        {
            AudioLibPCMFormat audioPCMFormat = new AudioLibPCMFormat(audio.AudioMediaData.PCMFormat.Data.NumberOfChannels, audio.AudioMediaData.PCMFormat.Data.SampleRate, audio.AudioMediaData.PCMFormat.Data.BitDepth);

            List <long> timingList;
            Stream      pcmStream = null;
            try
            {
                pcmStream = audio.AudioMediaData.OpenPcmInputStream();
                // BUGFIX: multiply before casting so fractional milliseconds survive.
                // The original "(long)GapLength * TIME_UNIT" truncated the fraction first.
                timingList = AudioLib.PhraseDetection.Apply(pcmStream,
                                                            audioPCMFormat,
                                                            threshold,
                                                            (long)(GapLength * AudioLibPCMFormat.TIME_UNIT),
                                                            (long)(before * AudioLibPCMFormat.TIME_UNIT));
            }
            finally
            {
                // BUGFIX: the PCM input stream was never closed (resource leak);
                // mirrors the try/finally pattern of GetErrorSilencePositionInAsset.
                if (pcmStream != null)
                {
                    pcmStream.Close();
                }
            }

            List <ManagedAudioMedia> detectedAudioMediaList = new List <ManagedAudioMedia>();

            if (timingList == null)
            {
                // No phrase boundaries found: return the asset unchanged.
                detectedAudioMediaList.Add(audio);
            }
            else
            {
                // Split from the last mark backwards so earlier marks stay valid
                // while the asset shrinks; prepend to preserve playback order.
                for (int i = timingList.Count - 1; i >= 0; i--)
                {
                    ManagedAudioMedia splitAsset = audio.Split(new Time(Convert.ToInt64(timingList[i])));
                    detectedAudioMediaList.Insert(0, splitAsset);
                }
                // NOTE(review): the leading remainder of "audio" is kept only when
                // RetainSilenceInBeginningOfPhrase is set and it exceeds 200 ms;
                // otherwise it is dropped — confirm this is intentional.
                if (RetainSilenceInBeginningOfPhrase && audio.Duration.AsMilliseconds > 200)
                {
                    detectedAudioMediaList.Insert(0, audio);
                }
            }

            return(detectedAudioMediaList);
        }
Пример #2
0
        /// <summary>
        /// Computes the byte offset of <paramref name="targetNode2"/>'s audio within the
        /// play stream; when the node carries no audio of its own, anchors to the end of
        /// the previous sibling's audio instead.
        /// </summary>
        /// <param name="targetNode1">Ancestor that bounds the previous-sibling search.</param>
        /// <param name="targetNode2">Node whose audio byte offset is wanted (may be null).</param>
        /// <returns>The byte offset, or 0 when it cannot be determined.</returns>
        private long computeByteOffset(TreeNode targetNode1, TreeNode targetNode2)
        {
            long byteOffset = 0;

            if (State.Audio.PlayStreamMarkers != null && targetNode2 != null)
            {
                ManagedAudioMedia mediaInPlayMarkers = targetNode2.GetManagedAudioMedia();

                if (mediaInPlayMarkers == null)
                {
                    // No audio on the node itself: use the previous sibling that has
                    // managed audio, but only if it lies inside targetNode1's subtree.
                    TreeNode prev = targetNode2.GetPreviousSiblingWithManagedAudio();
                    if (prev != null && prev.IsDescendantOf(targetNode1))
                    {
                        ManagedAudioMedia prevAudio = prev.GetManagedAudioMedia();
                        DebugFix.Assert(prevAudio != null);

                        byteOffset = getByteOffset(prev, prevAudio);

                        if (prevAudio != null)
                        {
                            // Advance past the full previous clip so the offset points
                            // just after it (i.e. where targetNode2's audio would begin).
                            byteOffset += prevAudio.AudioMediaData.PCMFormat.Data.ConvertTimeToBytes(prevAudio.AudioMediaData.AudioDuration.AsLocalUnits);
                        }
                    }
                }
                else
                {
                    byteOffset = getByteOffset(targetNode2, mediaInPlayMarkers);
                }
            }

            return(byteOffset);
        }
Пример #3
0
            /// <summary>
            /// Plays the audio of the current tree node. When the node has no audio
            /// and continuation is enabled, walks forward through the tree until a
            /// node with audio is found; fires the play-ended event when the
            /// traversal runs out of nodes.
            /// </summary>
            public void play()
            {
                // Iterative equivalent of the original tail-recursive traversal.
                while (true)
                {
                    if (mCurrentTreeNode == null)
                    {
                        mPlaybacAudiokDevice.FireTreeNodePlayEnded(mElapsedTime);
                        return;
                    }

                    ManagedAudioMedia audioMedia   = null;
                    ChannelsProperty  channelsProp = mCurrentTreeNode.GetProperty(typeof(ChannelsProperty)) as ChannelsProperty;
                    if (channelsProp != null)
                    {
                        audioMedia = channelsProp.GetMedia(mAudioChannel) as ManagedAudioMedia;
                    }

                    if (audioMedia != null)
                    {
                        mPlaybacAudiokDevice.play(audioMedia.AudioMediaData);
                        return;
                    }

                    // This node has no audio: stop unless we are set to continue.
                    if (!mContinueAfterTreeNode)
                    {
                        return;
                    }
                    mCurrentTreeNode = mTreeNodeNavigator.GetNext(mCurrentTreeNode);
                }
            }
Пример #4
0
        /// <summary>
        /// Removes the selected audio range from the tree node's audio media data.
        /// When the selection spans the whole clip, the media is detached from its
        /// channel instead of being truncated.
        /// </summary>
        public override void Execute()
        {
            ManagedAudioMedia audioMedia = m_SelectionData.m_TreeNode.GetManagedAudioMedia();
            AudioMediaData    mediaData  = audioMedia.AudioMediaData;

            // A stream mark of -1 means "unset"; fall back to time zero.
            Time selectionBegin;
            if (SelectionData.m_LocalStreamLeftMark == -1)
            {
                selectionBegin = Time.Zero;
            }
            else
            {
                selectionBegin = new Time(mediaData.PCMFormat.Data.ConvertBytesToTime(SelectionData.m_LocalStreamLeftMark));
            }

            Time selectionEnd;
            if (SelectionData.m_LocalStreamRightMark == -1)
            {
                selectionEnd = Time.Zero;
            }
            else
            {
                selectionEnd = new Time(mediaData.PCMFormat.Data.ConvertBytesToTime(SelectionData.m_LocalStreamRightMark));
            }

            if (SelectionData.TimeBeginEndEqualClipDuration(selectionBegin, selectionEnd, mediaData))
            {
                // Entire clip selected: remove the media from its channel altogether.
                ChannelsProperty chProp = m_SelectionData.m_TreeNode.GetChannelsProperty();
                chProp.SetMedia(ChannelOfOriginalMedia, null);
            }
            else if (SelectionData.TimeBeginEndEqualClipDuration(new Time(), selectionEnd, mediaData))
            {
                // Selection runs to the end of the clip: truncate from selectionBegin.
                mediaData.RemovePcmData(selectionBegin);
            }
            else
            {
                mediaData.RemovePcmData(selectionBegin, selectionEnd);
            }
        }
Пример #5
0
 /// <summary>
 /// Start recording. Stop monitoring before starting recording.
 /// Resets all per-session state, creates a fresh WAV asset and session media,
 /// then starts the recorder and the update timer.
 /// </summary>
 public void Record()
 {
     if (mRecorder.CurrentState == AudioLib.AudioRecorder.State.Stopped)
     {
         // Reset per-session bookkeeping.
         mSessionOffset = mAudioList.Count;
         mPhraseMarks   = new List <double>();
         mSectionMarks  = new List <int>();
         mDeletedTime.Clear();
         m_PhraseIndexesToDelete.Clear();
         m_PhDetectorBytesReceivedFromRecorder = 0;
         m_PhDetectorEstimatedBytesRecorded    = 0;
         m_MemStreamArray = null;

         // Fresh asset and managed media for this recording session.
         AudioMediaData asset =
             (AudioMediaData)mPresentation.MediaDataFactory.Create <WavAudioMediaData>();
         mSessionMedia           = (ManagedAudioMedia)mPresentation.MediaFactory.CreateManagedAudioMedia();
         mSessionMedia.MediaData = asset;

         // BUGFIX: unsubscribe first so repeated Record() calls do not stack
         // duplicate AudioRecordingFinished handlers (each bare += without a
         // matching -= made the handler fire once per past session).
         mRecorder.AudioRecordingFinished -= OnAudioRecordingFinished;
         mRecorder.AudioRecordingFinished += OnAudioRecordingFinished;
         mRecorder.StartRecording(asset.PCMFormat.Data);

         // Notify listeners that the first phrase of the session is starting.
         if (StartingPhrase != null)
         {
             StartingPhrase(this, new PhraseEventArgs(mSessionMedia, mSessionOffset, 0.0));
         }
         mRecordingUpdateTimer.Enabled = true;
     }
 }
        /// <summary>
        /// Attaches a copy of the managed audio media to the tree node's audio channel.
        /// </summary>
        public override void Execute()
        {
            ChannelsProperty channelsProperty = m_TreeNode.GetOrCreateChannelsProperty();
            AudioChannel     channel          = Presentation.ChannelsManager.GetOrCreateAudioChannel();

            // A copy is set so the original media is left untouched.
            channelsProperty.SetMedia(channel, ManagedAudioMedia.Copy());
        }
Пример #7
0
        /// <summary>
        /// Create a new phrase node from an audio media.
        /// </summary>
        /// <param name="audio">The audio to attach to the new phrase node.</param>
        /// <returns>The newly created phrase node carrying the given audio.</returns>
        public PhraseNode CreatePhraseNode(ManagedAudioMedia audio)
        {
            PhraseNode phrase = CreatePhraseNode();
            phrase.Audio = audio;
            return phrase;
        }
Пример #8
0
        /*
         *
         * public static readonly double DEFAULT_GAP = 300.0;              // default gap for phrase detection
         * public static readonly double DEFAULT_LEADING_SILENCE = 50.0;  // default leading silence
         * public static readonly double DEFAULT_THRESHOLD = 280.0;
         *
         * private static  AudioMediaData m_AudioAsset;
         * private static  readonly int m_FrequencyDivisor = 2000; // frequency inin hz to observe.
         *
         * // Detecs the maximum size of noise level in a silent sample file
         * public static long GetSilenceAmplitude (ManagedAudioMedia RefAsset)
         * {
         *  m_AudioAsset = RefAsset.AudioMediaData;
         *  BinaryReader brRef = new BinaryReader(RefAsset.AudioMediaData.OpenPcmInputStream ());
         *
         *  // creates counter of size equal to clip size
         *  long lSize = RefAsset.AudioMediaData.PCMFormat.Data.ConvertTimeToBytes(RefAsset.AudioMediaData.AudioDuration.AsLocalUnits);
         *
         *  // Block size of audio chunck which is least count of detection
         *  int Block;
         *
         *  // determine the Block  size
         *  if (RefAsset.AudioMediaData.PCMFormat.Data.SampleRate> 22500)
         *  {
         *      Block = 192;
         *  }
         *  else
         *  {
         *      Block = 96;
         *  }
         *
         *  //set reading position after the header
         *
         *  long lLargest = 0;
         *  long lBlockSum;
         *
         *  // adjust the  lSize to avoid reading beyond file length
         *  lSize = ((lSize / Block) * Block) - 4;
         *
         *  // Experiment starts here
         *  double BlockTime = 25;
         *
         *  long Iterations = Convert.ToInt64(RefAsset.AudioMediaData.AudioDuration.AsMilliseconds/ BlockTime);
         *  long SampleCount = Convert.ToInt64((int)RefAsset.AudioMediaData.PCMFormat.Data.SampleRate/ (1000 / BlockTime));
         *
         *  long lCurrentSum = 0;
         *  long lSumPrev = 0;
         *
         *
         *  for (long j = 0; j < Iterations - 1; j++)
         *  {
         *      //  BlockSum is function to retrieve average amplitude in  Block
         *      //lCurrentSum  = GetAverageSampleValue(brRef, SampleCount)  ;
         *      lCurrentSum =  GetAvragePeakValue(brRef, SampleCount);
         *      lBlockSum = Convert.ToInt64((lCurrentSum + lSumPrev) / 2);
         *      lSumPrev = lCurrentSum;
         *
         *      if (lLargest < lBlockSum)
         *      {
         *          lLargest = lBlockSum;
         *      }
         *  }
         *  long SilVal = Convert.ToInt64(lLargest);
         *
         *  // experiment ends here
         *
         *  brRef.Close();
         *
         *  return SilVal;
         *
         * }
         *
         *
         * public static List<ManagedAudioMedia> Apply(ManagedAudioMedia audio, long threshold, double GapLength, double before)
         * {
         *  long lGapLength = ObiCalculationFunctions.ConvertTimeToByte(GapLength, (int)audio.AudioMediaData.PCMFormat.Data.SampleRate, audio.AudioMediaData.PCMFormat.Data.BlockAlign);
         *  long lBefore = ObiCalculationFunctions.ConvertTimeToByte(before, (int)audio.AudioMediaData.PCMFormat.Data.SampleRate, audio.AudioMediaData.PCMFormat.Data.BlockAlign);
         *  return ApplyPhraseDetection(audio, threshold, lGapLength, lBefore);
         * }
         *
         *
         * private static  List<ManagedAudioMedia> ApplyPhraseDetection(ManagedAudioMedia ManagedAsset, long threshold, long GapLength, long before)
         * {
         *  m_AudioAsset = ManagedAsset.AudioMediaData;
         *  GapLength = ObiCalculationFunctions.AdaptToFrame(GapLength, m_AudioAsset.PCMFormat.Data.BlockAlign);
         *  before = ObiCalculationFunctions.AdaptToFrame(before , m_AudioAsset.PCMFormat.Data.BlockAlign);
         *
         *  int Block = 0;
         *
         *  // determine the Block  size
         *  if ( m_AudioAsset.PCMFormat.Data.SampleRate> 22500)
         *  {
         *      Block = 192;
         *  }
         *  else
         *  {
         *      Block = 96;
         *  }
         *
         *
         *  // count chunck of silence which trigger phrase detection
         *  long lCountSilGap = ( 2 * GapLength ) / Block; // multiplied by two because j counter is incremented by 2
         *  long lSum = 0;
         *  list    <long>  alPhrases = new list    <long>   ();
         *  long lCheck = 0;
         *
         *  // flags to indicate phrases and silence
         *  bool boolPhraseDetected = false;
         *  bool boolBeginPhraseDetected = false;
         *
         *
         *  double BlockTime = 25; // milliseconds
         *  double BeforePhraseInMS = ObiCalculationFunctions.ConvertByteToTime(before , (int) m_AudioAsset.PCMFormat.Data.SampleRate, m_AudioAsset.PCMFormat.Data.BlockAlign);
         *
         *  lCountSilGap = Convert.ToInt64(ObiCalculationFunctions.ConvertByteToTime(GapLength , (int) m_AudioAsset.PCMFormat.Data.SampleRate, m_AudioAsset.PCMFormat.Data.BlockAlign) / BlockTime);
         *
         *  long Iterations = Convert.ToInt64(m_AudioAsset.AudioDuration.AsMilliseconds/ BlockTime);
         *  long SampleCount = Convert.ToInt64(m_AudioAsset.PCMFormat.Data.SampleRate/ (1000 / BlockTime));
         *  double errorCompensatingCoefficient  = GetErrorCompensatingConstant ( SampleCount );
         *  long SpeechBlockCount = 0;
         *
         *  long lCurrentSum = 0;
         *  long lSumPrev = 0;
         *
         *  BinaryReader br = new BinaryReader( m_AudioAsset.OpenPcmInputStream());
         *
         *  bool PhraseNominated = false;
         *  long SpeechChunkSize = 5;
         *  long Counter = 0;
         *  for (long j = 0; j < Iterations - 1; j++)
         *  {
         *      // decodes audio chunck inside block
         *      //lCurrentSum = GetAverageSampleValue(br, SampleCount);
         *      lCurrentSum = GetAvragePeakValue(br, SampleCount);
         *      lSum = (lCurrentSum + lSumPrev) / 2;
         *      lSumPrev = lCurrentSum;
         *
         *      // conditional triggering of phrase detection
         *      if (lSum < threshold )
         *      {
         *          lCheck++;
         *
         *          SpeechBlockCount = 0;
         *      }
         *      else
         *      {
         *          if (j < lCountSilGap && boolBeginPhraseDetected == false)
         *          {
         *              boolBeginPhraseDetected = true;
         *              alPhrases.Add(Convert.ToInt64(0));
         *              boolPhraseDetected = true;
         *              lCheck = 0;
         *          }
         *
         *
         *          // checks the length of silence
         *          if (lCheck > lCountSilGap)
         *          {
         *              PhraseNominated = true;
         *              lCheck = 0;
         *          }
         *          if (PhraseNominated)
         *              SpeechBlockCount++;
         *
         *          if (SpeechBlockCount >= SpeechChunkSize && Counter >= 4)
         *          {
         *              //sets the detection flag
         *              boolPhraseDetected = true;
         *
         *              // changing following time calculations to reduce concatination of rounding off errors
         *              //alPhrases.Add(((j - Counter) * BlockTime) - BeforePhraseInMS);
         *              double phraseMarkTime = ObiCalculationFunctions.ConvertByteToTime (Convert.ToInt64(errorCompensatingCoefficient  * (j - Counter)) * SampleCount * m_AudioAsset.PCMFormat.Data.BlockAlign,
         *                  (int) m_AudioAsset.PCMFormat.Data.SampleRate,
         *                  (int) m_AudioAsset.PCMFormat.Data.BlockAlign);
         *              alPhrases.Add ( phraseMarkTime - BeforePhraseInMS );
         *
         *              SpeechBlockCount = 0;
         *              Counter = 0;
         *              PhraseNominated = false;
         *          }
         *          lCheck = 0;
         *      }
         *      if (PhraseNominated)
         *          Counter++;
         *      // end outer For
         *  }
         *  br.Close();
         *
         *  List<ManagedAudioMedia> ReturnList = new List<ManagedAudioMedia>();
         *
         *  if (boolPhraseDetected == false)
         *  {
         *      ReturnList.Add( ManagedAsset );
         *  }
         *  else
         *  {
         *      for (int i = alPhrases.Count-1   ; i >= 0 ; i-- )
         *      {
         *          ManagedAudioMedia splitAsset = ManagedAsset.Split(new Time(Convert.ToInt64(alPhrases[i]) * Time.TIME_UNIT));
         *                              //ManagedAsset.MediaData.getMediaDataManager().addMediaData(splitAsset.MediaData);
         *          ReturnList.Insert(0, splitAsset);
         *          //MessageBox.Show(Convert.ToDouble(alPhrases[i]).ToString());
         *      }
         *
         *  }
         *
         *
         *
         *  return ReturnList ;
         * }
         *
         *
         * private static int GetAverageSampleValue(BinaryReader br, long SampleLength)
         * {
         *  long AvgSampleValue = 0;
         *
         *  for (long i = 0; i < SampleLength; i++)
         *  {
         *      AvgSampleValue = AvgSampleValue + GetSampleValue(br);
         *  }
         *  AvgSampleValue = AvgSampleValue / SampleLength;
         *
         *  return Convert.ToInt32(AvgSampleValue);
         * }
         *
         *
         * private static  int GetAvragePeakValue(BinaryReader br, long SampleCount)
         * {
         *          // average value to return
         *  long AverageValue = 0;
         *
         *  // number of samples from which peak is selected
         *              long PeakCount  = Convert.ToInt64 (  m_AudioAsset.PCMFormat.Data.SampleRate/ m_FrequencyDivisor) ;
         *
         *  // number of blocks iterated
         *  long AverageCount = Convert.ToInt64 ( SampleCount / PeakCount ) ;
         *
         *      for (long i = 0; i < AverageCount; i++)
         *      {
         *          AverageValue = AverageValue + GetPeak
         *              (br, PeakCount);
         *      }
         *
         *  AverageValue = AverageValue / AverageCount;
         *
         *  return Convert.ToInt32 (  AverageValue  ) ;
         *
         * }
         *
         *
         * private static  int GetPeak(BinaryReader br , long  UBound )
         * {
         *  int Peak = 0;
         *
         *  int CurrentValue = 0 ;
         *  for (long i = 0; i < UBound; i++)
         *  {
         *      CurrentValue = GetSampleValue (br)  ;
         *      if (CurrentValue > Peak)
         *          Peak = CurrentValue;
         *  }
         *  return Peak ;
         * }
         *
         *
         * private static   int GetSampleValue(BinaryReader br)
         * {
         *  int SampleValue1 =  0 ;
         * int SampleValue2 = 0 ;
         *
         *
         *                      SampleValue1 =  br.ReadByte();
         *                          if ( m_AudioAsset.PCMFormat.Data.BitDepth == 16 )
         *  {
         *          SampleValue1 = SampleValue1 + (br.ReadByte() * 256);
         *
         *          if (SampleValue1 > 32768)
         *              SampleValue1 = SampleValue1 - 65536;
         *
         * }
         * if ( m_AudioAsset.PCMFormat.Data.NumberOfChannels == 2)
         * {
         *  SampleValue2 = br.ReadByte();
         *  if ( m_AudioAsset.PCMFormat.Data.BitDepth== 16)
         *  {
         *      SampleValue2 = SampleValue2 + (br.ReadByte() * 256);
         *
         *      if (SampleValue2 > 32768)
         *          SampleValue2 = SampleValue2 - 65536;
         *
         *  }
         *  SampleValue1 = (SampleValue1 + SampleValue2) / 2;
         * }
         *
         *
         *  return SampleValue1 ;
         *
         * }
         *
         * /// <summary>
         * /// computes multiplying factor to compensate errors due to rounding off in average peak calculation functions
         * /// </summary>
         * /// <param name="SampleCount"></param>
         * /// <returns></returns>
         * private static double GetErrorCompensatingConstant ( long SampleCount )
         *  {
         *  // number of samples from which peak is selected
         *  long PeakCount = Convert.ToInt64 ( m_AudioAsset.PCMFormat.Data.SampleRate/ m_FrequencyDivisor );
         *
         *  // number of blocks iterated
         *  long AverageCount = Convert.ToInt64 ( SampleCount / PeakCount );
         *
         *  double roundedOffSampleCount = AverageCount * PeakCount;
         *
         *  double errorCoeff = roundedOffSampleCount  / SampleCount;
         *
         *  if (errorCoeff < 0.90 || errorCoeff  > 1.1)
         *      {
         *      errorCoeff  = 1.0;
         *      }
         *  return errorCoeff;
         *  }
         */

        //Diagnosis code for detecting silence gaps
        /// <summary>
        /// Scans the asset's PCM data for erroneous silence gaps and returns their
        /// positions as times in milliseconds.
        /// </summary>
        /// <param name="RefAsset">Audio asset to scan.</param>
        /// <returns>Error positions in milliseconds; empty when none are found.</returns>
        public static List <double> GetErrorSilencePositionInAsset(ManagedAudioMedia RefAsset)
        {
            AudioLibPCMFormat audioPCMFormat = new AudioLibPCMFormat(RefAsset.AudioMediaData.PCMFormat.Data.NumberOfChannels, RefAsset.AudioMediaData.PCMFormat.Data.SampleRate, RefAsset.AudioMediaData.PCMFormat.Data.BitDepth);

            List <long> errorPositionsBytesList = null;
            Stream      stream = null;

            try
            {
                stream = RefAsset.AudioMediaData.OpenPcmInputStream();
                errorPositionsBytesList = GetErrorSilencePosition(audioPCMFormat, stream);
            }
            finally
            {
                if (stream != null)
                {
                    stream.Close();
                }
            }
            List <double> errorPositionTimesList = new List <double>();

            // Robustness: tolerate a null result from GetErrorSilencePosition instead
            // of throwing a NullReferenceException below.
            if (errorPositionsBytesList == null)
            {
                return(errorPositionTimesList);
            }

            foreach (long positionBytes in errorPositionsBytesList)
            {
                // BUGFIX: divide in floating point. The original computed
                // (positionBytes / ByteRate) in integer arithmetic, truncating to
                // whole seconds before scaling to milliseconds.
                double positionTime = (double)positionBytes * 1000 / audioPCMFormat.ByteRate;
                errorPositionTimesList.Add(positionTime);
                Console.WriteLine("Position time: " + positionTime);
            }
            return(errorPositionTimesList);
        }
Пример #9
0
        /// <summary>
        /// True when this node carries WAV audio itself, or inherits audio
        /// from an ancestor with managed audio.
        /// </summary>
        public bool HasOrInheritsAudio()
        {
            ManagedAudioMedia ownAudio = GetManagedAudioMedia();

            if (ownAudio != null && ownAudio.IsWavAudioMediaData)
            {
                return true;
            }

#if ENABLE_SEQ_MEDIA
            // Legacy sequence-media path.
            SequenceMedia seqManagedAudioMedia = GetManagedAudioSequenceMedia();
            if (seqManagedAudioMedia != null)
            {
                return true;
            }
#endif //ENABLE_SEQ_MEDIA

            // No local audio: the node "inherits" audio when some ancestor has it.
            return GetFirstAncestorWithManagedAudio() != null;
        }
Пример #10
0
 /// <summary>
 /// Signal a change in the audio for this phrase (used during recording).
 /// </summary>
 /// <param name="sender">Forwarded as the sender of the NodeAudioChanged event.</param>
 /// <param name="media">Not used by this implementation.</param>
 public void SignalAudioChanged(object sender, ManagedAudioMedia media)
 {
     // Snapshot the delegate so the null check and the invocation are consistent.
     var handler = NodeAudioChanged;
     if (handler != null)
     {
         handler(sender, new NodeEventArgs <PhraseNode>(this));
     }
 }
Пример #11
0
        // Create a list of ManagedAudioMedia from audio file being imported
        // Split by duration, unless 0 or less.
        /// <summary>
        /// Imports an audio file and splits it into phrases of roughly durationMs each.
        /// </summary>
        /// <param name="path">Full path of the audio file to import.</param>
        /// <param name="durationMs">Target phrase duration in milliseconds; 0 or less keeps one phrase.</param>
        /// <returns>The phrases in playback order; always contains at least one item.</returns>
        private List <ManagedAudioMedia> ImportAudioFromFile(string path, double durationMs)
        {
            ManagedAudioMedia media         = ImportAudioFromFile(path);
            double            totalDuration = media.Duration.AsMilliseconds;
            // if duration is 0 or less, just one phrase
            int    phrases         = durationMs <= 0.0 ? 1 : (int)Math.Floor(totalDuration / durationMs);
            double lastPhraseBegin = phrases * durationMs;
            double remaining       = totalDuration - lastPhraseBegin;

            // A short leftover (under 10% of a phrase) is folded into the previous
            // phrase; otherwise it becomes an extra phrase of its own.
            if (remaining < durationMs / 10.0)
            {
                lastPhraseBegin -= durationMs;
            }
            else
            {
                ++phrases;
            }
            List <ManagedAudioMedia> audioMediaList = new List <ManagedAudioMedia>(phrases);

            // Split from the last boundary backwards so earlier split times remain
            // valid as "media" shrinks; each split result is prepended to keep order.
            for (double time = lastPhraseBegin; time > 0.0; time -= durationMs)
            {
                audioMediaList.Insert(0, media.Split(new Time((long)(time * Time.TIME_UNIT))));
            }
            audioMediaList.Insert(0, media);
            return(audioMediaList);
        }
Пример #12
0
        /// <summary>
        /// Import an audio file to the project by creating a new node with audio from the file.
        /// The node is created but not actually added but a command is returned.
        /// </summary>
        /// <param name="path">Full path to the audio file to import.</param>
        /// <param name="contextNode">The context node before which to import the audio file.
        /// If null, add at the end.</param>
        /// <returns>The command for adding the node.</returns>
        public Commands.AddTreeNode ImportAudioFileCommand(string path, TreeNode contextNode)
        {
            // Read just the RIFF header to learn the file's PCM format.
            Stream      fileStream = File.OpenRead(path);
            PCMDataInfo pcmInfo    = PCMDataInfo.parseRiffWaveHeader(fileStream);

            fileStream.Close();

            // Make the project's default PCM format match the imported file.
            getPresentation().getMediaDataManager().getDefaultPCMFormat().setBitDepth(pcmInfo.getBitDepth());
            getPresentation().getMediaDataManager().getDefaultPCMFormat().setNumberOfChannels(pcmInfo.getNumberOfChannels());
            getPresentation().getMediaDataManager().getDefaultPCMFormat().setSampleRate(pcmInfo.getSampleRate());

            AudioMediaData audioData = (AudioMediaData)
                                       getPresentation().getMediaDataFactory().createMediaData(typeof(AudioMediaData));
            audioData.appendAudioDataFromRiffWave(path);

            ManagedAudioMedia audioMedia = (ManagedAudioMedia)getPresentation().getMediaFactory().createAudioMedia();
            audioMedia.setMediaData(audioData);

            // Wrap the media in a channels property attached to a fresh tree node.
            Channel          audioChannel = GetSingleChannelByName(AUDIO_CHANNEL_NAME);
            ChannelsProperty channelsProp = getPresentation().getPropertyFactory().createChannelsProperty();
            channelsProp.setMedia(audioChannel, audioMedia);

            TreeNode newNode = getPresentation().getTreeNodeFactory().createNode();
            newNode.setProperty(channelsProp);

            // Insert before contextNode, or append at the end of the root when null.
            TreeNode rootNode = getPresentation().getRootNode();
            return new Commands.AddTreeNode(newNode, rootNode,
                                            contextNode == null ? rootNode.getChildCount() : contextNode.getParent().indexOf(contextNode));
        }
Пример #13
0
        // Access a channel which we know exist and is the only channel by this name.
        //sdk2
        //internal Channel GetSingleChannelByName(string name)
        //{
        //    List<Channel> channels = getChannelsManager().getListOfChannels(name);
        //    if (channels.Count == 0) throw new Exception(String.Format("No channel named \"{0}\"", name));
        //    if (channels.Count > 1) throw new Exception(String.Format("Expected 1 channel for {0}, got {1}.",
        //        name, channels.Count));
        //    return channels[0];
        //}

        // Create a media object from a sound file.
        /// <summary>
        /// Builds a ManagedAudioMedia from the given sound file. A file that already
        /// lives inside the data provider directory is adopted by moving it; any
        /// other file has its PCM payload appended via its RIFF header.
        /// </summary>
        /// <param name="path">Full path of the sound file.</param>
        /// <returns>The managed audio media wrapping the imported data.</returns>
        private ManagedAudioMedia ImportAudioFromFile(string path)
        {
            string dataDir = DataProviderManager.DataFileDirectoryFullPath;

            // EnforceSinglePCMFormat is always true, so no per-file PCM setup is needed.
            AudioMediaData audioData = MediaDataFactory.CreateAudioMediaData();

            bool alreadyInDataDir = Path.GetFullPath(path).StartsWith(Path.GetFullPath(dataDir));
            if (alreadyInDataDir)
            {
                // Adopt the existing file by moving it, avoiding a redundant copy.
                FileDataProvider dataProvider = (FileDataProvider)DataProviderFactory.Create(urakawa.data.DataProviderFactory.AUDIO_WAV_MIME_TYPE);
                dataProvider.InitByMovingExistingFile(path);
                audioData.AppendPcmData(dataProvider);
            }
            else
            {
                audioData.AppendPcmData_RiffHeader(path);
            }

            ManagedAudioMedia importedMedia = MediaFactory.CreateManagedAudioMedia();
            importedMedia.AudioMediaData = audioData;
            return importedMedia;
        }
Пример #14
0
        /// <summary>
        /// Test helper: creates a ManagedAudioMedia in the presentation and loads
        /// the given wave file (resolved against the presentation root) into it.
        /// </summary>
        public static ManagedAudioMedia CreateAudioMedia(Presentation pres, string waveFileName)
        {
            ManagedAudioMedia media = pres.MediaFactory.Create <ManagedAudioMedia>();

            Assert.IsNotNull(media, "Could not create a ManagedAudioMedia");
            media.AudioMediaData.AppendPcmData_RiffHeader(Path.Combine(pres.RootUri.LocalPath, waveFileName));
            return media;
        }
Пример #15
0
 /// <summary>
 /// Builds the command that replaces a phrase node's audio, remembering the
 /// original audio so the change can be undone.
 /// </summary>
 public UpdateAudioMedia(ProjectView.ProjectView view, PhraseNode node, ManagedAudioMedia media, bool updateSelection) : base(view, "")
 {
     m_Node = node;
     m_OriginalManagedAudioMedia = node.Audio;
     m_ManagedAudioMedia         = media;
     UpdateSelection             = updateSelection;

     // Re-target the selection onto the node when it lives in the content view;
     // any other selection is kept as-is.
     if (view.Selection != null && view.Selection.Control is ProjectView.ContentView)
     {
         mSelection = new NodeSelection(m_Node, view.Selection.Control);
     }
     else
     {
         mSelection = view.Selection;
     }
 }
        /// <summary>
        /// Test: forces the project's default sample rate to 44100, then imports a
        /// sample wave file whose PCM format does not match it.
        /// </summary>
        public void ImportInvalidPCMFormatAudio()
        {
            mProject.Presentations.Get(0).MediaDataManager.DefaultPCMFormat.Data.SampleRate = 44100;

            ManagedAudioMedia audioMedia = mProject.Presentations.Get(0).MediaFactory.Create <ManagedAudioMedia>();

            audioMedia.AudioMediaData.AppendPcmData_RiffHeader("../../XukWorks/MediaDataSample/Data/aud000000.wav");
        }
Пример #17
0
        /// <summary>
        /// Split the audio of this phrase at the given position and notified that audio has changed.
        /// </summary>
        /// <returns>The half of the split audio after the split point.</returns>
        public ManagedAudioMedia SplitAudio(urakawa.media.timing.Time splitPoint)
        {
            ManagedAudioMedia trailingAudio = Audio.Split(splitPoint);

            // Snapshot the delegate so the null check and the invocation are consistent.
            var handler = NodeAudioChanged;
            if (handler != null)
            {
                handler(this, new NodeEventArgs <PhraseNode>(this));
            }
            return trailingAudio;
        }
Пример #18
0
 /// <summary>
 /// Merge the audio of this phrase with the audio of another phrase and notify that audio has changed.
 /// </summary>
 /// <param name="audio">Audio whose media data is merged into this phrase's audio.</param>
 public void MergeAudioWith(ManagedAudioMedia audio)
 {
     Audio.AudioMediaData.MergeWith(audio.AudioMediaData);

     // Snapshot the delegate so the null check and the invocation are consistent.
     var handler = NodeAudioChanged;
     if (handler != null)
     {
         handler(this, new NodeEventArgs <PhraseNode>(this));
     }
 }
        /// <summary>
        /// Prepares the command: validates its inputs and captures the state needed
        /// to insert WavAudioMediaData from a source ManagedAudioMedia into the
        /// target tree node's ManagedAudioMedia at the given byte position.
        /// </summary>
        /// <param name="treeNode">Target node; must carry managed audio in this presentation.</param>
        /// <param name="managedAudioMediaSource">Audio to insert; must have actual audio data.</param>
        /// <param name="bytePositionInsert">Insertion offset in bytes; -1 means "unset" and is rejected.</param>
        /// <param name="currentTreeNode">The currently active tree node.</param>
        public void Init(TreeNode treeNode, ManagedAudioMedia managedAudioMediaSource, long bytePositionInsert, TreeNode currentTreeNode)
        {
            if (treeNode == null)
            {
                throw new ArgumentNullException("treeNode");
            }
            if (currentTreeNode == null)
            {
                // BUGFIX: the original reported "treeNode" here, pointing callers
                // at the wrong argument.
                throw new ArgumentNullException("currentTreeNode");
            }

            if (bytePositionInsert == -1)
            {
                // NOTE(review): ArgumentOutOfRangeException would be more accurate,
                // but the exception type is kept for caller compatibility.
                throw new ArgumentNullException("bytePositionInsert");
            }
            if (managedAudioMediaSource == null)
            {
                throw new ArgumentNullException("managedAudioMediaSource");
            }

            ManagedAudioMedia manMedia = treeNode.GetManagedAudioMedia();

            if (manMedia == null)
            {
                throw new ArgumentNullException("manMedia");
            }
            // Both medias and this command must belong to the same presentation.
            if (manMedia.Presentation != managedAudioMediaSource.Presentation)
            {
                throw new NodeInDifferentPresentationException("TreeNode vs ManagedAudioMedia");
            }
            if (manMedia.Presentation != Presentation)
            {
                throw new NodeInDifferentPresentationException("TreeNode vs ManagedAudioMedia");
            }

            if (!managedAudioMediaSource.HasActualAudioMediaData) // || !manMedia.HasActualAudioMediaData)
            {
                throw new ArgumentException("HasActualAudioMediaData");
            }

            TreeNode           = treeNode;
            CurrentTreeNode    = currentTreeNode;
            BytePositionInsert = bytePositionInsert;

            ManagedAudioMediaSource = managedAudioMediaSource;

            // Keep a copy of the target's media so the command can be undone.
            OriginalManagedAudioMedia = manMedia.Copy();

            m_UsedMediaData.Add(OriginalManagedAudioMedia.AudioMediaData);
            m_UsedMediaData.Add(ManagedAudioMediaSource.AudioMediaData);
            //m_UsedMediaData.Add(ManagedAudioMediaTarget.AudioMediaData); belongs to TreeNode, so no need to preserve it explicitly

            ShortDescription = "Insert new audio";
            LongDescription  = "Insert WaveAudioMediaData from a source ManagedAudioMedia into a target ManagedAudioMedia";
        }
Пример #20
0
        /// <summary>
        /// Returns this node's managed audio media, falling back to its managed
        /// audio sequence media when no single managed audio media exists.
        /// </summary>
        public Media GetManagedAudioMediaOrSequenceMedia()
        {
            ManagedAudioMedia audioMedia = GetManagedAudioMedia();

            if (audioMedia != null)
            {
                return audioMedia;
            }
            return GetManagedAudioSequenceMedia();
        }
Пример #21
0
        /// <summary>
        /// Undo: re-attach the deleted audio (and any audio that followed it) to the node.
        /// </summary>
        public override void UnExecute()
        {
            // If audio followed the deleted range, split it off first so the deleted
            // audio can be merged back in at the right place.
            ManagedAudioMedia trailingAudio = mHasAudioAfterDeleted ? mNode.SplitAudio(mSplitTimeBegin) : null;

            mNode.MergeAudioWith(mDeleted.Audio.Copy());
            if (trailingAudio != null)
            {
                mNode.MergeAudioWith(trailingAudio);
            }
            base.UnExecute();
        }
Пример #22
0
        /// <summary>
        /// Redo: remove the audio between the begin and end split times, re-attaching
        /// whatever audio followed the deleted range.
        /// </summary>
        public override void Execute()
        {
            // Detach the audio after the deleted range first, so it can be re-merged.
            ManagedAudioMedia trailingAudio = mHasAudioAfterDeleted ? mNode.SplitAudio(mSplitTimeEnd) : null;

            // Split at the begin mark; the returned middle segment is discarded.
            mNode.SplitAudio(mSplitTimeBegin);
            if (trailingAudio != null)
            {
                mNode.MergeAudioWith(trailingAudio);
            }
            View.Selection = mSelectionAfter;
        }
Пример #23
0
        /// <summary>
        /// Computes the byte offset of the given managed audio media within the play
        /// stream, based on the play-stream markers recorded for the tree node.
        /// </summary>
        /// <param name="treeNode">Node whose marker position anchors the offset.</param>
        /// <param name="managedMedia">Media the offset is for; null yields the marker offset only.</param>
        /// <returns>The byte offset, or 0 when the node has no matching marker.</returns>
        private long getByteOffset(TreeNode treeNode, ManagedAudioMedia managedMedia)
        {
            //if (!State.IsTreeNodeShownInAudioWaveForm(treeNode))
            //{
            //    return 0;
            //}

            long byteOffset = 0;

            //Tuple<TreeNode, TreeNode> treeNodeSelection = m_UrakawaSession.GetTreeNodeSelection();

            if (State.Audio.PlayStreamMarkers != null)
            {
                long bytesRight;
                long bytesLeft;
                int  index;
                // Locate the node in the play-stream markers; bytesLeft is the byte
                // position where this node's audio starts in the stream.
                bool match = State.Audio.FindInPlayStreamMarkers(treeNode, out index, out bytesLeft, out bytesRight);

                if (match)
                {
                    byteOffset = bytesLeft;
                }
                else
                {
                    // Node not present in the markers: no meaningful offset.
                    return(0);
                }
            }

            if (managedMedia == null)
            {
                return(byteOffset);
            }

#if ENABLE_SEQ_MEDIA
            // Legacy path: when the node still carries a SequenceMedia, accumulate the
            // byte sizes of the child medias that precede managedMedia in the sequence.
            SequenceMedia seqManAudioMedia = treeNode.GetManagedAudioSequenceMedia();
            if (seqManAudioMedia != null)
            {
                Debug.Fail("SequenceMedia is normally removed at import time...have you tried re-importing the DAISY book ?");

                foreach (Media media in seqManAudioMedia.ChildMedias.ContentsAs_Enumerable)
                {
                    var manMedia = (ManagedAudioMedia)media;
                    if (media == managedMedia)
                    {
                        break;
                    }
                    byteOffset += manMedia.AudioMediaData.PCMFormat.Data.ConvertTimeToBytes(manMedia.Duration.AsLocalUnits);
                }
            }
#endif //ENABLE_SEQ_MEDIA

            return(byteOffset);
        }
Пример #24
0
        /// <summary>
        /// Stop recording or monitoring.
        /// </summary>
        /// <summary>
        /// Stop recording or monitoring.
        /// When stopping a recording: flushes the last chunk into on-the-fly
        /// phrase detection, discards phrase marks too close to their
        /// predecessor (threshold 250 — presumably milliseconds, TODO confirm),
        /// then splits the session audio at each remaining phrase mark and
        /// inserts the resulting assets into the audio list.
        /// </summary>
        public void Stop()
        {
            bool wasRecording = mRecorder.CurrentState == AudioLib.AudioRecorder.State.Recording;

            if (wasRecording)
            {
                ApplyPhraseDetectionOnTheFly(null);               //@onTheFly: before stopping last chunk of memory stream is passed into phrase detection
            }
            if (mRecorder.CurrentState == AudioLib.AudioRecorder.State.Monitoring ||
                wasRecording)
            {
                if (wasRecording && mPhraseMarks.Count > 0)
                {
                    FinishedPhrase();
                }
                mRecorder.StopRecording();
                if (wasRecording)
                {
                    // Walk the on-the-fly marks backwards; after a removal the i++
                    // cancels the loop's --i so the same index (now holding the next
                    // element) is examined again.
                    // NOTE(review): List.Remove removes by *value*, so with duplicate
                    // mark values an earlier occurrence could be removed instead of
                    // the one at index i — confirm this is intended.
                    for (int i = m_PhraseMarksOnTheFly.Count - 2; i >= 0; --i)
                    {
                        if (i != 0 && i < m_PhraseMarksOnTheFly.Count &&
                            (m_PhraseMarksOnTheFly[i] - m_PhraseMarksOnTheFly[i - 1]) <= 250)
                        {
                            m_PhraseMarksOnTheFly.Remove(m_PhraseMarksOnTheFly[i]);
                            i++;
                        }
                        else if (i == 0 && i < m_PhraseMarksOnTheFly.Count &&
                                 m_PhraseMarksOnTheFly[i] <= 250)
                        {
                            m_PhraseMarksOnTheFly.Remove(m_PhraseMarksOnTheFly[i]);
                            i++;
                        }
                    }

                    // Split the session media at each phrase mark from last to first,
                    // inserting each split-off tail at the session offset so the
                    // resulting assets end up in chronological order.
                    for (int i = mPhraseMarks.Count - 2; i >= 0; --i)
                    {
                        if (mPhraseMarks[i] < mSessionMedia.Duration.AsMilliseconds && mSessionMedia.Duration.AsMilliseconds > 200)
                        {
                            ManagedAudioMedia split = mSessionMedia.Split(new Time(Convert.ToInt64(mPhraseMarks[i] * Time.TIME_UNIT)));
                            mAudioList.Insert(mSessionOffset, split);
                        }
                        else
                        {
                            MessageBox.Show(Localizer.Message("RecordingSession_SplitError"), Localizer.Message("Caption_Warning"));
                        }
                    }
                    // The first asset is what remains of the session asset
                    mAudioList.Insert(mSessionOffset, mSessionMedia);
                }
                mRecordingUpdateTimer.Enabled = false;
            }
        }
Пример #25
0
        /// <summary>
        /// Undo: puts a copy of the originally selected audio media back into
        /// its channel of origin on the selected tree node.
        /// </summary>
        public override void UnExecute()
        {
            ChannelsProperty chProp = m_SelectionData.m_TreeNode.GetOrCreateChannelsProperty();

            ManagedAudioMedia manMed = m_SelectionData.m_TreeNode.GetManagedAudioMedia();

            // Clear the slot first when it is occupied — presumably SetMedia
            // requires an empty slot before assigning new media; TODO confirm.
            if (manMed != null)
            {
                chProp.SetMedia(ChannelOfOriginalMedia, null);
            }

            // Set a copy so the command retains its own pristine instance
            // for any further undo/redo cycles.
            chProp.SetMedia(ChannelOfOriginalMedia, OriginalManagedAudioMedia.Copy());
        }
        /// <summary>
        /// Inserts the source audio into the TreeNode's existing audio at
        /// BytePositionInsert: when the insertion point is (within tolerance)
        /// at the end of the existing audio it appends via MergeWith, otherwise
        /// it performs a mid-stream insert.
        /// </summary>
        public override void Execute()
        {
            ManagedAudioMedia manMedia = TreeNode.GetManagedAudioMedia();

            long durationBytes = manMedia.AudioMediaData.PCMFormat.Data.ConvertTimeToBytes(manMedia.Duration.AsLocalUnits);

            // Insertion point effectively at the end => plain append.
            if (
                //TimeInsert.IsEqualTo(manMedia.Duration)
                //|| timeInsertBytes == durationBytes
                //|| manMedia.AudioMediaData.PCMFormat.Data.BytesAreEqualWithBlockAlignTolerance(timeInsertBytes, durationBytes)
                //|| manMedia.AudioMediaData.PCMFormat.Data.TimesAreEqualWithBlockAlignTolerance(manMedia.Duration.AsLocalUnits, TimeInsert.AsLocalUnits)
                manMedia.AudioMediaData.PCMFormat.Data.BytesAreEqualWithMillisecondsTolerance(BytePositionInsert, durationBytes)
                //|| manMedia.AudioMediaData.PCMFormat.Data.TimesAreEqualWithOneMillisecondTolerance(manMedia.Duration.AsLocalUnits, BytePositionInsert.AsLocalUnits)
                )
            {
                // WARNING: WavAudioMediaData implementation differs from AudioMediaData:
                // the latter is naive and performs a stream binary copy, the former is optimized and re-uses existing WavClips.
                //  WARNING 2: The audio data from the given parameter gets emptied !
                //  (hence the Copy(), which preserves the command's source media)
                manMedia.AudioMediaData.MergeWith(ManagedAudioMediaSource.AudioMediaData.Copy());

                //Time duration = ManagedAudioMediaSource.Duration;
                //Stream stream = ManagedAudioMediaSource.AudioMediaData.OpenPcmInputStream();
                //try
                //{
                //    manMedia.AudioMediaData.AppendPcmData(stream, duration);
                //}
                //finally
                //{
                //    stream.Close();
                //}
            }
            else
            {
                Time duration = ManagedAudioMediaSource.Duration;

                // Mid-stream insert: convert the byte position back into a time
                // before delegating to the WAV-clip-aware implementation.
                ((WavAudioMediaData)manMedia.AudioMediaData).InsertPcmData(
                    (WavAudioMediaData)ManagedAudioMediaSource.AudioMediaData,
                    new Time(manMedia.AudioMediaData.PCMFormat.Data.ConvertBytesToTime(BytePositionInsert)),
                    duration);

                //Stream stream = ManagedAudioMediaSource.AudioMediaData.OpenPcmInputStream();
                //try
                //{
                //    manMedia.AudioMediaData.InsertPcmData(stream, TimeInsert, duration);
                //}
                //finally
                //{
                //    stream.Close();
                //}
            }
        }
Пример #27
0
        /// <summary>
        /// Initializes the delete-audio command: validates arguments, snapshots
        /// a copy of the node's current audio for undo, and records which
        /// channel carries that audio.
        /// </summary>
        /// <param name="selection">the audio selection to delete; must carry a tree node</param>
        /// <param name="currentTreeNode">the tree node currently active in the UI</param>
        /// <exception cref="ArgumentNullException">when an argument (or selection's node) is null</exception>
        /// <exception cref="NullReferenceException">when the node carries no managed audio media</exception>
        public void Init(TreeNodeAndStreamSelection selection, TreeNode currentTreeNode)
        {
            if (selection == null)
            {
                throw new ArgumentNullException("selection");
            }
            if (selection.m_TreeNode == null)
            {
                throw new ArgumentNullException("selection.m_TreeNode");
            }
            if (currentTreeNode == null)
            {
                throw new ArgumentNullException("currentTreeNode");
            }

            CurrentTreeNode = currentTreeNode;
            SelectionData   = selection;

            ShortDescription = "Delete audio portion";
            LongDescription  = "Delete a portion of audio for a given treenode";

            ManagedAudioMedia audioMedia = m_SelectionData.m_TreeNode.GetManagedAudioMedia();
            if (audioMedia == null)
            {
                throw new NullReferenceException("m_SelectionData.m_TreeNode.GetManagedAudioMedia()");
            }

            // Snapshot for undo; register the copied data as in-use so it is kept alive.
            OriginalManagedAudioMedia = audioMedia.Copy();
            m_UsedMediaData.Add(OriginalManagedAudioMedia.AudioMediaData);

#if DEBUG
            DebugFix.Assert(audioMedia.Duration.IsEqualTo(OriginalManagedAudioMedia.Duration));
#endif //DEBUG

            // Find which used channel currently holds this media instance.
            ChannelsProperty channelsProperty = m_SelectionData.m_TreeNode.GetChannelsProperty();
            foreach (Channel channel in channelsProperty.UsedChannels)
            {
                if (channelsProperty.GetMedia(channel) == audioMedia)
                {
                    ChannelOfOriginalMedia = channel;
                    break;
                }
            }
            DebugFix.Assert(ChannelOfOriginalMedia != null);
            DebugFix.Assert(ChannelOfOriginalMedia is AudioChannel);
        }
        /// <summary>
        /// Initializes the add-audio command: validates arguments and state,
        /// then records the tree node and the media to attach.
        /// </summary>
        /// <param name="treeNode">target node; must belong to this command's presentation and have no audio yet</param>
        /// <param name="managedMedia">audio media to attach; must have actual audio data</param>
        /// <param name="currentTreeNode">the tree node currently active in the UI</param>
        /// <exception cref="ArgumentNullException">when any argument is null</exception>
        /// <exception cref="NodeInDifferentPresentationException">when presentations mismatch</exception>
        /// <exception cref="ArgumentException">when the media has no data or the node already has audio</exception>
        public void Init(TreeNode treeNode, ManagedAudioMedia managedMedia, TreeNode currentTreeNode)
        {
            if (treeNode == null)
            {
                throw new ArgumentNullException("treeNode");
            }
            if (currentTreeNode == null)
            {
                throw new ArgumentNullException("currentTreeNode");
            }
            if (managedMedia == null)
            {
                // BUGFIX: paramName must be the actual parameter name
                // ("managedMedia"), not the type name "ManagedAudioMedia".
                throw new ArgumentNullException("managedMedia");
            }
            if (treeNode.Presentation != managedMedia.Presentation)
            {
                throw new NodeInDifferentPresentationException("TreeNode vs ManagedAudioMedia");
            }
            if (treeNode.Presentation != Presentation)
            {
                // BUGFIX: this check compares the node against the command's own
                // Presentation, not the media's — message adjusted accordingly
                // (it previously duplicated the previous check's message).
                throw new NodeInDifferentPresentationException("TreeNode vs Presentation");
            }

            if (!managedMedia.HasActualAudioMediaData)
            {
                throw new ArgumentException("HasActualAudioMediaData");
            }

#if ENABLE_SEQ_MEDIA
            if (treeNode.GetManagedAudioMediaOrSequenceMedia() != null)
            {
                throw new ArgumentException("treeNode.GetManagedAudioMediaOrSequenceMedia");
            }
#else
            if (treeNode.GetManagedAudioMedia() != null)
            {
                // BUGFIX: message now names the method actually checked in this branch.
                throw new ArgumentException("treeNode.GetManagedAudioMedia");
            }
#endif
            CurrentTreeNode   = currentTreeNode;
            TreeNode          = treeNode;
            ManagedAudioMedia = managedMedia;

            // Register the attached media's data so it is kept alive by the command.
            m_UsedMediaData.Add(ManagedAudioMedia.AudioMediaData);

            ShortDescription = "Add new audio";
            LongDescription  = "Attach a ManagedAudioMedia to a TreeNode in the AudioChannel via the ChannelsProperty";
        }
        /// <summary>
        /// XUK deserialization of one child element of this AlternateContent:
        /// dispatches to Metadatas, TextMedia, ManagedAudioMedia or
        /// ManagedImageMedia, and reads past any unknown element.
        /// </summary>
        /// <param name="source">XML reader positioned on the child element</param>
        /// <param name="handler">progress callback forwarded to nested XukIn calls</param>
        /// <exception cref="exception.XukException">when a text/audio/image child appears twice</exception>
        protected override void XukInChild(XmlReader source, IProgressHandler handler)
        {
            bool readItem = false;

            if (source.NamespaceURI == XukAble.XUK_NS)
            {
                readItem = true;
                if (source.LocalName == XukStrings.Metadatas)
                {
                    XukInMetadata(source, handler);
                }
                else if (XukAble.GetXukName(typeof(TextMedia)).Match(source.LocalName))
                {
                    // Each media kind may appear at most once per AlternateContent.
                    if (m_Text != null)
                    {
                        throw new exception.XukException("AlternateContent Text XukIn, already set !");
                    }
                    m_Text = Presentation.MediaFactory.CreateTextMedia();
                    m_Text.XukIn(source, handler);
                }
                else if (XukAble.GetXukName(typeof(ManagedAudioMedia)).Match(source.LocalName))
                {
                    if (m_Audio != null)
                    {
                        throw new exception.XukException("AlternateContent Audio XukIn, already set !");
                    }
                    m_Audio = Presentation.MediaFactory.CreateManagedAudioMedia();
                    m_Audio.XukIn(source, handler);
                }
                else if (XukAble.GetXukName(typeof(ManagedImageMedia)).Match(source.LocalName))
                {
                    if (m_Image != null)
                    {
                        throw new exception.XukException("AlternateContent Image XukIn, already set !");
                    }
                    m_Image = Presentation.MediaFactory.CreateManagedImageMedia();
                    m_Image.XukIn(source, handler);
                }
                else
                {
                    // Unknown element in the XUK namespace: fall through to the skip below.
                    readItem = false;
                }
            }
            if (!(readItem || source.IsEmptyElement))
            {
                source.ReadSubtree().Close(); //Read past unknown child
            }
        }
        /// <summary>
        /// Test: appends two known WAV files to a fresh ManagedAudioMedia and
        /// verifies the cumulative PCM byte count after each append.
        /// </summary>
        public void ImportAudio()
        {
            ManagedAudioMedia mam  = mProject.Presentations.Get(0).MediaFactory.Create <ManagedAudioMedia>();
            string            path = "../../XukWorks/MediaDataSample/Data/aud000000.wav";

            mam.AudioMediaData.AppendPcmData_RiffHeader(path);
            // BUGFIX: corrected misspelled/truncated path in the failure message
            // ("../MediaDataDample/Data/...").
            Assert.AreEqual(
                93312, mam.AudioMediaData.PCMFormat.Data.ConvertTimeToBytes(mam.Duration.TimeDeltaAsMillisecondDouble),
                "Expected wav file ../../XukWorks/MediaDataSample/Data/aud000000.wav to contain 93312 bytes of PCM data");

            path = "../../XukWorks/MediaDataSample/Data/aud000001.wav";
            mam.AudioMediaData.AppendPcmData_RiffHeader(path);
            // BUGFIX: the second assertion checks the cumulative total after
            // appending aud000001.wav (93312 + 231542 bytes), but its message
            // repeated the first file's text verbatim; it now describes the
            // actual expectation.
            Assert.AreEqual(
                93312 + 231542, mam.AudioMediaData.PCMFormat.Data.ConvertTimeToBytes(mam.Duration.TimeDeltaAsMillisecondDouble),
                "Expected PCM data to total 93312 + 231542 bytes after appending ../../XukWorks/MediaDataSample/Data/aud000001.wav");
        }