/// <summary>
/// Get the silence amplitude threshold of an audio asset, read from its first clip.
/// </summary>
/// <param name="silenceRef">The asset whose first clip serves as the silence reference.</param>
/// <returns>The silence amplitude of the asset's first clip.</returns>
public long GetSilenceAmplitude(IAudioMediaAsset silenceRef)
{
    // Direct casts instead of 'as': a wrong argument type now fails with an
    // InvalidCastException at the cast, rather than a NullReferenceException
    // on the following dereference.
    AudioMediaAsset ob_AudioMediaSilenceRef = (AudioMediaAsset)silenceRef;
    AudioClip Ref = (AudioClip)ob_AudioMediaSilenceRef.m_alClipList[0];
    return Ref.GetClipSilenceAmplitude();
}
/// <summary>
/// Start playing an audio asset from its beginning.
/// </summary>
/// <param name="asset">The asset to play.</param>
public void Play(IAudioMediaAsset asset)
{
    // reset player state before kicking off playback at byte offset 0
    m_Asset = asset as AudioMediaAsset;
    m_State = AudioPlayerState.NotReady;
    m_StartPosition = 0;
    InitPlay(0, 0);
}
// Functions

/// <summary>
/// Start playback of the given asset from the beginning.
/// </summary>
/// <param name="asset">The asset to play.</param>
public void Play(IAudioMediaAsset asset)
{
    m_Asset = asset as AudioMediaAsset;
    m_State = AudioPlayerState.NotReady;
    m_StartPosition = 0;
    InitPlay(0, 0);
}
/// <summary>
/// Create a new AudioMediaAsset object from a list of clips and add it to the list of managed assets.
/// </summary>
/// <param name="clips">The array of <see cref="AudioClip"/>s.</param>
/// <returns>The newly created asset.</returns>
public AudioMediaAsset NewAudioMediaAsset(ArrayList clips)
{
    AudioMediaAsset newAsset = new AudioMediaAsset(clips);
    // give the asset a fresh unique name and register it with this manager
    newAsset.Name = NewMediaAssetName();
    newAsset.m_AssetManager = this;
    m_htAssetList.Add(newAsset.Name, newAsset);
    m_htExists.Add(newAsset.Name, newAsset);
    return newAsset;
}
/// <summary>
/// Create a new empty AudioMediaAsset object with the given parameters and add it to the list of managed assets.
/// </summary>
/// <param name="channels">Number of channels</param>
/// <param name="bitDepth">Bit depth</param>
/// <param name="sampleRate">Sample rate</param>
/// <returns>The newly created asset.</returns>
public AudioMediaAsset NewAudioMediaAsset(int channels, int bitDepth, int sampleRate)
{
    AudioMediaAsset newAsset = new AudioMediaAsset(channels, bitDepth, sampleRate);
    // give the asset a fresh unique name and register it with this manager
    newAsset.Name = NewMediaAssetName();
    newAsset.m_AssetManager = this;
    m_htAssetList.Add(newAsset.Name, newAsset);
    m_htExists.Add(newAsset.Name, newAsset);
    return newAsset;
}
/// <summary>
/// Populate and return the capture WaveFormat from the recorder's current
/// channel count, bit depth and sample rate.
/// </summary>
/// <returns>The populated input format.</returns>
public WaveFormat GetInputFormat()
{
    // build a reference asset that derives the frame size for us
    AudioMediaAsset formatRef = new AudioMediaAsset(m_Channels, m_bitDepth, m_SampleRate);
    m_AudioMediaAsset = formatRef;
    InputFormat.Channels = Convert.ToInt16(formatRef.Channels);
    InputFormat.SamplesPerSecond = formatRef.SampleRate;
    InputFormat.BitsPerSample = Convert.ToInt16(formatRef.BitDepth);
    InputFormat.AverageBytesPerSecond = formatRef.SampleRate * formatRef.FrameSize;
    InputFormat.BlockAlign = Convert.ToInt16(formatRef.FrameSize);
    m_FrameSize = formatRef.FrameSize;
    return InputFormat;
}
/// <summary>
/// Insert another audio asset ("chunk") into this asset at the given time.
/// The chunk must have the same audio format as this asset.
/// </summary>
/// <param name="chunk">The asset to insert. NOTE: when inserting at time 0 the
/// chunk itself is mutated (this asset's clips are merged into it), matching
/// the original behavior.</param>
/// <param name="time">Insertion point in milliseconds, 0 to asset length inclusive.</param>
/// <exception cref="Exception">When formats differ or time is out of range.</exception>
public void InsertAsset(IAudioMediaAsset chunk, double time)
{
    // checks if audio formats of original asset and chunk asset are of same formats
    if (CompareAudioAssetFormat(this, chunk) == true && time <= m_dAudioLengthInTime && time >= 0)
    {
        // Appending at the exact end: merge directly and return.
        // BUG FIX: the original code tested this case only AFTER it had
        // cleared the clip list and overwritten m_dAudioLengthInTime with
        // the blank temp asset's length, so the comparison used the wrong
        // value and appending at the end destroyed the asset's content.
        if (time == m_dAudioLengthInTime)
        {
            MergeWith(chunk);
            return;
        }
        AudioMediaAsset ob1;
        if (time > 0)
        {
            // insertion in the middle: front part + chunk + rear part
            ob1 = GetChunk(0, time) as AudioMediaAsset;
            ob1.MergeWith(chunk);
            AudioMediaAsset ob2 = GetChunk(time, this.LengthInMilliseconds) as AudioMediaAsset;
            ob1.MergeWith(ob2);
        }
        else
        {
            // insertion at the very beginning: chunk + this asset
            ob1 = chunk as AudioMediaAsset;
            ob1.MergeWith(this);
        }
        // replace this asset's clip list and bookkeeping with the merged result
        m_alClipList.Clear();
        for (int i = 0; i < ob1.m_alClipList.Count; i++)
        {
            m_alClipList.Add(ob1.m_alClipList[i]);
        }
        m_dAudioLengthInTime = ob1.LengthInMilliseconds;
        m_lAudioLengthInBytes = ob1.AudioLengthInBytes;
        m_lSizeInBytes = ob1.SizeInBytes;
    }
    else
    {
        throw new Exception("Incompatible format or Insertion time not in asset range");
    }
}
/// <summary>
/// Remove the audio between beginTime and endTime from this asset and
/// return the removed part as a new asset.
/// </summary>
/// <param name="beginTime">Start of the range to delete, in milliseconds.</param>
/// <param name="endTime">End of the range to delete, in milliseconds.</param>
/// <returns>A new asset holding the deleted chunk.</returns>
/// <exception cref="Exception">When the range is out of order or out of bounds.</exception>
public IAudioMediaAsset DeleteChunk(double beginTime, double endTime)
{
    // begin/end must be ordered and within the asset
    if (!(beginTime >= 0 && beginTime < endTime && endTime <= m_dAudioLengthInTime))
    {
        throw new Exception("Invalid input parameters");
    }
    // capture the part being deleted so it can be handed back to the caller
    AudioMediaAsset deletedChunk = GetChunk(beginTime, endTime) as AudioMediaAsset;
    // assemble what remains: audio before beginTime followed by audio after endTime
    AudioMediaAsset remainder = new AudioMediaAsset(m_Channels, m_BitDepth, m_SamplingRate);
    if (beginTime != 0 && endTime != m_dAudioLengthInTime)
    {
        // deletion from the middle: keep front and rear, joined together
        remainder = GetChunk(0, beginTime) as AudioMediaAsset;
        AudioMediaAsset rearPart = GetChunk(endTime, m_dAudioLengthInTime) as AudioMediaAsset;
        remainder.MergeWith(rearPart);
    }
    else if (beginTime != 0)
    {
        // deletion runs to the end of the asset: keep only the front part
        remainder = GetChunk(0, beginTime) as AudioMediaAsset;
    }
    else if (endTime != m_dAudioLengthInTime)
    {
        // deletion starts at the beginning: keep only the rear part
        remainder = GetChunk(endTime, m_dAudioLengthInTime) as AudioMediaAsset;
    }
    // (when the whole asset is deleted, the blank remainder is used as-is)
    // adopt the remainder's clip list and bookkeeping
    m_alClipList = remainder.m_alClipList;
    m_dAudioLengthInTime = remainder.LengthInMilliseconds;
    m_lAudioLengthInBytes = remainder.AudioLengthInBytes;
    m_lSizeInBytes = remainder.SizeInBytes;
    return deletedChunk;
}
/// <summary>
/// Register an existing media asset with this manager, assigning a new
/// name if it has none. Non-audio assets are ignored.
/// </summary>
/// <param name="asset">The asset to register.</param>
public void AddAsset(IMediaAsset asset)
{
    if (asset.Type != MediaType.Audio)
    {
        return;
    }
    AudioMediaAsset audioAsset = asset as AudioMediaAsset;
    if (audioAsset.Name == null)
    {
        audioAsset.Name = NewMediaAssetName();
    }
    audioAsset.m_AssetManager = this;
    m_htAssetList.Add(audioAsset.Name, asset);
    m_htExists.Add(audioAsset.Name, asset);
}
/// <summary>
/// Start playing an asset from the given time.
/// </summary>
/// <param name="asset">The asset to play.</param>
/// <param name="timeFrom">Start time in milliseconds.</param>
private void Play(IAudioMediaAsset asset, double timeFrom)
{
    m_Asset = asset as AudioMediaAsset;
    long lPosition = Calc.ConvertTimeToByte(timeFrom, m_Asset.SampleRate, m_Asset.FrameSize);
    // snap the byte offset to a frame boundary
    lPosition = Calc.AdaptToFrame(lPosition, m_Asset.FrameSize);
    // BUG FIX: was 'lPosition > 0', which rejected playback from the very
    // beginning (time 0); the public Play(asset, timeFrom) overload
    // correctly accepts position 0 with '>= 0'.
    if (lPosition >= 0 && lPosition < m_Asset.AudioLengthInBytes)
    {
        InitPlay(lPosition, 0);
    }
    else
    {
        MessageBox.Show("Parameters out of range");
    }
}
/// <summary>
/// Start playing an asset from the given time.
/// </summary>
/// <param name="asset">The asset to play.</param>
/// <param name="timeFrom">Start time in milliseconds.</param>
/// <exception cref="Exception">When the start position falls outside the asset.</exception>
public void Play(IAudioMediaAsset asset, double timeFrom)
{
    m_Asset = asset as AudioMediaAsset;
    // convert the start time to a frame-aligned byte offset
    long startByte = Calc.AdaptToFrame(
        Calc.ConvertTimeToByte(timeFrom, m_Asset.SampleRate, m_Asset.FrameSize),
        m_Asset.FrameSize);
    if (startByte < 0 || startByte >= m_Asset.AudioLengthInBytes)
    {
        throw new Exception("Start Position is out of bounds of Audio Asset");
    }
    m_StartPosition = startByte;
    InitPlay(startByte, 0);
}
/// <summary>
/// Play a section of an asset between two times.
/// </summary>
/// <param name="asset">The asset to play.</param>
/// <param name="timeFrom">Start time in milliseconds.</param>
/// <param name="timeTo">End time in milliseconds.</param>
private void Play(IAudioMediaAsset asset, double timeFrom, double timeTo)
{
    m_Asset = asset as AudioMediaAsset;
    long lStartPosition = Calc.ConvertTimeToByte(timeFrom, m_Asset.SampleRate, m_Asset.FrameSize);
    // snap the start offset to a frame boundary
    lStartPosition = Calc.AdaptToFrame(lStartPosition, m_Asset.FrameSize);
    long lEndPosition = Calc.ConvertTimeToByte(timeTo, m_Asset.SampleRate, m_Asset.FrameSize);
    lByteTo = lEndPosition;
    // check for valid arguments
    // BUG FIX: was 'lStartPosition > 0', which rejected playback starting
    // at time 0; consistent with the public overload, position 0 is valid.
    if (lStartPosition >= 0 && lStartPosition < lEndPosition && lEndPosition <= m_Asset.AudioLengthInBytes)
    {
        InitPlay(lStartPosition, lEndPosition);
    }
    else
    {
        MessageBox.Show("Arguments out of range");
    }
}
/// <summary>
/// Split this asset at the given time: the audio after the split point is
/// removed from this asset and returned as a new asset.
/// </summary>
/// <param name="time">Split point in milliseconds, within this asset.</param>
/// <returns>A new asset holding the audio from <paramref name="time"/> to the end.</returns>
/// <exception cref="Exception">When time is out of the asset's bounds.</exception>
public IAudioMediaAsset Split(double time)
{
    // checks if time parameter is in bounds of asset
    // NOTE(review): time == m_dAudioLengthInTime passes this check but
    // GetChunk requires begin < end — confirm callers never split at the exact end
    if (time >= 0 && time <= m_dAudioLengthInTime)
    {
        // create new asset for clips after time specified in parameter
        AudioMediaAsset ob_AudioMediaAsset = GetChunk(time, m_dAudioLengthInTime) as AudioMediaAsset;
        // FindClipToProcess yields [clip index, time offset local to that clip]
        ArrayList alMarksList = new ArrayList(FindClipToProcess(time));
        int ClipIndex = Convert.ToInt32(alMarksList[0]);
        double dClipTimeMark = Convert.ToDouble(alMarksList [1]);
        AudioClip ob_AudioClip = m_alClipList [ClipIndex] as AudioClip;
        if (dClipTimeMark > 0 && dClipTimeMark < ob_AudioClip.LengthInTime)
        {
            // split point falls inside the clip: split the clip itself in place
            ob_AudioClip.Split(dClipTimeMark);
        }
        else if (dClipTimeMark == 0)
        {
            // split point is exactly at a clip boundary: the whole clip
            // at ClipIndex belongs to the removed part
            ClipIndex--;
        }
        // Remove clips after clip index
        m_alClipList.RemoveRange(ClipIndex + 1, (m_alClipList.Count - ClipIndex - 1));
        // shrink this asset's bookkeeping by the size of the removed part
        m_dAudioLengthInTime = m_dAudioLengthInTime - ob_AudioMediaAsset.LengthInMilliseconds;
        m_lAudioLengthInBytes = m_lAudioLengthInBytes - ob_AudioMediaAsset.AudioLengthInBytes;
        // NOTE(review): size is reset to the audio length, discarding any
        // prior difference between size and audio length — confirm intended
        m_lSizeInBytes = m_lAudioLengthInBytes;
        return(ob_AudioMediaAsset);
    }
    else
    {
        throw new Exception("Cannot split: parameter value out of bound of asset");
    }
}
//it will start actual recording, append if there is data
//in the wave file through the RecordCaptureData()
/// <summary>
/// Start an actual recording into a new wave file, using the audio format
/// of the given asset.
/// </summary>
/// <param name="asset">Asset whose channels, sample rate and bit depth drive the capture format.</param>
public void StartRecording(IAudioMediaAsset asset)
{
    // announce the state transition to Recording
    events.AudioRecorderEvents.StateChanged e = new events.AudioRecorderEvents.StateChanged(mState);
    mState = AudioRecorderState.Recording;
    StateChanged(this, e);
    // adopt the asset's audio format for the capture buffer
    m_Channels = asset.Channels;
    m_SampleRate = asset.SampleRate;
    m_bitDepth = asset.BitDepth;
    // work on a copy so recording does not mutate the caller's asset.
    // (FIX: removed a dead 'new AudioMediaAsset(...)' allocation that was
    // immediately overwritten by the copy.)
    mAsset = asset.Copy() as AudioMediaAsset;
    AssetManager manager = mAsset.Manager as AssetManager;
    sProjectDirectory = manager.DirPath;
    InputFormat = GetInputFormat();
    m_sFileName = GetFileName();
    // write the RIFF/WAVE header; capture data is appended to this file later
    BinaryWriter bw = new BinaryWriter(File.Create(m_sFileName));
    CreateRIFF(bw);
    CreateCaptureBuffer();
    InitRecording(true);
}
/// <summary>
/// Make a copy of the asset, sharing the same format and data.
/// </summary>
/// <returns>The new, identical asset.</returns>
public override IMediaAsset Copy()
{
    AudioMediaAsset copy = new AudioMediaAsset(this.Channels, this.BitDepth, this.SampleRate);
    copy.m_eMediaType = m_eMediaType;
    copy.m_AssetManager = m_AssetManager;
    copy.Name = m_sName;
    // share clip references with the source asset (shallow copy of the list)
    foreach (object clip in m_alClipList)
    {
        copy.m_alClipList.Add(clip);
    }
    copy.m_FrameSize = m_FrameSize;
    copy.m_dAudioLengthInTime = m_dAudioLengthInTime;
    copy.m_lAudioLengthInBytes = m_lAudioLengthInBytes;
    copy.m_lSizeInBytes = m_lSizeInBytes;
    return copy;
}
/// <summary>
/// Append the clips of another audio asset to this one and grow the
/// length/size bookkeeping accordingly. Both assets must share the same
/// audio format.
/// </summary>
/// <param name="next">The asset whose clips are appended (references are shared).</param>
/// <exception cref="Exception">When the two formats differ.</exception>
public override void MergeWith(IMediaAsset next)
{
    AudioMediaAsset other = next as AudioMediaAsset;
    // refuse to merge assets with different channel/rate/depth settings
    if (!CompareAudioAssetFormat(this, other))
    {
        throw new Exception("Cannot merge assets: incompatible format");
    }
    // append clips of next asset to clip list of original asset
    foreach (object clip in other.m_alClipList)
    {
        m_alClipList.Add(clip);
    }
    m_dAudioLengthInTime += other.LengthInMilliseconds;
    m_lAudioLengthInBytes += other.AudioLengthInBytes;
    m_lSizeInBytes += other.SizeInBytes;
}
/// <summary>
/// Start monitoring ("listening to") the input, capturing into a temporary
/// Listen.wav in the project directory, using the audio format of the
/// given asset.
/// </summary>
/// <param name="asset">Asset whose channels, sample rate and bit depth drive the capture format.</param>
public void StartListening(IAudioMediaAsset asset)
{
    // announce the state transition to Listening
    StateChanged mStateChanged = new StateChanged(mState);
    mState = AudioRecorderState.Listening;
    FireEvent(mStateChanged);
    // adopt the asset's audio format for the capture buffer
    m_Channels = asset.Channels;
    m_bitDepth = asset.BitDepth;
    m_SampleRate = asset.SampleRate;
    // work on a copy so listening does not mutate the caller's asset.
    // (FIX: removed a dead 'new AudioMediaAsset(...)' allocation that was
    // immediately overwritten by the copy.)
    mAsset = asset.Copy() as AudioMediaAsset;
    AssetManager manager = asset.Manager as AssetManager;
    sProjectDirectory = manager.DirPath;
    InputFormat = GetInputFormat();
    m_sFileName = sProjectDirectory + "\\" + "Listen.wav";
    // write the RIFF/WAVE header for the scratch listen file
    BinaryWriter ListenWriter = new BinaryWriter(File.Create(m_sFileName));
    CreateRIFF(ListenWriter);
    CreateCaptureBuffer();
    InitRecording(true);
}
/// <summary>
/// Run phrase detection over every clip of this asset and return the
/// detected phrases as a list of new AudioMediaAssets.
/// </summary>
/// <param name="threshold">Amplitude threshold below which audio counts as silence.</param>
/// <param name="length">Minimum silence duration (in time units) that separates phrases.</param>
/// <param name="before">Time to retain before each detected phrase start.</param>
/// <returns>ArrayList of <see cref="AudioMediaAsset"/>s, one per detected phrase.</returns>
public ArrayList ApplyPhraseDetection(long threshold, double length, double before)
{
    // convert input parameters from time to byte
    long lLength = Calc.ConvertTimeToByte(length, m_SamplingRate, m_FrameSize);
    long lBefore = Calc.ConvertTimeToByte(before, m_SamplingRate, m_FrameSize);
    AudioClip ob_Clip;
    // AssetList is list of assets returned by phrase detector
    ArrayList alAssetList = new ArrayList();
    // clipList is clip list for each return asset
    ArrayList alClipList;
    // ob_Asset accumulates clips across loop iterations until a phrase
    // boundary is found, at which point it is flushed into alAssetList
    AudioMediaAsset ob_Asset = new AudioMediaAsset(m_Channels, m_BitDepth, m_SamplingRate);
    // apply phrase detection on each clip in clip list of this asset
    for (int i = 0; i < m_alClipList.Count; i++)
    {
        ob_Clip = m_alClipList [i] as AudioClip;
        // DetectPhrases returns [bool phrasesFound, clip, clip, ...]
        alClipList = ob_Clip.DetectPhrases(threshold, lLength, lBefore);
        if (Convert.ToBoolean(alClipList [0]) == false)
        {
            // no phrase boundary inside this clip: keep accumulating
            ob_Asset.AddClip(alClipList [1] as AudioClip);
            // last clip reached: flush the accumulator
            if (i == m_alClipList.Count - 1 && ob_Asset.m_alClipList != null)
            {
                alAssetList.Add(ob_Asset);
            }
        }
        else
        {
            // phrase boundaries were found inside this clip
            // NOTE(review): the 3000 here appears to be a minimum lead-in
            // (presumably ms) before the first detected phrase — confirm units
            if (ob_Clip.BeginTime + 3000 < (alClipList [1] as AudioClip).BeginTime)
            {
                // keep the audio preceding the first detected phrase
                ob_Asset.AddClip(ob_Clip.CopyClipPart(0, (alClipList [1] as AudioClip).BeginTime - ob_Clip.BeginTime));
                if (i == 0)
                {
                    alAssetList.Add(ob_Asset);
                }
            }
            // flush whatever was accumulated from earlier clips
            if (i != 0)
            {
                alAssetList.Add(ob_Asset);
            }
            // each interior detected phrase becomes its own asset
            for (int j = 1; j < alClipList.Count - 1; j++)
            {
                ob_Asset = new AudioMediaAsset(m_Channels, m_BitDepth, m_SamplingRate);
                ob_Asset.AddClip(alClipList [j] as AudioClip);
                alAssetList.Add(ob_Asset);
            }
            // start a fresh accumulator seeded with the trailing phrase
            // fragment, which may continue into the next clip
            ob_Asset = new AudioMediaAsset(m_Channels, m_BitDepth, m_SamplingRate);
            if (alClipList.Count > 2)
            {
                ob_Asset.AddClip(alClipList [alClipList.Count - 1] as AudioClip);
            }
            // last clip reached: flush the accumulator
            if (i == m_alClipList.Count - 1 && ob_Asset.m_alClipList != null)
            {
                alAssetList.Add(ob_Asset);
            }
        } // bool if ends
    }
    return(alAssetList);
}
/// <summary>
/// Copy the audio between two times into a new asset, leaving this asset
/// unchanged.
/// </summary>
/// <param name="beginTime">Start of the chunk in milliseconds.</param>
/// <param name="endTime">End of the chunk in milliseconds; must be greater than beginTime.</param>
/// <returns>A new asset containing the requested span.</returns>
/// <exception cref="Exception">When the range is out of order or out of bounds.</exception>
public IAudioMediaAsset GetChunk(double beginTime, double endTime)
{
    // checks if the input parameters are in bounds of asset and in order
    if (beginTime >= 0 && beginTime < endTime && endTime <= m_dAudioLengthInTime)
    {
        ArrayList alNewClipList = new ArrayList();
        // finds the data for chunk begin point including Clip index, local clip time etc
        // from FindClipToProcess in form of ArrayList and copy it in an ArrayList active in this function
        ArrayList alBeginList = new ArrayList(FindClipToProcess(beginTime));
        // BeginClipIndex is index of clip in Asset Clip list which is to be split at begin point
        int BeginClipIndex = Convert.ToInt32(alBeginList [0]);
        // dBeginTimeMark is the time marking in target clip at which point split has to be made
        double dBeginTimeMark = Convert.ToDouble(alBeginList [1]);
        // All above steps are repeated for finding marking for EndTime of chunk
        ArrayList alEndList = new ArrayList(FindClipToProcess(endTime));
        int EndClipIndex = Convert.ToInt32(alEndList [0]);
        double dEndTimeMark = Convert.ToDouble(alEndList [1]);
        // transfer clip to process to separate object
        AudioClip ob_BeginClip = m_alClipList[BeginClipIndex] as AudioClip;
        // if begin time and end time lie in same clip then make a new clip
        // from that clip, create an asset for it and return
        if (BeginClipIndex == EndClipIndex)
        {
            AudioClip ob_NewClip = ob_BeginClip.CopyClipPart(dBeginTimeMark, dEndTimeMark);
            alNewClipList.Add(ob_NewClip);
        }
        else
        {
            // Normalise EndClip from m_ClipList to original class
            AudioClip ob_EndClip = m_alClipList[EndClipIndex] as AudioClip;
            // branch if BeginClip time mark is not end of target clip
            if (dBeginTimeMark < ob_BeginClip.LengthInTime)
            {
                // derive new begin clip (tail of the target clip) and add it
                // to the clip list of the return asset
                AudioClip ob_NewBeginClip = ob_BeginClip.CopyClipPart(dBeginTimeMark, ob_BeginClip.LengthInTime);
                alNewClipList.Add(ob_NewBeginClip);
            }
            // add clips between beginClip index and EndClip index to ClipList
            // of return asset excluding begin and end clips
            // NOTE(review): interior clip references are shared, not copied — the
            // returned asset aliases this asset's clips; confirm that is intended
            for (int i = BeginClipIndex + 1; i < EndClipIndex; i++)
            {
                alNewClipList.Add(m_alClipList [i]);
            }
            // if EndClip time mark is not at beginning of target clip then do following
            if (dEndTimeMark > 0)
            {
                // Create new endClip (head of the target end clip) to be added
                // to Clip list of return asset
                AudioClip ob_NewEndClip = ob_EndClip.CopyClipPart(0, dEndTimeMark);
                alNewClipList.Add(ob_NewEndClip);
            }
        }
        // create return AudioMediaAsset from new clip list
        AudioMediaAsset ob_AudioMediaAsset = new AudioMediaAsset(alNewClipList);
        return(ob_AudioMediaAsset);
    }
    else
    {
        throw new Exception("Invalid input parameters");
    }
}
/// <summary>
/// Create a new stream for a given audio asset.
/// </summary>
/// <param name="asset">The asset for this stream.</param>
/// <remarks>
/// NOTE(review): the body is empty and the asset parameter is ignored —
/// this looks like an unfinished stub; confirm whether the asset should
/// be stored in a field for later use.
/// </remarks>
public AudioMediaAssetStream(AudioMediaAsset asset)
{
}