private static void UpdateAudio()
{
    audioFrameAvailable = false;

    if (audioReader != null)
    {
        IList<AudioBeamFrame> frames = audioReader.AcquireLatestBeamFrames();

        // AcquireLatestBeamFrames() may return null when no new frame is available.
        if (frames != null)
        {
            for (int i = 0; i < frames.Count; i++)
            {
                AudioBeamFrame frame = frames[i];
                if (frame != null)
                {
                    audioFrameAvailable = true;

                    IList<AudioBeamSubFrame> subframes = frame.SubFrames;
                    for (int j = 0; j < subframes.Count; j++)
                    {
                        AudioBeamSubFrame subframe = subframes[j];
                        if (subframe != null)
                        {
                            beamAngle = subframe.BeamAngle;
                            ProcessAudio(subframe);
                            subframe.Dispose();
                        }
                    }

                    // Frames must be disposed, otherwise the reader stops
                    // delivering new audio frames.
                    frame.Dispose();
                }
            }
        }
    }
}
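The listing above delegates the actual sample handling to a ProcessAudio helper that is not shown in this section. The sketch below is a hypothetical version of such a helper, not the original implementation; it relies only on the Kinect v2 facts that AudioBeamSubFrame.CopyFrameDataToArray copies the raw bytes out of the sub-frame and that the audio format is 16 kHz mono, 32-bit IEEE-float samples. The RMS computation at the end is just an illustrative use of the unpacked samples.

// Hypothetical helper - a minimal sketch, not the original ProcessAudio.
// Kinect v2 audio sub-frames carry 16 kHz mono samples as 32-bit IEEE floats.
private static void ProcessAudio(AudioBeamSubFrame subframe)
{
    byte[] rawBytes = new byte[subframe.FrameLengthInBytes];
    subframe.CopyFrameDataToArray(rawBytes);

    // Reinterpret the raw bytes as float samples (4 bytes per sample).
    float[] samples = new float[rawBytes.Length / sizeof(float)];
    Buffer.BlockCopy(rawBytes, 0, samples, 0, rawBytes.Length);

    // Example use: compute the RMS energy of this sub-frame.
    float sumSquares = 0f;
    for (int i = 0; i < samples.Length; i++)
    {
        sumSquares += samples[i] * samples[i];
    }

    float rms = (float)Math.Sqrt(sumSquares / samples.Length);
}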
private void ProcessAudioFrame()
{
    IList<AudioBeamFrame> frameList = audioReader.AcquireLatestBeamFrames();

    if (frameList != null)
    {
        if (frameList[0] != null && frameList[0].SubFrames != null && frameList[0].SubFrames.Count > 0)
        {
            // Only one audio beam is supported. Get the sub-frame list for this beam.
            List<AudioBeamSubFrame> subFrameList = frameList[0].SubFrames.ToList();

            // Loop over all sub-frames and extract the beam information.
            foreach (AudioBeamSubFrame subFrame in subFrameList)
            {
                // Check if the beam angle and/or confidence have changed.
                bool updateBeam = false;

                if (subFrame.BeamAngle != this.beamAngleRad)
                {
                    this.beamAngleRad = subFrame.BeamAngle;
                    this.beamAngleDeg = this.beamAngleRad * 180.0f / Mathf.PI;
                    updateBeam = true;
                }

                if (subFrame.BeamAngleConfidence != this.beamAngleConfidence)
                {
                    this.beamAngleConfidence = subFrame.BeamAngleConfidence;
                    updateBeam = true;
                }

                if (updateBeam)
                {
                    // Refresh the on-screen display of the audio beam.
                    if (statusText)
                    {
                        statusText.text = string.Format("Audio beam angle: {0:F0} deg., Confidence: {1:F0}%",
                            beamAngleDeg, beamAngleConfidence * 100f);
                    }
                }
            }
        }

        // Clean up. The frames must be disposed, otherwise the reader
        // stops delivering new audio frames.
        for (int i = frameList.Count - 1; i >= 0; i--)
        {
            AudioBeamFrame frame = frameList[i];
            if (frame != null)
            {
                frame.Dispose();
            }
        }
    }
}
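Both UpdateAudio() and ProcessAudioFrame() poll an AudioBeamFrameReader rather than subscribing to its FrameArrived event. The fragment below is a minimal setup sketch (assumed, not part of the original listings) showing how such a reader is obtained from the sensor's audio source with the standard Kinect v2 API; the InitAudioReader name is hypothetical.

// Minimal setup sketch for the audioReader used by the methods above.
private AudioBeamFrameReader audioReader;

private void InitAudioReader()
{
    KinectSensor sensor = KinectSensor.GetDefault();
    if (sensor != null)
    {
        sensor.Open();

        // Open a reader on the audio source. AcquireLatestBeamFrames() can
        // then be polled on each update tick, as in the methods above.
        audioReader = sensor.AudioSource.OpenReader();
    }
}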
private void AudioBeamFrameReader_FrameArrived(object sender, AudioBeamFrameArrivedEventArgs e)
{
    using (AudioBeamFrameList frameList = e.FrameReference.AcquireBeamFrames())
    {
        if (frameList != null)
        {
            // NOTE - the old pattern of passing the AudioBeamFrameList to a downstream
            // KinectAudio component had issues that were exposed in async mode. The
            // AudioBeamFrameList is not disposed immediately in the event handler as
            // it needs to be kept around until the async receiver processes it. However,
            // Kinect suppresses all further audio events until the AudioBeamFrameList is
            // disposed, so the receiver in KinectAudio has no way of recycling the old
            // AudioBeamFrameList once it is done processing it (since the receiver never
            // gets called again and this is the way objects are passed back upstream for
            // recycling in the current cooperative buffering scheme). To resolve this, I
            // moved the audio processing into this handler inside a using clause which
            // ensures that the AudioBeamFrameList is disposed of immediately.
            AudioBeamFrame audioBeamFrame = frameList[0];
            foreach (var subFrame in audioBeamFrame.SubFrames)
            {
                // Check if we need to reallocate the audio buffer - for instance, if the
                // downstream component that we posted-by-ref to modifies the reference to
                // audioBuffer to null or to an array with a different size.
                if ((this.audioBuffer == null) || (this.audioBuffer.Length != subFrame.FrameLengthInBytes))
                {
                    this.audioBuffer = new byte[subFrame.FrameLengthInBytes];
                }

                // Get the raw audio bytes from the frame.
                subFrame.CopyFrameDataToArray(this.audioBuffer);

                // Compute the originating time from the relative time reported by Kinect.
                var originatingTime = this.pipeline.GetCurrentTimeFromElapsedTicks(
                    (subFrame.RelativeTime + subFrame.Duration).Ticks);

                // Post the audio buffer by reference.
                this.Audio.Post(new AudioBuffer(this.audioBuffer, KinectSensor.audioFormat), originatingTime);

                // Post the audio beam angle information by value (not using co-operative buffering).
                this.AudioBeamInfo.Post(
                    new KinectAudioBeamInfo(subFrame.BeamAngle, subFrame.BeamAngleConfidence),
                    originatingTime);

                if ((subFrame.AudioBodyCorrelations != null) && (subFrame.AudioBodyCorrelations.Count > 0))
                {
                    // Get the BodyTrackingIds from the AudioBodyCorrelations list (this seems
                    // to be the only useful bit of information it carries).
                    var bodyIds = subFrame.AudioBodyCorrelations.Select(abc => abc.BodyTrackingId);

                    // Since we post bodyTrackingIds by-ref, we need to do a null check each
                    // time and allocate if necessary. Otherwise, clear and re-use the existing list.
                    if (this.bodyTrackingIds == null)
                    {
                        // Allocate a new list.
                        this.bodyTrackingIds = new List<ulong>(bodyIds);
                    }
                    else
                    {
                        // Re-use the existing list.
                        this.bodyTrackingIds.Clear();
                        foreach (ulong id in bodyIds)
                        {
                            this.bodyTrackingIds.Add(id);
                        }
                    }

                    // Post the audio body correlations by reference.
                    this.AudioBodyCorrelations.Post(this.bodyTrackingIds, originatingTime);
                }
            }
        }
    }
}
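Because the audio buffer above is posted by reference (cooperative buffering), a downstream consumer must not hold on to the array across messages: the producer overwrites this.audioBuffer on the next sub-frame. The fragment below is a hedged sketch of such a consumer, assuming the Microsoft.Psi Do stream operator and the AudioBuffer.Data property; the kinect and samples identifiers are hypothetical and not part of the original code.

// Hypothetical consumer sketch. Because Audio is posted by-ref, the byte[]
// inside each AudioBuffer is re-used by the producer; copy it if the data
// must outlive the message handler.
var samples = new List<byte[]>();

kinect.Audio.Do(audio =>
{
    byte[] copy = new byte[audio.Data.Length];
    Array.Copy(audio.Data, copy, audio.Data.Length);
    samples.Add(copy);
});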