/// <summary>
/// Now that we've gotten a message, examine it and dissect the audio data.
/// Parses the bit-packed header word, then for each audio stream reads the
/// HRTF metadata (when present) and each channel's sample payload, writing
/// the raw bytes into the playback circular buffer.
/// </summary>
/// <param name="connection">Connection the message arrived on (not used).</param>
/// <param name="message">Incoming message; fields are read strictly in wire order.</param>
public void OnMessageReceived(NetworkConnection connection, NetworkInMessage message)
{
    // Unused byte headerSize
    message.ReadByte();

    // All remaining header fields are bit-packed into this single 32-bit word.
    Int32 pack = message.ReadInt32();

    // Unused int version
    versionExtractor.GetBitsValue(pack);
    int audioStreamCount = audioStreamCountExtractor.GetBitsValue(pack);
    int channelCount = channelCountExtractor.GetBitsValue(pack);
    int sampleRate = sampleRateExtractor.GetBitsValue(pack);
    int sampleType = sampleTypeExtractor.GetBitsValue(pack);

    // sampleType == 1 means 16-bit integer samples; anything else is 32-bit float.
    int bytesPerSample = sizeof(float);
    if (sampleType == 1)
    {
        bytesPerSample = sizeof(Int16);
    }

    int sampleCount = sampleCountExtractor.GetBitsValue(pack);
    int codecType = codecTypeExtractor.GetBitsValue(pack);

    // Unused int sequenceNumber
    sequenceNumberExtractor.GetBitsValue(pack);

    if (sampleRate == 0)
    {
        // A packed sample rate of 0 signals an extended rate in the next int.
        // Unused int extendedSampleRate
        message.ReadInt32();
    }

    // BUGFIX: acquire the mutex BEFORE the try block. If WaitOne throws, the
    // finally clause must not call ReleaseMutex on a mutex this thread never
    // acquired (that would raise a second exception masking the first).
    audioDataMutex.WaitOne();
    try
    {
        prominentSpeakerCount = 0;

        for (int i = 0; i < audioStreamCount; i++)
        {
            float averageAmplitude = message.ReadFloat();
            UInt32 hrtfSourceID = (UInt32)message.ReadInt32();
            Vector3 hrtfPosition = new Vector3();
            Vector3 hrtfDirection = new Vector3();

            if (hrtfSourceID != 0)
            {
                // HRTF position/direction are only on the wire for non-zero IDs.
                hrtfPosition.x = message.ReadFloat();
                hrtfPosition.y = message.ReadFloat();
                hrtfPosition.z = message.ReadFloat();

                hrtfDirection.x = message.ReadFloat();
                hrtfDirection.y = message.ReadFloat();
                hrtfDirection.z = message.ReadFloat();

                Vector3 cameraPosRelativeToGlobalAnchor = Vector3.zero;
                Vector3 cameraDirectionRelativeToGlobalAnchor = Vector3.zero;

                if (GlobalAnchorTransform != null)
                {
                    cameraPosRelativeToGlobalAnchor = MathUtils.TransformPointFromTo(
                        null,
                        GlobalAnchorTransform,
                        Camera.main.transform.position);

                    // BUGFIX: the camera's look direction is transform.forward.
                    // The original passed transform.position here, producing a
                    // "direction" that points from the world origin to the
                    // camera rather than where the camera is facing.
                    cameraDirectionRelativeToGlobalAnchor = MathUtils.TransformDirectionFromTo(
                        null,
                        GlobalAnchorTransform,
                        Camera.main.transform.forward);
                }

                // NOTE(review): normalizing a *position* looks suspect, but the
                // emitter math below was tuned against this behavior, so it is
                // preserved — confirm intent against the sending side.
                cameraPosRelativeToGlobalAnchor.Normalize();
                cameraDirectionRelativeToGlobalAnchor.Normalize();

                Vector3 soundVector = hrtfPosition - cameraPosRelativeToGlobalAnchor;
                soundVector.Normalize();

                // x is forward
                float fltx = (kDropOffMaximum / DropOffMaximumMetres) * Vector3.Dot(soundVector, cameraDirectionRelativeToGlobalAnchor);

                // y is right
                Vector3 myRight = Quaternion.Euler(0, 90, 0) * cameraDirectionRelativeToGlobalAnchor;
                float flty = -(kPanMaximum / PanMaximumMetres) * Vector3.Dot(soundVector, myRight);

                // z is up
                Vector3 myUp = Quaternion.Euler(90, 0, 0) * cameraDirectionRelativeToGlobalAnchor;
                float fltz = (kPanMaximum / PanMaximumMetres) * Vector3.Dot(soundVector, myUp);

                // Hacky distance check so we don't get too close to source.
                Vector3 flt = new Vector3(fltx, flty, fltz);
                if (flt.magnitude < (MinimumDistance * kDropOffMaximum))
                {
                    flt = flt.normalized * MinimumDistance * kDropOffMaximum;
                    fltx = flt.x;
                    flty = flt.y;
                    fltz = flt.z;
                }

                AddProminentSpeaker(hrtfSourceID, averageAmplitude, fltx, flty, fltz);
            }

            for (int j = 0; j < channelCount; j++)
            {
                // if uncompressed, size = sampleCount
                Int16 size = (Int16)sampleCount;
                if (codecType != 0)
                {
                    // if compressed, size is first 2 bytes, sampleCount should be number of bytes after decompression
                    size = message.ReadInt16();
                }

                // Reallocate the scratch buffer only when the payload size
                // changes, to minimize per-packet allocations.
                int totalBytes = size * bytesPerSample;
                if (networkPacketBufferBytes.Length != totalBytes)
                {
                    networkPacketBufferBytes = new byte[totalBytes];
                }
                message.ReadArray(networkPacketBufferBytes, (uint)totalBytes);

                if (codecType != 0)
                {
                    // in place decompression please - should fill out the data buffer
                    // ...
                }

                if (hrtfSourceID > 0)
                {
                    // hrtf processing here
                }

                circularBuffer.Write(networkPacketBufferBytes, 0, networkPacketBufferBytes.Length);
            }
        }
    }
    catch (Exception e)
    {
        // BUGFIX: log the full exception (message + stack trace) instead of
        // e.Message alone, so parse failures remain diagnosable.
        Debug.LogException(e);
    }
    finally
    {
        audioDataMutex.ReleaseMutex();
    }
}