Example #1
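Parses a VideoInitSenderPacketData out of a packet's raw bytes: it skips the sender header, then reads the depth map dimensions, the depth camera intrinsics, and the metric radius in the exact order the sender wrote them.
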
    public static VideoInitSenderPacketData Parse(byte[] packetBytes)
    {
        using var reader = new BinaryReader(new MemoryStream(packetBytes));

        // Skip the 5-byte sender header (presumably the 4-byte session id
        // plus the 1-byte packet type; compare the accessors in Example #3).
        reader.BaseStream.Position = 5;

        var videoInitSenderPacketData = new VideoInitSenderPacketData();

        videoInitSenderPacketData.depthWidth  = reader.ReadInt32();
        videoInitSenderPacketData.depthHeight = reader.ReadInt32();

        var depthIntrinsics = new KinectCalibration.Intrinsics();

        depthIntrinsics.cx           = reader.ReadSingle();
        depthIntrinsics.cy           = reader.ReadSingle();
        depthIntrinsics.fx           = reader.ReadSingle();
        depthIntrinsics.fy           = reader.ReadSingle();
        depthIntrinsics.k1           = reader.ReadSingle();
        depthIntrinsics.k2           = reader.ReadSingle();
        depthIntrinsics.k3           = reader.ReadSingle();
        depthIntrinsics.k4           = reader.ReadSingle();
        depthIntrinsics.k5           = reader.ReadSingle();
        depthIntrinsics.k6           = reader.ReadSingle();
        depthIntrinsics.codx         = reader.ReadSingle();
        depthIntrinsics.cody         = reader.ReadSingle();
        // Note that p2 is stored before p1, matching the Azure Kinect calibration layout.
        depthIntrinsics.p2           = reader.ReadSingle();
        depthIntrinsics.p1           = reader.ReadSingle();
        depthIntrinsics.metricRadius = reader.ReadSingle();
        videoInitSenderPacketData.depthIntrinsics = depthIntrinsics;

        videoInitSenderPacketData.depthMetricRadius = reader.ReadSingle();

        return videoInitSenderPacketData;
    }
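For reference, writing such a packet is the mirror image of Parse(). The sketch below assumes the skipped 5-byte header is a 4-byte session id followed by a 1-byte packet type (inferred from the accessors in Example #3, not confirmed by the library) and that System.IO is imported; the field order must mirror Parse() exactly, including p2 before p1.

    // A minimal sketch of the inverse of Parse(); the header layout is an assumption.
    public static byte[] Serialize(VideoInitSenderPacketData data, int sessionId, byte packetType)
    {
        var stream = new MemoryStream();
        using (var writer = new BinaryWriter(stream))
        {
            writer.Write(sessionId);  // bytes 0..3: session id (assumed)
            writer.Write(packetType); // byte 4: packet type (assumed)

            writer.Write(data.depthWidth);
            writer.Write(data.depthHeight);

            var intrinsics = data.depthIntrinsics;
            writer.Write(intrinsics.cx);
            writer.Write(intrinsics.cy);
            writer.Write(intrinsics.fx);
            writer.Write(intrinsics.fy);
            writer.Write(intrinsics.k1);
            writer.Write(intrinsics.k2);
            writer.Write(intrinsics.k3);
            writer.Write(intrinsics.k4);
            writer.Write(intrinsics.k5);
            writer.Write(intrinsics.k6);
            writer.Write(intrinsics.codx);
            writer.Write(intrinsics.cody);
            writer.Write(intrinsics.p2); // p2 before p1, mirroring Parse()
            writer.Write(intrinsics.p1);
            writer.Write(intrinsics.metricRadius);

            writer.Write(data.depthMetricRadius);
        }
        return stream.ToArray();
    }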
Example #2
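Prepares the texture group used for rendering: it sets the depth dimensions, asks the native plugin to create the textures, constructs the TRVL depth decoder, and binds the resulting Y, UV, and depth textures to the screen material once the plugin reports the textures exist.
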
    public IEnumerator SetupTextureGroup(VideoInitSenderPacketData initPacketData)
    {
        if (state != PrepareState.Unprepared)
        {
            throw new Exception("State has to be Unprepared to prepare TextureGroupUpdater.");
        }

        state = PrepareState.Preparing;

        textureGroup.SetWidth(initPacketData.depthWidth);
        textureGroup.SetHeight(initPacketData.depthHeight);
        PluginHelper.InitTextureGroup(textureGroup.GetId());

        depthDecoder = new TrvlDecoder(initPacketData.depthWidth * initPacketData.depthHeight);

        while (!textureGroup.IsInitialized())
        {
            yield return null;
        }

        // TextureGroup includes Y, U, V, and a depth texture.
        azureKinectScreenMaterial.SetTexture("_YTex", textureGroup.GetYTexture());
        azureKinectScreenMaterial.SetTexture("_UvTex", textureGroup.GetUvTexture());
        azureKinectScreenMaterial.SetTexture("_DepthTex", textureGroup.GetDepthTexture());

        state = PrepareState.Prepared;
    }
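The polling loop is there because PluginHelper.InitTextureGroup presumably only schedules texture creation on the native/render side: the coroutine yields once per frame until IsInitialized() returns true, and only then binds the textures and marks the updater Prepared.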
Example #3
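Routes one received UDP packet into the per-sender collections. Confirm packets are collected before the session lookup, so they do not require an existing SenderPacketSet; every other packet type is dropped unless a set for the sender's session id already exists.
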
    private static void CollectPacket(UdpSocketPacket packet, SenderPacketCollection senderPacketCollection)
    {
        int senderSessionId         = PacketHelper.getSessionIdFromSenderPacketBytes(packet.Bytes);
        SenderPacketType packetType = PacketHelper.getPacketTypeFromSenderPacketBytes(packet.Bytes);

        if (packetType == SenderPacketType.Confirm)
        {
            senderPacketCollection.ConfirmPacketInfoList.Add(new ConfirmPacketInfo(packet.EndPoint, senderSessionId, ConfirmSenderPacketData.Parse(packet.Bytes)));
            return;
        }

        if (!senderPacketCollection.SenderPacketSets.TryGetValue(senderSessionId, out var senderPacketSet))
        {
            return;
        }

        // Heartbeat packets turn on ReceivedAny; they need no case in the switch below.
        senderPacketSet.ReceivedAny = true;
        switch (packetType)
        {
        case SenderPacketType.VideoInit:
            senderPacketSet.InitPacketDataList.Add(VideoInitSenderPacketData.Parse(packet.Bytes));
            break;

        case SenderPacketType.Frame:
            senderPacketSet.VideoPacketDataList.Add(VideoSenderPacketData.Parse(packet.Bytes));
            break;

        case SenderPacketType.Parity:
            senderPacketSet.FecPacketDataList.Add(ParitySenderPacketData.Parse(packet.Bytes));
            break;

        case SenderPacketType.Audio:
            senderPacketSet.AudioPacketDataList.Add(AudioSenderPacketData.Parse(packet.Bytes));
            break;

        case SenderPacketType.Floor:
            senderPacketSet.FloorPacketDataList.Add(FloorSenderPacketData.Parse(packet.Bytes));
            break;
        }
    }
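The PacketHelper accessors are not shown on this page. A hypothetical implementation consistent with the 5-byte offset in Example #1 would read the header fields directly (both the layout and these bodies are assumptions, and System must be imported for BitConverter):

    // Hypothetical sketch of the header accessors, consistent with
    // reader.BaseStream.Position = 5 in Example #1: a 4-byte session id
    // followed by a 1-byte packet type.
    public static int getSessionIdFromSenderPacketBytes(byte[] packetBytes)
    {
        return BitConverter.ToInt32(packetBytes, 0);
    }

    public static SenderPacketType getPacketTypeFromSenderPacketBytes(byte[] packetBytes)
    {
        return (SenderPacketType)packetBytes[4];
    }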
Example #4
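Builds the point-cloud mesh for the incoming depth stream: it unprojects every depth pixel onto the z = 1 plane, stores per-vertex quad half-sizes in a second UV channel, and uploads the result as a point-topology mesh.
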
    // The per-pixel Unproject() calculation takes too long for a single frame,
    // so this function runs as a coroutine that yields roughly every 100 ms.
    private IEnumerator SetupMesh(VideoInitSenderPacketData initSenderPacketData)
    {
        State    = PrepareState.Preparing;
        Progress = 0.0f;

        int width  = initSenderPacketData.depthWidth;
        int height = initSenderPacketData.depthHeight;

        var vertices = new Vector3[width * height];
        var uv       = new Vector2[width * height];

        var stopWatch = Stopwatch.StartNew();

        for (int i = 0; i < width; ++i)
        {
            for (int j = 0; j < height; ++j)
            {
                float[] xy    = new float[2];
                int     valid = 0;
                if (KinectIntrinsicTransformation.Unproject(initSenderPacketData.depthIntrinsics,
                                                            initSenderPacketData.depthMetricRadius,
                                                            new float[2] { i, j }, ref xy, ref valid))
                {
                    // Flip y since Azure Kinect's y axis is downwards.
                    // https://docs.microsoft.com/en-us/azure/kinect-dk/coordinate-systems
                    vertices[i + j * width] = new Vector3(xy[0], -xy[1], 1.0f);
                }
                else
                {
                    vertices[i + j * width] = new Vector3(0.0f, 0.0f, 0.0f);
                }
                uv[i + j * width] = new Vector2(i / (float)(width - 1), j / (float)(height - 1));
            }

            if (stopWatch.ElapsedMilliseconds > 100)
            {
                Progress = (i * 0.99f) / width;
                yield return null;

                stopWatch.Restart();
            }
        }

        Progress = 0.99f;

        //print($"vertices[0]: {vertices[0]}"); // (-1.0, 1.0, 1.0): left-top
        //print($"vertices[last]: {vertices[vertices.Length - 1]}"); // (0.8, -0.6, 1.0): right-bottom

        const float SIZE_AMPLIFIER = 1.2f;
        int         quadWidth      = width - 2;
        int         quadHeight     = height - 2;
        var         quadVertices   = new Vector3[quadWidth * quadHeight];
        var         quadUv         = new Vector2[quadWidth * quadHeight];
        var         quadHalfSizes  = new Vector2[quadWidth * quadHeight];

        for (int ii = 0; ii < quadWidth; ++ii)
        {
            for (int jj = 0; jj < quadHeight; ++jj)
            {
                int i = ii + 1;
                int j = jj + 1;
                quadVertices[ii + jj * quadWidth] = vertices[i + j * width];
                quadUv[ii + jj * quadWidth]       = uv[i + j * width];
                // Trying to make both x and y positive: the diagonal from (i - 1, j + 1) to
                // (i + 1, j - 1) increases in both axes after the y flip. The first 0.5f reduces
                // the two-pixel span to the size belonging to the vertex at (i, j); the second
                // 0.5f takes half of that size.
                quadHalfSizes[ii + jj * quadWidth] = (vertices[(i + 1) + (j - 1) * width] - vertices[(i - 1) + (j + 1) * width]) * 0.5f * 0.5f * SIZE_AMPLIFIER;
            }
        }

        //print($"quadSizes[0]: {quadSizes[0].x}, {quadSizes[0].y}"); // 0.002900749, 0.003067017

        var triangles = new int[quadWidth * quadHeight];

        for (int i = 0; i < quadWidth * quadHeight; ++i)
        {
            triangles[i] = i;
        }

        // 65.535 equals (2^16 - 1) / 1000: the (2^16 - 1) factor undoes the texture-level
        // normalization from 0 ~ (2^16 - 1) down to 0 ~ 1, and the 1000 converts mm
        // (the unit of Azure Kinect) to m (the unit of Unity3D).
        for (int i = 0; i < quadVertices.Length; ++i)
        {
            quadVertices[i] *= 65.535f;
        }

        for (int i = 0; i < quadHalfSizes.Length; ++i)
        {
            quadHalfSizes[i] *= 65.535f;
        }

        // Without these bounds, Unity decides whether to render this mesh based on the vertices
        // calculated here, so it may cull the mesh displaced by the depth texture even when the
        // displaced mesh is inside the camera's viewport.
        var bounds = new Bounds(Vector3.zero, Vector3.one * 1000.0f);

        var mesh = new Mesh()
        {
            indexFormat = IndexFormat.UInt32,
            vertices    = quadVertices,
            uv          = quadUv,
            uv2         = quadHalfSizes,
            bounds      = bounds,
        };

        mesh.SetIndices(triangles, MeshTopology.Points, 0);

        meshFilter.mesh = mesh;

        State = PrepareState.Prepared;
    }
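Two details are easy to miss. The mesh uses MeshTopology.Points with per-vertex half-sizes in uv2, which suggests the shader expands each point into a small quad of that size at render time; and the 65.535 pre-scaling bakes the 16-bit-to-metres conversion into the vertices, since (2^16 - 1) / 1000 = 65535 / 1000 = 65.535, so the shader can scale directly by the normalized depth sample.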
Example #5
    public void StartPrepare(VideoInitSenderPacketData initSenderPacketData)
    {
        StartCoroutine(SetupMesh(initSenderPacketData));
    }
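The wrapper exists because SetupMesh (Example #4) is a coroutine: it must be driven by Unity's scheduler through StartCoroutine rather than called directly, and since this class calls StartCoroutine on itself, it is a MonoBehaviour.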
Example #6
    public void StartPrepare(MonoBehaviour monoBehaviour, VideoInitSenderPacketData initPacketData)
    {
        monoBehaviour.StartCoroutine(SetupTextureGroup(initPacketData));
    }
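Putting the examples together: once CollectPacket (Example #3) has gathered a VideoInit packet, a receiver can hand it to both preparers. In the sketch below, screenRenderer and textureGroupUpdater are hypothetical fields of a receiver MonoBehaviour, not names taken from the library.

    // Hypothetical glue code inside a receiver MonoBehaviour.
    if (senderPacketSet.InitPacketDataList.Count > 0)
    {
        var initPacketData = senderPacketSet.InitPacketDataList[0];
        screenRenderer.StartPrepare(initPacketData);            // Example #5: builds the mesh
        textureGroupUpdater.StartPrepare(this, initPacketData); // Example #6: sets up the textures
    }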