/// <summary>
/// Calculates the 3-D point cloud using the native SDK functions.
/// </summary>
/// <returns>3-D point coordinates in meters.</returns>
/// <remarks>Format is [height, width, coordinate] with coordinate in {0,1,2} for x, y and z.</remarks>
private Point3fCameraImage Calc3DCoordinates()
{
    CameraSpacePoint[] worldPoints = new CameraSpacePoint[depthHeight * depthWidth];
    Point3fCameraImage img = new Point3fCameraImage(depthWidth, depthHeight);

    // Hold the depth buffer lock for the whole mapping + copy so the frame
    // callback cannot overwrite depthFrameData mid-conversion.
    lock (this.depthFrameData)
    {
        // Serialize access to the SDK mapper.
        lock (cameraLock)
        {
            coordinateMapper.MapDepthFrameToCameraSpace(depthFrameData, worldPoints);
        }

        for (int y = 0; y < depthHeight; y++)
        {
            for (int x = 0; x < depthWidth; x++)
            {
                // mirror image: read each row right-to-left, and negate the
                // X and Y camera-space components to match the flip.
                int idx = y * depthWidth + depthWidthMinusOne - x;
                img[y, x] = new Point3f(worldPoints[idx].X * -1, worldPoints[idx].Y * -1, worldPoints[idx].Z);
            }
        }
        // Stamp the image with the time of the depth frame it was built from.
        img.TimeStamp = timestampDepth;
    }
    return(img);
}
/// <summary>
/// Per-frame update: maps the latest depth frame into camera space and
/// renders points whose X lies in [-1, 1] m as scaled particles; everything
/// else (and zero-depth readings) is hidden by zeroing the particle size.
/// </summary>
void Update()
{
    // get new depth data from DepthSourceManager.
    ushort[] rawdata = depthSourceManagerScript.GetData();

    // map to camera space coordinate
    mapper.MapDepthFrameToCameraSpace(rawdata, cameraSpacePoints);

    for (int i = 0; i < cameraSpacePoints.Length; i++)
    {
        // Only display points whose X coordinate is within [-1, 1].
        if (cameraSpacePoints[i].X <= 1 && cameraSpacePoints[i].X >= -1)
        {
            particles[i].position = new Vector3(
                cameraSpacePoints[i].X * scale,
                cameraSpacePoints[i].Y * scale,
                cameraSpacePoints[i].Z * scale);
            particles[i].color = color;
            particles[i].size = size;

            // A raw depth value of 0 means "no reading" — hide the particle.
            if (rawdata[i] == 0)
            {
                particles[i].size = 0;
            }
        }
        else
        {
            // BUGFIX: this branch was guarded by `particles[i].position != null`,
            // but Vector3 is a value type, so the lifted null comparison is
            // always true — the guard was dead. A plain else is equivalent
            // and no longer misleading.
            particles[i].size = 0;
        }
    }

    // update particle system
    GetComponent <ParticleSystem>().SetParticles(particles, particles.Length);
}
/// <summary>
/// Builds the point cloud: maps each depth pixel to its color-image position
/// and to 3-D camera space, then fills PointCloud with position + BGR color.
/// </summary>
private void CoodinateMapping()
{
    // Build depth->color and depth->camera correspondences for this frame.
    mapper.MapDepthFrameToColorSpace(depthBuffer, depthColorPoints);
    mapper.MapDepthFrameToCameraSpace(depthBuffer, cameraPoints);

    Parallel.For(0, depthFrameDesc.LengthInPixels, i =>
    {
        PointCloud[i].Clear();

        // Position of this depth pixel in the color buffer.
        int colorX = (int)depthColorPoints[i].X;
        int colorY = (int)depthColorPoints[i].Y;
        // Skip pixels that map outside the color frame (leaves the entry cleared).
        if ((colorX < 0) || (colorFrameDesc.Width <= colorX) ||
            (colorY < 0) || (colorFrameDesc.Height <= colorY))
        {
            return;
        }

        PointCloud[i].X = cameraPoints[i].X;
        PointCloud[i].Y = cameraPoints[i].Y;
        PointCloud[i].Z = cameraPoints[i].Z;

        // Pixel bytes are read as B, G, R at offsets +0, +1, +2.
        int colorIndex = (int)(((colorY * colorFrameDesc.Width) + colorX) * colorFrameDesc.BytesPerPixel);
        PointCloud[i].B = colorBuffer[colorIndex + 0];
        PointCloud[i].G = colorBuffer[colorIndex + 1];
        PointCloud[i].R = colorBuffer[colorIndex + 2];
    });
}
/// <summary>
/// Maps the current depth frame into camera space, transforms each sampled
/// point into this transform's world space (optionally mirrored on X), and
/// raises OnPointCloudUpdate with the result.
/// </summary>
void UpdatePointCloud()
{
    ushort[] rawDepth = depthManager.GetData();
    CameraSpacePoint[] worldPoints = new CameraSpacePoint[rawDepth.Length];
    mapper.MapDepthFrameToCameraSpace(rawDepth, worldPoints);

    // Walk the depth grid with a stride of downSample, rewriting each sampled
    // camera-space point in place with its world-space equivalent.
    for (int row = 0; row < depthHeight; row += downSample)
    {
        int rowStart = row * depthWidth;
        for (int col = 0; col < depthWidth; col += downSample)
        {
            int idx = rowStart + col;
            CameraSpacePoint csp = worldPoints[idx];
            float xValue = mirror ? -csp.X : csp.X;
            Vector3 world = transform.TransformPoint(new Vector3(xValue, csp.Y, csp.Z));
            worldPoints[idx].X = world.x;
            worldPoints[idx].Y = world.y;
            worldPoints[idx].Z = world.z;
        }
    }

    OnPointCloudUpdate(worldPoints, depthWidth, depthHeight, downSample);
}
/// <summary>
/// Per-frame update: maps the latest depth frame to camera space and renders
/// every point as a scaled particle.
/// </summary>
void Update()
{
    // All dependencies must be wired up before touching sensor data.
    if (_Sensor == null || MultiSourceManager == null || _MultiManager == null)
    {
        return;
    }

    ushort[] depthFrame = _MultiManager.GetDepthData();
    _Mapper.MapDepthFrameToCameraSpace(depthFrame, cameraSpacePoints);

    for (int i = 0; i < cameraSpacePoints.Length; i++)
    {
        CameraSpacePoint csp = cameraSpacePoints[i];
        particles[i].position = new Vector3(csp.X * scale, csp.Y * scale, csp.Z * scale);
        particles[i].startColor = color;
        particles[i].startSize = size;
        // Hiding zero-depth samples was disabled in the original:
        //if (rawdata[i] == 0) particles[i].startSize = 0;
    }

    _particleSystem.SetParticles(particles, particles.Length);
}
/// <summary>
/// Maps the current depth frame to color and camera space and returns the
/// sampled points that fall inside the configured X/Y cutoff window.
/// </summary>
/// <returns>Valid points (color-space position + depth), flagged when at or beyond the wall depth.</returns>
private List <ValidPoint> DepthToColor()
{
    // Points to return
    List <ValidPoint> validPoints = new List <ValidPoint>();

    // Get Data
    mDepthData = mMultiSource.GetDepthData();

    // Map
    mMapper.MapDepthFrameToCameraSpace(mDepthData, mCameraSpacePoints);
    mMapper.MapDepthFrameToColorSpace(mDepthData, mColorSpacePoints);

    // Filter
    // NOTE(review): the outer loop runs over the FULL width while the inner
    // loop runs over height/8 and sampleIndex is multiplied by 8 — the net
    // effect is sampling every 8th pixel of the whole frame (max index stays
    // in bounds). Sibling implementations in this codebase divide BOTH
    // dimensions by 8 (1/64 of the frame); confirm which density is intended.
    for (int i = 0; i < mDepthResolution.x; i++)
    {
        for (int j = 0; j < mDepthResolution.y / 8; j++)
        {
            // Sample index
            int sampleIndex = (j * mDepthResolution.x) + i;
            sampleIndex *= 8;

            // Cutoff tests: skip points outside the X/Y window.
            if (mCameraSpacePoints[sampleIndex].X < mLeftCutOff)
            {
                continue;
            }
            if (mCameraSpacePoints[sampleIndex].X > mRightCutOff)
            {
                continue;
            }
            if (mCameraSpacePoints[sampleIndex].Y > mTopCutOff)
            {
                continue;
            }
            if (mCameraSpacePoints[sampleIndex].Y < mBottomCutOff)
            {
                continue;
            }

            // Create point
            ValidPoint newPoint = new ValidPoint(mColorSpacePoints[sampleIndex], mCameraSpacePoints[sampleIndex].Z);

            // Depth test: flag points lying at or beyond the wall plane.
            if (mCameraSpacePoints[sampleIndex].Z >= mWallDepth)
            {
                newPoint.mWithinWallDepth = true;
            }

            // Add
            validPoints.Add(newPoint);
        }
    }
    return(validPoints);
}
/// <summary>
/// Processes the current depth frame: maps it into camera space, applies the
/// bounds filter, converts it to pixels, and renders the depth image.
/// </summary>
private void ProcessDepth()
{
    m_coordinateMapper.MapDepthFrameToCameraSpace(m_depthData, m_cameraSpacePoints);

    //Remove all ignores
    DepthBound();
    ConvertDepthDataToPixels();
    m_depthImage = RenderPixelArray(m_depthFrameDescription, m_depthPixels);
}
/// <summary>
/// Maps the latest depth frame to color and camera space, downsamples both
/// axes by 8, and collects the points inside the configured cutoff window.
/// </summary>
/// <returns>Valid points (color-space position + depth), flagged when at or beyond the wall depth.</returns>
private List <ValidPoint> DepthToColour()
{
    // Grab the newest depth frame and build both mappings from it.
    mDepthData = mMultiSource.GetDepthData();
    mMapper.MapDepthFrameToCameraSpace(mDepthData, mCameraSpacePoints);
    mMapper.MapDepthFrameToColorSpace(mDepthData, mColorSpacePoints);

    List <ValidPoint> validPoints = new List <ValidPoint>();

    // Downsampling both axes by 8 keeps sampleIndex * 8 in bounds and reduces
    // the work to 1/64 of the frame.
    int sampleCols = mDepthResolution.x / 8;
    int sampleRows = mDepthResolution.y / 8;
    for (int col = 0; col < sampleCols; col++)
    {
        for (int row = 0; row < sampleRows; row++)
        {
            // One-dimensional index of the sampled pixel.
            int sampleIndex = ((row * mDepthResolution.x) + col) * 8;
            CameraSpacePoint camPoint = mCameraSpacePoints[sampleIndex];

            // Reject anything outside the X/Y cutoff window.
            bool outsideWindow = camPoint.X < mLeftCutOff
                              || camPoint.X > mRightCutOff
                              || camPoint.Y > mTopCutOff
                              || camPoint.Y < mBottomCutOff;
            if (outsideWindow)
            {
                continue;
            }

            ValidPoint newPoint = new ValidPoint(mColorSpacePoints[sampleIndex], camPoint.Z);

            // Flag points lying at or beyond the wall plane.
            if (camPoint.Z >= mWallDepth)
            {
                newPoint.mWithinWallDepth = true;
            }

            validPoints.Add(newPoint);
        }
    }
    return validPoints;
}
/// <summary>
/// Densely store depth to color mapping as BLKD.
///
/// Each depth pixel is packed as 2 little-endian shorts (color X, color Y —
/// or (-1, -1) when it maps outside the color frame) followed by the three
/// camera-space floats (12 bytes).
/// </summary>
/// <param name="frame">KinectScanner.Reading.Frame</param>
/// <param name="buffer">Destination buffer; must be exactly
/// DEPTH_INFRARED_PIXELS * DEPTH_MAPPING_BYTES_PER_PIXEL bytes.</param>
/// <returns>The packed BLKD together with the depth frame's relative time.</returns>
/// <exception cref="ArgumentException">Thrown when the buffer size is wrong.</exception>
public Tuple <Blkd, TimeSpan> CaptureMappedFrame(LiveFrame frame, byte[] buffer)
{
    DepthFrame depthFrame = frame.NativeDepthFrame;
    CoordinateMapper mapper = frame.NativeCoordinateMapper;

    // The caller must supply an exactly-sized buffer.
    if (buffer.Length != Frame.DEPTH_INFRARED_PIXELS * DEPTH_MAPPING_BYTES_PER_PIXEL)
    {
        throw new ArgumentException(string.Format("Buffer length is {0} but {1} is needed", buffer.LongLength, Frame.DEPTH_INFRARED_PIXELS * DEPTH_MAPPING_BYTES_PER_PIXEL));
    }

    depthFrame.CopyFrameDataToArray(_depthData);
    mapper.MapDepthFrameToColorSpace(_depthData, _colorPoints);
    mapper.MapDepthFrameToCameraSpace(_depthData, _cameraSpacePoints);

    Array.Clear(buffer, 0, buffer.Length);

    int count = 0;
    for (int i = 0; i < Frame.DEPTH_INFRARED_PIXELS; ++i)
    {
        ColorSpacePoint colorPoint = _colorPoints[i];
        CameraSpacePoint cameraPoint = _cameraSpacePoints[i];

        // make sure the depth pixel maps to a valid point in color space
        // (round to nearest; out-of-frame pixels are stored as (-1, -1))
        short colorX = (short)Math.Floor(colorPoint.X + 0.5);
        short colorY = (short)Math.Floor(colorPoint.Y + 0.5);
        if (colorX < 0 || colorX >= Frame.COLOR_WIDTH || colorY < 0 || colorY >= Frame.COLOR_HEIGHT)
        {
            colorX = -1;
            colorY = -1;
        }

        // Little endian === lowest order bytes at lower addresses
        buffer[count++] = (byte)(colorX >> 0);
        buffer[count++] = (byte)(colorX >> 8);
        buffer[count++] = (byte)(colorY >> 0);
        buffer[count++] = (byte)(colorY >> 8);

        // Append the camera-space position as three floats (12 bytes).
        float[] cameraPointValues = new float[] { cameraPoint.X, cameraPoint.Y, cameraPoint.Z };
        System.Buffer.BlockCopy(cameraPointValues, 0, buffer, count, 12);
        count += 12;
    }

    Blkd result = new Blkd
    {
        Width = (UInt16)Frame.DEPTH_INFRARED_WIDTH,
        Height = (UInt16)Frame.DEPTH_INFRARED_HEIGHT,
        BytesPerPixel = DEPTH_MAPPING_BYTES_PER_PIXEL,
        Version = 2,
        Data = buffer
    };
    return(new Tuple <Blkd, TimeSpan>(result, depthFrame.RelativeTime));
}
/// <summary>
/// Update kinect data in multithread: polls depth and color at ~30 Hz,
/// converting depth into camera-space and color-space mappings, until the
/// shared 'done' flag is raised.
/// </summary>
private void UpdateKinectData()
{
    bool _done = false;
    Chrono chrono = new Chrono();
    chrono.Start();
    // Target period of ~33 ms (about 30 frames per second).
    double dt = 0.033;
    double next_schedule = 0.0;
    Debug.Log("Kinect acquisition started");

    while (!_done)
    {
        double current_time = chrono.GetElapsedTime();
        if (current_time > next_schedule)
        {
            next_schedule += dt;

            // Get depth map
            UpdateDepthData();
            // Get color map
            UpdateColorData();

            // Convert depth map into point cloud.
            // NOTE(review): the field is reassigned to a NEW array *before*
            // being locked, so a reader still holding the previous array's
            // monitor is not excluded from racing with this write; a dedicated
            // lock object would be safer — confirm the reader side.
            _cameraSpace = new CameraSpacePoint[_depthData.Length];
            lock (_cameraSpace)
            {
                _mapper.MapDepthFrameToCameraSpace(_depthData, _cameraSpace);
            }

            // Map depth map to color map (same locking caveat as above).
            _colorSpace = new ColorSpacePoint[_depthData.Length];
            lock (_colorSpace)
            {
                _mapper.MapDepthFrameToColorSpace(_depthData, _colorSpace);
            }
        }

        // Poll the shared stop flag under its lock.
        lock (doneLock)
        {
            _done = done;
        }
        // Yield the rest of the time slice.
        Thread.Sleep(0);
    }
    Debug.Log("Kinect acquisition stopped");
}
/// <summary>
/// Returns the floor point that is right below the given point.
/// </summary>
/// <param name="floor">The floor object to use.</param>
/// <param name="x">The X value of the point in the 2D space.</param>
/// <param name="z">The Z value of the point.</param>
/// <returns>The equivalent Y value of the corresponding floor point, or 424
/// (the depth frame height) when no floor point is found.</returns>
public static int FloorY(this Floor floor, int x, ushort z)
{
    _mapper.MapDepthFrameToCameraSpace(_depthData, _cameraData);

    for (int index = 0; index < _depthData.Length; index++)
    {
        ushort currentZ = _depthData[index];
        // The depth frame is 512 pixels wide: column = index % 512.
        int currentX = index % 512;

        // Search a +/-10 window around (x, z).
        if (currentX >= x - 10 && currentX <= x + 10 && currentZ >= z - 10 && currentZ <= z + 10)
        {
            CameraSpacePoint point3D = _cameraData[index];
            // Accept the first candidate lying (almost) on the floor plane.
            if (floor.DistanceFrom(point3D) < 0.01)
            {
                // Row index == Y pixel coordinate.
                return(index / 512);
            }
        }
    }
    // Fallback: bottom of the 512x424 depth frame.
    return(424);
}
/// <summary>
/// Polls the multi-source reader each frame and copies out color, depth
/// (mapped to camera space), and body-index data, as enabled by the flags.
/// </summary>
void Update()
{
    if (_Reader != null)
    {
        var frame = _Reader.AcquireLatestFrame();
        if (frame != null)
        {
            if (useColor)
            {
                var colorFrame = frame.ColorFrameReference.AcquireFrame();
                if (colorFrame != null)
                {
                    // Copy as RGBA and upload into the preview texture.
                    colorFrame.CopyConvertedFrameDataToArray(_ColorData, ColorImageFormat.Rgba);
                    _ColorTexture.LoadRawTextureData(_ColorData);
                    _ColorTexture.Apply();

                    colorFrame.Dispose();
                    colorFrame = null;
                }
            }

            if (useDepth)
            {
                var depthFrame = frame.DepthFrameReference.AcquireFrame();
                if (depthFrame != null)
                {
                    depthFrame.CopyFrameDataToArray(_DepthData);
                    depthFrame.Dispose();
                    depthFrame = null;

                    // Convert the raw depth buffer into 3-D camera-space points.
                    mapper.MapDepthFrameToCameraSpace(_DepthData, _RealWorldPoints);
                }
            }

            if (useBodyIndex)
            {
                var bodyIndexFrame = frame.BodyIndexFrameReference.AcquireFrame();
                if (bodyIndexFrame != null)
                {
                    bodyIndexFrame.CopyFrameDataToArray(_BodyIndexData);
                    bodyIndexFrame.Dispose();
                    bodyIndexFrame = null;
                }
            }

            frame = null;
        }
    }
}
/// <summary>
/// Handles a raw depth frame from the sensor: copies its data out, projects
/// every depth pixel into 3-D camera space, and forwards the resulting point
/// cloud to DepthData.
/// </summary>
/// <param name="depthFrame">Raw depth frame.</param>
private static void MapDepthFrame(DepthFrame depthFrame)
{
    var description = depthFrame.FrameDescription;
    int pixelCount = description.Width * description.Height;

    var rawDepth = new ushort[pixelCount];
    depthFrame.CopyFrameDataToArray(rawDepth);

    // Project the raw depth readings into camera space.
    var spacePoints = new CameraSpacePoint[pixelCount];
    coordinateMapper.MapDepthFrameToCameraSpace(rawDepth, spacePoints);

    // update depth point clouds
    DepthData.UpdatePoints(spacePoints);
}
/// <summary>
/// Maps the current depth frame to color and camera space, samples every 8th
/// pixel in each axis, and returns the points inside the cutoff window.
/// </summary>
/// <returns>Valid points (color-space position + depth), flagged when at or beyond the wall depth.</returns>
private List <ValidPoint> DepthToColor()
{
    List <ValidPoint> ValidPoints = new List <ValidPoint>();

    //Get depth
    mDepthData = mMultiSource.GetDepthData();
    mMapper.MapDepthFrameToCameraSpace(mDepthData, mCameraSapcePoints);
    mMapper.MapDepthFrameToColorSpace(mDepthData, mColorSpacePoints);

    //Filter: both axes downsampled by 8, so sampleIndex * 8 stays in bounds.
    for (int i = 0; i < mDepthResolution.x / 8; i++)
    {
        for (int j = 0; j < mDepthResolution.y / 8; j++)
        {
            int sampleIndex = (j * mDepthResolution.x) + i;
            sampleIndex *= 8;

            //CutoffTests: skip points outside the configured X/Y window.
            if (mCameraSapcePoints[sampleIndex].X < mLeftCutOff)
            {
                continue;
            }
            if (mCameraSapcePoints[sampleIndex].X > mRigthCutOff)
            {
                continue;
            }
            if (mCameraSapcePoints[sampleIndex].Y > mTopCutOff)
            {
                continue;
            }
            if (mCameraSapcePoints[sampleIndex].Y < mBottomCutOff)
            {
                continue;
            }

            ValidPoint newPoint = new ValidPoint(mColorSpacePoints[sampleIndex], mCameraSapcePoints[sampleIndex].Z);

            // Flag points lying at or beyond the wall plane.
            if (mCameraSapcePoints[sampleIndex].Z >= mWallDepth)
            {
                newPoint.mWithinWallDepth = true;
            }

            ValidPoints.Add(newPoint);
        }
    }
    return(ValidPoints);
}
/// <summary>
/// Rebuilds the mesh vertices and UVs from the current depth frame, with
/// exponential smoothing of vertex motion between frames.
/// </summary>
private void RefreshData()
{
    var frameDesc = _Sensor.DepthFrameSource.FrameDescription;

    // Map the depth buffer into camera space and into color-image space.
    _Mapper.MapDepthFrameToCameraSpace(_Data, camSpace);
    _Mapper.MapDepthFrameToColorSpace(_Data, colorSpace);

    for (int y = 0; y < frameDesc.Height; y += _DownsampleSize)
    {
        for (int x = 0; x < frameDesc.Width; x += _DownsampleSize)
        {
            int indexX = x / _DownsampleSize;
            int indexY = y / _DownsampleSize;
            int realIndex = y * frameDesc.Width + x;
            int smallIndex = (indexY * (frameDesc.Width / _DownsampleSize)) + indexX;

            //double avg = GetAvg(_Data, x, y, frameDesc.Width, frameDesc.Height);
            //avg = avg * _DepthScale + _Offset.z;
            //_Vertices[smallIndex].z = (float)avg;

            CameraSpacePoint p = camSpace[realIndex];
            Vector3 targetP = (new Vector3(p.X, p.Y, p.Z) + _Offset) * _Scale;
            // NOTE(review): infinity appears to mark an uninitialized/invalid
            // vertex — snap straight to the target rather than interpolating
            // from a bogus position. Confirm where infinity is first written.
            if (float.IsInfinity(_Vertices[smallIndex].z))
            {
                _Vertices[smallIndex] = targetP;
            }
            else
            {
                // Move a motionSmooth fraction of the way to the new position.
                _Vertices[smallIndex] = _Vertices[smallIndex] + (targetP - _Vertices[smallIndex]) * motionSmooth;// _OldVertices[smallIndex] + (targetP - _OldVertices[smallIndex]) / motionSmooth ;
            }

            // Update UV mapping with CDRP (normalized against a 1920x1080 color frame).
            var colorSpacePoint = colorSpace[(y * frameDesc.Width) + x];
            _UV[smallIndex] = new Vector2(colorSpacePoint.X * 1.0f / 1920, colorSpacePoint.Y * 1.0f / 1080);
        }
    }

    _Mesh.vertices = _Vertices;
    _Mesh.uv = _UV;
    _Mesh.triangles = _Triangles;
    _Mesh.RecalculateNormals();
}
/// <summary>
/// Pulls the latest depth and color frames from the sensor, refreshes both
/// preview textures, and updates the depth->camera and color->camera mappings.
/// </summary>
void updateKinect()
{
    // (Re)initialize and bail out if the sensor is not ready this frame.
    if (!kinectIsInit || !sensor.IsOpen || depthReader == null)
    {
        Debug.Log("Kinect is not init or open");
        initKinect();
        return;
    }

    DepthFrame depthFrame = depthReader.AcquireLatestFrame();
    if (depthFrame != null)
    {
        depthFrame.CopyFrameDataToArray(frameData);
        mapper.MapDepthFrameToCameraSpace(frameData, cameraPoints);
        depthFrame.Dispose();

        // Build a grayscale visualization: depth/20 written to all three
        // bytes of each RGB pixel.
        for (int i = 0; i < frameData.Length; ++i)
        {
            depthTexData[3 * i + 0] = (byte)(frameData[i] * 1f / 20);
            depthTexData[3 * i + 1] = (byte)(frameData[i] * 1f / 20);
            depthTexData[3 * i + 2] = (byte)(frameData[i] * 1f / 20);
        }
        depthTex.LoadRawTextureData(depthTexData);
        depthTex.Apply();
    }

    ColorFrame colorFrame = colorReader.AcquireLatestFrame();
    if (colorFrame != null)
    {
        colorFrame.CopyConvertedFrameDataToArray(colorData, ColorImageFormat.Rgba);
        colorFrame.Dispose();
        colorTex.LoadRawTextureData(colorData);
        colorTex.Apply();

        // Map each color pixel to camera space using the latest depth buffer.
        mapper.MapColorFrameToCameraSpace(frameData, colorPoints);
    }
}
/// <summary>
/// Rebuilds mesh vertices and UVs from a depth frame: X/Y come from camera
/// space, Z from GetAvg over the sampled area, all scaled by _DepthScale.
/// </summary>
/// <param name="depthData">Raw depth frame.</param>
/// <param name="colorWidth">Color frame width, used to normalize U.</param>
/// <param name="colorHeight">Color frame height, used to normalize V.</param>
private void RefreshData(ushort[] depthData, int colorWidth, int colorHeight)
{
    var frameDesc = _Sensor.DepthFrameSource.FrameDescription;

    ColorSpacePoint[] colorSpace = new ColorSpacePoint[depthData.Length];
    _Mapper.MapDepthFrameToColorSpace(depthData, colorSpace);

    CameraSpacePoint[] cameraSpace = new CameraSpacePoint[depthData.Length];
    _Mapper.MapDepthFrameToCameraSpace(depthData, cameraSpace);

    for (int y = 0; y < frameDesc.Height; y += _DownsampleSize)
    {
        for (int x = 0; x < frameDesc.Width; x += _DownsampleSize)
        {
            int indexX = x / _DownsampleSize;
            int indexY = y / _DownsampleSize;
            int smallIndex = (indexY * (frameDesc.Width / _DownsampleSize)) + indexX;

            // Z comes from GetAvg (presumably an average around (x, y) —
            // see its definition) scaled by _DepthScale.
            double avg = GetAvg(cameraSpace, x, y, frameDesc.Width, frameDesc.Height);
            avg = avg * _DepthScale;
            //_Vertices[smallIndex].z = (float)avg;

            CameraSpacePoint point = cameraSpace[(y * frameDesc.Width) + x];
            _Vertices[smallIndex].x = point.X * _DepthScale;
            _Vertices[smallIndex].y = point.Y * _DepthScale;
            _Vertices[smallIndex].z = (float)avg;
            //_Vertices[smallIndex] = new Vector3(point.X, point.Y, (float)avg);

            // Update UV mapping with CDRP
            var colorSpacePoint = colorSpace[(y * frameDesc.Width) + x];
            _UV[smallIndex] = new Vector2(colorSpacePoint.X / colorWidth, colorSpacePoint.Y / colorHeight);
        }
    }

    _Mesh.vertices = _Vertices;
    _Mesh.uv = _UV;
    _Mesh.triangles = _Triangles;
    _Mesh.RecalculateNormals();
}
/// <summary>
/// Per-frame update: maps the latest depth frame into camera space and
/// renders it as a scaled particle cloud; particles with a zero depth reading
/// are hidden by zeroing their size.
/// </summary>
void Update()
{
    // Fetch the newest raw depth frame from the DepthSourceManager.
    ushort[] depthFrame = depthSourceManagerScript.GetData();

    // Project every depth pixel into 3-D camera space.
    mapper.MapDepthFrameToCameraSpace(depthFrame, cameraSpacePoints);

    for (int i = 0; i < cameraSpacePoints.Length; i++)
    {
        CameraSpacePoint csp = cameraSpacePoints[i];
        particles[i].position = new Vector3(csp.X * scale, csp.Y * scale, csp.Z * scale);
        particles[i].color = color;
        // Zero depth means "no reading" — collapse the particle to size 0.
        particles[i].size = (depthFrame[i] == 0) ? 0 : size;
    }

    // Push the updated buffer into the particle system.
    GetComponent <ParticleSystem>().SetParticles(particles, particles.Length);
}
/// <summary>
/// Acquires the latest multi-source frame, copies its depth data, maps it
/// into camera space, and converts the result to an RGBA representation.
/// </summary>
void Update()
{
    if (reader == null)
    {
        return;
    }

    MultiSourceFrame MSFrame = reader.AcquireLatestFrame();
    if (MSFrame == null)
    {
        return;
    }

    // Dispose the depth frame as soon as we've copied its data out.
    using (DepthFrame frame = MSFrame.DepthFrameReference.AcquireFrame())
    {
        if (frame != null)
        {
            frame.CopyFrameDataToArray(depthData);
            coordinateMapper.MapDepthFrameToCameraSpace(depthData, cameraSpacePoints);
            ConvertDepth2RGBA();
        }
    }
    MSFrame = null;
}
// ==================================== <summary>
// Per-frame update: refreshes body (skeleton) data including the floor clip
// plane, then copies the depth frame and maps it into camera space.
// </summary> ===================================
void Update()
{
    // ------------------
    // Refresh body data
    if (_BodyReader != null)
    {
        var frame = _BodyReader.AcquireLatestFrame();
        if (frame != null)
        {
            // Lazily allocate the body array on first use.
            if (_BodyData == null)
            {
                _BodyData = new Body[_Sensor.BodyFrameSource.BodyCount];
            }
            frame.GetAndRefreshBodyData(_BodyData);

            // Grab this frame's floor clip plane estimate.
            FloorClipPlane = frame.FloorClipPlane;

            frame.Dispose();
            frame = null;
        }
    }

    // ------------------
    // Depth
    if (_DepthReader != null)
    {
        var frame = _DepthReader.AcquireLatestFrame();
        if (frame != null)
        {
            frame.CopyFrameDataToArray(_DepthDataRaw);
            // Convert the raw depth buffer into camera-space points.
            _Mapper.MapDepthFrameToCameraSpace(_DepthDataRaw, _DepthData);
            frame.Dispose();
            frame = null;
        }
    }
}
/// <summary>
/// Rebuilds the point mesh from a depth frame: vertex positions come from
/// camera space (sampled at an offset), vertex colors are sampled from the
/// color texture via the depth-to-color mapping.
/// </summary>
/// <param name="depthData">Raw depth frame.</param>
/// <param name="colorData">Color texture to sample vertex colors from.</param>
/// <param name="colorWidth">Color frame width (not used directly here).</param>
/// <param name="colorHeight">Color frame height (not used directly here).</param>
private void UpdateData(ushort[] depthData, Texture2D colorData, int colorWidth, int colorHeight)
{
    var frameDesc = _Sensor.DepthFrameSource.FrameDescription;

    ColorSpacePoint[] colorSpace = new ColorSpacePoint[depthData.Length];
    CameraSpacePoint[] cameraSpace = new CameraSpacePoint[depthData.Length];
    _Mapper.MapDepthFrameToColorSpace(depthData, colorSpace);
    _Mapper.MapDepthFrameToCameraSpace(depthData, cameraSpace);

    for (int y = 0; y < frameDesc.Height; y += _DownsampleSize)
    {
        for (int x = 0; x < frameDesc.Width; x += _DownsampleSize)
        {
            int indexX = x / _DownsampleSize;
            int indexY = y / _DownsampleSize;
            int offsetX = x + offset;
            int offsetY = y + offset;
            int smallIndex = (indexY * (frameDesc.Width / _DownsampleSize)) + indexX;
            int fullIndex = (offsetY * frameDesc.Width) + offsetX;

            _Vertices[smallIndex].z = cameraSpace[fullIndex].Z - 1;

            // Only accept coordinates inside a plausible range (invalid
            // mappings come back as +/- infinity and fail these checks).
            // BUGFIX: the X upper bound previously read
            // cameraSpace[smallIndex].X — both bounds must test the SAME
            // sampled point (fullIndex), as the Y check below does.
            if (cameraSpace[fullIndex].X > -1000 && cameraSpace[fullIndex].X < 1000)
            {
                _Vertices[smallIndex].x = cameraSpace[fullIndex].X;
            }
            if (cameraSpace[fullIndex].Y > -1000 && cameraSpace[fullIndex].Y < 1000)
            {
                _Vertices[smallIndex].y = cameraSpace[fullIndex].Y;
            }

            // Sample the vertex color at the mapped color-space position.
            var colorSpacePoint = colorSpace[(y * frameDesc.Width) + x];
            _Colors[smallIndex] = colorData.GetPixel((int)colorSpacePoint.X, (int)colorSpacePoint.Y);
        }
    }

    _Mesh.vertices = _Vertices;
    _Mesh.colors = _Colors;
    _Mesh.SetIndices(_Indices, MeshTopology.Points, 0);
}
/// <summary>
/// creates color info for all DEPTH pixels (to later e.g. write ply file from it)
/// </summary>
/// <param name="myColorMetaData">Color frame data; may be null (falls back to a B/W cloud).</param>
/// <param name="myDepthMetaData">Depth frame data used for both mappings.</param>
/// <param name="myBodyMetaData">Body data forwarded to the point-cloud builders.</param>
/// <param name="myCoordinateMapper">SDK mapper used to project the depth frame.</param>
/// <returns>The assembled point cloud, or null when both color and depth data are missing.</returns>
public static PointCloud ToPointCloud(ColorMetaData myColorMetaData, DepthMetaData myDepthMetaData, BodyMetaData myBodyMetaData, CoordinateMapper myCoordinateMapper)
{
    if (myColorMetaData == null && myDepthMetaData == null)
    {
        return(null);
    }

    // One entry per depth pixel for each mapping.
    ColorSpacePoint[] mycolorPointsInDepthSpace = new ColorSpacePoint[DepthMetaData.XDepthMaxKinect * DepthMetaData.YDepthMaxKinect];
    CameraSpacePoint[] myRealWorldPoints = new CameraSpacePoint[DepthMetaData.XDepthMaxKinect * DepthMetaData.YDepthMaxKinect];
    myCoordinateMapper.MapDepthFrameToCameraSpace(myDepthMetaData.FrameData, myRealWorldPoints);
    myCoordinateMapper.MapDepthFrameToColorSpace(myDepthMetaData.FrameData, mycolorPointsInDepthSpace);

    // With color data, build a colored cloud (in parallel); otherwise black/white.
    if (myColorMetaData != null)
    {
        return(PointCloudWithColorParallel(mycolorPointsInDepthSpace, myRealWorldPoints, myColorMetaData, myDepthMetaData, myBodyMetaData));
    }
    else
    {
        return(PointCloudBW(myRealWorldPoints, myDepthMetaData, myBodyMetaData));
    }
}
/// <summary>
/// Per-frame update: refreshes the cached point array from the latest depth
/// frame, scaling each camera-space coordinate.
/// </summary>
void Update()
{
    // All dependencies must be in place before touching sensor data.
    if (_Sensor == null || MultiSourceManager == null || _MultiManager == null)
    {
        return;
    }

    ushort[] depthFrame = _MultiManager.GetDepthData();
    _Mapper.MapDepthFrameToCameraSpace(depthFrame, cameraSpacePoints);

    for (int i = 0; i < cameraSpacePoints.Length; i++)
    {
        CameraSpacePoint csp = cameraSpacePoints[i];
        points[i] = new Vector3(csp.X * scale, csp.Y * scale, csp.Z * scale);
    }
}
/// <summary>
/// Per-frame update: refreshes vertex positions (unscaled camera space) from
/// the latest depth frame and resets every vertex color to white.
/// </summary>
void Update()
{
    if (_Sensor == null || _MultiManager == null)
    {
        return;
    }

    ushort[] depthFrame = _MultiManager.GetDepthData();
    _Mapper.MapDepthFrameToColorSpace(depthFrame, _colorSpacePoints);
    _Mapper.MapDepthFrameToCameraSpace(depthFrame, _cameraSpacePoints);

    for (int i = 0; i < _cameraSpacePoints.Length; i++)
    {
        CameraSpacePoint csp = _cameraSpacePoints[i];
        vertices[i] = new Vector3(csp.X, csp.Y, csp.Z);
        Colors[i] = Color.white;
    }
}
/// <summary>
/// Maps the depth frame into camera space, fills the vertex grid (skipping
/// invalid pixels), drops triangles that are too steep or too deep, then
/// compacts and reindexes the surviving vertices and triangles.
/// </summary>
private void RefreshDataCamera()
{
    var frameDesc = _Sensor.DepthFrameSource.FrameDescription;

    //map the depth to the camera coordinate system
    mapper.MapDepthFrameToCameraSpace(_Data, _DataCamera);

    PopulateMeshArray(frameDesc.Width / DownsampleSize, frameDesc.Height / DownsampleSize);

    for (int y = 0; y < frameDesc.Height; y += DownsampleSize)
    {
        for (int x = 0; x < frameDesc.Width; x += DownsampleSize)
        {
            int indexX = x / DownsampleSize;
            int indexY = y / DownsampleSize;
            int smallIndex = (indexY * (frameDesc.Width / DownsampleSize)) + indexX;
            int fullIndex = (y * frameDesc.Width) + x;

            Vector3 pos = new Vector3();
            CameraSpacePoint raw = _DataCamera[fullIndex];

            // Invalid depth pixels map to infinite coordinates; only write
            // the vertex when all three components are finite.
            bool invalidFound = false;
            if (!float.IsInfinity(raw.X))
            {
                pos.x = raw.X;
            }
            else
            {
                invalidFound = true;
            }
            if (!float.IsInfinity(raw.Y))
            {
                pos.y = raw.Y;
            }
            else
            {
                invalidFound = true;
            }
            if (!float.IsInfinity(raw.Z))
            {
                pos.z = raw.Z;
            }
            else
            {
                invalidFound = true;
            }
            if (!invalidFound)
            {
                _Vertices[smallIndex] = pos;
            }
        }
    }

    // Keep only triangles whose normal's angle to Vector3.forward exceeds
    // minAngle and whose vertices are all nearer than maxZ / 1000
    // (maxZ presumably in millimeters — confirm).
    List <int> tris = new List <int>();
    for (int i = 0; i < _Triangles.Length; i += 3)
    {
        Vector3 A = _Vertices[_Triangles[i]];
        Vector3 B = _Vertices[_Triangles[i + 1]];
        Vector3 C = _Vertices[_Triangles[i + 2]];
        Vector3 V = Vector3.Cross(A - B, A - C);

        //figure out the direction the triangle is pointing in
        Vector3 mN = V.normalized;
        float an = Vector3.Angle(mN, Vector3.forward);
        float maxDepth = maxZ / 1000;
        if (an > minAngle && A.z < maxDepth && B.z < maxDepth && C.z < maxDepth)
        {
            tris.Add(_Triangles[i]);
            tris.Add(_Triangles[i + 1]);
            tris.Add(_Triangles[i + 2]);
        }
    }

    //map all the valid vertices
    Dictionary <int, int> lookup = new Dictionary <int, int>();
    List <Vector3> verts = new List <Vector3>();

    //reindex the triangles so they reference the compacted vertex list
    for (int i = 0; i < tris.Count; i++)
    {
        int vertIndex = tris[i];
        int newIndex = -1;
        if (lookup.TryGetValue(vertIndex, out newIndex))
        {
            tris[i] = newIndex;
        }
        else
        {
            // First time this vertex is referenced: append it and record
            // its new (compacted) index.
            verts.Add(_Vertices[vertIndex]);
            lookup.Add(vertIndex, verts.Count - 1);
            tris[i] = verts.Count - 1;
        }
    }
    _Vertices = verts.ToArray();
    _Triangles = tris.ToArray();
    //Debug.LogFormat("Vertice count {0}, Triangle count {1} ", _Vertices.Length, tris.Count );
}
/// <summary>
/// Rebuilds the mesh (vertices + UVs) from the depth frame and, when a client
/// is ready, streams the mesh data and a JPEG-compressed color frame to it.
/// </summary>
/// <param name="depthData">Raw depth frame.</param>
/// <param name="colorWidth">Color frame width in pixels.</param>
/// <param name="colorHeight">Color frame height in pixels.</param>
private void RefreshData(ushort[] depthData, int colorWidth, int colorHeight)
{
    var frameDesc = _Sensor.DepthFrameSource.FrameDescription;

    ColorSpacePoint[] colorSpace = new ColorSpacePoint[depthData.Length];
    CameraSpacePoint[] cameraSpace = new CameraSpacePoint[depthData.Length];
    _Mapper.MapDepthFrameToColorSpace(depthData, colorSpace);
    _Mapper.MapDepthFrameToCameraSpace(depthData, cameraSpace);

    for (int y = 0; y < frameDesc.Height; y += _DownsampleSize)
    {
        for (int x = 0; x < frameDesc.Width; x += _DownsampleSize)
        {
            int indexX = x / _DownsampleSize;
            int indexY = y / _DownsampleSize;
            int smallIndex = (indexY * (frameDesc.Width / _DownsampleSize)) + indexX;
            int bigIndex = y * frameDesc.Width + x;

            // Mirror horizontally by negating camera-space X.
            _Vertices[smallIndex].x = -cameraSpace[bigIndex].X;
            _Vertices[smallIndex].y = cameraSpace[bigIndex].Y;
            _Vertices[smallIndex].z = cameraSpace[bigIndex].Z;

            // Update UV mapping with CDRP
            var colorSpacePoint = colorSpace[bigIndex];
            _UV[smallIndex] = new Vector2(colorSpacePoint.X / colorWidth, colorSpacePoint.Y / colorHeight);
        }
    }

    _Mesh.vertices = _Vertices;
    _Mesh.uv = _UV;
    _Mesh.triangles = _Triangles;
    _Mesh.RecalculateNormals();

    // Check (under the lock) whether the client asked for a mesh update.
    bool sendMesh = false;
    lock (readyToSendLock)
    {
        sendMesh = readyToSendMesh;
    }

    if (sendMesh)
    {
        var depthWidth = frameDesc.Width / _DownsampleSize;
        var depthHeight = frameDesc.Height / _DownsampleSize;
        var depthCount = depthWidth * depthHeight;

        // Send the downsampled depth grid dimensions first.
        SendDataToClient(BitConverter.GetBytes(depthWidth));
        SendDataToClient(BitConverter.GetBytes(depthHeight));

        // Pack vertex and UV data as a single message:
        // 5 floats per point (X, Y, Z, U, V).
        int byteCount = 5 * sizeof(float) * depthCount;
        byte[] dataToSend = new byte[byteCount];
        for (int i = 0; i < depthCount; i++)
        {
            Buffer.BlockCopy(BitConverter.GetBytes(_Vertices[i].x), 0, dataToSend, i * 5 * sizeof(float), sizeof(float));
            Buffer.BlockCopy(BitConverter.GetBytes(_Vertices[i].y), 0, dataToSend, (i * 5 + 1) * sizeof(float), sizeof(float));
            Buffer.BlockCopy(BitConverter.GetBytes(_Vertices[i].z), 0, dataToSend, (i * 5 + 2) * sizeof(float), sizeof(float));
            Buffer.BlockCopy(BitConverter.GetBytes(_UV[i].x), 0, dataToSend, (i * 5 + 3) * sizeof(float), sizeof(float));
            Buffer.BlockCopy(BitConverter.GetBytes(_UV[i].y), 0, dataToSend, (i * 5 + 4) * sizeof(float), sizeof(float));
        }
        SendDataToClient(dataToSend.ToArray());

        // JPEG-compress the color data and send it as a single message.
        byte[] colorDataCopy = new byte[colorData.Length];
        byte[] jpgData;
        Buffer.BlockCopy(colorData, 0, colorDataCopy, 0, colorData.Length);

        // The JPEG compression code below swaps the red and blue channels,
        // so pre-swap them here to compensate.
        for (int y = 0; y < colorHeight; y++)
        {
            for (int x = 0; x < colorWidth; x++)
            {
                int index = y * colorWidth * 4 + x * 4;
                // Swap the first byte (red) and third byte (blue).
                byte temp = colorDataCopy[index];
                colorDataCopy[index] = colorDataCopy[index + 2];
                colorDataCopy[index + 2] = temp;
            }
        }

        try
        {
            // Source: https://stackoverflow.com/questions/11730373/byte-array-to-bitmap-image
            Bitmap bitmap = new Bitmap(colorWidth, colorHeight, System.Drawing.Imaging.PixelFormat.Format32bppRgb);
            BitmapData bmData = bitmap.LockBits(new Rectangle(0, 0, colorWidth, colorHeight), ImageLockMode.ReadWrite, bitmap.PixelFormat);
            Marshal.Copy(colorDataCopy, 0, bmData.Scan0, colorWidth * colorHeight * 4);
            bitmap.UnlockBits(bmData);

            using (var ms = new MemoryStream())
            {
                bitmap.Save(ms, ImageFormat.Jpeg);
                jpgData = ms.ToArray();
                textInGui.text = "JPG length: " + jpgData.Length;

                // Send the payload length, then the JPEG bytes.
                SendDataToClient(BitConverter.GetBytes(jpgData.Length));
                SendDataToClient(jpgData);
            }

            // Mesh sent; wait for the client to request the next one.
            lock (readyToSendLock)
            {
                readyToSendMesh = false;
            }
        }
        catch (Exception e)
        {
            // Surface the error in the GUI rather than crashing the update loop.
            textInGui.text = e.Message;
        }
    }
}
/// <summary>
/// Refreshes both coordinate mappings (depth -> camera space and
/// depth -> color space) from the current depth buffer.
/// </summary>
private void MappingSpace()
{
    // Both calls read _depthData and fill their respective output arrays;
    // they are independent of each other.
    _mapper.MapDepthFrameToCameraSpace(_depthData, _cameraSpacePoints);
    _mapper.MapDepthFrameToColorSpace(_depthData, _colorSpacePoints);
}
/* Based on Kinect Coordinate Mapping Basics
 * Modified by John Densoyers-Stewart
 * 2018-03-28
 */

/// <summary>
/// Multi-source frame handler: rebuilds the particle cloud from the depth
/// frame — optionally restricted to tracked bodies or to non-body pixels —
/// and skips points inside a sphere around the VR headset.
/// </summary>
void Reader_MultiSourceFrameArrived(object sender, MultiSourceFrameArrivedEventArgs e)
{
    particles.Clear();

    if (MultiSourceManager == null)
    {
        return;
    }
    if (_MultiManager == null)
    {
        return;
    }

    int bodyCount = _Sensor.BodyFrameSource.BodyCount;

    depthFrameData = _MultiManager.GetDepthData();
    bodyIndexData = _MultiManager.GetBodyIndexData();

    _Mapper.MapDepthFrameToCameraSpace(depthFrameData, cameraSpacePoints);
    _Mapper.MapDepthFrameToColorSpace(depthFrameData, colorSpacePoints);

    if (createParticles)
    {
        // Walk the depth grid with a stride of `downsample` in both axes.
        for (int x = 0; x < depthWidth; x += downsample)
        {
            for (int y = 0; y < depthHeight; y += downsample)
            {
                int i = x + (depthWidth * y);
                CameraSpacePoint p = cameraSpacePoints[i];
                // Skip invalid depth pixels (mapped to negative infinity).
                if (!float.IsNegativeInfinity(p.X) && !float.IsNegativeInfinity(p.Y) && !float.IsNegativeInfinity(p.Z))
                {
                    if (bodiesOnly)
                    {
                        // Keep only pixels whose body index refers to a
                        // tracked body (index < bodyCount).
                        if (bodyIndexData[i] < bodyCount)
                        {
                            //need to combine this with the other color stuff below to make it work?
                            /*
                             * ColorSpacePoint colorPoint = colorSpacePoints[i];
                             *
                             * byte r = 0;
                             * byte g = 0;
                             * byte b = 0;
                             * byte a = 0;
                             *
                             * int colorX = (int)System.Math.Floor(colorPoint.X + 0.5);
                             * int colorY = (int)System.Math.Floor(colorPoint.Y + 0.5);
                             *
                             * if ((colorX >= 0) && (colorX < colorWidth) && (colorY >= 0) && (colorY < colorHeight))
                             * {
                             * int colorIndex = ((colorY * colorWidth) + colorX) * bytesPerPixel;
                             * b = colorFrameData[colorIndex++];
                             * g = colorFrameData[colorIndex++];
                             * r = colorFrameData[colorIndex++];
                             * a = colorFrameData[colorIndex++];
                             * }*/
                            Vector3 particlePos = GetVector3FromCameraSpacePoint(p);
                            // Skip points inside the headset exclusion radius.
                            if (Vector3.SqrMagnitude(particlePos - transform.InverseTransformPoint(vrHeadset.position)) > headRadiusSquare)
                            {
                                ParticleSystem.Particle particle = new ParticleSystem.Particle();
                                particle.position = particlePos;
                                particle.startColor = color; // new Color32(r,g,b,a);
                                particle.startSize = size;
                                particles.Add(particle);
                            }
                        }
                    }
                    else if (noBodies)
                    {
                        // Keep only pixels NOT assigned to a tracked body.
                        // NOTE(review): assumes non-body pixels carry an index
                        // above bodyCount (the SDK uses 255 for "no body") —
                        // confirm whether `>=` was intended.
                        if (bodyIndexData[i] > bodyCount)
                        {
                            Vector3 particlePos = GetVector3FromCameraSpacePoint(p);
                            if (Vector3.SqrMagnitude(particlePos - transform.InverseTransformPoint(vrHeadset.position)) > headRadiusSquare)
                            {
                                ParticleSystem.Particle particle = new ParticleSystem.Particle();
                                particle.position = particlePos;
                                particle.startColor = color;
                                particle.startSize = size;
                                particles.Add(particle);
                            }
                        }
                    }
                    else
                    {
                        // No body filtering: emit every valid point outside
                        // the headset exclusion radius.
                        Vector3 particlePos = GetVector3FromCameraSpacePoint(p);
                        if (Vector3.SqrMagnitude(particlePos - transform.InverseTransformPoint(vrHeadset.position)) > headRadiusSquare)
                        {
                            ParticleSystem.Particle particle = new ParticleSystem.Particle();
                            particle.position = particlePos;
                            particle.startColor = color;
                            particle.startSize = size;
                            particles.Add(particle);
                        }
                    }
                }
            }
        }

        /* (retained from original) color-mapping experiment:
         * float colorMappedToDepthX = colorSpacePoints[colorIndex].X;
         * float colorMappedToDepthY = colorSpacePoints[colorIndex].Y;
         *
         * CameraSpacePoint p = cameraSpacePoints[colorIndex];
         *
         * if (!float.IsNegativeInfinity(colorMappedToDepthX) && !float.IsNegativeInfinity(colorMappedToDepthY))
         * {
         * int depthX = (int)(colorMappedToDepthX + 0.5f);
         * int depthY = (int)(colorMappedToDepthY + 0.5f);
         *
         * // If the point is not valid, there is no body index there.
         * if ((depthX >= 0) && (depthX < depthWidth) && (depthY >= 0) && (depthY < depthHeight))
         * {
         * int depthIndex = (depthY * depthWidth) + depthX;
         *
         * //if (bodyIndexData[depthIndex] < bodyCount)
         * //{
         * if (!float.IsNegativeInfinity(p.X) && !float.IsNegativeInfinity(p.Y) && !float.IsNegativeInfinity(p.Z))
         * {
         * ParticleSystem.Particle particle = new ParticleSystem.Particle();
         * particle.position = new Vector3(-p.X, p.Y, p.Z) * scale;
         * particle.startColor = color;
         * particle.startSize = size;
         * particles.Add(particle);
         * //}
         * }
         * }
         * }*/

        // Only push to the particle system when something was produced.
        if (particles.Count > 0)
        {
            _particleSystem.SetParticles(particles.ToArray(), particles.Count);
            particleSystemTimer = Time.time;
        }
    }

    /*if (createMesh)
     * {
     * KinectMesh.GetComponent<Renderer>().material.mainTexture = _MultiManager.GetColorTexture();
     *
     * RefreshData(_MultiManager.GetDepthData(), _MultiManager.ColorWidth, _MultiManager.ColorHeight);
     * }*/
}
/// <summary>
/// Converts one Kinect depth frame into a 2-D "laser scan"-like profile stored in
/// <c>scan2DArray</c>: for each of the 512 depth columns it records the nearest and
/// farthest valid camera-space point whose height falls inside a band above the floor.
/// Layout per column i is 6 floats: [minZ, minX, minY, maxZ, maxX, maxY];
/// slots 6*512 .. 6*512+2 hold the camera-space (X, Y, Z) of the target color pixel.
/// </summary>
/// <param name="depthArray">Raw depth frame, 512x424 ushort values in row-major order (0 = invalid reading).</param>
private void calculateScanFromDepth(ushort[] depthArray)
{
    CoordinateMapper mapper = this.kinect.CoordinateMapper;
    //CameraIntrinsics intrinsics = mapper.GetDepthCameraIntrinsics();

    // One camera-space point per depth pixel (512x424 depth frame).
    CameraSpacePoint[] cameraSpace = new CameraSpacePoint[512 * 424];
    mapper.MapDepthFrameToCameraSpace(depthArray, cameraSpace);
    // Console.WriteLine(cameraSpace[512 * 250].X);
    int i = 0, j = 0;
    float min = float.MaxValue, max = float.MinValue;
    int min_index = 0;
    int max_index = 0;
    // float yaw_angle = 0;
    float y = 0;
    // Presumably the sensor's mounting height above the floor in meters — TODO confirm.
    float floor_offset = .425f;

    // Outer loop: one pass per depth-image column (x coordinate).
    for (i = 0; i < 512; i++)
    {
        min = float.MaxValue;
        max = float.MinValue;
        min_index = 0;
        max_index = 0;
        // yaw_angle = 125.0f - (70f / 512f) * i;

        // Inner loop walks column i top-to-bottom (stride 512 = row width).
        // NOTE(review): bound is 512 * 423 + i, so rows 0..422 are visited and the
        // last row (423) is skipped — looks like an off-by-one; confirm whether
        // dropping the bottom row is intentional.
        for (j = i; j < 512 * 423 + i; j += 512)
        {
            // float dist =(float) Math.Sqrt(Math.Pow(cameraSpace[j].Y, 2) + Math.Pow(cameraSpace[j].X, 2) + Math.Pow(cameraSpace[j].Z, 2));
            // Distance metric is forward range only (Z), not Euclidean distance.
            float dist = cameraSpace[j].Z;

            // Nearest-point candidate; pixels with raw depth 0 are invalid and excluded.
            if (dist < min && depthArray[j] != 0u)
            {
                //min = depthArray[j];
                // min_angle = 30f - j/512 * (60f / 424f);
                // y = depthArray[j] * (float)Math.Sin(min_angle * Math.PI / 180) + floor_offset;
                y = cameraSpace[j].Y + floor_offset;
                // Height band for the "near" scan: 5 cm .. 2.4 m above the floor.
                if (y > 0.050 && y < 2.400)
                {
                    min = cameraSpace[j].Z;
                    min_index = j;
                }
            }

            // Farthest-point candidate.
            // NOTE(review): unlike the min branch there is no depthArray[j] != 0 guard
            // here — presumably invalid pixels map to -Infinity Z and so never satisfy
            // dist > max, but verify against the SDK's behavior for invalid readings.
            if (dist > max)
            {
                // max = depthArray[j];
                // max_angle = 30f - j/512 * (60f / 424f);
                y = cameraSpace[j].Y + floor_offset;// depthArray[j] * (float)Math.Sin(max_angle * Math.PI / 180) + floor_offset;
                // NOTE(review): band here is 5 cm .. 2.0 m — asymmetric with the min
                // branch's 2.4 m ceiling; confirm the asymmetry is intended.
                if (y > .050 && y < 2.000)
                {
                    max = cameraSpace[j].Z;
                    max_index = j;
                }
            }
        }

        // Emit the 6-float record for this column. If no candidate was found,
        // min/max keep their sentinel values and *_index is 0 (pixel 0's coords).
        scan2DArray[6 * i] = min;
        scan2DArray[6 * i + 1] = cameraSpace[min_index].X;
        scan2DArray[6 * i + 2] = cameraSpace[min_index].Y;
        scan2DArray[6 * i + 3] = max;
        scan2DArray[6 * i + 4] = cameraSpace[max_index].X;
        scan2DArray[6 * i + 5] = cameraSpace[max_index].Y;
    }

    // Second mapping: one camera-space point per *color* pixel (1920x1080),
    // used to look up the 3-D position of the tracked target pixel.
    cameraSpace = new CameraSpacePoint[1920 * 1080];
    mapper.MapColorFrameToCameraSpace(depthArray, cameraSpace);
    CameraSpacePoint p = cameraSpace[target_y * 1920 + target_x];
    Debug.Print("Cone (x,y,z) is (" + p.X.ToString() + ", " + p.Y.ToString() + ", " + p.Z.ToString() + ")\n");
    scan2DArray[6 * 512] = p.X;
    scan2DArray[6 * 512 + 1] = p.Y;
    scan2DArray[6 * 512 + 2] = p.Z;

    // Consume the target: reset for the next detection.
    target_y = 0;
    target_z = 0;
    target_x = 0;
}
/// <summary>
/// Rebuilds the point-cloud mesh from one depth + color frame pair. Each downsampled
/// depth pixel that maps to a valid camera-space point inside the body bounding box
/// (head/hands/floor) becomes a mesh vertex colored from the registered color frame;
/// unused vertex slots are parked far away and painted black.
/// </summary>
/// <param name="depthData">Raw depth frame (row-major ushort values).</param>
/// <param name="colorData">Color frame bytes in RGBA layout (see CreateFrameDescription below).</param>
private void RefreshData(ushort[] depthData, byte[] colorData)
{
    var frameDesc = _Sensor.DepthFrameSource.FrameDescription;
    // RGBA description so BytesPerPixel/stride match the colorData buffer layout.
    var imageDesc = _Sensor.ColorFrameSource.CreateFrameDescription(ColorImageFormat.Rgba);

    // Per-depth-pixel registration: color-space lookup and 3-D camera-space position.
    ColorSpacePoint[] colorSpace = new ColorSpacePoint[depthData.Length];
    CameraSpacePoint[] cameraSpace = new CameraSpacePoint[depthData.Length];
    _Mapper.MapDepthFrameToCameraSpace(depthData, cameraSpace);
    _Mapper.MapDepthFrameToColorSpace(depthData, colorSpace);

    int stride = (int)imageDesc.BytesPerPixel * imageDesc.Width;

    // Bounding box around the tracked body: head, optional hands, and a floor point,
    // expanded by a scale-dependent margin. Only points inside it are kept.
    Bounds boundingBox = new Bounds();
    boundingBox.center = transform.InverseTransformPoint(headPos.transform.position);
    Vector3 _h1 = boundingBox.center;
    Vector3 _h2 = boundingBox.center;
    if (handPosL)
    {
        _h1 = transform.InverseTransformPoint(handPosL.transform.position);
    }
    if (handPosR)
    {
        _h2 = transform.InverseTransformPoint(handPosR.transform.position);
    }
    Vector3 floorPos = headPos.transform.position;
    floorPos.y = 0.1f;
    if (handPosL)
    {
        boundingBox.Encapsulate(_h1);
    }
    if (handPosR)
    {
        boundingBox.Encapsulate(_h2);
    }
    boundingBox.Expand(0.9f / _DepthScale);
    boundingBox.Encapsulate(transform.InverseTransformPoint(floorPos));

    long bigIndexRow = 0;
    // Parking position for unused vertex slots (pushed far outside the view).
    Vector3 hidden = new Vector3(9999.0f, 9999.0f, 9999.0f);
    CameraSpacePoint p;
    int pointIndex = 0;
    // Last index from which 3 color bytes (r,g,b) can still be read safely.
    uint colorsLength = imageDesc.BytesPerPixel * imageDesc.LengthInPixels - 3;

    for (int y = 0; y < frameDesc.Height; y += _DownsampleSize)
    {
        if (pointIndex >= MaxPoints)
        {
            break;
        }
        long bigIndex = bigIndexRow;
        Color32 c = new Color32();
        c.a = 255;
        for (int x = 0; x < frameDesc.Width; x += _DownsampleSize)
        {
            if (pointIndex >= MaxPoints)
            {
                break;
            }
            p = cameraSpace[bigIndex];
            // Invalid depth pixels map to -Infinity; skip them.
            if (!float.IsNegativeInfinity(p.Z))
            {
                // Mirror X to convert Kinect's right-handed space to Unity's.
                Vector3 unityPos = new Vector3(-p.X, p.Y, p.Z);
                var colorSpacePoint = colorSpace[bigIndex];
                // BUGFIX: the original check omitted the X upper bound
                // (colorSpacePoint.X < imageDesc.Width), so an out-of-range X
                // silently sampled a pixel from the next row. Also use >= 0 so
                // the first row/column are not discarded.
                if (colorSpacePoint.X >= 0 && colorSpacePoint.X < imageDesc.Width
                    && colorSpacePoint.Y >= 0 && colorSpacePoint.Y < imageDesc.Height
                    && boundingBox.Contains(unityPos))
                {
                    _Vertices[pointIndex] = unityPos;
                    long colorI = ((int)colorSpacePoint.X * (int)imageDesc.BytesPerPixel) + ((int)colorSpacePoint.Y * stride);
                    if (colorI < colorsLength)
                    {
                        // RGBA byte order per the frame description requested above.
                        c.r = colorData[colorI + 0];
                        c.g = colorData[colorI + 1];
                        c.b = colorData[colorI + 2];
                        _Colors[pointIndex] = c;
                        _Normals[pointIndex] = (getNormal(depthData, x, y, frameDesc.Width, frameDesc.Height));
                    }
                    pointIndex++;
                }
            }
            bigIndex += _DownsampleSize;
        }
        bigIndexRow += frameDesc.Width * _DownsampleSize;
    }

    // Hide every vertex slot that was not filled this frame.
    for (int i = pointIndex; i < MaxPoints; i++)
    {
        _Vertices[i] = hidden;
        _Colors[i] = Color.black;
    }
    _Mesh.vertices = _Vertices;
    _Mesh.colors = _Colors;
    _Mesh.normals = _Normals;
    _Mesh.RecalculateBounds();
}