/// <summary>
/// Axis-aligned bounding-box vs. ray test using the classic slab method: intersect the ray
/// with each axis's near/far plane pair and require the resulting parameter intervals to
/// overlap. Relies on LightRay.InvDirection being precomputed as the per-component
/// reciprocal of the direction.
/// </summary>
/// <param name="lightRay">Ray to test; only Origin and InvDirection are read.</param>
/// <returns>True when the ray's line crosses this bounding box.</returns>
public bool IntersectsLightRay(LightRay lightRay)
{
    Vector3 inv = lightRay.InvDirection;
    Vector3 origin = lightRay.Origin;

    // X slab — a negative direction component swaps which face is the "near" one.
    bool xNegative = inv.x < 0;
    float entry = ((xNegative ? Max : Min).x - origin.x) * inv.x;
    float exit = ((xNegative ? Min : Max).x - origin.x) * inv.x;

    // Y slab.
    bool yNegative = inv.y < 0;
    float yEntry = ((yNegative ? Max : Min).y - origin.y) * inv.y;
    float yExit = ((yNegative ? Min : Max).y - origin.y) * inv.y;

    // Disjoint X and Y intervals mean the ray misses the box outright.
    if ((entry > yExit) || (yEntry > exit))
    {
        return false;
    }

    // Narrow the running interval down to the X/Y overlap.
    if (yEntry > entry) { entry = yEntry; }
    if (yExit < exit) { exit = yExit; }

    // Z slab — the box is hit only if the Z interval overlaps what's left.
    // (Kept as a negated comparison so NaN slab values behave exactly as before.)
    bool zNegative = inv.z < 0;
    float zEntry = ((zNegative ? Max : Min).z - origin.z) * inv.z;
    float zExit = ((zNegative ? Min : Max).z - origin.z) * inv.z;

    return !((entry > zExit) || (zEntry > exit));
}
/// <summary>
/// Finds the closest surface hit by <paramref name="lightRay"/> across every baked object.
/// Each object is first rejected cheaply by its world-space bounds, then every triangle of
/// the surviving objects is tested; the nearest accepted hit wins.
/// </summary>
/// <param name="lightRay">Ray to trace through the scene.</param>
/// <param name="bakeArgs">Bake data: per-object index/vertex ranges, transforms, and the light position.</param>
/// <returns>
/// The nearest hit (with a normalized normal), or the blank hit from BlankHit() when nothing was struck.
/// </returns>
private static RayHit Trace(LightRay lightRay, MainBakeArgs bakeArgs)
{
    RayHit bestHit = BlankHit();

    for (int i = 0; i < bakeArgs.ObjectData.Count; i++)
    {
        var objectDatum = bakeArgs.ObjectData[i];

        // Cheap AABB rejection before the per-triangle work.
        if (!objectDatum.Bounds.IntersectsLightRay(lightRay))
        {
            continue;
        }

        // Walk this object's index range three indices (one triangle) at a time.
        for (int j = 0; j < objectDatum.IndicesCount; j += 3)
        {
            int subIndex0 = j + objectDatum.IndicesOffset;

            // Going from an object's Local-Space into World-Space requires matrix
            // multiplication. Position() supplies a Vector4 with w = 1.0 so the matrix's
            // translation column carries into the result — the usual "w = 1.0 for
            // positions, w = 0.0 for directions" convention. -FCT
            Vector3 v0 = objectDatum.LocalToWorldMatrix * bakeArgs.Vertices[bakeArgs.Indices[subIndex0] + objectDatum.VerticesOffset].Position();
            Vector3 v1 = objectDatum.LocalToWorldMatrix * bakeArgs.Vertices[bakeArgs.Indices[subIndex0 + 1] + objectDatum.VerticesOffset].Position();
            Vector3 v2 = objectDatum.LocalToWorldMatrix * bakeArgs.Vertices[bakeArgs.Indices[subIndex0 + 2] + objectDatum.VerticesOffset].Position();

            // FIX(idiom): the out parameters are declared inline — the old locals were
            // initialized to values that TriangleIntersect immediately overwrote.
            if (!TriangleIntersect(lightRay, v0, v1, v2, bakeArgs.LightSourcePosition, out float s, out Vector3 n))
            {
                continue;
            }

            // Keep only the closest hit seen so far.
            if (s < bestHit.Distance)
            {
                bestHit.Distance = s;
                bestHit.Position = lightRay.Origin + (s * lightRay.Direction);
                bestHit.Normal = n;
                bestHit.HasAHit = true;
            }
        } // End loop that checks all triangles for current object
    } // End loop that checks all objects

    // TriangleIntersect hands back an unnormalized normal; normalize only the winner, once.
    if (bestHit.HasAHit)
    {
        bestHit.Normal = bestHit.Normal.normalized;
    }

    return bestHit;
}
/// <summary>
/// Builds the first light ray for one shadow sample: it starts at the light source and aims
/// at the point on the shadow-focus plane addressed by <paramref name="uv"/>.
/// </summary>
/// <param name="uv">Plane coordinate; x maps along the light's rightward axis, y along its upward axis.</param>
/// <param name="halfSize">Half the world-space extent of the focus plane.</param>
/// <param name="bakeArgs">Bake settings supplying the light's position and orthonormal basis vectors.</param>
/// <returns>A full-intensity (white) LightRay from the light source toward the focus-plane point.</returns>
private static LightRay CreateInitialLightRay(Vector2 uv, float halfSize, MainBakeArgs bakeArgs)
{
    // Offset from the light to the targeted plane point: slide along the light's
    // right/up basis by the scaled uv, then push forward to the focus plane itself.
    var rightward = (uv.x * halfSize) * bakeArgs.LightSourceRightward;
    var upward = (uv.y * halfSize) * bakeArgs.LightSourceUpward;
    var forward = s_shadowFocusDistance * bakeArgs.LightSourceForward;
    var toPlanePoint = rightward + upward + forward;

    return new LightRay()
    {
        Color = new Vector3(1.0f, 1.0f, 1.0f), // Start at full (white) intensity.
        Origin = bakeArgs.LightSourcePosition,
        Direction = toPlanePoint.normalized
    };
}
/// <summary>
/// Ray/triangle intersection test. Returns true when <paramref name="lightRay"/> hits the
/// FRONT face of triangle (v0, v1, v2) and the hit point lies inside the spherical shell
/// [s_innerRadius, s_outerRadius] around the light source. Unity winds triangles clockwise
/// in a left-handed space, so Cross(edge0, edge1) points out of the front face.
/// </summary>
/// <param name="lightRay">Ray being traced; Origin and Direction are read.</param>
/// <param name="v0">First triangle vertex (world space).</param>
/// <param name="v1">Second triangle vertex (world space).</param>
/// <param name="v2">Third triangle vertex (world space).</param>
/// <param name="lightSourcePos">Light position used for the inner/outer radius range check.</param>
/// <param name="s">Distance scale along the ray to the plane hit; float.MaxValue when rejected before the plane solve.</param>
/// <param name="n">UNNORMALIZED triangle normal (cross of the two edges); the caller normalizes the winning hit.</param>
/// <returns>True on an accepted front-face hit inside the actionable range.</returns>
private static bool TriangleIntersect(LightRay lightRay, Vector3 v0, Vector3 v1, Vector3 v2, Vector3 lightSourcePos, out float s, out Vector3 n)
{
    // BUGFIX: this previously compared against float.Epsilon, the smallest positive
    // denormal (~1.4e-45) — indistinguishable from zero in practice, which defeated the
    // stated intent of culling near-parallel rays. Use a small but meaningful tolerance.
    // Note 'n' is NOT normalized, so this threshold effectively scales with triangle area.
    const float parallelEpsilon = 1e-8f;

    s = float.MaxValue;

    Vector3 edge0 = v1 - v0; // v0 -> v1
    Vector3 edge1 = v2 - v0; // v0 -> v2

    // Front-face normal of the triangle (left-handed cross product; see summary).
    n = Vector3.Cross(edge0, edge1);

    // Front-face + parallel cull. A valid hit needs the ray traveling AGAINST the normal,
    // i.e. a clearly negative dot product: near zero means the ray runs parallel to the
    // plane and will never reach it; positive means it approaches the back face, which a
    // closed surface never exposes to the light.
    float nDotLightRayDir = Vector3.Dot(n, lightRay.Direction);
    if (nDotLightRayDir > -parallelEpsilon)
    {
        return false;
    }

    // Scale factor along the ray to the triangle's plane — the standard plane solve
    // s = dot(n, v0 - p0) / dot(n, dir), written with the negations the original used.
    Vector3 p0 = lightRay.Origin;
    s = Vector3.Dot(-n, p0 - v0) / nDotLightRayDir;

    // Where the ray pierces the plane containing the triangle.
    Vector3 intersectionPoint = (s * lightRay.Direction) + p0;

    // Actionable-range check: discard hits inside the exclusion sphere or beyond the
    // inclusion sphere around the light (compared squared to avoid a sqrt).
    Vector3 deltaFromLightSource = intersectionPoint - lightSourcePos;
    float distFromLight2 = Vector3.Dot(deltaFromLightSource, deltaFromLightSource);
    if (distFromLight2 < (s_innerRadius * s_innerRadius))
    {
        return false; // Within the exclusion zone.
    }
    if (distFromLight2 > (s_outerRadius * s_outerRadius))
    {
        return false; // Outside the inclusion zone.
    }

    // We know where the ray hits the PLANE; now check that the point is inside the
    // TRIANGLE. For each edge, Cross(edge, n) points away from the triangle's interior
    // when the edge follows the clockwise winding (edge0 and v1->v2) and toward the
    // interior otherwise (edge1) — which is why the accepted sign flips per test. -FCT
    Vector3 c = Vector3.Cross(edge0, n);
    Vector3 delta = intersectionPoint - v0;
    if (Vector3.Dot(delta, c) > 0)
    {
        return false; // Outside across edge v0->v1.
    }

    // edge1 runs against the winding, so its cross points INTO the triangle; here a
    // negative dot product marks the outside.
    c = Vector3.Cross(edge1, n);
    if (Vector3.Dot(delta, c) < 0)
    {
        return false; // Outside across edge v0->v2.
    }

    // v1->v2 follows the clockwise winding, so it behaves like edge0. The reference point
    // must lie on the edge used to build 'c', hence the switch to v1.
    c = Vector3.Cross((v2 - v1), n);
    delta = intersectionPoint - v1;
    if (Vector3.Dot(delta, c) > 0)
    {
        return false; // Outside across edge v1->v2.
    }

    return true;
}
/// <summary>
/// Background-worker entry point that bakes the shadow image. For each of s_sampleCount
/// passes it jitters the sub-pixel offset, traces one ray per pixel (bouncing up to
/// s_maxBounceCount times, halving the carried color per bounce), projects the surviving
/// ray back onto the shadow-focus plane, and accumulates its color into the destination
/// pixel. Results are averaged into args.Result and args.Complete is set.
/// </summary>
/// <param name="sender">Standard BackgroundWorker sender (unused).</param>
/// <param name="e">Carries a MainBakeArgs instance in e.Argument.</param>
private void MainBakeThread_DoWork(object sender, DoWorkEventArgs e)
{
    // FIX: fail fast with an InvalidCastException if the caller passed the wrong argument
    // type — the previous 'as' cast would have surfaced later as an opaque NRE.
    var args = (MainBakeArgs)e.Argument;

    Vector2 pixelOffset = 0.5f * Vector2.one;
    float halfSize = s_shadowFocusDistance * Mathf.Tan(args.LightSourceTheata);
    float colorAdjustment = 1.0f / s_sampleCount;
    RandomS random = new RandomS(0); // Fixed seed keeps bakes reproducible.

    // Accumulation buffer: ImageResolution x ImageResolution, one RGB Vector3 per pixel.
    Vector3[][] result = new Vector3[args.ImageResolution][];
    for (int i = 0; i < args.ImageResolution; i++)
    {
        result[i] = new Vector3[args.ImageResolution];
    }

    // The way I learned it, you want the outermost loop to be the threaded loop to reduce
    // the cost of starting and stopping threads; the middle loop already provides plenty
    // of work per thread, and splitting by sample makes progress easy to track. -FCT
    ParallelOptions threadingOptionsOuterLoop = new ParallelOptions() { MaxDegreeOfParallelism = 3 };
    ParallelOptions threadingOptionsInnerLoop = new ParallelOptions() { MaxDegreeOfParallelism = 3 };

    for (int i = 0; i < s_sampleCount; i++)
    {
        s_bakeProgress = i; // Progress is reported per sample pass.

        // Note: "new Vector2(1.0f, 1.0f)" is used instead of Vector2.one inside this hot
        // loop because the property adds a call over invoking the constructor directly. -FCT
        Parallel.For(0, args.ImageResolution, threadingOptionsOuterLoop, pixY =>
        {
            Parallel.For(0, args.ImageResolution, threadingOptionsInnerLoop, pixX =>
            {
                //
                // Convert pixel coordinates into UV coordinates in [-1, +1), jittered by
                // this pass's sub-pixel offset.
                //
                Vector2 uv = new Vector2(pixX, pixY) + pixelOffset;
                uv /= args.ImageResolution;
                uv *= 2.0f;
                uv -= new Vector2(1.0f, 1.0f);

                //
                // Create the initial lightRay from the UV coordinate.
                //
                LightRay lightRay = CreateInitialLightRay(uv, halfSize, args);

                //
                // Bounce the ray around the scene, halving its color at every hit.
                //
                for (int j = 0; j < s_maxBounceCount; j++)
                {
                    RayHit hit = Trace(lightRay, args);
                    if (!hit.HasAHit)
                    {
                        break; // FIX(idiom): was "j = s_maxBounceCount" to end the loop.
                    }

                    lightRay.Color *= 0.5f;
                    lightRay.Direction = hit.Normal;
                    // Nudge the origin off the surface so the next trace can't re-hit it.
                    lightRay.Origin = hit.Position + (0.0005f * hit.Normal);
                }

                //
                // Project the surviving ray onto the shadow plane. Rays parallel to (or
                // heading away from) the plane contribute nothing.
                // NOTE(review): float.Epsilon is effectively zero as a tolerance; kept
                // as-is here to preserve behavior — confirm whether a real epsilon is wanted.
                //
                Vector3 N = -args.LightSourceForward;
                float dot_N_LightRayDir = Vector3.Dot(N, lightRay.Direction);
                if (dot_N_LightRayDir > -float.Epsilon)
                {
                    return;
                }
                Vector3 v0 = (s_shadowFocusDistance * args.LightSourceForward) + args.LightSourcePosition;
                Vector3 p0 = args.LightSourcePosition;
                float s_i = Vector3.Dot(-N, p0 - v0) / dot_N_LightRayDir;
                Vector3 lightPoint = (s_i * lightRay.Direction) + p0;

                //
                // Convert the world-space lightPoint into a uv coordinate on the plane's
                // right/up basis, normalized by the plane's half-size.
                //
                Vector3 planeCoord = lightPoint - v0;
                float uOffset = Vector3.Dot(planeCoord, args.LightSourceRightward);
                float vOffset = Vector3.Dot(planeCoord, args.LightSourceUpward);
                Vector2 uvPrime = (new Vector2(uOffset, vOffset)) / halfSize;
                if (uvPrime.x < -1.0f || +1.0f <= uvPrime.x) { return; }
                if (uvPrime.y < -1.0f || +1.0f <= uvPrime.y) { return; }

                //
                // Convert uvPrime into a destination pixel and accumulate. A bounced ray
                // can land in ANY pixel, so two threads may target the same slot.
                // BUGFIX: the Vector3 '+=' is not atomic — lock the destination row
                // (previously this accumulation was unsynchronized and could drop light).
                //
                Vector2 pixPrime = uvPrime + new Vector2(1.0f, 1.0f);
                pixPrime *= (args.ImageResolution / 2.0f);
                Vector3[] row = result[(int)pixPrime.y];
                lock (row)
                {
                    row[(int)pixPrime.x] += lightRay.Color;
                }
            }); // End pixX loop
        }); // End pixY loop

        // Select the next sample's sub-pixel offset in [0.0, 1.0). System.Random's
        // NextDouble() promises [0, 1), but the down-cast to float might round up to
        // exactly 1.0f, so reject and redraw rather than take that chance. -FCT
        do { pixelOffset.x = (float)random.NextDouble(); } while (!(pixelOffset.x < 1.0f));
        do { pixelOffset.y = (float)random.NextDouble(); } while (!(pixelOffset.y < 1.0f));
    }

    // Average the accumulated samples into the final row-major Color buffer.
    var finalResult = new Color[args.ImageResolution * args.ImageResolution];
    Parallel.For(0, args.ImageResolution, y =>
    {
        int rowOffset = y * args.ImageResolution;
        for (int x = 0; x < args.ImageResolution; x++)
        {
            var colorValues = colorAdjustment * result[y][x];
            finalResult[rowOffset + x] = new Color(colorValues.x, colorValues.y, colorValues.z, 1.0f);
        }
        result[y] = null; // Release each row as soon as it has been converted.
    });

    args.Result = finalResult;
    args.Complete = true;
}