// Now we define methods that give our pipeline several different
// schedules.
public void ScheduleForCpu()
{
    // Compute the look-up-table ahead of time.
    Lut.ComputeRoot();

    // Compute color channels innermost. Promise that there will
    // be three of them and unroll across them.
    Curved.Reorder(C, X, Y)
          .Bound(C, 0, 3)
          .Unroll(C);

    // Look-up-tables don't vectorize well, so just parallelize
    // curved in slices of 16 scanlines.
    var yo = new HSVar("yo");
    var yi = new HSVar("yi");
    Curved.Split(Y, yo, yi, 16)
          .Parallel(yo);

    // Compute sharpen as needed per scanline of curved.
    Sharpen.ComputeAt(Curved, yi);

    // Vectorize the sharpen. It's 16-bit so we'll vectorize it 8-wide.
    Sharpen.Vectorize(X, 8);

    // Compute the padded input as needed per scanline of curved,
    // reusing previous values computed within the same strip of
    // 16 scanlines.
    Padded.StoreAt(Curved, yo)
          .ComputeAt(Curved, yi);

    // Also vectorize the padding. It's 8-bit, so we'll vectorize
    // 16-wide.
    Padded.Vectorize(X, 16);

    // JIT-compile the pipeline for the CPU.
    Curved.CompileJit();
}
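// A minimal usage sketch (not part of the original lesson), showing
// how the scheduled pipeline might be invoked. The wrapper class name
// 'MyPipeline', the 'input' buffer, and the 8-bit, three-channel
// output shape are assumptions for illustration only:
//
// var pipeline = new MyPipeline(input);
// pipeline.ScheduleForCpu();
// var output = pipeline.Curved.Realize<byte>(input.Width, input.Height, 3);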
public static int Main(string[] args)
{
    // First we'll declare some Vars to use below.
    var x = new HSVar("x");
    var y = new HSVar("y");

    // Let's examine various scheduling options for a simple two stage
    // pipeline. We'll start with the default schedule:
    {
        var producer = new HSFunc("producer_default");
        var consumer = new HSFunc("consumer_default");

        // The first stage will be some simple pointwise math similar
        // to our familiar gradient function. The value at position x,
        // y is the sin of the product of x and y.
        producer[x, y] = HSMath.Sin(x * y);

        // Now we'll add a second stage which averages together multiple
        // points in the first stage.
        consumer[x, y] = (producer[x, y] +
                          producer[x, y + 1] +
                          producer[x + 1, y] +
                          producer[x + 1, y + 1]) / 4;

        // We'll turn on tracing for both functions.
        consumer.TraceStores();
        producer.TraceStores();

        // And evaluate it over a 4x4 box.
        Console.WriteLine("\nEvaluating producer-consumer pipeline with default schedule");
        consumer.Realize<float>(4, 4);

        // There were no messages about computing values of the
        // producer. This is because the default schedule fully
        // inlines 'producer' into 'consumer'. It is as if we had
        // written the following code instead:

        // consumer(x, y) = (sin(x * y) +
        //                   sin(x * (y + 1)) +
        //                   sin((x + 1) * y) +
        //                   sin((x + 1) * (y + 1))) / 4;

        // All calls to 'producer' have been replaced with the body of
        // 'producer', with the arguments substituted in for the
        // variables.

        // The equivalent C code is:
        var result = new float[4, 4];
        for (int yy = 0; yy < 4; yy++)
        {
            for (int xx = 0; xx < 4; xx++)
            {
                result[yy, xx] = (float)((Math.Sin(xx * yy) +
                                          Math.Sin(xx * (yy + 1)) +
                                          Math.Sin((xx + 1) * yy) +
                                          Math.Sin((xx + 1) * (yy + 1))) / 4);
            }
        }
        Console.WriteLine();

        // If we look at the loop nest, the producer doesn't appear
        // at all. It has been inlined into the consumer.
        Console.WriteLine("Pseudo-code for the schedule:");
        consumer.PrintLoopNest();
        Console.WriteLine();
    }

    // Next we'll examine the next simplest option - computing all
    // values required in the producer before computing any of the
    // consumer. We call this schedule "root".
    {
        // Start with the same function definitions:
        var producer = new HSFunc("producer_root");
        var consumer = new HSFunc("consumer_root");
        producer[x, y] = HSMath.Sin(x * y);
        consumer[x, y] = (producer[x, y] +
                          producer[x, y + 1] +
                          producer[x + 1, y] +
                          producer[x + 1, y + 1]) / 4;

        // Tell Halide to evaluate all of producer before any of consumer.
        producer.ComputeRoot();

        // Turn on tracing.
        consumer.TraceStores();
        producer.TraceStores();

        // Compile and run.
        Console.WriteLine("\nEvaluating producer.compute_root()");
        consumer.Realize<float>(4, 4);

        // Reading the output we can see that:
        // A) There were stores to producer.
        // B) They all happened before any stores to consumer.

        // See figures/lesson_08_compute_root.gif for a visualization.
        // The producer is on the left and the consumer is on the
        // right. Stores are marked in orange and loads are marked in
        // blue.

        // Equivalent C:
        var result = new float[4, 4];

        // Allocate some temporary storage for the producer.
        var producer_storage = new float[5, 5];

        // Compute the producer.
        for (int yy = 0; yy < 5; yy++)
        {
            for (int xx = 0; xx < 5; xx++)
            {
                producer_storage[yy, xx] = (float)Math.Sin(xx * yy);
            }
        }

        // Compute the consumer. Skip the prints this time.
        for (int yy = 0; yy < 4; yy++)
        {
            for (int xx = 0; xx < 4; xx++)
            {
                result[yy, xx] = (producer_storage[yy, xx] +
                                  producer_storage[yy + 1, xx] +
                                  producer_storage[yy, xx + 1] +
                                  producer_storage[yy + 1, xx + 1]) / 4;
            }
        }

        // Note that consumer was evaluated over a 4x4 box, so Halide
        // automatically inferred that producer was needed over a 5x5
        // box. This is the same 'bounds inference' logic we saw in
        // the previous lesson, where it was used to detect and avoid
        // out-of-bounds reads from an input image.

        // If we print the loop nest, we'll see something very
        // similar to the C above.
        Console.WriteLine("Pseudo-code for the schedule:");
        consumer.PrintLoopNest();
        Console.WriteLine();
    }

    // Let's compare the two approaches above from a performance
    // perspective.

    // Full inlining (the default schedule):
    // - Temporary memory allocated: 0
    // - Loads: 0
    // - Stores: 16
    // - Calls to sin: 64

    // producer.compute_root():
    // - Temporary memory allocated: 25 floats
    // - Loads: 64
    // - Stores: 41
    // - Calls to sin: 25

    // There's a trade-off here. Full inlining used minimal temporary
    // memory and memory bandwidth, but did a whole bunch of redundant
    // expensive math (calling sin). It evaluated most points in
    // 'producer' four times. The second schedule,
    // producer.compute_root(), did the minimum number of calls to
    // sin, but used more temporary memory and more memory bandwidth.

    // In any given situation the correct choice can be difficult to
    // make. If you're memory-bandwidth limited, or don't have much
    // memory (e.g. because you're running on an old cell-phone), then
    // it can make sense to do redundant math. On the other hand, sin
    // is expensive, so if you're compute-limited then fewer calls to
    // sin will make your program faster. Adding vectorization or
    // multi-core parallelism tilts the scales in favor of doing
    // redundant work, because firing up multiple cpu cores increases
    // the amount of math you can do per second, but doesn't increase
    // your system memory bandwidth or capacity.

    // We can make choices in between full inlining and
    // compute_root. Next we'll alternate between computing the
    // producer and consumer on a per-scanline basis:
    {
        // Start with the same function definitions:
        var producer = new HSFunc("producer_y");
        var consumer = new HSFunc("consumer_y");
        producer[x, y] = HSMath.Sin(x * y);
        consumer[x, y] = (producer[x, y] +
                          producer[x, y + 1] +
                          producer[x + 1, y] +
                          producer[x + 1, y + 1]) / 4;

        // Tell Halide to evaluate producer as needed per y coordinate
        // of the consumer:
        producer.ComputeAt(consumer, y);

        // This places the code that computes the producer just
        // *inside* the consumer's for loop over y, as in the
        // equivalent C below.

        // Turn on tracing.
        producer.TraceStores();
        consumer.TraceStores();

        // Compile and run.
        Console.WriteLine("\nEvaluating producer.ComputeAt(consumer, y)");
        consumer.Realize<float>(4, 4);

        // See figures/lesson_08_compute_y.gif for a visualization.

        // Reading the log or looking at the figure you should see
        // that producer and consumer alternate on a per-scanline
        // basis. Let's look at the equivalent C:
        var result = new float[4, 4];

        // There's an outer loop over scanlines of consumer:
        for (int yy = 0; yy < 4; yy++)
        {
            // Allocate space and compute enough of the producer to
            // satisfy this single scanline of the consumer. This
            // means a 5x2 box of the producer.
            var producer_storage = new float[2, 5];
            for (int py = yy; py < yy + 2; py++)
            {
                for (int px = 0; px < 5; px++)
                {
                    producer_storage[py - yy, px] = (float)Math.Sin(px * py);
                }
            }

            // Compute a scanline of the consumer.
            for (int xx = 0; xx < 4; xx++)
            {
                result[yy, xx] = (producer_storage[0, xx] +
                                  producer_storage[1, xx] +
                                  producer_storage[0, xx + 1] +
                                  producer_storage[1, xx + 1]) / 4;
            }
        }

        // Again, if we print the loop nest, we'll see something very
        // similar to the C above.
        Console.WriteLine("Pseudo-code for the schedule:");
        consumer.PrintLoopNest();
        Console.WriteLine();

        // The performance characteristics of this strategy are in
        // between inlining and compute root. We still allocate some
        // temporary memory, but less than compute_root, and with
        // better locality (we load from it soon after writing to it,
        // so for larger images, values should still be in cache). We
        // still do some redundant work, but less than full inlining:

        // producer.ComputeAt(consumer, y):
        // - Temporary memory allocated: 10 floats
        // - Loads: 64
        // - Stores: 56
        // - Calls to sin: 40
    }

    // We could also say producer.ComputeAt(consumer, x), but this
    // would be very similar to full inlining (the default
    // schedule). Instead let's distinguish between the loop level at
    // which we allocate storage for producer, and the loop level at
    // which we actually compute it. This unlocks a few optimizations.
    {
        var producer = new HSFunc("producer_root_y");
        var consumer = new HSFunc("consumer_root_y");
        producer[x, y] = HSMath.Sin(x * y);
        consumer[x, y] = (producer[x, y] +
                          producer[x, y + 1] +
                          producer[x + 1, y] +
                          producer[x + 1, y + 1]) / 4;

        // Tell Halide to make a buffer to store all of producer at
        // the outermost level:
        producer.StoreRoot();

        // ... but compute it as needed per y coordinate of the
        // consumer.
        producer.ComputeAt(consumer, y);

        producer.TraceStores();
        consumer.TraceStores();

        Console.WriteLine("\nEvaluating producer.store_root().ComputeAt(consumer, y)");
        consumer.Realize<float>(4, 4);

        // See figures/lesson_08_store_root_compute_y.gif for a
        // visualization.

        // Reading the log or looking at the figure you should see
        // that producer and consumer again alternate on a
        // per-scanline basis. It computes a 5x2 box of the producer
        // to satisfy the first scanline of the consumer, but after
        // that it only computes a 5x1 box of the output for each new
        // scanline of the consumer!
        //
        // Halide has detected that for all scanlines except for the
        // first, it can reuse the values already sitting in the
        // buffer we've allocated for producer. Let's look at the
        // equivalent C:
        var result = new float[4, 4];

        {
            // producer.store_root() implies that storage goes here:
            var producer_storage = new float[5, 5];

            // There's an outer loop over scanlines of consumer:
            for (int yy = 0; yy < 4; yy++)
            {
                // Compute enough of the producer to satisfy this
                // scanline of the consumer.
                for (int py = yy; py < yy + 2; py++)
                {
                    // Skip over rows of producer that we've already
                    // computed in a previous iteration.
                    if (yy > 0 && py == yy)
                    {
                        continue;
                    }

                    for (int px = 0; px < 5; px++)
                    {
                        producer_storage[py, px] = (float)Math.Sin(px * py);
                    }
                }

                // Compute a scanline of the consumer.
                for (int xx = 0; xx < 4; xx++)
                {
                    result[yy, xx] = (producer_storage[yy, xx] +
                                      producer_storage[yy + 1, xx] +
                                      producer_storage[yy, xx + 1] +
                                      producer_storage[yy + 1, xx + 1]) / 4;
                }
            }
        }

        Console.WriteLine("Pseudo-code for the schedule:");
        consumer.PrintLoopNest();
        Console.WriteLine();

        // The performance characteristics of this strategy are pretty
        // good!
        // The numbers are similar to compute_root, except locality is
        // better. We're doing the minimum number of sin calls, and we
        // load values soon after they are stored, so we're probably
        // making good use of the cache:

        // producer.store_root().ComputeAt(consumer, y):
        // - Temporary memory allocated: 10 floats
        // - Loads: 64
        // - Stores: 39
        // - Calls to sin: 25

        // Note that my claimed amount of memory allocated doesn't
        // match the reference C code. Halide is performing one more
        // optimization under the hood. It folds the storage for the
        // producer down into a circular buffer of two
        // scanlines. Equivalent C would actually look like this:
        {
            // Actually store 2 scanlines instead of 5
            var producer_storage = new float[2, 5];
            for (int yy = 0; yy < 4; yy++)
            {
                for (int py = yy; py < yy + 2; py++)
                {
                    if (yy > 0 && py == yy)
                    {
                        continue;
                    }

                    for (int px = 0; px < 5; px++)
                    {
                        // Stores to producer_storage have their y
                        // coordinate bit-masked.
                        producer_storage[py & 1, px] = (float)Math.Sin(px * py);
                    }
                }

                // Compute a scanline of the consumer.
                for (int xx = 0; xx < 4; xx++)
                {
                    // Loads from producer_storage have their y
                    // coordinate bit-masked.
                    result[yy, xx] = (producer_storage[yy & 1, xx] +
                                      producer_storage[(yy + 1) & 1, xx] +
                                      producer_storage[yy & 1, xx + 1] +
                                      producer_storage[(yy + 1) & 1, xx + 1]) / 4;
                }
            }
        }
    }

    // We can do even better, by leaving the storage outermost, but
    // moving the computation into the innermost loop:
    {
        var producer = new HSFunc("producer_root_x");
        var consumer = new HSFunc("consumer_root_x");
        producer[x, y] = HSMath.Sin(x * y);
        consumer[x, y] = (producer[x, y] +
                          producer[x, y + 1] +
                          producer[x + 1, y] +
                          producer[x + 1, y + 1]) / 4;

        // Store outermost, compute innermost.
        producer.StoreRoot().ComputeAt(consumer, x);

        producer.TraceStores();
        consumer.TraceStores();

        Console.WriteLine("\nEvaluating producer.store_root().ComputeAt(consumer, x)");
        consumer.Realize<float>(4, 4);

        // See figures/lesson_08_store_root_compute_x.gif for a
        // visualization.

        // You should see that producer and consumer now alternate on
        // a per-pixel basis. Here's the equivalent C:
        var result = new float[4, 4];

        // producer.store_root() implies that storage goes here, but
        // we can fold it down into a circular buffer of two
        // scanlines:
        var producer_storage = new float[2, 5];

        // For every pixel of the consumer:
        for (int yy = 0; yy < 4; yy++)
        {
            for (int xx = 0; xx < 4; xx++)
            {
                // Compute enough of the producer to satisfy this
                // pixel of the consumer, but skip values that we've
                // already computed:
                if (yy == 0 && xx == 0)
                {
                    producer_storage[yy & 1, xx] = (float)Math.Sin(xx * yy);
                }
                if (yy == 0)
                {
                    producer_storage[yy & 1, xx + 1] = (float)Math.Sin((xx + 1) * yy);
                }
                if (xx == 0)
                {
                    producer_storage[(yy + 1) & 1, xx] = (float)Math.Sin(xx * (yy + 1));
                }
                producer_storage[(yy + 1) & 1, xx + 1] = (float)Math.Sin((xx + 1) * (yy + 1));

                result[yy, xx] = (producer_storage[yy & 1, xx] +
                                  producer_storage[(yy + 1) & 1, xx] +
                                  producer_storage[yy & 1, xx + 1] +
                                  producer_storage[(yy + 1) & 1, xx + 1]) / 4;
            }
        }

        Console.WriteLine("Pseudo-code for the schedule:");
        consumer.PrintLoopNest();
        Console.WriteLine();

        // The performance characteristics of this strategy are the
        // best so far. One of the four values of the producer we need
        // is probably still sitting in a register, so I won't count
        // it as a load:

        // producer.store_root().ComputeAt(consumer, x):
        // - Temporary memory allocated: 10 floats
        // - Loads: 48
        // - Stores: 56
        // - Calls to sin: 40
    }

    // So what's the catch?
    // Why not always do
    // producer.store_root().ComputeAt(consumer, x) for this type of
    // code?
    //
    // The answer is parallelism. In both of the previous two
    // strategies we've assumed that values computed on previous
    // iterations are lying around for us to reuse. This assumes that
    // previous values of x or y happened earlier in time and have
    // finished. This is not true if you parallelize or vectorize
    // either loop. Darn. If you parallelize, Halide won't inject the
    // optimizations that skip work already done if there's a parallel
    // loop in between the store_at level and the compute_at level,
    // and won't fold the storage down into a circular buffer either,
    // which makes our store_root pointless.

    // We're running out of options. We can make new ones by
    // splitting. We can store_at or compute_at at the natural
    // variables of the consumer (x and y), or we can split x or y
    // into new inner and outer sub-variables and then schedule with
    // respect to those. We'll use this to express fusion in tiles:
    {
        var producer = new HSFunc("producer_tile");
        var consumer = new HSFunc("consumer_tile");
        producer[x, y] = HSMath.Sin(x * y);
        consumer[x, y] = (producer[x, y] +
                          producer[x, y + 1] +
                          producer[x + 1, y] +
                          producer[x + 1, y + 1]) / 4;

        // We'll compute 8x8 of the consumer, in 4x4 tiles.
        var x_outer = new HSVar("x_outer");
        var y_outer = new HSVar("y_outer");
        var x_inner = new HSVar("x_inner");
        var y_inner = new HSVar("y_inner");
        consumer.Tile(x, y, x_outer, y_outer, x_inner, y_inner, 4, 4);

        // Compute the producer per tile of the consumer.
        producer.ComputeAt(consumer, x_outer);

        // Notice that I wrote my schedule starting from the end of
        // the pipeline (the consumer). This is because the schedule
        // for the producer refers to x_outer, which we introduced
        // when we tiled the consumer. You can write it in the other
        // order, but it tends to be harder to read.

        // Turn on tracing.
        producer.TraceStores();
        consumer.TraceStores();

        Console.WriteLine("\nEvaluating:");
        Console.WriteLine("consumer.tile(x, y, x_outer, y_outer, x_inner, y_inner, 4, 4);");
        Console.WriteLine("producer.ComputeAt(consumer, x_outer);");
        consumer.Realize<float>(8, 8);

        // See figures/lesson_08_tile.gif for a visualization.

        // The producer and consumer now alternate on a per-tile
        // basis. Here's the equivalent C:
        var result = new float[8, 8];

        // For every tile of the consumer:
        for (int yy_outer = 0; yy_outer < 2; yy_outer++)
        {
            for (int xx_outer = 0; xx_outer < 2; xx_outer++)
            {
                // Compute the x and y coords of the start of this tile.
                int x_base = xx_outer * 4;
                int y_base = yy_outer * 4;

                // Compute enough of producer to satisfy this tile. A
                // 4x4 tile of the consumer requires a 5x5 tile of the
                // producer.
                var producer_storage = new float[5, 5];
                for (int py = y_base; py < y_base + 5; py++)
                {
                    for (int px = x_base; px < x_base + 5; px++)
                    {
                        producer_storage[py - y_base, px - x_base] = (float)Math.Sin(px * py);
                    }
                }

                // Compute this tile of the consumer.
                for (int yy_inner = 0; yy_inner < 4; yy_inner++)
                {
                    for (int xx_inner = 0; xx_inner < 4; xx_inner++)
                    {
                        int xx = x_base + xx_inner;
                        int yy = y_base + yy_inner;
                        result[yy, xx] =
                            (producer_storage[yy - y_base, xx - x_base] +
                             producer_storage[yy - y_base + 1, xx - x_base] +
                             producer_storage[yy - y_base, xx - x_base + 1] +
                             producer_storage[yy - y_base + 1, xx - x_base + 1]) / 4;
                    }
                }
            }
        }

        Console.WriteLine("Pseudo-code for the schedule:");
        consumer.PrintLoopNest();
        Console.WriteLine();

        // Tiling can make sense for problems like this one with
        // stencils that reach outwards in x and y.
        // Each tile can be computed independently in parallel, and
        // the redundant work done by each tile isn't so bad once the
        // tiles get large enough.
    }

    // Let's try a mixed strategy that combines what we have done with
    // splitting, parallelizing, and vectorizing. This is one that
    // often works well in practice for large images. If you
    // understand this schedule, then you understand 95% of scheduling
    // in Halide.
    {
        var producer = new HSFunc("producer_mixed");
        var consumer = new HSFunc("consumer_mixed");
        producer[x, y] = HSMath.Sin(x * y);
        consumer[x, y] = (producer[x, y] +
                          producer[x, y + 1] +
                          producer[x + 1, y] +
                          producer[x + 1, y + 1]) / 4;

        // Split the y coordinate of the consumer into strips of 16 scanlines:
        var yo = new HSVar("yo");
        var yi = new HSVar("yi");
        consumer.Split(y, yo, yi, 16);
        // Compute the strips using a thread pool and a task queue.
        consumer.Parallel(yo);
        // Vectorize across x by a factor of four.
        consumer.Vectorize(x, 4);

        // Now store the producer per-strip. This will be 17 scanlines
        // of the producer (16+1), but hopefully it will fold down
        // into a circular buffer of two scanlines:
        producer.StoreAt(consumer, yo);
        // Within each strip, compute the producer per scanline of the
        // consumer, skipping work done on previous scanlines.
        producer.ComputeAt(consumer, yi);
        // Also vectorize the producer (because sin is vectorizable on
        // x86 using SSE).
        producer.Vectorize(x, 4);

        // Let's leave tracing off this time, because we're going to
        // evaluate over a larger image.
        // consumer.TraceStores();
        // producer.TraceStores();

        var halide_result = consumer.Realize<float>(160, 160);

        // See figures/lesson_08_mixed.mp4 for a visualization.

        // Here's the equivalent (serial) C:
        var c_result = new float[160, 160];

        // For every strip of 16 scanlines (this loop is parallel in
        // the Halide version). 16 divides 160 exactly; if it didn't,
        // the last slice would be pushed upwards to fit within
        // [0, 159] (see lesson 05), which is what the clamp below
        // handles.
        for (int yyo = 0; yyo < 160 / 16; yyo++)
        {
            int y_base = yyo * 16;
            if (y_base > 160 - 16)
            {
                y_base = 160 - 16;
            }

            // Allocate a two-scanline circular buffer for the producer.
            var producer_storage = new float[2, 161];

            // For every scanline in the strip of 16:
            for (int yyi = 0; yyi < 16; yyi++)
            {
                int yy = y_base + yyi;

                for (int py = yy; py < yy + 2; py++)
                {
                    // Skip scanlines already computed *within this task*.
                    if (yyi > 0 && py == yy)
                    {
                        continue;
                    }

                    // Compute this scanline of the producer in 4-wide vectors.
                    for (int x_vec = 0; x_vec < 160 / 4 + 1; x_vec++)
                    {
                        int x_base = x_vec * 4;
                        // 4 doesn't divide 161, so push the last vector left
                        // (see lesson 05).
                        if (x_base > 161 - 4)
                        {
                            x_base = 161 - 4;
                        }

                        // If you're on x86, Halide generates SSE code for this part:
                        int[] xx = { x_base, x_base + 1, x_base + 2, x_base + 3 };
                        float[] vec =
                        {
                            (float)Math.Sin(xx[0] * py),
                            (float)Math.Sin(xx[1] * py),
                            (float)Math.Sin(xx[2] * py),
                            (float)Math.Sin(xx[3] * py)
                        };
                        producer_storage[py & 1, xx[0]] = vec[0];
                        producer_storage[py & 1, xx[1]] = vec[1];
                        producer_storage[py & 1, xx[2]] = vec[2];
                        producer_storage[py & 1, xx[3]] = vec[3];
                    }
                }

                // Now compute consumer for this scanline:
                for (int x_vec = 0; x_vec < 160 / 4; x_vec++)
                {
                    int x_base = x_vec * 4;
                    // Again, Halide's equivalent here uses SSE.
                    int[] xx = { x_base, x_base + 1, x_base + 2, x_base + 3 };
                    float[] vec =
                    {
                        (producer_storage[yy & 1, xx[0]] + producer_storage[(yy + 1) & 1, xx[0]] +
                         producer_storage[yy & 1, xx[0] + 1] + producer_storage[(yy + 1) & 1, xx[0] + 1]) / 4,
                        (producer_storage[yy & 1, xx[1]] + producer_storage[(yy + 1) & 1, xx[1]] +
                         producer_storage[yy & 1, xx[1] + 1] + producer_storage[(yy + 1) & 1, xx[1] + 1]) / 4,
                        (producer_storage[yy & 1, xx[2]] + producer_storage[(yy + 1) & 1, xx[2]] +
                         producer_storage[yy & 1, xx[2] + 1] + producer_storage[(yy + 1) & 1, xx[2] + 1]) / 4,
                        (producer_storage[yy & 1, xx[3]] + producer_storage[(yy + 1) & 1, xx[3]] +
                         producer_storage[yy & 1, xx[3] + 1] + producer_storage[(yy + 1) & 1, xx[3] + 1]) / 4
                    };
                    c_result[yy, xx[0]] = vec[0];
                    c_result[yy, xx[1]] = vec[1];
                    c_result[yy, xx[2]] = vec[2];
                    c_result[yy, xx[3]] = vec[3];
                }
            }
        }

        Console.WriteLine("Pseudo-code for the schedule:");
        consumer.PrintLoopNest();
        Console.WriteLine();

        // Look on my code, ye mighty, and despair!

        // Let's check the C result against the Halide result. Doing
        // this I found several bugs in my C implementation, which
        // should tell you something.
        for (int yy = 0; yy < 160; yy++)
        {
            for (int xx = 0; xx < 160; xx++)
            {
                float error = halide_result[xx, yy] - c_result[yy, xx];
                // It's floating-point math, so we'll allow some slop:
                if (error < -0.001f || error > 0.001f)
                {
                    Console.WriteLine($"halide_result({xx}, {yy}) = {halide_result[xx, yy]} " +
                                      $"instead of {c_result[yy, xx]}");
                    return -1;
                }
            }
        }
    }

    // This stuff is hard. We ended up in a three-way trade-off
    // between memory bandwidth, redundant work, and
    // parallelism. Halide can't make the correct choice for you
    // automatically (sorry). Instead it tries to make it easier for
    // you to explore various options, without messing up your
    // program. In fact, Halide promises that scheduling calls like
    // compute_root won't change the meaning of your algorithm -- you
    // should get the same bits back no matter how you schedule
    // things.

    // So be empirical! Experiment with various schedules and keep a
    // log of performance. Form hypotheses and then try to prove
    // yourself wrong. Don't assume that you just need to vectorize
    // your code by a factor of four and run it on eight cores and
    // you'll get 32x faster. This almost never works. Modern systems
    // are complex enough that you can't predict performance reliably
    // without running your code.

    // We suggest you start by scheduling all of your non-trivial
    // stages compute_root, and then work from the end of the pipeline
    // upwards, inlining, parallelizing, and vectorizing each stage in
    // turn until you reach the top.

    // Halide is not just about vectorizing and parallelizing your
    // code. That's not enough to get you very far. Halide is about
    // giving you tools that help you quickly explore different
    // trade-offs between locality, redundant work, and parallelism,
    // without messing up the actual result you're trying to compute.

    Console.WriteLine("Success!");
    return 0;
}
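// A sketch (not from the original lesson) of the workflow suggested
// above, applied to the producer/consumer pipeline: begin with
// everything compute_root as a slow-but-correct baseline, then move
// stages inward one at a time, re-timing after each change. The
// schedule shown is one plausible end point, not the unique answer.
//
// producer.ComputeRoot();            // step 1: correct baseline; measure it
//
// producer.StoreAt(consumer, yo)     // step 2: improve locality within strips
//         .ComputeAt(consumer, yi)
//         .Vectorize(x, 4);          // step 3: vectorize; measure again and
//                                    // keep whichever schedule is faster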
// Now a schedule that uses CUDA or OpenCL.
public void ScheduleForGpu()
{
    // We make the decision about whether to use the GPU for each
    // Func independently. If you have one Func computed on the
    // CPU, and the next computed on the GPU, Halide will do the
    // copy-to-gpu under the hood. For this pipeline, there's no
    // reason to use the CPU for any of the stages. Halide will
    // copy the input image to the GPU the first time we run the
    // pipeline, and leave it there to reuse on subsequent runs.

    // As before, we'll compute the LUT once at the start of the
    // pipeline.
    Lut.ComputeRoot();

    // Let's compute the look-up-table using the GPU in 16-wide
    // one-dimensional thread blocks. First we split the index
    // into blocks of size 16:
    var block = new HSVar("block");
    var thread = new HSVar("thread");
    Lut.Split(I, block, thread, 16);
    // Then we tell CUDA that our Vars 'block' and 'thread'
    // correspond to CUDA's notions of blocks and threads, or
    // OpenCL's notions of thread groups and threads.
    Lut.GpuBlocks(block)
       .GpuThreads(thread);

    // This is a very common scheduling pattern on the GPU, so
    // there's a shorthand for it:

    // lut.gpu_tile(i, block, thread, 16);

    // Func::gpu_tile behaves the same as Func::tile, except that
    // it also specifies that the tile coordinates correspond to
    // GPU blocks, and the coordinates within each tile correspond
    // to GPU threads.

    // Compute color channels innermost. Promise that there will
    // be three of them and unroll across them.
    Curved.Reorder(C, X, Y)
          .Bound(C, 0, 3)
          .Unroll(C);

    // Compute curved in 2D 8x8 tiles using the GPU.
    Curved.GpuTile(X, Y, XO, YO, XI, YI, 8, 8);

    // This is equivalent to:
    // curved.tile(x, y, xo, yo, xi, yi, 8, 8)
    //       .gpu_blocks(xo, yo)
    //       .gpu_threads(xi, yi);

    // We'll leave sharpen as inlined into curved.

    // Compute the padded input as needed per GPU block, storing
    // the intermediate result in shared memory. In the schedule
    // above xo corresponds to GPU blocks.
    Padded.ComputeAt(Curved, XO);

    // Use the GPU threads for the x and y coordinates of the
    // padded input.
    Padded.GpuThreads(X, Y);

    // JIT-compile the pipeline for the GPU. CUDA, OpenCL, and
    // Metal are not enabled by default. We have to construct a
    // Target object, enable one of them, and then pass that
    // target object to compile_jit. Otherwise your CPU will very
    // slowly pretend it's a GPU, and use one thread per output
    // pixel.

    // Start with a target suitable for the machine you're running
    // this on.
    var target = HS.GetHostTarget();

    // Then enable OpenCL or Metal, depending on which platform
    // we're on. OS X doesn't update its OpenCL drivers, so they
    // tend to be broken. CUDA would also be a fine choice on
    // machines with NVidia GPUs.
    if (target.OS == HSOperatingSystem.OSX)
    {
        target.SetFeature(HSFeature.Metal);
    }
    else
    {
        target.SetFeature(HSFeature.OpenCL);
    }

    // Uncomment the next line and comment out the lines above to
    // try CUDA instead.
    // target.SetFeature(HSFeature.CUDA);

    // If you want to see all of the OpenCL, Metal, or CUDA API
    // calls done by the pipeline, you can also enable the Debug
    // flag. This is helpful for figuring out which stages are
    // slow, or when CPU -> GPU copies happen. It hurts
    // performance though, so we'll leave it commented out.
    // target.set_feature(Target::Debug);

    Curved.CompileJit(target);
}
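// A minimal benchmarking sketch (assumptions noted): since Halide
// copies the input to the GPU on the first run and leaves it there,
// the first realization includes JIT compilation and that copy, so it
// should be excluded from timing. 'MyPipeline', 'input', and the
// output shape are hypothetical names for illustration:
//
// var p = new MyPipeline(input);
// p.ScheduleForGpu();
// p.Curved.Realize<byte>(input.Width, input.Height, 3); // warm-up run
// var sw = System.Diagnostics.Stopwatch.StartNew();
// for (int i = 0; i < 10; i++)
// {
//     p.Curved.Realize<byte>(input.Width, input.Height, 3);
// }
// sw.Stop();
// Console.WriteLine($"{sw.Elapsed.TotalMilliseconds / 10} ms per run");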
public static int Main(string[] args)
{
    // We're going to define and schedule our gradient function in
    // several different ways, and see what order pixels are computed
    // in.
    var x = new HSVar("x");
    var y = new HSVar("y");

    // First we observe the default ordering.
    {
        var gradient = new HSFunc("gradient");
        gradient[x, y] = x + y;
        gradient.TraceStores();

        // By default we walk along the rows and then down the
        // columns. This means x varies quickly, and y varies
        // slowly. x is the column and y is the row, so this is a
        // row-major traversal.
        Console.WriteLine("Evaluating gradient row-major");
        var output = gradient.Realize<int>(4, 4);

        // See figures/lesson_05_row_major.gif for a visualization of
        // what this did.

        // The equivalent C is:
        Console.WriteLine("Equivalent C:");
        for (int yy = 0; yy < 4; yy++)
        {
            for (int xx = 0; xx < 4; xx++)
            {
                Console.WriteLine($"Evaluating at x = {xx}, y = {yy}: {xx + yy}");
            }
        }
        Console.WriteLine("\n");

        // Tracing is one useful way to understand what a schedule is
        // doing. You can also ask Halide to print out pseudocode
        // showing what loops Halide is generating:
        Console.WriteLine("Pseudo-code for the schedule:");
        gradient.PrintLoopNest();
        Console.WriteLine();

        // Because we're using the default ordering, it should print:
        // compute gradient:
        //   for y:
        //     for x:
        //       gradient(...) = ...
    }

    // Reorder variables.
    {
        var gradient = new HSFunc("gradient_col_major");
        gradient[x, y] = x + y;
        gradient.TraceStores();

        // If we reorder x and y, we can walk down the columns
        // instead. The reorder call takes the arguments of the func,
        // and sets a new nesting order for the for loops that are
        // generated. The arguments are specified from the innermost
        // loop out, so the following call puts y in the inner loop:
        gradient.Reorder(y, x);

        // This means y (the row) will vary quickly, and x (the
        // column) will vary slowly, so this is a column-major
        // traversal.
        Console.WriteLine("Evaluating gradient column-major");
        var output = gradient.Realize<int>(4, 4);

        // See figures/lesson_05_col_major.gif for a visualization of
        // what this did.

        Console.WriteLine("Equivalent C:");
        for (int xx = 0; xx < 4; xx++)
        {
            for (int yy = 0; yy < 4; yy++)
            {
                Console.WriteLine($"Evaluating at x = {xx}, y = {yy}: {xx + yy}");
            }
        }
        Console.WriteLine();

        // If we print pseudo-code for this schedule, we'll see that
        // the loop over y is now inside the loop over x.
        Console.WriteLine("Pseudo-code for the schedule:");
        gradient.PrintLoopNest();
        Console.WriteLine();
    }

    // Split a variable into two.
    {
        var gradient = new HSFunc("gradient_split");
        gradient[x, y] = x + y;
        gradient.TraceStores();

        // The most powerful primitive scheduling operation you can do
        // to a var is to split it into inner and outer sub-variables:
        var x_outer = new HSVar("x_outer");
        var x_inner = new HSVar("x_inner");
        gradient.Split(x, x_outer, x_inner, 2);

        // This breaks the loop over x into two nested loops: an outer
        // one over x_outer, and an inner one over x_inner. The last
        // argument to split was the "split factor". The inner loop
        // runs from zero to the split factor. The outer loop runs
        // from zero to the extent required of x (4 in this case)
        // divided by the split factor. Within the loops, the old
        // variable is defined to be outer * factor + inner. If the
        // old loop started at a value other than zero, then that is
        // also added within the loops.
Console.WriteLine("Evaluating gradient with x split into x_outer and x_inner "); var output = gradient.Realize <int>(4, 4); Console.WriteLine("Equivalent C:"); for (int yy = 0; yy < 4; yy++) { for (int xOuter = 0; xOuter < 2; xOuter++) { for (int xInner = 0; xInner < 2; xInner++) { int xx = xOuter * 2 + xInner; Console.WriteLine($"Evaluating at x = {xx}, y = {yy}: {xx + yy}"); } } } Console.WriteLine(); Console.WriteLine("Pseudo-code for the schedule:"); gradient.PrintLoopNest(); Console.WriteLine(); // Note that the order of evaluation of pixels didn't actually // change! Splitting by itself does nothing, but it does open // up all of the scheduling possibilities that we will explore // below. } // Fuse two variables into one. { var gradient = new HSFunc("gradient_fused"); gradient[x, y] = x + y; // The opposite of splitting is 'fusing'. Fusing two variables // merges the two loops into a single for loop over the // product of the extents. Fusing is less important than // splitting, but it also sees use (as we'll see later in this // lesson). Like splitting, fusing by itself doesn't change // the order of evaluation. var fused = new HSVar("fused"); gradient.Fuse(x, y, fused); Console.WriteLine("Evaluating gradient with x and y fused"); var output = gradient.Realize <int>(4, 4); Console.WriteLine("Equivalent C:"); for (int f = 0; f < 4 * 4; f++) { int yy = f / 4; int xx = f % 4; Console.WriteLine($"Evaluating at x = {xx}, y = {yy}: {xx + yy}"); } Console.WriteLine(); Console.WriteLine("Pseudo-code for the schedule:"); gradient.PrintLoopNest(); Console.WriteLine(); } // Evaluating in tiles. { var gradient = new HSFunc("gradient_tiled"); gradient[x, y] = x + y; gradient.TraceStores(); // Now that we can both split and reorder, we can do tiled // evaluation. Let's split both x and y by a factor of four, // and then reorder the vars to express a tiled traversal. // // A tiled traversal splits the domain into small rectangular // tiles, and outermost iterates over the tiles, and within // that iterates over the points within each tile. It can be // good for performance if neighboring pixels use overlapping // input data, for example in a blur. We can express a tiled // traversal like so: var x_outer = new HSVar("x_outer"); var x_inner = new HSVar("x_inner"); var y_outer = new HSVar("y_outer"); var y_inner = new HSVar("y_inner"); gradient.Split(x, x_outer, x_inner, 4); gradient.Split(y, y_outer, y_inner, 4); gradient.Reorder(x_inner, y_inner, x_outer, y_outer); // This pattern is common enough that there's a shorthand for it: // gradient.tile(x, y, x_outer, y_outer, x_inner, y_inner, 4, 4); Console.WriteLine("Evaluating gradient in 4x4 tiles"); var output = gradient.Realize <int>(8, 8); // See figures/lesson_05_tiled.gif for a visualization of this // schedule. Console.WriteLine("Equivalent C:"); for (int yOuter = 0; yOuter < 2; yOuter++) { for (int xOuter = 0; xOuter < 2; xOuter++) { for (int yInner = 0; yInner < 4; yInner++) { for (int xInner = 0; xInner < 4; xInner++) { int xx = xOuter * 4 + xInner; int yy = yOuter * 4 + yInner; Console.WriteLine($"Evaluating at x = {xx}, y = {yy}: {xx + yy}"); } } } } Console.WriteLine(); Console.WriteLine("Pseudo-code for the schedule:"); gradient.PrintLoopNest(); Console.WriteLine(); } // Evaluating in vectors. { var gradient = new HSFunc("gradient_in_vectors"); gradient[x, y] = x + y; gradient.TraceStores(); // The nice thing about splitting is that it guarantees the // inner variable runs from zero to the split factor. 
        // Most of the time the split-factor will be a compile-time
        // constant, so we can replace the loop over the inner
        // variable with a single vectorized computation. This time
        // we'll split by a factor of four, because on X86 we can use
        // SSE to compute in 4-wide vectors.
        var x_outer = new HSVar("x_outer");
        var x_inner = new HSVar("x_inner");
        gradient.Split(x, x_outer, x_inner, 4);
        gradient.Vectorize(x_inner);

        // Splitting and then vectorizing the inner variable is common
        // enough that there's a short-hand for it. We could have also
        // said:
        //
        // gradient.vectorize(x, 4);
        //
        // which is equivalent to:
        //
        // gradient.split(x, x, x_inner, 4);
        // gradient.vectorize(x_inner);
        //
        // Note that in this case we reused the name 'x' as the new
        // outer variable. Later scheduling calls that refer to x
        // will refer to this new outer variable named x.

        // This time we'll evaluate over an 8x4 box, so that we have
        // more than one vector of work per scanline.
        Console.WriteLine("Evaluating gradient with x_inner vectorized");
        var output = gradient.Realize<int>(8, 4);

        // See figures/lesson_05_vectors.gif for a visualization.

        Console.WriteLine("Equivalent C:");
        for (int yy = 0; yy < 4; yy++)
        {
            for (int xOuter = 0; xOuter < 2; xOuter++)
            {
                // The loop over x_inner has gone away, and has been
                // replaced by a vectorized version of the
                // expression. On x86 processors, Halide generates SSE
                // for all of this.
                int[] x_vec =
                {
                    xOuter * 4 + 0,
                    xOuter * 4 + 1,
                    xOuter * 4 + 2,
                    xOuter * 4 + 3
                };
                int[] val =
                {
                    x_vec[0] + yy,
                    x_vec[1] + yy,
                    x_vec[2] + yy,
                    x_vec[3] + yy
                };
                Console.WriteLine($"Evaluating at " +
                                  $"<{x_vec[0]}, {x_vec[1]}, {x_vec[2]}, {x_vec[3]}>, " +
                                  $"<{yy}, {yy}, {yy}, {yy}>: " +
                                  $"<{val[0]}, {val[1]}, {val[2]}, {val[3]}>");
            }
        }
        Console.WriteLine();

        Console.WriteLine("Pseudo-code for the schedule:");
        gradient.PrintLoopNest();
        Console.WriteLine();
    }

    // Unrolling a loop.
    {
        var gradient = new HSFunc("gradient_unroll");
        gradient[x, y] = x + y;
        gradient.TraceStores();

        // If multiple pixels share overlapping data, it can make
        // sense to unroll a computation so that shared values are
        // only computed or loaded once. We do this similarly to how
        // we expressed vectorizing. We split a dimension and then
        // fully unroll the loop of the inner variable. Unrolling
        // doesn't change the order in which things are evaluated.
        var x_outer = new HSVar("x_outer");
        var x_inner = new HSVar("x_inner");
        gradient.Split(x, x_outer, x_inner, 2);
        gradient.Unroll(x_inner);

        // The shorthand for this is:
        // gradient.unroll(x, 2);

        Console.WriteLine("Evaluating gradient unrolled by a factor of two");
        var result = gradient.Realize<int>(4, 4);

        Console.WriteLine("Equivalent C:");
        for (int yy = 0; yy < 4; yy++)
        {
            for (int xOuter = 0; xOuter < 2; xOuter++)
            {
                // Instead of a for loop over x_inner, we get two
                // copies of the innermost statement.
                {
                    int xInner = 0;
                    int xx = xOuter * 2 + xInner;
                    Console.WriteLine($"Evaluating at x = {xx}, y = {yy}: {xx + yy}");
                }
                {
                    int xInner = 1;
                    int xx = xOuter * 2 + xInner;
                    Console.WriteLine($"Evaluating at x = {xx}, y = {yy}: {xx + yy}");
                }
            }
        }
        Console.WriteLine();

        Console.WriteLine("Pseudo-code for the schedule:");
        gradient.PrintLoopNest();
        Console.WriteLine();
    }

    // Splitting by factors that don't divide the extent.
    {
        var gradient = new HSFunc("gradient_split_7x2");
        gradient[x, y] = x + y;
        gradient.TraceStores();

        // Splitting guarantees that the inner loop runs from zero to
        // the split factor, which is important for the uses we saw
        // above.
        // So what happens when the total extent we wish to evaluate x
        // over isn't a multiple of the split factor? We'll split by a
        // factor of three, and we'll evaluate gradient over a 7x2 box
        // instead of the 4x4 box we've been using.
        var x_outer = new HSVar("x_outer");
        var x_inner = new HSVar("x_inner");
        gradient.Split(x, x_outer, x_inner, 3);

        Console.WriteLine("Evaluating gradient over a 7x2 box with x split by three");
        var output = gradient.Realize<int>(7, 2);

        // See figures/lesson_05_split_7_by_3.gif for a visualization
        // of what happened. Note that some points get evaluated more
        // than once!

        Console.WriteLine("Equivalent C:");
        for (int yy = 0; yy < 2; yy++)
        {
            for (int xOuter = 0; xOuter < 3; xOuter++) // Now runs from 0 to 2
            {
                for (int xInner = 0; xInner < 3; xInner++)
                {
                    int xx = xOuter * 3;
                    // Before we add x_inner, make sure we don't
                    // evaluate points outside of the 7x2 box. We'll
                    // clamp x to be at most 4 (7 minus the split
                    // factor).
                    if (xx > 4)
                    {
                        xx = 4;
                    }
                    xx += xInner;
                    Console.WriteLine($"Evaluating at x = {xx}, y = {yy}: {xx + yy}");
                }
            }
        }
        Console.WriteLine();

        Console.WriteLine("Pseudo-code for the schedule:");
        gradient.PrintLoopNest();
        Console.WriteLine();

        // If you read the output, you'll see that some coordinates
        // were evaluated more than once. That's generally OK, because
        // pure Halide functions have no side-effects, so it's safe to
        // evaluate the same point multiple times. If you're calling
        // out to C functions like we are, it's your responsibility to
        // make sure you can handle the same point being evaluated
        // multiple times.

        // The general rule is: if we require x from x_min to
        // x_min + x_extent, and we split by a factor 'factor', then:
        //
        // x_outer runs from 0 to (x_extent + factor - 1)/factor
        // x_inner runs from 0 to factor
        // x = min(x_outer * factor, x_extent - factor) + x_inner + x_min
        //
        // In our example, x_min was 0, x_extent was 7, and factor was 3.

        // However, if you write a Halide function with an update
        // definition (see lesson 9), then it is not safe to evaluate
        // the same point multiple times, so we won't apply this
        // trick. Instead the range of values computed will be rounded
        // up to the next multiple of the split factor.
    }

    // Fusing, tiling, and parallelizing.
    {
        // We saw in the previous lesson that we can parallelize
        // across a variable. Here we combine it with fusing and
        // tiling to express a useful pattern - processing tiles in
        // parallel.

        // This is where fusing shines. Fusing helps when you want to
        // parallelize across multiple dimensions without introducing
        // nested parallelism. Nested parallelism (parallel for loops
        // within parallel for loops) is supported by Halide, but
        // often gives poor performance compared to fusing the
        // parallel variables into a single parallel for loop.
        var gradient = new HSFunc("gradient_fused_tiles");
        gradient[x, y] = x + y;
        gradient.TraceStores();

        // First we'll tile, then we'll fuse the tile indices and
        // parallelize across the combination.
var x_outer = new HSVar("x_outer"); var y_outer = new HSVar("y_outer"); var x_inner = new HSVar("x_inner"); var y_inner = new HSVar("y_inner"); var tile_index = new HSVar("tile_index"); gradient.Tile(x, y, x_outer, y_outer, x_inner, y_inner, 4, 4); gradient.Fuse(x_outer, y_outer, tile_index); gradient.Parallel(tile_index); // The scheduling calls all return a reference to the Func, so // you can also chain them together into a single statement to // make things slightly clearer: // // gradient // .tile(x, y, x_outer, y_outer, x_inner, y_inner, 2, 2) // .fuse(x_outer, y_outer, tile_index) // .parallel(tile_index); Console.WriteLine("Evaluating gradient tiles in parallel"); var output = gradient.Realize <int>(8, 8); // The tiles should occur in arbitrary order, but within each // tile the pixels will be traversed in row-major order. See // figures/lesson_05_parallel_tiles.gif for a visualization. Console.WriteLine("Equivalent (serial) C:\n"); // This outermost loop should be a parallel for loop, but that's hard in C. for (int ti = 0; ti < 4; ti++) { int yOuter = ti / 2; int xOuter = ti % 2; for (int j_inner = 0; j_inner < 4; j_inner++) { for (int i_inner = 0; i_inner < 4; i_inner++) { int j = yOuter * 4 + j_inner; int i = xOuter * 4 + i_inner; Console.WriteLine($"Evaluating at x = {i}, y = {j}: {i + j}"); } } } Console.WriteLine(); Console.WriteLine("Pseudo-code for the schedule:"); gradient.PrintLoopNest(); Console.WriteLine(); } // Putting it all together. { // Are you ready? We're going to use all of the features above now. var gradient_fast = new HSFunc("gradient_fast"); gradient_fast[x, y] = x + y; // We'll process 64x64 tiles in parallel. var x_outer = new HSVar("x_outer"); var y_outer = new HSVar("y_outer"); var x_inner = new HSVar("x_inner"); var y_inner = new HSVar("y_inner"); var tile_index = new HSVar("tile_index"); gradient_fast .Tile(x, y, x_outer, y_outer, x_inner, y_inner, 64, 64) .Fuse(x_outer, y_outer, tile_index) .Parallel(tile_index); // We'll compute two scanlines at once while we walk across // each tile. We'll also vectorize in x. The easiest way to // express this is to recursively tile again within each tile // into 4x2 subtiles, then vectorize the subtiles across x and // unroll them across y: var x_inner_outer = new HSVar("x_inner_outer"); var y_inner_outer = new HSVar("y_inner_outer"); var x_vectors = new HSVar("x_vectors"); var y_pairs = new HSVar("y_pairs"); gradient_fast .Tile(x_inner, y_inner, x_inner_outer, y_inner_outer, x_vectors, y_pairs, 4, 2) .Vectorize(x_vectors) .Unroll(y_pairs); // Note that we didn't do any explicit splitting or // reordering. Those are the most important primitive // operations, but mostly they are buried underneath tiling, // vectorizing, or unrolling calls. // Now let's evaluate this over a range which is not a // multiple of the tile size. // If you like you can turn on tracing, but it's going to // produce a lot of printfs. Instead we'll compute the answer // both in C and Halide and see if the answers match. var result = gradient_fast.Realize <int>(350, 250); // See figures/lesson_05_fast.mp4 for a visualization. 
Console.WriteLine("Checking Halide result against equivalent C..."); for (int tileIndex = 0; tileIndex < 6 * 4; tileIndex++) { int yOuter = tileIndex / 4; int xOuter = tileIndex % 4; for (int yInnerOuter = 0; yInnerOuter < 64 / 2; yInnerOuter++) { for (int xInnerOuter = 0; xInnerOuter < 64 / 4; xInnerOuter++) { // We're vectorized across x int xx = Math.Min(xOuter * 64, 350 - 64) + xInnerOuter * 4; int[] xVec = { xx + 0, xx + 1, xx + 2, xx + 3 }; // And we unrolled across y int yBase = Math.Min(yOuter * 64, 250 - 64) + yInnerOuter * 2; { // y_pairs = 0 int yy = yBase + 0; int[] yVec = { yy, yy, yy, yy }; int[] val = { xVec[0] + yVec[0], xVec[1] + yVec[1], xVec[2] + yVec[2], xVec[3] + yVec[3] }; // Check the result. for (int i = 0; i < 4; i++) { if (result[xVec[i], yVec[i]] != val[i]) { Console.WriteLine($"There was an error at {xVec[i]} {yVec[i]}!"); return(-1); } } } { // y_pairs = 1 int yy = yBase + 1; int[] yVec = { yy, yy, yy, yy }; int[] val = { xVec[0] + yVec[0], xVec[1] + yVec[1], xVec[2] + yVec[2], xVec[3] + yVec[3] }; // Check the result. for (int i = 0; i < 4; i++) { if (result[xVec[i], yVec[i]] != val[i]) { Console.WriteLine($"There was an error at {xVec[i]} {yVec[i]}!"); return(-1); } } } } } } Console.WriteLine(); Console.WriteLine("Pseudo-code for the schedule:"); gradient_fast.PrintLoopNest(); Console.WriteLine(); // Note that in the Halide version, the algorithm is specified // once at the top, separately from the optimizations, and there // aren't that many lines of code total. Compare this to the C // version. There's more code (and it isn't even parallelized or // vectorized properly). More annoyingly, the statement of the // algorithm (the result is x plus y) is buried in multiple places // within the mess. This C code is hard to write, hard to read, // hard to debug, and hard to optimize further. This is why Halide // exists. } Console.WriteLine("Success!"); return(0); }