/// <summary>
        /// Accumulate the data or diffs from one BlobCollection into this one.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn instance used to add the blobs into this collection.</param>
        /// <param name="src">Specifies the source BlobCollection to add into this one.</param>
        /// <param name="bAccumulateDiff">Specifies to accumulate diffs when <i>true</i>, and the data otherwise.</param>
        /// <exception cref="Exception">Thrown when the source collection is larger than this one, or when
        /// the source and destination blobs at a given index have different sizes.</exception>
        public void Accumulate(CudaDnn<T> cuda, BlobCollection<T> src, bool bAccumulateDiff)
        {
            // Guard against walking past the end of the destination collection, which
            // would otherwise surface as an opaque IndexOutOfRangeException below.
            if (src.Count > m_rgBlobs.Count)
            {
                throw new Exception("The source collection has more blobs (" + src.Count.ToString() + ") than the destination collection (" + m_rgBlobs.Count.ToString() + ")!");
            }

            for (int i = 0; i < src.Count; i++)
            {
                Blob<T> bSrc   = src[i];
                Blob<T> bDst   = m_rgBlobs[i];
                int     nCount = bSrc.count();

                if (nCount != bDst.count())
                {
                    throw new Exception("The src and dst blobs at index #" + i.ToString() + " have different sizes!");
                }

                if (bAccumulateDiff)
                {
                    // Only accumulate diffs when both blobs actually allocate diff memory.
                    if (bSrc.DiffExists && bDst.DiffExists)
                    {
                        cuda.add(nCount, bSrc.gpu_diff, bDst.gpu_diff, bDst.mutable_gpu_diff);
                    }
                }
                else
                {
                    cuda.add(nCount, bSrc.gpu_data, bDst.gpu_data, bDst.mutable_gpu_data);
                }
            }
        }
Beispiel #2
0
        /// <summary>
        /// Demonstrates a simple GPU addition of two scalar blobs: result = input1 + input2.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn instance used for the GPU math.</param>
        /// <param name="log">Specifies the output log.</param>
        /// <param name="fInput1">Specifies the first scalar value.</param>
        /// <param name="fInput2">Specifies the second scalar value.</param>
        /// <returns>The result of the addition, transferred back to CPU memory, is returned.</returns>
        public static float[] myBlobAdditionTest(CudaDnn <float> cuda, Log log, float fInput1, float fInput2)
        {
            Blob <float> scalar1    = null;
            Blob <float> scalar2    = null;
            Blob <float> blobResult = null;

            try
            {
                // Create the blobs and load their input data.
                scalar1 = CuSca(cuda, log, fInput1);
                Console.WriteLine("Scalar 1 gpu_data = {0}", scalar1.gpu_data);

                scalar2 = CuSca(cuda, log, fInput2);
                Console.WriteLine("Scalar 2 gpu_data = {0}", scalar2.gpu_data);

                // Do the add.  Clone scalar2 so the result has matching shape/allocation.
                blobResult = scalar2.Clone();
                cuda.add(scalar1.count(), scalar1.gpu_data, scalar2.gpu_data, blobResult.mutable_gpu_data);

                // Transfer the data back to CPU memory and return the result.
                return blobResult.mutable_cpu_data;
            }
            finally
            {
                // Free up any resources used (including any GPU memory used),
                // even when one of the CUDA calls above throws.
                if (scalar1 != null)
                    scalar1.Dispose();

                if (scalar2 != null)
                    scalar2.Dispose();

                if (blobResult != null)
                    blobResult.Dispose();
            }
        }
Beispiel #3
0
        //=====================================================================
        //  Simple Blob Example #3 - performing a simple addition w/o SimpleDatum
        //=====================================================================
        /// <summary>
        /// Demonstrates adding a blob's diff to its data in-place on the GPU.
        /// Loads [1,2,3] into the blob data, sets every diff value to 1.0, then
        /// computes data = data + diff and verifies each result on the CPU.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn instance used for the GPU math.</param>
        /// <param name="log">Specifies the output log used for the result checks.</param>
        public static void runSimpleBlobExample3(CudaDnn <float> cuda, Log log)
        {
            float[] rgInput = new float[] { 1.0f, 2.0f, 3.0f };

            // Load Blob which holds data in GPU memory.
            Blob <float> blob = new Blob <float>(cuda, log, true);

            try
            {
                // Reshape the blob (to allocate the GPU memory to
                // match the size of the CPU data that will be copied)
                blob.Reshape(1, 1, 1, 3);

                // Transfer the CPU data to the GPU data of Blob's data
                blob.mutable_cpu_data = rgInput;

                // Blob gpu_data holds the handle to the GPU data memory
                // (which actually resides in the low-level CudaDnnDll)
                long hData = blob.gpu_data;

                // Blob gpu_diff holds the handle to the GPU diff memory
                // (which also resides in the low-level CudaDnnDll)
                long hDiff = blob.gpu_diff;

                // Set all diff values to 1.0f
                blob.SetDiff(1.0);

                // Use CudaDnn to add the data = data + diff (in-place; the
                // output handle is the same as the first input handle).
                cuda.add(blob.count(), hData, hDiff, hData);

                // Transfer the data from the GPU back to the CPU.
                float[] rgResult = blob.mutable_cpu_data;

                log.CHECK_EQ(rgResult[0], 1.0f + 1.0f, "incorrect values.");
                Console.WriteLine("1.0 + 1.0 = " + rgResult[0].ToString());

                log.CHECK_EQ(rgResult[1], 2.0f + 1.0f, "incorrect values.");
                Console.WriteLine("2.0 + 1.0 = " + rgResult[1].ToString());

                log.CHECK_EQ(rgResult[2], 3.0f + 1.0f, "incorrect values.");
                Console.WriteLine("3.0 + 1.0 = " + rgResult[2].ToString());
            }
            finally
            {
                // Free all GPU memory used, even when a check or CUDA call throws.
                blob.Dispose();
            }
        }
Beispiel #4
0
 /// <summary>
 /// Accumulate the diff of the 'work' blob into the bottom blob's diff.
 /// </summary>
 /// <param name="work">Specifies the blob whose diff values are added in.</param>
 /// <param name="btm">Specifies the bottom blob whose diff receives the accumulated values.</param>
 private void apply(Blob <T> work, Blob <T> btm)
 {
     // btm.diff = work.diff + btm.diff, computed element-wise on the GPU.
     m_cuda.add(btm.count(), work.gpu_diff, btm.gpu_diff, btm.mutable_gpu_diff);
 }
Beispiel #5
0
        /// <summary>
        /// Renders the deep draw image(s) depending on the Octave's installed.
        /// </summary>
        /// <param name="bmpInput">Specifies the input image.</param>
        /// <param name="nFocusLabel">Specifies a label to focus on (use this when running on classifying layers).</param>
        /// <param name="dfDetailPercentageToOutput">Optionally, specifies the amount of detail to apply to the original image when producing the final image (Default = 0.25 for 25%).</param>
        /// <param name="strOutputDir">Optionally, specifies the output directory where images are to be output.  When <i>null</i>, no images are output, but are instead set in each Octave.</param>
        /// <param name="bVisualizeEachStep">Optionally, specifies to create an image at each step of the process which can be useful when making a video of the evolution (default = <i>false</i>).</param>
        /// <param name="rgDirectInputs">Optionally, specifies the direct inputs used to set each output.  When not <i>null</i> the direct inputs are used instead of the <i>nFocusLabel</i> whereby the
        /// network outputs are set to the direct input values and the <i>nFocusLabel</i> is used to index the image and should therefore be unique for each set of direct inputs.
        /// By default, this value is set to <i>null</i>.
        /// </param>
        /// <returns>Upon completing the render, this method returns <i>true</i>, otherwise if cancelled it returns <i>false</i>.</returns>
        public bool Render(Bitmap bmpInput, int nFocusLabel = -1, double dfDetailPercentageToOutput = 0.25, string strOutputDir = null, bool bVisualizeEachStep = false, float[] rgDirectInputs = null)
        {
            // Direct inputs require a non-negative focus label, which is used only
            // to uniquely index the produced image for this input set.
            if (rgDirectInputs != null && nFocusLabel < 0)
            {
                throw new Exception("The focus label must be set to a unique value >= 0 that corresponds to this specific direct input set.");
            }

            // get the input dimensions from net
            Blob <T> blobSrc = m_net.blob_by_name("data");

            int nW = blobSrc.width;
            int nH = blobSrc.height;

            m_log.WriteLine("Starting drawing...");
            blobSrc.Reshape(1, 3, nH, nW);    // resize the networks input.

            // Set the base data.
            if (strOutputDir != null)
            {
                bmpInput.Save(strOutputDir + "\\input_image.png");
            }

            // Convert the input bitmap into the network's input format and load it
            // as the base image that each octave iterates from.
            Datum d = ImageData.GetImageData(bmpInput, 3, false, -1);

            m_blobBase.mutable_cpu_data = m_transformer.Transform(d);

            // Reset the accumulated detail and the blur working buffer before starting.
            m_blobDetail.SetData(0.0);
            m_blobBlur.SetData(0);

            for (int i = 0; i < m_rgOctaves.Count; i++)
            {
                Octaves o = m_rgOctaves[i];
                // Select layer.
                string strLayer = o.LayerName;

                // Add changed details to the image.
                // NOTE(review): only done in the nFocusLabel < 0 mode; the focused
                // mode instead restores blobSrc from m_blobBase inside the loop below.
                if (nFocusLabel < 0)
                {
                    m_cuda.add(blobSrc.count(), m_blobBase.gpu_data, m_blobDetail.gpu_data, blobSrc.mutable_gpu_data, o.PercentageOfPreviousOctaveDetailsToApply);
                }

                for (int j = 0; j < o.IterationN; j++)
                {
                    // Bail out promptly (without saving) when cancellation is signalled.
                    if (m_evtCancel.WaitOne(0))
                    {
                        return(false);
                    }

                    if (nFocusLabel >= 0)
                    {
                        blobSrc.CopyFrom(m_blobBase);
                    }

                    // Linearly interpolate sigma and step size across this octave's iterations.
                    double dfSigma    = o.StartSigma + ((o.EndSigma - o.StartSigma) * j) / o.IterationN;
                    double dfStepSize = o.StartStepSize + ((o.EndStepSize - o.StartStepSize) * j) / o.IterationN;

                    make_step(strLayer, dfSigma, dfStepSize, nFocusLabel, rgDirectInputs);

                    // Produce an image either every step (for visualization) or on the
                    // octave's final iteration when the octave is marked for saving.
                    if ((bVisualizeEachStep || (j == o.IterationN - 1 && o.Save)))
                    {
                        // Get the detail.
                        m_cuda.sub(m_blobDetail.count(), blobSrc.gpu_data, m_blobBase.gpu_data, m_blobDetail.mutable_gpu_data);

                        if (dfDetailPercentageToOutput < 1.0)
                        {
                            // reuse blob blur memory.
                            // NOTE(review): blends m_blobBase + detail * pct here, while the
                            // else-branch copies blobSrc directly — confirm both sources are intended.
                            m_cuda.add(m_blobBlur.count(), m_blobBase.gpu_data, m_blobDetail.gpu_data, m_blobBlur.mutable_gpu_data, dfDetailPercentageToOutput);
                        }
                        else
                        {
                            m_blobBlur.CopyFrom(blobSrc);
                        }

                        Image bmp = getImage(m_blobBlur);

                        // Post-process contrast only in the unfocused (deep-draw) mode.
                        if (nFocusLabel < 0)
                        {
                            Bitmap bmp1 = AdjustContrast(bmp, 0.9f, 1.6f, 1.2f);
                            bmp.Dispose();
                            bmp = bmp1;
                        }

                        if (strOutputDir != null)
                        {
                            string strFile = strOutputDir + "\\" + o.UniqueName + "_" + j.ToString();
                            if (nFocusLabel >= 0)
                            {
                                if (rgDirectInputs != null)
                                {
                                    strFile += "_idx_" + nFocusLabel.ToString();
                                }
                                else
                                {
                                    strFile += "_class_" + nFocusLabel.ToString();
                                }
                            }

                            bmp.Save(strFile + ".png");
                        }

                        // Ownership of the final iteration's bitmap transfers to the
                        // octave; intermediate bitmaps are disposed here.
                        if (j == o.IterationN - 1)
                        {
                            o.Images.Add(nFocusLabel, bmp);
                        }
                        else
                        {
                            bmp.Dispose();
                        }
                    }

                    m_log.Progress = (double)j / (double)o.IterationN;
                    m_log.WriteLine("Focus Label: " + nFocusLabel.ToString() + "  Octave: '" + o.LayerName + "' - " + j.ToString() + " of " + o.IterationN.ToString() + " " + m_log.Progress.ToString("P"));

                    // In focused mode, fold this step's result back into the base image
                    // so the next iteration continues from it.
                    if (nFocusLabel >= 0)
                    {
                        m_blobBase.CopyFrom(blobSrc);
                    }
                }

                // Extract details produced on the current octave.
                if (nFocusLabel < 0)
                {
                    m_cuda.sub(m_blobDetail.count(), blobSrc.gpu_data, m_blobBase.gpu_data, m_blobDetail.mutable_gpu_data);
                }
            }

            m_log.WriteLine("Rendering completed!");
            return(true);
        }