Example #1
        public override void FeedForward()
        {
            // First convolutional layer of the block
            convolutionalLayer1.FeedForward();


            /*
             *
             * float[] conv1outputAll = new float[convolutionalLayer1.OutputNeurons.NumberOfUnits * inputNeurons.MiniBatchSize];
             * OpenCLSpace.ClError = Cl.EnqueueReadBuffer(OpenCLSpace.Queue,
             *                                          convolutionalLayer1.OutputNeurons.ActivationsGPU, // source
             *                                          Bool.True,
             *                                          (IntPtr)0,
             *                                          (IntPtr)(convolutionalLayer1.OutputNeurons.NumberOfUnits * inputNeurons.MiniBatchSize * sizeof(float)),
             *                                          conv1outputAll,  // destination
             *                                          0,
             *                                          null,
             *                                          out OpenCLSpace.ClEvent);
             * OpenCLSpace.CheckErr(OpenCLSpace.ClError, "NeuralNetwork.ForwardPass Cl.clEnqueueReadBuffer layerInput");
             *
             * Console.WriteLine("\nConvLayer1 output activations:");
             * for (int m = 0; m < inputNeurons.MiniBatchSize; m++)
             * {
             *  float[] layerOutput = new float[convolutionalLayer1.OutputNeurons.NumberOfUnits];
             *  Array.Copy(conv1outputAll, m * convolutionalLayer1.OutputNeurons.NumberOfUnits, layerOutput, 0, convolutionalLayer1.OutputNeurons.NumberOfUnits);
             *
             *  Console.WriteLine("\n --- Mini-batch item {0} -----", m);
             *  for (int j = 0; j < layerOutput.Length; j++)
             *      Console.Write("{0}  ", layerOutput[j]);
             *  Console.WriteLine();
             *  Console.ReadKey();
             * }
             */
            // Apply the chosen nonlinearity to the first convolutional layer's output
            // (ReLU: max(0, x); ELU: x for x > 0, alpha * (exp(x) - 1) otherwise)
            if (nonlinearityType == "ReLU")
            {
                nonlinearityReLU.FeedForward();
            }
            else if (nonlinearityType == "ELU")
            {
                nonlinearityELU.FeedForward();
            }


            /*
             * float[] nonlinOutputAll = new float[nonlinearity.OutputNeurons.NumberOfUnits * inputNeurons.MiniBatchSize];
             * OpenCLSpace.ClError = Cl.EnqueueReadBuffer(OpenCLSpace.Queue,
             *                                          nonlinearity.OutputNeurons.ActivationsGPU, // source
             *                                          Bool.True,
             *                                          (IntPtr)0,
             *                                          (IntPtr)(nonlinearity.OutputNeurons.NumberOfUnits * inputNeurons.MiniBatchSize * sizeof(float)),
             *                                          nonlinOutputAll,  // destination
             *                                          0,
             *                                          null,
             *                                          out OpenCLSpace.ClEvent);
             * OpenCLSpace.CheckErr(OpenCLSpace.ClError, "NeuralNetwork.ForwardPass Cl.clEnqueueReadBuffer layerInput");
             *
             * Console.WriteLine("\nNonlinearity output activations:");
             * for (int m = 0; m < inputNeurons.MiniBatchSize; m++)
             * {
             *  float[] layerOutput = new float[nonlinearity.OutputNeurons.NumberOfUnits];
             *  Array.Copy(nonlinOutputAll, m * nonlinearity.OutputNeurons.NumberOfUnits, layerOutput, 0, nonlinearity.OutputNeurons.NumberOfUnits);
             *
             *  Console.WriteLine("\n --- Mini-batch item {0} -----", m);
             *  for (int j = 0; j < layerOutput.Length; j++)
             *      Console.Write("{0}  ", layerOutput[j]);
             *  Console.WriteLine();
             *  Console.ReadKey();
             * }
             */

            // Second convolutional layer of the block
            convolutionalLayer2.FeedForward();

            /*
             * float[] conv2outputAll = new float[convolutionalLayer2.OutputNeurons.NumberOfUnits * inputNeurons.MiniBatchSize];
             * OpenCLSpace.ClError = Cl.EnqueueReadBuffer(OpenCLSpace.Queue,
             *                                          convolutionalLayer2.OutputNeurons.ActivationsGPU, // source
             *                                          Bool.True,
             *                                          (IntPtr)0,
             *                                          (IntPtr)(convolutionalLayer2.OutputNeurons.NumberOfUnits * inputNeurons.MiniBatchSize * sizeof(float)),
             *                                          conv2outputAll,  // destination
             *                                          0,
             *                                          null,
             *                                          out OpenCLSpace.ClEvent);
             * OpenCLSpace.CheckErr(OpenCLSpace.ClError, "NeuralNetwork.ForwardPass Cl.clEnqueueReadBuffer layerInput");
             *
             * Console.WriteLine("\nConvLayer2 output activations:");
             * for (int m = 0; m < inputNeurons.MiniBatchSize; m++)
             * {
             *  float[] layerOutput = new float[convolutionalLayer2.OutputNeurons.NumberOfUnits];
             *  Array.Copy(conv2outputAll, m * convolutionalLayer2.OutputNeurons.NumberOfUnits, layerOutput, 0, convolutionalLayer2.OutputNeurons.NumberOfUnits);
             *
             *  Console.WriteLine("\n --- Mini-batch item {0} -----", m);
             *  for (int j = 0; j < layerOutput.Length; j++)
             *      Console.Write("{0}  ", layerOutput[j]);
             *  Console.WriteLine();
             *  Console.ReadKey();
             * }
             */

            // Skip connection: accumulate the block's input activations onto its output activations

            OpenCLSpace.ClError  = Cl.SetKernelArg(OpenCLSpace.SkipForward, 0, outputNeurons.ActivationsGPU);
            OpenCLSpace.ClError |= Cl.SetKernelArg(OpenCLSpace.SkipForward, 1, inputNeurons.ActivationsGPU);
            OpenCLSpace.ClError |= Cl.SetKernelArg(OpenCLSpace.SkipForward, 2, (IntPtr)sizeof(int), nInputUnits);
            OpenCLSpace.ClError |= Cl.SetKernelArg(OpenCLSpace.SkipForward, 3, (IntPtr)sizeof(int), inputNeurons.MiniBatchSize);
            OpenCLSpace.CheckErr(OpenCLSpace.ClError, "Cl.SetKernelArg");

            // Launch the SkipForward kernel as a 1D NDRange over all output elements
            OpenCLSpace.ClError = Cl.EnqueueNDRangeKernel(OpenCLSpace.Queue,
                                                          OpenCLSpace.SkipForward,
                                                          1,
                                                          null,
                                                          globalWorkSizePtr,
                                                          localWorkSizePtr,
                                                          0,
                                                          null,
                                                          out OpenCLSpace.ClEvent);
            OpenCLSpace.CheckErr(OpenCLSpace.ClError, "Cl.EnqueueNDRangeKernel");

            // Release the kernel event, then block until the command queue has finished
            OpenCLSpace.ClError = Cl.ReleaseEvent(OpenCLSpace.ClEvent);
            OpenCLSpace.CheckErr(OpenCLSpace.ClError, "Cl.ReleaseEvent");

            OpenCLSpace.ClError = Cl.Finish(OpenCLSpace.Queue);
            OpenCLSpace.CheckErr(OpenCLSpace.ClError, "Cl.Finish");
        }
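
        /* Note: the source of the "SkipForward" kernel is not included in this example.
         * As a minimal sketch (an assumption inferred from the argument order set above,
         * not the project's actual implementation), it would perform an element-wise
         * residual addition, with one work-item per output element:
         *
         *   __kernel void SkipForward(__global float* outputActivations,      // arg 0
         *                             __global const float* inputActivations, // arg 1
         *                             const int nInputUnits,                  // arg 2
         *                             const int miniBatchSize)                // arg 3
         *   {
         *       int i = get_global_id(0);
         *       // Guard against the global work size being rounded up past the data size
         *       if (i < nInputUnits * miniBatchSize)
         *           outputActivations[i] += inputActivations[i];
         *   }
         */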