Exemplo n.º 1
0
            float NearestCC_dist; //. similarity to closest



            public override void Init(int nGPU)
            {
                // Cluster-center management kernels: one thread per descriptor element.
                m_kernel_AddNewCCenter = MyKernelFactory.Instance.Kernel(nGPU, @"Vision\KMeansWM", "AddDataAsCC");
                m_kernel_AddNewCCenter.SetupExecution(Owner.DescCount);

                // NOTE: "UpadateCC_Desc" is kept as-is — it must match the kernel name in the .cu source.
                m_kernel_UpadteCC_desc = MyKernelFactory.Instance.Kernel(nGPU, @"Vision\KMeansWM", "UpadateCC_Desc");
                m_kernel_UpadteCC_desc.SetupExecution(Owner.DescCount);

                m_kernel_UpdateCC_XY = MyKernelFactory.Instance.Kernel(nGPU, @"Vision\KMeansWM", "UpdateCC_XY");
                m_kernel_UpdateCC_XY.SetupExecution(Owner.ObjectXY.Count);

                // Similarity computation: dot-product reduction, element-wise transform, naive matrix multiply.
                m_dotKernel = MyReductionFactory.Kernel(nGPU, MyReductionFactory.Mode.f_DotProduct_f);
                m_mulKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Transforms\TransformKernels", "PolynomialFunctionKernel");
                m_mulKernel.SetupExecution(Owner.DescCount);

                m_matMultpl = MyKernelFactory.Instance.Kernel(Owner.GPU, @"Common\CombineVectorsKernel", "MatMultipl_naive");
                m_matMultpl.GridDimensions  = new ManagedCuda.VectorTypes.dim3(1, Owner.DescCount);
                m_matMultpl.BlockDimensions = new ManagedCuda.VectorTypes.dim3(1, 1);

                m_minIdxKernel = MyReductionFactory.Kernel(nGPU, MyReductionFactory.Mode.f_MinIdx_ff);

                m_kernel_UpdateXY_basedOnTheBrainsMovement = MyKernelFactory.Instance.Kernel(nGPU, @"Vision\KMeansWM", "ApplyBrainsMovement");
                // BUGFIX: previously called SetupExecution on m_kernel_UpdateCC_XY a second time
                // (copy-paste error), clobbering its launch size with MaxClusters and leaving
                // the brains-movement kernel without any launch configuration.
                m_kernel_UpdateXY_basedOnTheBrainsMovement.SetupExecution(Owner.MaxClusters);
            }
Exemplo n.º 2
0
 public MyJoinPatchesObserver()
 {
     // All observer kernels are compiled for the last CUDA device.
     int gpu = MyKernelFactory.Instance.DevCount - 1;

     m_kernel_fillImWhite  = MyKernelFactory.Instance.Kernel(gpu, @"Vision\JoinPatchesObs", "FillImWhite");
     m_kernel_fillImFromIm = MyKernelFactory.Instance.Kernel(gpu, @"Vision\JoinPatchesObs", "FillImByOtherIm");
     m_kernel_drawEdges    = MyKernelFactory.Instance.Kernel(gpu, @"Vision\JoinPatchesObs", "Draw_edges");
     m_kernel_drawDesc     = MyKernelFactory.Instance.Kernel(gpu, @"Vision\JoinPatchesObs", "FillImByEnergy");
 }
 public MyGradientBackPropAgent(MyAbstractFeedForwardNode network, int nGPU, MyMemoryBlock<float> labelInput)
     : base(network)
 {
     // Weight-update kernel shared by the whole network.
     m_updateWeightKernel = MyKernelFactory.Instance.Kernel(nGPU, @"XmlFeedForwardNet\UpdateWeightKernel");

     // The delta provider derives output-layer deltas from the supplied label block.
     DeltaProvider = new MyLabelDeltaProvider(m_network, nGPU) { LabelInput = labelInput };
 }
Exemplo n.º 4
0
        public override void Init(int nGPU)
        {
            // Sampling delta kernel: one thread per neuron.
            m_samplingDeltaKernel = MyKernelFactory.Instance.Kernel(
                nGPU, @"NeuralNetwork\Layer\DeltaKernels", "GaussianSamplingDeltaKernel");
            m_samplingDeltaKernel.SetupExecution(Owner.Neurons);

            // NOTE(review): no SetupExecution here — the launch size is presumably
            // configured at execution time; verify before relying on it.
            m_regularizationDeltaKernel = MyKernelFactory.Instance.Kernel(
                nGPU, @"NeuralNetwork\Layer\RegularizationTermKernels", "GaussianRegularizationDeltaKernel");
        }
Exemplo n.º 5
0
        public override void Init(int nGPU)
        {
            const string kernelFile = @"LSTM\LSTMDeltaKernel";

            // Load the kernel set matching the configured learning algorithm.
            if (Owner.LearningTasks == MyLSTMLayer.LearningTasksType.RTRL)
            {
                m_deltaKernel     = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "LSTMDeltaKernel");
                m_deltaBackKernel = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "LSTMDeltaBackKernel");
            }
            else if (Owner.LearningTasks == MyLSTMLayer.LearningTasksType.BPTT)
            {
                m_deltaKernel             = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "LSTMDeltaKernelBPTT");
                m_gateGradientKernel      = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "LSTMGateGradientKernelBPTT");
                m_cellInputGradientKernel = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "LSTMCellInputGradientKernelBPTT");
                m_deltaBackKernel         = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "LSTMDeltaBackKernelBPTT");

                // One thread per gate weight / per cell-input weight.
                m_gateGradientKernel.SetupExecution((Owner.Input.Count + Owner.Output.Count + Owner.CellsPerBlock + 1) * Owner.MemoryBlocks);
                m_cellInputGradientKernel.SetupExecution((Owner.Input.Count + Owner.Output.Count + 1) * Owner.CellStates.Count);
            }

            // Common to both modes: one thread per memory block.
            m_deltaKernel.SetupExecution(Owner.MemoryBlocks);
        }
Exemplo n.º 6
0
            public override void Init(int nGPU)
            {
                // Decay kernel is only needed when the memory actually decays (factor != 1).
                if (DecayFactor != 1f)
                {
                    if (DecayFactor > 1f)
                    {
                        // A factor > 1 amplifies instead of decaying; warn but allow it.
                        MyLog.WARNING.WriteLine("Decay factor on a HashingMemoryNode that is greater than one is suspicious...");
                    }

                    _polynomialFuncKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Transforms\TransformKernels", "PolynomialFunctionKernel");
                    _polynomialFuncKernel.SetupExecution(Memory.Count);
                }

                // Constant-multiplication kernel is only needed when AddFactor != 1.
                if (AddFactor != 1f)
                {
                    _constMulKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Transforms\TransformKernels", "PolynomialFunctionKernel");
                    _constMulKernel.SetupExecution(Owner.SymbolSize);
                }

                if (NormalizeTarget)
                {
                    // Normalized writes need vector combination, index mapping and a dot-product reduction.
                    _combineVectorsKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Common\CombineVectorsKernel", "CombineTwoVectorsKernel");
                    _combineVectorsKernel.SetupExecution(Owner.SymbolSize);
                    _mapToIdcsKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Common\CombineVectorsKernel", "MapToIdcs");
                    _mapToIdcsKernel.SetupExecution(Owner.SymbolSize);
                    _dotKernel = MyReductionFactory.Kernel(nGPU, MyReductionFactory.Mode.f_DotProduct_f);
                }
                else
                {
                    // NOTE(review): lowercase @"common\..." differs from the @"Common\..." casing used
                    // above — presumably harmless if the kernel lookup is case-insensitive; verify.
                    _mapToIdcsKernel = MyKernelFactory.Instance.Kernel(nGPU, @"common\CombineVectorsKernel", "AddToIdcs");
                    _mapToIdcsKernel.SetupExecution(Owner.SymbolSize);
                }

                // Sync Temp's host copy before first use.
                Temp.SafeCopyToHost();
            }
Exemplo n.º 7
0
        public MyFourierBinder(MyWorkingNode owner, int inputSize, MyMemoryBlock<float> tempBlock)
            : base(owner, inputSize, tempBlock)
        {
            int gpu = owner.GPU;

            // Real-to-complex forward and complex-to-real inverse FFT plans of length inputSize.
            m_fft  = new CudaFFTPlan1D(inputSize, cufftType.R2C, 1);
            m_ifft = new CudaFFTPlan1D(inputSize, cufftType.C2R, 1);

            // Element-wise complex multiplication in the frequency domain.
            m_mulkernel = MyKernelFactory.Instance.Kernel(gpu, @"Common\CombineVectorsKernel", "MulComplexElementWise");
            m_mulkernel.SetupExecution(inputSize + 1);

            m_involutionKernel = MyKernelFactory.Instance.Kernel(gpu, @"Common\CombineVectorsKernel", "InvolveVector");
            m_involutionKernel.SetupExecution(inputSize - 1);

            m_inversionKernel = MyKernelFactory.Instance.Kernel(gpu, @"Transforms\InvertValuesKernel", "InvertLengthComplexKernel");
            m_inversionKernel.SetupExecution(inputSize);

            m_dotKernel = MyReductionFactory.Kernel(gpu, MyReductionFactory.Mode.f_DotProduct_f);

            m_normalKernel = MyKernelFactory.Instance.Kernel(gpu, @"Transforms\TransformKernels", "PolynomialFunctionKernel");
            m_normalKernel.SetupExecution(inputSize);

            // Offsets into the shared temp block: (inputSize + 1) * 2 floats per complex buffer.
            m_firstFFTOffset  = 0;
            m_secondFFTOffset = (inputSize + 1) * 2;
            m_tempOffset      = (inputSize + 1) * 4;

            Denominator = inputSize;
        }
Exemplo n.º 8
0
            public override void Init(int nGPU)
            {
                base.Init(nGPU);

                // Thresholding kernel: one thread per mask-output element.
                var thresholdKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Vision\VisionMath", "ApplyThreshold");
                thresholdKernel.SetupExecution(Owner.MaskOutput.Count);
                m_applyThresholdKernel = thresholdKernel;
            }
Exemplo n.º 9
0
            /// <summary>
            /// Transforms the <paramref name="output"/> vector into a vector of indices with properties specified by the parameters.
            /// </summary>
            /// <param name="input">The vector to transform.</param>
            /// <param name="output">The memory to contain the results.</param>
            /// <param name="misc">A vector containing the range to modulate to as the first value (typically 2f because dot product ranges from [-1,1])
            /// and the bin size in this modulated space (typically <paramref name="misc"/>[0] / internalBinCount) as the second value.</param>
            /// <param name="offsets">The random offsets for each <paramref name="output"/> value (typically uniform random numbers in [0, <paramref name="misc"/>[0].</param>
            /// <param name="vectorSize">The length of the <paramref name="output"/> vector.</param>
            /// <param name="outputBinCount">The range into which the internal bins will be scattered.</param>
            /// <param name="seed">The seed used for the scattering the internal bins.</param>
            /// <param name="combineVectorsKernel">The kernel used for addition, modulo and integer division.</param>
            /// <param name="hashKernel">The kernel used for scattering the internal bins.</param>
            /// <param name="noHashKernel">The kernel used when hash mapping is disabled.</param>
            /// <param name="doHashMapping">When true, scatters bins via <paramref name="hashKernel"/>; otherwise uses <paramref name="noHashKernel"/>.</param>
            /// <param name="internalBinCount">Number of internal bins, passed to <paramref name="noHashKernel"/>.</param>
            /// <param name="stream">The CUDA stream on which all kernels are queued (they run in order on the stream).</param>
            public static void GetIndices(
                CUdeviceptr input, CUdeviceptr output, CUdeviceptr misc, CUdeviceptr?offsets,
                int vectorSize, int outputBinCount, int seed,
                MyCudaKernel combineVectorsKernel, MyCudaKernel hashKernel, MyCudaKernel noHashKernel,
                bool doHashMapping, int internalBinCount,
                CudaStream stream)
            {
                Debug.Assert(vectorSize > 0, "Invalid vector size");
                Debug.Assert(outputBinCount > 1, "Requires at least 2 output bins");
                // NOTE(review): noHashKernel is not null-checked although the !doHashMapping branch requires it.
                Debug.Assert(combineVectorsKernel != null && hashKernel != null, "Missing kernels");


                // Values are in [-1, 1] if they were normalized

                if (offsets != null)
                {
                    // Offset to [-1, 3]
                    combineVectorsKernel.RunAsync(stream, input, offsets.Value, output, (int)MyJoin.MyJoinOperation.Addition, vectorSize, vectorSize);
                }

                // NOTE(review): when offsets is null, `output` is read below without being written here —
                // callers presumably pre-populate it with the input values; verify.
                // Modulate to [0, 2]
                combineVectorsKernel.RunAsync(stream, output, misc, output, (int)MyJoin.MyJoinOperation.Modulo, vectorSize, 1);

                // Transform to integers in [0, InternalBinCount - 1]
                combineVectorsKernel.RunAsync(stream, output, misc + sizeof(float), output, (int)MyJoin.MyJoinOperation.Division_int, vectorSize, 1);

                if (doHashMapping)
                {
                    hashKernel.RunAsync(stream, output, output, vectorSize, vectorSize, outputBinCount, seed);
                }
                else
                {
                    noHashKernel.RunAsync(stream, output, output, vectorSize, internalBinCount);
                }
            }
Exemplo n.º 10
0
        protected override void Execute()
        {
            // No source image attached: render cluster-center activity directly into the VBO.
            if (Target.Image == null)
            {
                Target.NOfObjects.SafeCopyToHost();
                // Fade the whole texture one step "down in time".
                m_kernel_downInTime.SetupExecution(TextureWidth * TextureHeight);
                m_kernel_downInTime.Run(VBODevicePointer, TextureWidth * TextureHeight);

                m_kernel_fillImActSate.GridDimensions  = new ManagedCuda.VectorTypes.dim3((int)Target.NOfObjects.Host[0]); // numbe rof clusters to print
                m_kernel_fillImActSate.BlockDimensions = new ManagedCuda.VectorTypes.dim3(16, 16);                         // size of square
                //   m_kernel_fillImActSate.Run(VBODevicePointer, TextureWidth, TextureWidth * TextureHeight, Target.ClusCentersXY, Target.ClusCentersXY.ColumnHint, Target.MaxClusters, 1, Target.ClusCentersSize, GetMaxFreqOfClusters());
                m_kernel_fillImActSate.Run(VBODevicePointer, TextureWidth, TextureWidth * TextureHeight, Target.ClusCentersXY, Target.ClusCentersXY.ColumnHint, Target.MaxClusters, 1, Target.ClusCentersLastSeen, 2.0f, IsXYInputInOneNorm());
            }
            // Source image present: copy it into the VBO and overlay focuser positions.
            if (Target.Image != null)
            {
                MyCudaKernel m_ker = MyKernelFactory.Instance.Kernel(MyKernelFactory.Instance.DevCount - 1, @"Vision\VisionObsFce", "FillVBOFromInputImage");
                m_ker.SetupExecution(TextureWidth * TextureHeight);
                m_ker.Run(Target.Image, TextureWidth * TextureHeight, VBODevicePointer);

                // Re-use the local for the focuser overlay kernel.
                m_ker = MyKernelFactory.Instance.Kernel(MyKernelFactory.Instance.DevCount - 1, @"Vision\KMeansWM", "FocuserInputObserver");
                m_ker.SetupExecution(TextureWidth * TextureHeight);

                // FocuserInputObserver(float* values, float* pupilControl, int id_pupil , int inputWidth, int inputHeight, unsigned int* pixels, float color)
                // One overlay pass per pupil id (4..0), each with a distinct color value.
                m_ker.Run(Target.Image, Target.ClusCentersXY, 4, TextureWidth, TextureHeight, VBODevicePointer, 0.0f);
                m_ker.Run(Target.Image, Target.ClusCentersXY, 3, TextureWidth, TextureHeight, VBODevicePointer, 0.1f);
                m_ker.Run(Target.Image, Target.ClusCentersXY, 2, TextureWidth, TextureHeight, VBODevicePointer, 0.2f);
                m_ker.Run(Target.Image, Target.ClusCentersXY, 1, TextureWidth, TextureHeight, VBODevicePointer, 0.3f);
                m_ker.Run(Target.Image, Target.ClusCentersXY, 0, TextureWidth, TextureHeight, VBODevicePointer, 0.4f);
            }
        }
Exemplo n.º 11
0
        public MySegmentObserver()
        {
            int gpu = MyKernelFactory.Instance.DevCount - 1;

            m_kernel_draw = MyKernelFactory.Instance.Kernel(gpu, @"Vision\SegmentObs", "Draw");

            // Test kernel is launched with a single thread.
            k_test = MyKernelFactory.Instance.Kernel(gpu, @"Vision\SegmentObs", "Test_draw_xy");
            k_test.SetupExecution(new dim3(1), new dim3(1));
        }
Exemplo n.º 12
0
            public override void Init(Int32 nGPU)
            {
                const string kernelFile = @"Transforms\Transform2DKernels";

                // Select the resampling kernel for the configured interpolation mode.
                switch (Owner.Interpolation)
                {
                case InterpolationType.Bilinear:
                    m_kernel = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "BilinearResampleKernel");
                    break;

                case InterpolationType.Exact1toN:
                    m_kernel = MyKernelFactory.Instance.Kernel(nGPU, kernelFile,
                        Owner.GetTransformationType() == ExactTransformationType.Increasing
                            ? "ExactResampleKernel_1toN"
                            : "ExactResampleKernel_Nto1");
                    break;

                default:
                    throw new InvalidEnumArgumentException("Unknown interpolation type " + Owner.Interpolation);
                }

                // Every variant launches one thread per output pixel.
                m_kernel.SetupExecution(Owner.outputWidth * Owner.outputHeight);
            }
Exemplo n.º 13
0
        public static void DrawString(string str, int x, int y, uint bgColor, uint fgColor, CUdeviceptr image, int imageWidth, int imageHeight)
        {
            // Crop overly long strings to the maximum of 20 characters.
            if (str.Length > 20)
            {
                str = str.Substring(0, 20);
            }

            int gpu = MyKernelFactory.Instance.DevCount - 1;
            MyCudaKernel drawKernel = MyKernelFactory.Instance.Kernel(gpu, @"Observers\DrawDigitsKernel");
            CudaDeviceVariable<float> charactersTexture = MyMemoryManager.Instance.GetGlobalVariable<float>("CHARACTERS_TEXTURE", gpu, LoadDigits);

            // Colors and geometry are passed via constant memory.
            drawKernel.SetConstantVariable("D_BG_COLOR", bgColor);
            drawKernel.SetConstantVariable("D_FG_COLOR", fgColor);
            drawKernel.SetConstantVariable("D_IMAGE_WIDTH", imageWidth);
            drawKernel.SetConstantVariable("D_IMAGE_HEIGHT", imageHeight);
            drawKernel.SetConstantVariable("D_DIGIT_WIDTH", CharacterWidth);
            drawKernel.SetConstantVariable("D_DIGIT_SIZE", CharacterSize);
            drawKernel.SetConstantVariable("D_DIGITMAP_NBCHARS", CharacterMapNbChars);

            // Translate the string into indexes within the character texture.
            int[] indexes = stringToDigitIndexes(str);
            drawKernel.SetConstantVariable("D_DIGIT_INDEXES", indexes);
            drawKernel.SetConstantVariable("D_DIGIT_INDEXES_LEN", indexes.Length);

            // One thread per pixel of every character cell.
            drawKernel.SetupExecution(CharacterSize * indexes.Length);
            drawKernel.Run(image, charactersTexture.DevicePointer, x, y);
        }
Exemplo n.º 14
0
 public MyGradientBackPropAgent(MyAbstractFeedForwardNode network, int nGPU, MyMemoryBlock <float> labelInput)
     : base(network)
 {
     // Shared weight-update kernel for the whole feed-forward network.
     m_updateWeightKernel = MyKernelFactory.Instance.Kernel(nGPU, @"XmlFeedForwardNet\UpdateWeightKernel");

     // Provider that turns the label block into output deltas.
     DeltaProvider = new MyLabelDeltaProvider(m_network, nGPU) { LabelInput = labelInput };
 }
Exemplo n.º 15
0
            public override void Init(Int32 nGPU)
            {
                // Nothing to do when there is no output to draw into.
                if (Owner.OutputSize <= 0)
                {
                    return;
                }

                m_pixelCount = Owner.InputSize;

                // Select the conversion kernel for the configured transform target.
                switch (Owner.Target)
                {
                case TransformTarget.RGB:
                    m_kernel = MyKernelFactory.Instance.Kernel(nGPU, @"Drawing\RgbaDrawing", "RawToRgbKernel");
                    break;

                case TransformTarget.RawBW:
                    m_kernel = MyKernelFactory.Instance.Kernel(nGPU, @"Drawing\RgbaDrawing", "RawToRawGrayscaleKernel");
                    break;

                case TransformTarget.Grayscale:
                    m_kernel = MyKernelFactory.Instance.Kernel(nGPU, @"Drawing\RgbaDrawing", "RawToGrayscaleKernel");
                    break;

                default:
                    // BUGFIX: previously an unknown target left m_kernel null and crashed with an
                    // uninformative NullReferenceException at SetupExecution below; fail loudly instead.
                    throw new InvalidOperationException("Unknown transform target " + Owner.Target);
                }

                // One thread per input pixel.
                m_kernel.SetupExecution(m_pixelCount);
            }
Exemplo n.º 16
0
        public MyFourierBinder(MyWorkingNode owner, int inputSize, MyMemoryBlock<float> tempBlock)
            : base(owner, inputSize, tempBlock)
        {
            // Dedicated stream; both FFT plans are bound to it so all work is queued in order.
            m_stream = new CudaStream();

            // Real-to-complex forward and complex-to-real inverse FFT plans of length inputSize.
            m_fft = new CudaFFTPlan1D(inputSize, cufftType.R2C, 1);
            m_fft.SetStream(m_stream.Stream);
            m_ifft = new CudaFFTPlan1D(inputSize, cufftType.C2R, 1);
            m_ifft.SetStream(m_stream.Stream);

            // Element-wise complex multiplication in the frequency domain.
            m_mulkernel = MyKernelFactory.Instance.Kernel(owner.GPU, @"Common\CombineVectorsKernel", "MulComplexElementWise");
            m_mulkernel.SetupExecution(inputSize + 1);

            m_involutionKernel = MyKernelFactory.Instance.Kernel(owner.GPU, @"Common\CombineVectorsKernel", "InvolveVector");
            m_involutionKernel.SetupExecution(inputSize - 1);

            m_inversionKernel = MyKernelFactory.Instance.Kernel(owner.GPU, @"Transforms\InvertValuesKernel", "InvertLengthComplexKernel");
            m_inversionKernel.SetupExecution(inputSize);

            // NOTE: uses the KernelProduct dot-product API (unlike the MyReductionFactory variant elsewhere).
            m_dotKernel = MyKernelFactory.Instance.KernelProduct<float>(owner, owner.GPU, ProductMode.f_DotProduct_f);

            m_normalKernel = MyKernelFactory.Instance.Kernel(owner.GPU, @"Transforms\TransformKernels", "PolynomialFunctionKernel");
            m_normalKernel.SetupExecution(inputSize);

            // Offsets into the shared temp block: (inputSize + 1) * 2 floats per complex buffer.
            m_firstFFTOffset = 0;
            m_secondFFTOffset = (inputSize + 1) * 2;
            m_tempOffset = (inputSize + 1) * 4;

            Denominator = inputSize;
        }
Exemplo n.º 17
0
            public override void Init(int nGPU)
            {
                switch (Mode)
                {
                case HashMapperMode.Simple:
                    // Simple mode needs no precomputed constants.
                    break;

                case HashMapperMode.LocalitySensitive:
                    // Drop any stale shared state before writing fresh constants.
                    MyMemoryManager.Instance.ClearGlobalVariable(Owner.GlobalVariableName, Owner.GPU);

                    // Only values are the modulo and and integer divisor (placing into bins)
                    Owner.Temp.SafeCopyToHost(0, 2);
                    Owner.Temp.Host[0] = 2f;
                    Owner.Temp.Host[1] = 2f / InternalBinCount;
                    Owner.Temp.SafeCopyToDevice(0, 2);

                    break;

                default:
                    throw new ArgumentOutOfRangeException();
                }
                // NOTE(review): lowercase @"common\..." differs from the @"Common\..." casing used in
                // sibling code — presumably harmless if the kernel lookup is case-insensitive; verify.
                _combineVectorsKernel = MyKernelFactory.Instance.Kernel(nGPU, @"common\CombineVectorsKernel", "CombineTwoVectorsKernelVarSize");
                _combineVectorsKernel.SetupExecution(Owner.InputSize);

                // Index-mapping kernels: one thread per output element.
                _hashKernel = MyKernelFactory.Instance.Kernel(nGPU, @"VSA\Mappers", "GetIndices_ImplicitSeed");
                _hashKernel.SetupExecution(Owner.Output.Count);

                _noHashKernel = MyKernelFactory.Instance.Kernel(nGPU, @"VSA\Mappers", "GetIndices_NoHashing");
                _noHashKernel.SetupExecution(Owner.Output.Count);

                m_stream = new CudaStream();
            }
Exemplo n.º 18
0
        public override void Init(int nGPU)
        {
            const string feedForward = @"NeuralNetwork\Layer\FeedForwardKernels";
            const string regTerms    = @"NeuralNetwork\Layer\RegularizationTermKernels";

            // Forward sampling: one thread per neuron.
            m_forwardSamplingKernel = MyKernelFactory.Instance.Kernel(nGPU, feedForward, "GaussianForwardSamplingKernel");
            m_forwardSamplingKernel.SetupExecution(Owner.Neurons);

            // Prior-statistics kernels: one thread per input element.
            m_resetPriorStats = MyKernelFactory.Instance.Kernel(nGPU, feedForward, "GaussianResetPriorStats");
            m_resetPriorStats.SetupExecution(Owner.Input.Count);

            m_minMaxField = MyKernelFactory.Instance.Kernel(nGPU, feedForward, "GaussianMinMaxField");
            m_minMaxField.SetupExecution(Owner.Input.Count);

            m_samplePrior = MyKernelFactory.Instance.Kernel(nGPU, feedForward, "GaussianSamplePrior");
            m_samplePrior.SetupExecution(Owner.Input.Count);

            // Regularization-term kernels: max-thread launch plus dynamic shared memory for the reduction.
            m_L1TermKernel = MyKernelFactory.Instance.Kernel(nGPU, regTerms, "L1TermKernel");
            m_L1TermKernel.SetupExecution(m_L1TermKernel.MAX_THREADS);
            m_L1TermKernel.DynamicSharedMemory = m_L1TermKernel.BlockDimensions.x * sizeof(float);

            m_L2TermKernel = MyKernelFactory.Instance.Kernel(nGPU, regTerms, "L2TermKernel");
            m_L2TermKernel.SetupExecution(m_L2TermKernel.MAX_THREADS);
            m_L2TermKernel.DynamicSharedMemory = m_L2TermKernel.BlockDimensions.x * sizeof(float);

            m_regularizationKernel = MyKernelFactory.Instance.Kernel(nGPU, regTerms, "GaussianRegularizationKernel");
            m_regularizationKernel.SetupExecution(m_regularizationKernel.MAX_THREADS);
            m_regularizationKernel.DynamicSharedMemory = m_regularizationKernel.BlockDimensions.x * sizeof(float);
        }
Exemplo n.º 19
0
        // MyCudaKernel m_ker;


        public MyKMeansWMObserver()
        {
            // Observer kernels always run on the last CUDA device.
            int gpu = MyKernelFactory.Instance.DevCount - 1;

            m_kernel_fillImActSate = MyKernelFactory.Instance.Kernel(gpu, @"Vision\KMeansWM", "FillImByActState");
            m_kernel_downInTime    = MyKernelFactory.Instance.Kernel(gpu, @"Vision\KMeansWM", "DownInTime");
        }
Exemplo n.º 20
0
            public override void Init(int nGPU)
            {
                // Cache output dimensions (ColumnHint = row width).
                outputWidth  = Owner.Output.ColumnHint;
                outputHeight = Owner.Output.Count / Owner.Output.ColumnHint;

                // NOTE(review): the nGPU parameter is unused — the kernel is loaded on the last device.
                int gpu = MyKernelFactory.Instance.DevCount - 1;
                m_kernel = MyKernelFactory.Instance.Kernel(gpu, @"Observers\FocuserInputObserver", "RetinaObserver_UnMaskPatchFl");
                m_kernel.SetupExecution(Owner.Output.Count);
            }
Exemplo n.º 21
0
 public MyPermutationBinder(MyWorkingNode owner, int inputSize, MyMemoryBlock<float> tempBlock)
     : base(owner, inputSize, tempBlock)
 {
     const string combineFile = @"Common\CombineVectorsKernel";

     // N-ary permutation kernel, one thread per element.
     m_PermKernel = MyKernelFactory.Instance.Kernel(owner.GPU, combineFile, "CombineVectorsKernel");
     m_PermKernel.SetupExecution(inputSize);

     // Binary (two-operand) variant, same launch size.
     m_binaryPermKernel = MyKernelFactory.Instance.Kernel(owner.GPU, combineFile, "CombineTwoVectorsKernel");
     m_binaryPermKernel.SetupExecution(inputSize);
 }
Exemplo n.º 22
0
        public void InitWorldOutputs(int nGPU)
        {
            int pixelCount = Viewport.Width * Viewport.Height;

            m_kernel = MyKernelFactory.Instance.Kernel(nGPU, @"Transforms\Transform2DKernels", "BilinearResampleKernel");
            m_kernel.SetupExecution(pixelCount);

            // NOTE(review): unlike m_kernel, this one is loaded on the last device rather than nGPU — confirm intended.
            m_grayscaleKernel = MyKernelFactory.Instance.Kernel(MyKernelFactory.Instance.DevCount - 1, @"Observers\ColorScaleObserverSingle", "DrawGrayscaleKernel");
            m_grayscaleKernel.SetupExecution(pixelCount);
        }
Exemplo n.º 23
0
        public override void Init(int nGPU)
        {
            const string kernelFile = @"LSTM\LSTMUpdateWeightsKernel";

            m_updateGateWeightsKernel = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "LSTMUpdateGateWeightsKernel");
            m_updateCellWeightsKernel = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "LSTMUpdateCellWeightsKernel");

            // One thread per gate weight / per cell weight.
            int gateWeightCount = (Owner.Input.Count + Owner.Output.Count + Owner.CellsPerBlock + 1) * Owner.MemoryBlocks;
            int cellWeightCount = (Owner.Input.Count + Owner.Output.Count + 1) * Owner.CellStates.Count;
            m_updateGateWeightsKernel.SetupExecution(gateWeightCount);
            m_updateCellWeightsKernel.SetupExecution(cellWeightCount);
        }
Exemplo n.º 24
0
            public override void Init(int nGPU)
            {
                const string mathFile = @"Vision\VisionMath";

                // NOTE: "SetMatrixVauleMinMax*" spelling is kept — it must match the kernel names in the CUDA source.
                kerX = MyKernelFactory.Instance.Kernel(nGPU, mathFile, "SetMatrixVauleMinMaxX");
                kerX.SetupExecution(Owner.Output.Count);

                kerY = MyKernelFactory.Instance.Kernel(nGPU, mathFile, "SetMatrixVauleMinMaxY");
                kerY.SetupExecution(Owner.Output.Count);
            }
        void MyMemoryBlockObserver_TargetChanged(object sender, PropertyChangedEventArgs e)
        {
            // Pick the scalar-draw kernel variant matching the memory block's element type.
            Type elementType = Target.GetType().GenericTypeArguments[0];
            m_kernel = MyKernelFactory.Instance.Kernel(@"Observers\ColorScaleObserver" + elementType.Name);

            int gpu = MyKernelFactory.Instance.DevCount - 1;
            m_vectorKernel = MyKernelFactory.Instance.Kernel(gpu, @"Observers\ColorScaleObserverSingle", "DrawVectorsKernel");
            m_rgbKernel    = MyKernelFactory.Instance.Kernel(gpu, @"Observers\ColorScaleObserverSingle", "DrawRGBKernel");
        }
Exemplo n.º 26
0
        public override void Initialize(Int32 nGPU)
        {
            const string kernelFile = @"XmlFeedForwardNet\MirrorPoolLayerKernel";

            // Load the forward/backward pass kernels before base initialization completes.
            m_forwardKernel  = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "ForwardKernel");
            m_backwardKernel = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "BackwardKernel");

            base.Initialize(nGPU);
        }
Exemplo n.º 27
0
        public override void Initialize(Int32 nGPU)
        {
            const string kernelFile = @"XmlFeedForwardNet\SoftmaxLayerKernel";

            // Load the forward/backward pass kernels before base initialization completes.
            m_forwardKernel  = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "ForwardKernel");
            m_backwardKernel = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "BackwardKernel");

            base.Initialize(nGPU);
        }
        public MyAbstractQLearningObserver()
        {
            int gpu = MyKernelFactory.Instance.DevCount - 1;

            // NOTE: "crate3Dplot" is kept as-is — it must match the kernel name in the CUDA source.
            m_kernel       = MyKernelFactory.Instance.Kernel(gpu, @"Harm\MatrixQLearningKernel", "createTexture");
            m_vertexKernel = MyKernelFactory.Instance.Kernel(gpu, @"Harm\MatrixQLearningKernel", "crate3Dplot");
            m_setKernel    = MyKernelFactory.Instance.Kernel(gpu, @"Common\SetKernel");

            TriggerReset();
        }
 public override void Init(int nGPU)
 {
     const string feedForward = @"NeuralNetwork\Layer\FeedForwardKernels";
     const string regTerms    = @"NeuralNetwork\Layer\RegularizationTermKernels";
     const string activation  = @"NeuralNetwork\Activation\ActivationFunction";

     m_forwardKernel      = MyKernelFactory.Instance.Kernel(nGPU, feedForward, "FullyConnectedForwardKernel");
     m_forwardBatchKernel = MyKernelFactory.Instance.Kernel(nGPU, feedForward, "FullyConnectedForwardBatchKernel");
     m_L1TermKernel       = MyKernelFactory.Instance.Kernel(nGPU, regTerms, "L1TermKernel");
     m_L2TermKernel       = MyKernelFactory.Instance.Kernel(nGPU, regTerms, "L2TermKernel");
     m_softmaxKernel      = MyKernelFactory.Instance.Kernel(nGPU, activation, "SoftmaxKernel");
 }
Exemplo n.º 30
0
 public override void Init(int nGPU)
 {
     m_setContextKernel = MyKernelFactory.Instance.Kernel(nGPU, @"\CWSetContext");

     // One thread per hidden unit in every neuron group.
     int threadCount = Owner.HIDDEN_UNITS * Owner.NeuronGroups;
     m_setContextKernel.SetupExecution(threadCount);

     // Publish the network topology to device constant memory.
     m_setContextKernel.SetConstantVariable("D_NEURONS_PER_GROUP", Owner.NeuronsPerGroup);
     m_setContextKernel.SetConstantVariable("D_NEURON_GROUPS", Owner.NeuronGroups);
     m_setContextKernel.SetConstantVariable("D_HIDDEN_UNITS", Owner.HIDDEN_UNITS);
 }
        public override void Init(int nGPU)
        {
            const string kernelFile = @"LSTM\LSTMPartialDerivativesKernel";

            // One thread per RTRL partial-derivative entry.
            m_cellWeightsRTRLPartialsKernel = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "LSTMCellWeightsRTRLPartialsKernel");
            m_cellWeightsRTRLPartialsKernel.SetupExecution(Owner.CellWeightsRTRLPartials.Count);

            m_gateWeightsRTRLPartialsKernel = MyKernelFactory.Instance.Kernel(nGPU, kernelFile, "LSTMGateWeightsRTRLPartialsKernel");
            m_gateWeightsRTRLPartialsKernel.SetupExecution(Owner.InputGateWeightsRTRLPartials.Count);
        }
Exemplo n.º 32
0
 public override void Init(int nGPU)
 {
     const string gridFile = @"VSA\SymbolicGrid";

     // Symbolic-grid kernel set; launch sizes are configured elsewhere.
     m_normalizePositionKernel  = MyKernelFactory.Instance.Kernel(nGPU, gridFile, "NormalizePositionKernel");
     m_interpolateSymbolsKernel = MyKernelFactory.Instance.Kernel(nGPU, gridFile, "InterpolateSymbolsKernel");
     m_sumSymbolsKernel         = MyKernelFactory.Instance.Kernel(nGPU, gridFile, "SumSymbolsKernel");
     m_sumBasicSymbolsKernel    = MyKernelFactory.Instance.Kernel(nGPU, gridFile, "SumBasicSymbolsKernel");
     m_computeDistanceKernel    = MyKernelFactory.Instance.Kernel(nGPU, gridFile, "ComputeDistanceKernel");
 }
Exemplo n.º 33
0
            public override void Init(int nGPU)
            {
                // Adjacency-matrix fill: one thread per mask element.
                m_kernel = MyKernelFactory.Instance.Kernel(nGPU, @"Vision\JoinPatches", "FillAdjacencyMatrix");
                m_kernel.SetupExecution(Owner.MaskCount);

                // Image-reset kernel sized to the PatchesNum x PatchesNum matrix.
                m_kernel_resetIm = MyKernelFactory.Instance.Kernel(nGPU, @"Vision\VisionMath", "ResetImage");
                m_kernel_resetIm.SetupExecution(Owner.PatchesNum * Owner.PatchesNum);
            }
Exemplo n.º 34
0
        public MyPermutationBinder(MyWorkingNode owner, int inputSize, MyMemoryBlock <float> tempBlock)
            : base(owner, inputSize, tempBlock)
        {
            // CUDA stream used by this binder's kernel launches.
            m_stream = new CudaStream();

            // Binary permutation kernel: one thread per element.
            m_binaryPermKernel = MyKernelFactory.Instance.Kernel(owner.GPU, @"Common\CombineVectorsKernel", "CombineTwoVectorsKernel");
            m_binaryPermKernel.SetupExecution(inputSize);
        }
Exemplo n.º 35
0
        // Constructor: loads drawing kernels and publishes character-texture metrics.
        public MyMatrixObserver()
        {
            NbDecimals = 2;

            m_drawMatrixKernel = MyKernelFactory.Instance.Kernel(@"Observers\DrawMatrixKernel", true);
            m_setKernel        = MyKernelFactory.Instance.Kernel(@"Common\SetKernel", true);

            // Character geometry, pushed to the draw kernel's constant memory.
            int charWidth  = MyDrawStringHelper.CharacterWidth;
            int charHeight = MyDrawStringHelper.CharacterHeight;
            m_drawMatrixKernel.SetConstantVariable("D_CHARACTER_WIDTH", charWidth);
            m_drawMatrixKernel.SetConstantVariable("D_CHARACTER_HEIGHT", charHeight);
            m_drawMatrixKernel.SetConstantVariable("D_CHARACTER_SIZE", charWidth * charHeight);

            m_characters = MyMemoryManager.Instance.GetGlobalVariable<float>("CHARACTERS_TEXTURE", MyKernelFactory.Instance.DevCount - 1, MyDrawStringHelper.LoadDigits);

            TargetChanged += MyMatrixObserver_TargetChanged;
        }
Exemplo n.º 36
0
        /// <summary>
        /// Constructor with node parameter.
        /// </summary>
        public MyTextObserver()
        {
            // Visible text window limits.
            MaxLineLength = 80;
            MaxRows = 16;

            // Render-target size in pixels.
            TextureWidth = 800;
            TextureHeight = 400;

            // History starts with a single empty line.
            m_History = new List<string> { "" };

            m_ClearCanvasKernel = MyKernelFactory.Instance.Kernel(@"GrowingNeuralGas\ClearCanvasKernel");

            TriggerReset();
        }
Exemplo n.º 37
0
        public MyHistogramObserver()
        {
            // Observed value range and bin width.
            MAX_VALUE = 1.00f;
            MIN_VALUE = 0.00f;
            BIN_VALUE_WIDTH = 0.10f;

            // Bins covering the range, plus 2 extra — presumably for out-of-range values.
            BINS = (int)Math.Ceiling((double)((MAX_VALUE - MIN_VALUE) / BIN_VALUE_WIDTH)) + 2;

            // Bar geometry in pixels.
            BIN_PIXEL_HEIGHT = 250;
            BIN_PIXEL_WIDTH = 32;

            UPDATE_STEP = 100;

            // ARGB colors for bars, background and out-of-range markers.
            COLOR_ONE = 0xFF00CCFF;
            COLOR_TWO = 0xFF00B8E6;
            BACKGROUND = 0xFFFFFFFF;
            OUT_OF_BOUNDS = 0xFFFF0000;

            var factory = MyKernelFactory.Instance;
            m_computeHistogram = factory.Kernel(@"Observers\ComputeHistogramKernel");
            m_visualizeHistogram = factory.Kernel(@"Observers\VisualizeHistogramKernel");
        }
Exemplo n.º 38
0
        /// <summary>
        /// Prepares the reduction/combination kernels needed for the requested distance operations.
        /// </summary>
        /// <param name="caller">The node on whose behalf the kernels run (supplies the GPU index).</param>
        /// <param name="operations">Flags selecting which distance operations will be available.</param>
        /// <param name="tempBlock">Optional temporary storage used by some operations.</param>
        public MyDistanceOps(MyWorkingNode caller, DistanceOperation operations, MyMemoryBlock<float> tempBlock = null)
        {
            m_caller = caller;
            m_operations = operations;
            m_temp = tempBlock;

            if (operations.HasFlag(DistanceOperation.DotProd))
            {
                m_dotKernel = MyReductionFactory.Kernel(m_caller.GPU, MyReductionFactory.Mode.f_DotProduct_f);
            }

            if (operations.HasFlag(DistanceOperation.CosDist))
            {
                m_cosKernel = MyReductionFactory.Kernel(m_caller.GPU, MyReductionFactory.Mode.f_Cosine_f);
            }

            if (operations.HasFlag(DistanceOperation.EuclidDist) || operations.HasFlag(DistanceOperation.EuclidDistSquared))
            {
                // EuclidDist computes EuclidDistSquared first, so keep them together:
                m_operations |= DistanceOperation.EuclidDist | DistanceOperation.EuclidDistSquared;
                m_dotKernel = MyReductionFactory.Kernel(m_caller.GPU, MyReductionFactory.Mode.f_DotProduct_f);
            }

            // Both Hamming variants use the same sum reduction; merged from two identical branches.
            if (operations.HasFlag(DistanceOperation.HammingDist) || operations.HasFlag(DistanceOperation.HammingSim))
            {
                m_reduceSumKernel = MyReductionFactory.Kernel(m_caller.GPU, MyReductionFactory.Mode.f_Sum_f);
            }

            if (operations.HasFlag(DistanceOperation.EuclidDist) || operations.HasFlag(DistanceOperation.EuclidDistSquared) ||
                operations.HasFlag(DistanceOperation.HammingDist) || operations.HasFlag(DistanceOperation.HammingSim))
            {
                // These operations combine the two inputs element-wise before reducing.
                m_combineVecsKernel = MyKernelFactory.Instance.Kernel(m_caller.GPU, @"Common\CombineVectorsKernel", "CombineTwoVectorsKernel");
            }
        }
Exemplo n.º 39
0
        /// <summary>
        /// Normalizes vectors along the leading dimension: each of the <paramref name="otherDim"/>
        /// vectors of length <paramref name="leadingDim"/> is scaled to unit length;
        /// near-zero vectors are zeroed instead.
        /// </summary>
        /// <param name="vectors">The matrix data; must hold at least leadingDim * otherDim floats.</param>
        /// <param name="temp">Scratch space; must hold at least Max(leadingDim, otherDim) floats.</param>
        /// <param name="leadingDim">The length of each vector.</param>
        /// <param name="otherDim">The number of vectors.</param>
        /// <param name="dotKernel">The kernel used to compute squared norms.</param>
        /// <param name="multKernel">The kernel used to scale the vectors.</param>
        /// <param name="GPU">The device index used to resolve device pointers.</param>
        public static void NormalizeLeadingDim(
            MyMemoryBlock<float> vectors, MyMemoryBlock<float> temp,
            int leadingDim, int otherDim,
            MyProductKernel<float> dotKernel, MyCudaKernel multKernel, int GPU)
        {
            var count = leadingDim * otherDim;

            Debug.Assert(vectors != null && temp != null, "Missing data!");
            Debug.Assert(dotKernel != null && multKernel != null, "Missing kernels.");
            Debug.Assert(leadingDim > 0 && otherDim > 0, "Negative matrix dimensions!");
            Debug.Assert(vectors.Count >= count, "Too little vectors to orthonormalize!");
            Debug.Assert(temp.Count >= Math.Max(leadingDim, otherDim), "Too little temp space!");

            multKernel.SetupExecution(leadingDim);

            // Step 1: write the squared norm of vector i into temp[i] on the device.
            for (int i = 0; i < otherDim; i++)
            {
                var seg = vectors.GetDevicePtr(GPU, i * leadingDim);
                //dotKernel.Run(temp, i, seg, seg, leadingDim, /* distributed: */ 0);
                dotKernel.outOffset = i;
                dotKernel.Run(temp, seg, seg, leadingDim);
            }

            // Step 2: on the host, turn each squared norm into 1/sqrt(norm); 0 for near-zero vectors.
            temp.SafeCopyToHost(0, otherDim);

            for (int i = 0; i < otherDim; i++)
            {
                if (temp.Host[i] < 0.0000001f)
                    temp.Host[i] = 0;
                else
                    temp.Host[i] = (float)(1 / Math.Sqrt(temp.Host[i]));
            }

            temp.SafeCopyToDevice(0, otherDim);

            // Step 3: scale each vector by its reciprocal length on the device.
            for (int i = 0; i < otherDim; i++)
            {
                var seg = vectors.GetDevicePtr(GPU, i * leadingDim);
                var len = temp.GetDevicePtr(GPU, i);
                multKernel.Run(seg, len, seg, (int)MyJoin.MyJoinOperation.Multiplication, leadingDim, 1);
            }
        }
Exemplo n.º 40
0
        /// <summary>
        /// Transforms all the vectors stored in <paramref name="vectors"/> to be pair-wise orthonormal using a modified version of the Gram-Schmidt algorithm.
        /// </summary>
        /// <param name="vectors">The vectors to orthonormalize.</param>
        /// <param name="temp">A vector of temporal space.</param>
        /// <param name="xDim">The length of each vector.</param>
        /// <param name="yDim">The number of vectors.</param>
        /// <param name="dotKernel">The kernel to compute a dot product.</param>
        /// <param name="multKernel">The kernel to compute vector combinations.</param>
        /// <param name="GPU">The device index used to resolve device pointers.</param>
        public static void OrthonormalizeVectors(MyMemoryBlock<float> vectors, MyMemoryBlock<float> temp, int xDim, int yDim, MyProductKernel<float> dotKernel, MyCudaKernel multKernel, int GPU)
        {
            int count = xDim * yDim;

            Debug.Assert(vectors != null && temp != null, "Missing data!");
            Debug.Assert(dotKernel != null && multKernel != null, "Missing a kernel!");
            Debug.Assert(xDim > 0 && yDim > 0, "Negative matrix dimensions!");
            Debug.Assert(vectors.Count >= count, "Too little vectors to orthonormalize!");
            Debug.Assert(temp.Count >= xDim, "Too little temp space!");

            multKernel.SetupExecution(xDim);

            for (int i = 0; i < count; i += xDim)
            {
                var curr = vectors.GetDevicePtr(GPU, i);

                // Normalize the current vector
                {
                    // BUGFIX: reset the output offset — the projection loop below leaves it at xDim,
                    // so from the second outer iteration the norm would be written to the wrong slot.
                    dotKernel.outOffset = 0;
                    //ZXC dotKernel.Run(temp, 0, curr, curr, xDim, /* distributed: */ 0);
                    dotKernel.Run(temp, curr, curr, xDim);

                    // BUGFIX: the squared norm is produced on the device; it must be copied TO the host
                    // before temp.Host[0] is read below (was SafeCopyToDevice, which overwrote the result
                    // with stale host data — compare the correct pattern in NormalizeLeadingDim).
                    temp.SafeCopyToHost(0, 1);

                    // Skip (nearly) zero vectors — they cannot be normalized.
                    if (temp.Host[0] < 0.0000001f)
                        continue;

                    temp.Host[0] = (float)(1 / Math.Sqrt(temp.Host[0]));
                    temp.SafeCopyToDevice(0, 1);

                    // Scale the current vector by 1/|v|.
                    multKernel.Run(curr, temp, curr, (int)MyJoin.MyJoinOperation.Multiplication, xDim, 1);
                }

                // Make all the remaining vectors orthogonal to the current one
                for (int j = i + xDim; j < count; j += xDim)
                {
                    var next = vectors.GetDevicePtr(GPU, j);

                    // Compute and subtract the projection onto the current vector.
                    // The dot result is written just past the live temp vector region (offset xDim).
                    //ZXC dotKernel.Run(temp, xDim, curr, next, xDim, /* distributed: */ 0);
                    dotKernel.outOffset = xDim;
                    dotKernel.Run(temp, curr, next, xDim);

                    multKernel.Run(curr, temp, temp, (int)MyJoin.MyJoinOperation.Multiplication, xDim, 1);
                    multKernel.Run(next, temp, next, (int)MyJoin.MyJoinOperation.Subtraction, xDim, xDim);
                }
            }
        }
Exemplo n.º 41
0
 public void Init(int nGPU)
 {
     // The RBM input-layer kernels all come from the shared RBM kernel file.
     const string rbmKernels = @"RBM\RBMKernels";
     var factory = MyKernelFactory.Instance;

     m_RBMInputForwardKernel = factory.Kernel(nGPU, rbmKernels, "RBMInputForwardKernel");
     m_RBMInputForwardAndStoreKernel = factory.Kernel(nGPU, rbmKernels, "RBMInputForwardAndStoreKernel");
     m_RBMUpdateBiasesKernel = factory.Kernel(nGPU, rbmKernels, "RBMUpdateBiasesKernel");
     m_RBMRandomActivationKernel = factory.Kernel(nGPU, rbmKernels, "RBMRandomActivationKernel");
 }
Exemplo n.º 42
0
        /// <summary>
        /// Generates a matrix with <paramref name="xDim"/> being the leading dimension in column-major storage.
        /// </summary>
        /// <param name="unmanagedVectors">A memory block to store the generated matrix.
        /// Must be as large as <paramref name="xDim"/> x <paramref name="yDim"/>.</param>
        /// <param name="unmanagedBaseVectors">A temporary block to store all the base vectors.
        /// Must be as large as Max(<paramref name="xDim"/>, <paramref name="yDim"/>)^2.
        /// Only neccessary when <paramref name="mode"/> is set to <see cref="VectorGenerationMode.AverageBaseVectors"/>.</param>
        /// <param name="temp">The temporary storage. It should be as long as the longer of the dimensions.</param>
        /// <param name="random">The random object for number generation.</param>
        /// <param name="xDim">The size of the other dimension.</param>
        /// <param name="yDim">The size of the leading dimension.</param>
        /// <param name="dotKernel">The kernel used to compute dot products (norms and projections).</param>
        /// <param name="multKernel">The kernel used to combine (scale/add/subtract) vectors.</param>
        /// <param name="transposeKernel">The kernel used to transpose the matrix.</param>
        /// <param name="GPU">The device index used to resolve device pointers.</param>
        /// <param name="mode">Selects the generation strategy: plain random vectors, orthonormalized
        /// vectors, or vectors averaged from an orthonormal base (fixed: previously said "If true, ...").</param>
        /// <param name="axisToNormalize">The axis along which to normalize vectors after orthonormalization.</param>
        public static void GenerateTransformMatrix(
            MyMemoryBlock<float> unmanagedVectors, MyMemoryBlock<float> unmanagedBaseVectors, MyMemoryBlock<float> temp,
            Random random, int xDim, int yDim,
            MyProductKernel<float> dotKernel, MyCudaKernel multKernel, MyCudaKernel transposeKernel, int GPU,
            VectorGenerationMode mode = VectorGenerationMode.Normal, AxisToNormalizeEnum axisToNormalize = AxisToNormalizeEnum.yDim)
        {
            Debug.Assert(random != null, "Missing random object");
            Debug.Assert(unmanagedVectors != null && (mode != VectorGenerationMode.AverageBaseVectors || unmanagedBaseVectors != null) && temp != null, "Missing data!");
            Debug.Assert(dotKernel != null && multKernel != null && transposeKernel != null, "Missing a kernel!");

            // Mapping to rows --- Column-major storage --- rows will the leading dimension
            // The larger dimension vectors will be orthogonal; the cols dimension vectors will be normalized

            switch (mode)
            {
                case VectorGenerationMode.Normal:
                    // Plain random normalized vectors, no orthogonalization.
                    if (axisToNormalize == AxisToNormalizeEnum.xDim)
                    {
                        // Generate normalized vectors with xDim as the leading dim
                        GenerateRandomNormalVectors(unmanagedVectors.Host, random, xDim, yDim);
                        unmanagedVectors.SafeCopyToDevice();

                        // Transpose to the correct position
                        transposeKernel.Run(unmanagedVectors, unmanagedVectors, xDim, yDim);
                    }
                    else
                    {
                        GenerateRandomNormalVectors(unmanagedVectors.Host, random, yDim, xDim);
                        unmanagedVectors.SafeCopyToDevice();
                    }
                    break;

                case VectorGenerationMode.Orthonormalize:
                    int largerDim = Math.Max(xDim, yDim);
                    int smallerDim = Math.Min(xDim, yDim);

                    // Generate vectors with larger leading dimension
                    GenerateRandomNormalVectors(unmanagedVectors.Host, random, largerDim, smallerDim, normalize: false);
                    unmanagedVectors.SafeCopyToDevice();

                    // Orthonormalize along the larger dimension
                    OrthonormalizeVectors(unmanagedVectors, temp, largerDim, smallerDim, dotKernel, multKernel, GPU);

                    if (xDim > yDim)
                    {
                        // xDim is leading and is normalized
                        // We need to transpose to get the correct dims
                        transposeKernel.Run(unmanagedVectors, unmanagedVectors, xDim, yDim);

                        if (axisToNormalize == AxisToNormalizeEnum.yDim)
                            NormalizeLeadingDim(unmanagedVectors, temp, yDim, xDim, dotKernel, multKernel, GPU);
                    }
                    else
                    {
                        // yDim is leading and is normalized
                        // The matrix is in correct position

                        if (axisToNormalize == AxisToNormalizeEnum.xDim)
                        {
                            // TODO: generate the matrix with transposed dims?
                            // TODO: SMELLY VERSION: transpose, normalize the now-leading axis, transpose back.
                            transposeKernel.Run(unmanagedVectors, unmanagedVectors, yDim, xDim);
                            NormalizeLeadingDim(unmanagedVectors, temp, xDim, yDim, dotKernel, multKernel, GPU);
                            transposeKernel.Run(unmanagedVectors, unmanagedVectors, xDim, yDim);
                        }
                    }
                    break;

                case VectorGenerationMode.AverageBaseVectors:
                    int longerDim = Math.Max(xDim, yDim);
                    int shorterDim = Math.Min(xDim, yDim);

                    // Build a square orthonormal base, then average runs of base vectors
                    // into each output vector.
                    GenerateTransformMatrix(
                        unmanagedBaseVectors, null, temp,
                        random, longerDim, longerDim,
                        dotKernel, multKernel, transposeKernel, GPU,
                        VectorGenerationMode.Orthonormalize);

                    if (shorterDim == longerDim)
                        break;

                    // Each of the shorterDim output vectors accumulates ~step consecutive base vectors.
                    float it = 0f;
                    float step = longerDim / (float)shorterDim;
                    int beg, end = 0;

                    for (int i = 0; i < shorterDim; i++)
                    {
                        beg = end;
                        it += step;
                        end = (int)it;

                        var vect = unmanagedVectors.GetDevicePtr(GPU, i * longerDim);

                        for (int j = beg; j < end; j++)
                        {
                            var baseVect = unmanagedBaseVectors.GetDevicePtr(GPU, j * longerDim);
                            multKernel.Run(baseVect, vect, vect, (int)MyJoin.MyJoinOperation.Addition, longerDim,
                                longerDim);
                        }
                    }

                    if (xDim > yDim)
                    {
                        // xDim is leading and is not normalized
                        // We need to transpose to get the correct dims

                        if (axisToNormalize == AxisToNormalizeEnum.xDim)
                        {
                            NormalizeLeadingDim(unmanagedVectors, temp, xDim, yDim, dotKernel, multKernel, GPU);

                            transposeKernel.Run(unmanagedVectors, unmanagedVectors, xDim, yDim);
                        }
                        else
                        {
                            transposeKernel.Run(unmanagedVectors, unmanagedVectors, xDim, yDim);

                            NormalizeLeadingDim(unmanagedVectors, temp, yDim, xDim, dotKernel, multKernel, GPU);
                        }
                    }
                    else
                    {
                        // yDim is leading and is not normalized
                        // The matrix is in correct position

                        if (axisToNormalize == AxisToNormalizeEnum.yDim)
                            NormalizeLeadingDim(unmanagedVectors, temp, yDim, xDim, dotKernel, multKernel, GPU);
                        else
                        {
                            // TODO: SMELLY VERSION: transpose, normalize, transpose back.
                            transposeKernel.Run(unmanagedVectors, unmanagedVectors, yDim, xDim);
                            NormalizeLeadingDim(unmanagedVectors, temp, xDim, yDim, dotKernel, multKernel, GPU);
                            transposeKernel.Run(unmanagedVectors, unmanagedVectors, xDim, yDim);
                        }
                    }
                    break;
            }
        }
        void MyMemoryBlockObserver_TargetChanged(object sender, PropertyChangedEventArgs e)
        {
            // Pick the color-scale kernel variant that matches the target block's element type.
            Type type = Target.GetType().GenericTypeArguments[0];
            var factory = MyKernelFactory.Instance;
            int lastDevice = factory.DevCount - 1;

            m_kernel = factory.Kernel(@"Observers\ColorScaleObserver" + type.Name);
            m_vectorKernel = factory.Kernel(lastDevice, @"Observers\ColorScaleObserverSingle", "DrawVectorsKernel");
            m_rgbKernel = factory.Kernel(lastDevice, @"Observers\ColorScaleObserverSingle", "DrawRGBKernel");

            // Keep explicit user choices; otherwise fall back to the block's metadata.
            if (!m_methodSelected)
            {
                Method = Target.Metadata.GetOrDefault(MemoryBlockMetadataKeys.RenderingMethod,
                    defaultValue: RenderingMethod.RedGreenScale);
            }

            if (!m_showCoordinatesSelected)
            {
                ShowCoordinates = Target.Metadata.GetOrDefault(MemoryBlockMetadataKeys.ShowCoordinates,
                    defaultValue: false);
            }
        }
Exemplo n.º 44
0
 public void Init(int nGPU)
 {
     // All RBM layer kernels come from the shared RBM kernel file.
     const string rbmKernels = @"RBM\RBMKernels";
     var factory = MyKernelFactory.Instance;

     m_RBMForwardKernel = factory.Kernel(nGPU, rbmKernels, "RBMForwardKernel");
     m_RBMForwardAndStoreKernel = factory.Kernel(nGPU, rbmKernels, "RBMForwardAndStoreKernel");
     m_RBMBackwardKernel = factory.Kernel(nGPU, rbmKernels, "RBMBackwardKernel");
     m_RBMSamplePositiveKernel = factory.Kernel(nGPU, rbmKernels, "RBMSamplePositiveKernel");
     m_RBMUpdateWeightsKernel = factory.Kernel(nGPU, rbmKernels, "RBMUpdateWeightsKernel");
     m_RBMCopyFilterKernel = factory.Kernel(nGPU, rbmKernels, "RBMCopyFilterKernel");
     m_RBMUpdateBiasesKernel = factory.Kernel(nGPU, rbmKernels, "RBMUpdateBiasesKernel");
     m_RBMRandomActivationKernel = factory.Kernel(nGPU, rbmKernels, "RBMRandomActivationKernel");
     m_RBMDropoutMaskKernel = factory.Kernel(nGPU, rbmKernels, "RBMDropoutMaskKernel");
 }
Exemplo n.º 45
0
 void MyMemoryBlockObserver_TargetChanged(object sender, PropertyChangedEventArgs e)
 {
     // Choose the observer kernel variant matching the block's element type.
     Type type = Target.GetType().GenericTypeArguments[0];
     var factory = MyKernelFactory.Instance;
     int lastDevice = factory.DevCount - 1;

     m_kernel = factory.Kernel(@"Observers\ColorScaleObserver" + type.Name);
     m_vectorKernel = factory.Kernel(lastDevice, @"Observers\ColorScaleObserverSingle", "DrawVectorsKernel");
     m_rgbKernel = factory.Kernel(lastDevice, @"Observers\ColorScaleObserverSingle", "DrawRGBKernel");
 }
Exemplo n.º 46
0
        public override void Initialize(Int32 nGPU)
        {
            var factory = MyKernelFactory.Instance;
            const string rbmKernelFile = @"XmlFeedForwardNet\RBMKernel";

            // Generic utility kernel.
            m_setKernel = factory.Kernel(nGPU, @"Common\SetKernel");

            m_RBMBackwardKernel = factory.Kernel(nGPU, rbmKernelFile, "BackwardKernel");
            m_RBMObserverKernel = factory.Kernel(nGPU, rbmKernelFile, "ObserverKernel");

            // This layer's forward pass only copies neuron activations.
            m_ForwardKernel = factory.Kernel(nGPU, rbmKernelFile, "NeuronCopyForwardKernel");
            m_ForwardAndStoreKernel = factory.Kernel(nGPU, rbmKernelFile, "NeuronCopyForwardAndStoreKernel");

            base.Initialize(nGPU);
        }
Exemplo n.º 47
0
        public override void Initialize(Int32 nGPU)
        {
            var factory = MyKernelFactory.Instance;
            const string layerKernels = @"XmlFeedForwardNet\NeuronLayerKernel";
            const string rbmKernels = @"XmlFeedForwardNet\RBMKernel";

            // Generic utility kernel.
            m_setKernel = factory.Kernel(nGPU, @"Common\SetKernel");

            // Standard feed-forward neuron-layer kernels.
            m_forwardKernel = factory.Kernel(nGPU, layerKernels, "ForwardKernel");
            m_backwardKernel = factory.Kernel(nGPU, layerKernels, "BackwardKernel");
            m_weightKernel = factory.Kernel(nGPU, layerKernels, "WeightKernel");
            m_biasKernel = factory.Kernel(nGPU, layerKernels, "BiasKernel");
            m_backpropKernel = factory.Kernel(nGPU, layerKernels, "BackpropKernel");

            // RBM kernels for this layer.
            m_RBMForwardKernel = factory.Kernel(nGPU, rbmKernels, "ForwardKernel");
            m_RBMForwardAndStoreKernel = factory.Kernel(nGPU, rbmKernels, "ForwardAndStoreKernel");
            m_RBMBackwardKernel = factory.Kernel(nGPU, rbmKernels, "BackwardKernel");
            m_RBMSampleKernel = factory.Kernel(nGPU, rbmKernels, "SampleKernel");
            m_RBMUpdateWeightKernel = factory.Kernel(nGPU, rbmKernels, "UpdateWeightKernel");
            m_RBMUpdateBiasKernel = factory.Kernel(nGPU, rbmKernels, "UpdateBiasKernel");
            m_RBMObserverKernel = factory.Kernel(nGPU, rbmKernels, "ObserverKernel");

            base.Initialize(nGPU);
        }
Exemplo n.º 48
0
        public override void Initialize(Int32 nGPU)
        {
            m_setKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Common\SetKernel");

            // Same kernel roles as MyNeuronLayer, but loaded from the mirror-layer kernel file.
            // (The previous comment claimed the same kernels were used; the file paths below show
            // they are the MirrorNeuronLayerKernel variants.)
            m_forwardKernel = MyKernelFactory.Instance.Kernel(nGPU, @"XmlFeedForwardNet\MirrorNeuronLayerKernel", "ForwardKernel");
            m_backwardKernel = MyKernelFactory.Instance.Kernel(nGPU, @"XmlFeedForwardNet\MirrorNeuronLayerKernel", "BackwardKernel");
            m_weightKernel = MyKernelFactory.Instance.Kernel(nGPU, @"XmlFeedForwardNet\MirrorNeuronLayerKernel", "WeightKernel");
            m_biasKernel = MyKernelFactory.Instance.Kernel(nGPU, @"XmlFeedForwardNet\MirrorNeuronLayerKernel", "BiasKernel");

            base.Initialize(nGPU);
        }
Exemplo n.º 49
0
        void MyMemoryBlockObserver_TargetChanged(object sender, PropertyChangedEventArgs e)
        {
            // TODO fix this: should be handled somewhere above (during deserialization)!
            if (Target == null)
            {
                // Fixed message: previously read "could find", the opposite of what it meant.
                MyLog.ERROR.WriteLine("Observer: could not find the target MemoryBlock, so not opening this one.");
                return;
            }

            // Pick the color-scale kernel variant matching the target block's element type.
            Type type = Target.GetType().GenericTypeArguments[0];
            m_kernel = MyKernelFactory.Instance.Kernel(@"Observers\ColorScaleObserver" + type.Name);
            m_vectorKernel = MyKernelFactory.Instance.Kernel(MyKernelFactory.Instance.DevCount - 1, @"Observers\ColorScaleObserverSingle", "DrawVectorsKernel");
            m_rgbKernel = MyKernelFactory.Instance.Kernel(MyKernelFactory.Instance.DevCount - 1, @"Observers\ColorScaleObserverSingle", "DrawRGBKernel");

            m_tiledKernel = MyKernelFactory.Instance.Kernel(MyKernelFactory.Instance.DevCount - 1, @"Observers\ColorScaleObserverSingle", "ColorScaleObserverTiledSingle");
            m_tiledRGBKernel = MyKernelFactory.Instance.Kernel(MyKernelFactory.Instance.DevCount - 1, @"Observers\ColorScaleObserverSingle", "DrawRGBTiledKernel");

            // Keep explicit user choices; otherwise fall back to the block's metadata.
            if (!m_methodSelected)
            {
                Method = Target.Metadata.GetOrDefault(MemoryBlockMetadataKeys.RenderingMethod,
                    defaultValue: RenderingMethod.RedGreenScale);
            }

            if (!m_showCoordinatesSelected)
            {
                ShowCoordinates = Target.Metadata.GetOrDefault(MemoryBlockMetadataKeys.ShowCoordinates,
                    defaultValue: false);
            }

            if (ObserveTensors && type.Name != "Single")
            {
                // Fixed typo in the warning: "Signles" -> "Single".
                MyLog.WARNING.WriteLine("Observing tensors with anything other than Single is not supported, will use Single");
            }
            if (Method == RenderingMethod.Vector)
            {
                MyLog.WARNING.WriteLine("Observing tensors in Vector mode not supported");
            }
        }
        public override void Initialize(Int32 nGPU)
        {
            var factory = MyKernelFactory.Instance;
            const string mirrorConvKernels = @"XmlFeedForwardNet\MirrorConvolutionLayerKernel";

            // Generic utility kernel plus the mirror-convolution layer kernels.
            m_setKernel = factory.Kernel(nGPU, @"Common\SetKernel");
            m_forwardKernel = factory.Kernel(nGPU, mirrorConvKernels, "ForwardKernel");
            m_forwardBiasKernel = factory.Kernel(nGPU, mirrorConvKernels, "ForwardBiasKernel");
            m_backwardKernel = factory.Kernel(nGPU, mirrorConvKernels, "BackwardKernel");
            m_weightBiasesKernel = factory.Kernel(nGPU, mirrorConvKernels, "WeightBiasKernel");
            m_weightKernel = factory.Kernel(nGPU, mirrorConvKernels, "WeightKernel");

            base.Initialize(nGPU);
        }
Exemplo n.º 51
0
        public override void Initialize(Int32 nGPU)
        {
            // Create the kernels
            m_setKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Common\SetKernel");
            m_forwardKernel = MyKernelFactory.Instance.Kernel(nGPU, @"XmlFeedForwardNet\ConvolutionLayerKernel", "ForwardKernel");
            m_backwardKernel = MyKernelFactory.Instance.Kernel(nGPU, @"XmlFeedForwardNet\ConvolutionLayerKernel", "BackwardKernel");
            m_weightKernel = MyKernelFactory.Instance.Kernel(nGPU, @"XmlFeedForwardNet\ConvolutionLayerKernel", "WeightKernel");

            base.Initialize(nGPU);

            // Routage: build the routing table mapping each feature map to its source inputs.
            // Layout inside m_extraBlock.Host, starting at m_extraOffset:
            //   [nbSourceOffset .. +nbMaps)     number of sources per feature map
            //   [featureInfoOffset .. +nbMaps)  per-map offset into the flattened source-index area
            //   [sourceIndexOffset .. )         flattened list of source indices
            uint nbMaps = (uint)FeatureInputs.Length;
            uint nbSourceOffset = m_extraOffset;
            uint featureInfoOffset = nbSourceOffset + nbMaps;
            uint sourceIndexOffset = featureInfoOffset + nbMaps;

            // Write features' number of sources
            uint extraOffset = 0;
            for (uint featureMapId = 0; featureMapId < nbMaps; featureMapId++)
            {
                uint[] featureSources = FeatureInputs[featureMapId];
                uint featureNbSources = (uint)featureSources.Length;

                // Write features' number of sources
                m_extraBlock.Host[nbSourceOffset + featureMapId] = featureNbSources;

                // Write feature info offset
                m_extraBlock.Host[featureInfoOffset + featureMapId] = extraOffset;

                // Write this map's source indices into the flattened index area.
                for (uint inputId = 0; inputId < featureNbSources; inputId++)
                    m_extraBlock.Host[sourceIndexOffset + extraOffset + inputId] = featureSources[inputId];

                extraOffset += featureNbSources;
            }

            // Pointers
            // NOTE(review): device pointer to the start of the routing table — presumably consumed
            // by the convolution kernels above; confirm against the kernel signatures.
            FeatureInfos = m_extraBlock.GetDevicePtr(m_network, (int)nbSourceOffset);
        }
Exemplo n.º 52
0
 public MyXORBinder(MyWorkingNode owner, int inputSize)
     : base(owner, inputSize, null)
 {
     // XOR binding is an element-wise combination of two vectors; no temp block is needed.
     var factory = MyKernelFactory.Instance;
     m_XORKernel = factory.Kernel(owner.GPU, @"Common\CombineVectorsKernel", "CombineTwoVectorsKernel");
     m_XORKernel.SetupExecution(inputSize);
 }
Exemplo n.º 53
0
 public MyLabelDeltaProvider(MyAbstractFeedForwardNode network, int nGPU)
     : base(network)
 {
     // Element-wise combine kernel plus the energy kernel from the feed-forward net package.
     var factory = MyKernelFactory.Instance;
     m_combineKernel = factory.Kernel(nGPU, @"Common\CombineVectorsKernel", "CombineTwoVectorsKernel");
     m_energyKernel = factory.Kernel(nGPU, @"XmlFeedForwardNet\EnergyKernel");
 }
Exemplo n.º 54
0
            // Sets up the genetic task
            public override void Init(int nGPU)
            {
                currentGen = 0;
                m_weights = 0;

                // Load the relevant kernels
                m_coeffGenKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Genetic\CosyneGenetics", "generateCoefficients");
                m_geneticKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Genetic\CosyneGenetics", "grow");
                m_extractKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Genetic\CosyneGenetics", "extractCoeffs");
                m_cosineGenKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Genetic\CosyneGenetics", "createCosineMatrix");
                m_implantKernel = MyKernelFactory.Instance.Kernel(nGPU, @"Genetic\CosyneGenetics", "implantCoeffs");

                // Init the random generator
                m_rand = new Random();

                // Set up coefficient Generation
                m_coeffGenKernel.SetupExecution(Owner.PopulationSize);
                // Set up genetic recombination
                m_geneticKernel.SetupExecution(Owner.PopulationSize);

                // This finds the first nn group in the network. Possibility of getting a list of networks and evolving them all seperately?
                List<MyNode> ch = Owner.Owner.Network.Children;
                foreach (MyNode n in ch)
                {
                    if (n is MyNeuralNetworkGroup)
                    {
                        nn = n as MyNeuralNetworkGroup;
                        MyLog.INFO.WriteLine("Evolving the layers of node: " + nn.Name);
                        break;
                    }
                }
                // Guard: the search above must have located a NeuralNetworkGroup to evolve.
                if (nn == null)
                {
                    throw new NullReferenceException("There is no top level NeuralNetworkGroup.");
                }

                // Construct the layerlist which is to be read from and written to
                constructLayerList(nn);

                // This is how big the weight matrix will be:
                // side of the smallest square matrix able to hold all m_weights values.
                arr_size = (int)Math.Ceiling(Math.Sqrt(m_weights));

                // Get the relevant execution plan
                m_executionPlan = Owner.Owner.SimulationHandler.Simulation.ExecutionPlan[0];

                #region MemoryBlocks
                // Initialise the population: one arr_size x arr_size weight buffer per
                // individual, plus a matching output buffer of the same size.
                population = new List<MyMemoryBlock<float>>();
                outputPop = new List<MyMemoryBlock<float>>();
                for (int i = 0; i < Owner.PopulationSize; i++)
                {
                    population.Add(new MyMemoryBlock<float>());
                    population[i].Owner = Owner;
                    population[i].Count = arr_size * arr_size;
                    population[i].AllocateMemory();

                    outputPop.Add(new MyMemoryBlock<float>());
                    outputPop[i].Owner = Owner;
                    outputPop[i].Count = arr_size * arr_size;
                    outputPop[i].AllocateMemory();
                }

                // Allocate space to manipulate weight matrices on the device
                // (device-only: one full matrix per population member).
                cudaMatrices = new MyMemoryBlock<float>();
                cudaMatrices.Owner = Owner;
                cudaMatrices.Count = arr_size * arr_size * Owner.PopulationSize;
                cudaMatrices.AllocateDevice();

                // Allocate a memory block for the Cosine matrix
                multiplier = new MyMemoryBlock<float>();
                multiplier.Owner = Owner;
                multiplier.Count = arr_size * arr_size;
                multiplier.AllocateDevice();

                // Fill the cosine Matrices
                m_cosineGenKernel.SetupExecution(arr_size);
                m_cosineGenKernel.Run(multiplier, arr_size);

                // Allocate space needed for chromosomes.
                // NOTE(review): this uses CoefficientsSaved BEFORE the clamping/override
                // logic further down; that is safe only because the later adjustments
                // never raise CoefficientsSaved above the value used here — verify.
                chromosomePop = new MyMemoryBlock<float>();
                chromosomePop.Owner = Owner;
                if (DirectEvolution)
                    chromosomePop.Count = m_weights * Owner.PopulationSize;
                else
                    chromosomePop.Count = CoefficientsSaved * Owner.PopulationSize;
                chromosomePop.AllocateMemory();

                // Allocate some space for noise to seed the cuda_rand generator
                noise = new MyMemoryBlock<float>();
                noise.Owner = Owner;
                noise.Count = Owner.PopulationSize;
                noise.AllocateMemory();

                // Write some noise to the initial array (one host-side seed value per
                // individual), then push it to the device.
                for (int i = 0; i < Owner.PopulationSize; i++)
                {
                    noise.Host[i] = (float)m_rand.NextDouble() * 100000 + (float)m_rand.NextDouble() * 40;
                }
                noise.SafeCopyToDevice();

                // Allocate space for the fitnesses (one score per individual)
                fitnesses = new MyMemoryBlock<float>();
                fitnesses.Owner = Owner;
                fitnesses.Count = Owner.PopulationSize;
                fitnesses.AllocateMemory();

                // Allocate some temporary storage (device-only scratch buffers)
                tempMB = new MyMemoryBlock<float>();
                tempPop = new MyMemoryBlock<float>();
                tempMB.Owner = Owner;
                tempMB.Count = CoefficientsSaved;
                tempMB.AllocateDevice();

                tempPop.Owner = Owner;
                tempPop.Count = arr_size * arr_size;
                tempPop.AllocateDevice();

                marking = new MyMemoryBlock<int>();
                marking.Owner = Owner;
                marking.Count = CoefficientsSaved * Owner.PopulationSize;
                marking.AllocateDevice();
                #endregion

                // Check saved Coeffs size: cannot save more coefficients than weights.
                if (CoefficientsSaved > m_weights)
                {
                    MyLog.WARNING.Write("Saving more Coefficients than exist in the weight matrix. Setting to max permissable value\n");
                    CoefficientsSaved = m_weights;
                }

                // One coefficient per weight is equivalent to evolving weights directly.
                if (CoefficientsSaved == m_weights)
                {
                    MyLog.INFO.Write("Saving a coefficient for every weight. Evolving weights directly\n");
                    DirectEvolution = true;
                }

                if (DirectEvolution)
                    CoefficientsSaved = m_weights;

                // Generate the rest of the population; the last kernel argument scales
                // the generated values (presumably WeightMagnitude for raw weights,
                // Alpha for coefficients — confirm against the kernel source).
                if (DirectEvolution)
                    m_coeffGenKernel.Run(chromosomePop, CoefficientsSaved, noise, Owner.PopulationSize, WeightMagnitude);
                else
                    m_coeffGenKernel.Run(chromosomePop, CoefficientsSaved, noise, Owner.PopulationSize, Alpha);

                //Disable Backprop tasks in Network
                // (only touches the flag when learning is still enabled, so the warning
                // is logged at most once)
                if (nn.GetActiveBackpropTask() != null)
                {
                    if (!nn.GetActiveBackpropTask().DisableLearning)
                    {
                        MyLog.WARNING.WriteLine("Disabling backprop learning for Neural Network");
                        nn.GetActiveBackpropTask().DisableLearning = true;
                    }
                }
            }
Exemplo n.º 55
0
        public override void Initialize(Int32 nGPU)
        {
            // Resolve both pooling-layer CUDA kernels for the requested GPU.
            var factory = MyKernelFactory.Instance;
            const string kernelFile = @"XmlFeedForwardNet\PoolLayerKernel";
            m_backwardKernel = factory.Kernel(nGPU, kernelFile, "BackwardKernel");
            m_forwardKernel = factory.Kernel(nGPU, kernelFile, "ForwardKernel");

            base.Initialize(nGPU);

            // Forward: cache the device pointer of the chosen input at its offset.
            ChosenInputPtr = ChosenInput.GetDevicePtr(m_network, ChosenInputOffset);
        }