        // Kernel initialization
        public override void Init(int nGPU)
        {
            layers = new List<MyAbstractLayer>();

            if (Owner.SortedChildren == null)
                Owner.InitGroup.Execute(); // TODO - horrible hack: force group init so SortedChildren is populated

            foreach (var child in Owner.SortedChildren)
            {
                if (child is MyRBMInputLayer || child is MyRBMLayer)
                {
                    layers.Add((MyAbstractLayer)child);
                    if (child is MyRBMInputLayer)
                        ((MyRBMInputLayer)child).Init(nGPU);
                    else
                        ((MyRBMLayer)child).Init(nGPU);
                }
            }

            InputLayer = (MyRBMInputLayer)layers[0];

            step = 0;
        }
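
        // The Execute() task below runs one step of k-step Contrastive Divergence
        // (CD-k). As a rough sketch of the standard update rule (the actual
        // arithmetic lives inside the RBMUpdate* kernels, so details may differ):
        //
        //   deltaW[i,j] = LearningRate * (<v_i*h_j>_data - <v_i*h_j>_recon)
        //                 + Momentum * previousDeltaW[i,j]
        //                 - WeightDecay * W[i,j]
        //
        // where <.>_data is the positive statistic captured by RBMSamplePositive()
        // and <.>_recon is the negative statistic after k Gibbs steps.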
        // Task execution
        public override void Execute()
        {
            // sampling between input and hidden layer
            if (CurrentLayerIndex <= 0)
            {
                InputLayer.RBMInputForwardAndStore();

                MyRBMInputLayer visible = InputLayer;
                hidden = (MyRBMLayer)layers[CurrentLayerIndex + 1];

                if (hidden.Dropout > 0)
                {
                    hidden.CreateDropoutMask();
                }
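
                // NOTE (assumption): the dropout mask is generated once per training
                // step, so the same subset of hidden units stays disabled for every
                // forward/backward pass of this CD-k update.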

                // forward to the hidden layer and store its activations for the bias updates
                hidden.RBMForwardAndStore(SigmoidSteepness);

                // sample positive weight data
                hidden.RBMSamplePositive();

                if (RandomHidden)
                {
                    hidden.RBMRandomActivation();
                }
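
                // (RBMRandomActivation presumably binarizes the layer stochastically,
                // turning each unit on with its activation probability -- the usual
                // binary hidden states of an RBM)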

                // do k-step Contrastive Divergence (go back and forth between visible and hidden)
                for (int i = 0; i < CD_k - 1; i++)
                {
                    // back
                    hidden.RBMBackward(visible.Bias, SigmoidSteepness);

                    if (RandomVisible)
                    {
                        visible.RBMRandomActivation();
                    }

                    // and forth
                    hidden.RBMForward(SigmoidSteepness, true);

                    // randomly activate the just updated hidden neurons if needed
                    if (RandomHidden)
                    {
                        hidden.RBMRandomActivation();
                    }
                }
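
                // the loop above performs k-1 Gibbs steps; the k-th and final step follows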

                // in the last (k-th) step of CD-k, both layers keep their real-valued probabilities; no random binary activation is applied
                hidden.RBMBackward(visible.Bias, SigmoidSteepness);
                hidden.RBMForward(SigmoidSteepness, true);

                if (hidden.StoreEnergy)
                {
                    hidden.Energy.Fill(0);
                }

                // update biases of both layers based on the sampled data stored by RBMForwardAndStore()
                visible.RBMUpdateBiases(LearningRate, Momentum, WeightDecay);
                hidden.RBMUpdateBiases(LearningRate, Momentum, WeightDecay);

                // sample negative weight data AND adapt weights at the same time
                hidden.RBMUpdateWeights(LearningRate, Momentum, WeightDecay);
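
                // one full CD-k parameter update for the (input, first hidden) pair is now done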
            }

            // sampling between hidden layers

            else if (layers.Count > 2) // i.e. there is more than one hidden layer
            {
                InputLayer.RBMInputForward();
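
                // Greedy layer-wise pretraining: layers below CurrentLayerIndex are
                // assumed to be trained already, so the input is just propagated up
                // through them to provide data for the current visible/hidden pair.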

                // propagate through the hidden layers up to (but excluding) the one acting as visible
                for (int i = 1; i < CurrentLayerIndex; i++)
                {
                    ((MyRBMLayer)layers[i]).RBMForward(SigmoidSteepness, false);
                    if (RandomPrevious)
                    {
                        ((MyRBMLayer)layers[i]).RBMRandomActivation();
                    }
                }

                MyRBMLayer visible = (MyRBMLayer)layers[CurrentLayerIndex];
                hidden = (MyRBMLayer)layers[CurrentLayerIndex + 1];

                if (hidden.Dropout > 0)
                {
                    hidden.CreateDropoutMask();
                }

                // forward to visible and hidden layers and store for sampling biases
                visible.RBMForwardAndStore(SigmoidSteepness);
                hidden.RBMForwardAndStore(SigmoidSteepness);

                // sample positive weight data
                hidden.RBMSamplePositive();

                if (RandomHidden)
                {
                    hidden.RBMRandomActivation();
                }

                // do k-step Contrastive Divergence (go back and forth between visible and hidden)
                for (int i = 0; i < CD_k - 1; i++)
                {
                    // back
                    hidden.RBMBackward(visible.Bias, SigmoidSteepness);

                    if (RandomVisible)
                    {
                        visible.RBMRandomActivation();
                    }

                    // and forth
                    hidden.RBMForward(SigmoidSteepness, true);

                    // randomly activate the just updated hidden neurons if needed
                    if (RandomHidden)
                    {
                        hidden.RBMRandomActivation();
                    }
                }

                // in the last (k-th) step of CD-k, both layers keep their real-valued probabilities; no random binary activation is applied
                hidden.RBMBackward(visible.Bias, SigmoidSteepness);
                hidden.RBMForward(SigmoidSteepness, true);

                if (hidden.StoreEnergy)
                {
                    hidden.Energy.Fill(0);
                }

                // update biases of both layers based on the sampled data stored by RBMForwardAndStore()
                visible.RBMUpdateBiases(LearningRate, Momentum, WeightDecay);
                hidden.RBMUpdateBiases(LearningRate, Momentum, WeightDecay);

                // sample negative weight data AND adapt weights at the same time
                hidden.RBMUpdateWeights(LearningRate, Momentum, WeightDecay);
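
                // this branch mirrors the input<->hidden case above; the only difference
                // is that the visible layer is itself a (previously trained) hidden layer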
            }
            else
            {
                MyLog.ERROR.WriteLine("Invalid CurrentLayerIndex parameter: there are " + layers.Count + " layers in total, cannot sample from layer " + CurrentLayerIndex + ".");
            }

            MyLog.DEBUG.WriteLine("RBM training between layers [" + CurrentLayerIndex + ";" + (CurrentLayerIndex + 1) + "], step " + step + ".");
            ++step;
        }