//get the summed error for this map from all connected next-layer maps
        public void get_map_error_from_convolution()
        {// delta = f'(u_l) * sum over next maps of (error folded with transposed W)
            this.error = new float[outputwidth, outputheight];
            if (conv_maps_next_layer.Count > 0)
            {
                float[,] summfold = new float[outputwidth, outputheight];
                List<float[,]> part_folds = new List<float[,]>();
                ConvolutionFeatureMap cur_nl_fm;
                //1) sum the folds of each next-layer map's error with its transposed kernel
                for (int k = 0; k < conv_maps_next_layer.Count; k++)
                {
                    cur_nl_fm = conv_maps_next_layer[k];
                    part_folds.Add(ConvFuncs.fold_with_transponed_kernel(cur_nl_fm.error, cur_nl_fm.weights,
                                                                         cur_nl_fm.outputwidth, cur_nl_fm.outputheight, cur_nl_fm.w, cur_nl_fm.h));

                    for (int j = 0; j < outputheight; j++)
                    {
                        for (int i = 0; i < outputwidth; i++)
                        {
                            summfold[i, j] += part_folds[k][i, j];
                        }
                    }
                }

                for (int j = 0; j < outputheight; j++)
                {
                    for (int i = 0; i < outputwidth; i++)
                    {
                        error[i, j] = ActFuncs.f_act_linear_deriv(non_activated_stage[i, j]) * summfold[i, j];
                        b          += error[i, j]; //accumulate the summed error into the bias
                    }
                }
            }
        }
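
A minimal sketch of ConvFuncs.fold_with_transponed_kernel, which is not part of this listing. Judging from the call above, it spreads a mapW x mapH error map through a kW x kH kernel into a (mapW + kW - 1) x (mapH + kH - 1) result, i.e. a full zero-padded convolution; the signature and body below are assumptions, not the original implementation.

 //sketch only: full zero-padded convolution with the flipped kernel
 public static float[,] fold_with_transponed_kernel(float[,] map, float[,] kernel,
                                                    int mapW, int mapH, int kW, int kH)
 {
     var result = new float[mapW + kW - 1, mapH + kH - 1];
     for (int y = 0; y < mapH + kH - 1; y++)
         for (int x = 0; x < mapW + kW - 1; x++)
             for (int j = 0; j < kH; j++)
                 for (int i = 0; i < kW; i++)
                 {
                     int mx = x - i, my = y - j; //convolution (flipped-kernel) indexing
                     if (mx >= 0 && mx < mapW && my >= 0 && my < mapH)
                         result[x, y] += map[mx, my] * kernel[i, j];
                 }
     return result;
 }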
 public void get_map_error_from_subsampling(float[,] sigma_next_layer)
 {// delta = upsample(sigma_next) * f'(u_l); no kernel on a subsampling connection
     float[,] upsampled_next = ConvFuncs.upsample(sigma_next_layer, outputwidth, outputheight);
     for (int j = 0; j < outputheight; j++)
     {
         for (int i = 0; i < outputwidth; i++)
         {
             error[i, j] = upsampled_next[i, j] * ActFuncs.f_act_sigma_deriv(non_activated_stage[i, j]);
         }
     }
 }
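
ConvFuncs.upsample is likewise external to this listing. A plausible sketch, assuming nearest-neighbor replication with target dimensions that are integer multiples of the source (the inverse of the pooling step); if the forward pass averaged rather than copied, each replicated value would additionally be scaled by 1/(sx*sy):

 //sketch only: nearest-neighbor upsampling to dstW x dstH
 public static float[,] upsample(float[,] src, int dstW, int dstH)
 {
     int sx = dstW / src.GetLength(0); //integer scale factors
     int sy = dstH / src.GetLength(1);
     var dst = new float[dstW, dstH];
     for (int y = 0; y < dstH; y++)
         for (int x = 0; x < dstW; x++)
             dst[x, y] = src[x / sx, y / sy]; //replicate each source cell into an sx*sy block
     return dst;
 }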
 void convolution(float[,] input)
 {
     float[,] f = ConvFuncs.fold(input, weights, outputwidth, outputheight, w, h);
     for (int j = 0; j < outputheight; j++)
     {
         for (int i = 0; i < outputwidth; i++)
         {
              non_activated_stage[i, j] = f[i, j] + b;
              output[i, j] = ActFuncs.f_act_sigma(non_activated_stage[i, j]); //reuse the stored pre-activation
         }
     }
 }
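
For completeness, a sketch of ConvFuncs.fold consistent with this call: a "valid" cross-correlation, assuming the input is at least (outW + kW - 1) x (outH + kH - 1). Again an assumption, since ConvFuncs is not in the listing:

 //sketch only: valid cross-correlation of input with a kW x kH kernel
 public static float[,] fold(float[,] input, float[,] kernel, int outW, int outH, int kW, int kH)
 {
     var result = new float[outW, outH];
     for (int y = 0; y < outH; y++)
         for (int x = 0; x < outW; x++)
             for (int j = 0; j < kH; j++)
                 for (int i = 0; i < kW; i++)
                     result[x, y] += input[x + i, y + j] * kernel[i, j];
     return result;
 }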
 public void get_output()
 {
     float[,] temp = ConvFuncs.upsample(input, outputwidth, outputheight);
     for (int j = 0; j < outputheight; j++)
     {
         for (int i = 0; i < outputwidth; i++)
         {
             non_activated_stage[i, j] = temp[i, j];
             output[i, j] = ActFuncs.f_act_linear(non_activated_stage[i, j]);
             deriv_non_activated_stage[i, j] = ActFuncs.f_act_linear_deriv(non_activated_stage[i, j]);
         }
     }
 }
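
ActFuncs is also not shown. A sketch matching the names used here, assuming f_act_sigma is the logistic sigmoid (with its derivative taken at the pre-activation value) and f_act_linear is the identity:

 using System;

 public static class ActFuncs
 {
     public static float f_act_sigma(float x) => 1f / (1f + (float)Math.Exp(-x));
     public static float f_act_sigma_deriv(float u)
     {
         float s = f_act_sigma(u); //sigma'(u) = sigma(u) * (1 - sigma(u))
         return s * (1f - s);
     }
     public static float f_act_linear(float x) => x;        //identity
     public static float f_act_linear_deriv(float x) => 1f; //constant slope
 }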
 //get error from the single connected next-layer convolutional map
 public void get_map_error_from_convolution(ConvolutionFeatureMap next_l_fm)
 {// delta = f'(u_l) * back_fold(delta_next, W_next)
     this.error = new float[outputwidth, outputheight];
     //1) back-fold (deconvolve) the next layer's error through its kernel
     float[,] summfold = ConvFuncs.back_fold(next_l_fm.error, next_l_fm.weights, next_l_fm.outputwidth, next_l_fm.outputheight, next_l_fm.w, next_l_fm.h);
     for (int j = 0; j < outputheight; j++)
     {
         for (int i = 0; i < outputwidth; i++)
         {
             error[i, j] = ActFuncs.f_act_linear_deriv(non_activated_stage[i, j]) * summfold[i, j];
              b          += error[i, j]; //accumulate the summed error into the bias
         }
     }
 }
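
ConvFuncs.back_fold is not in the listing either. Given the "deconvolution" comment, a sketch that simply reuses the full fold above; whether back_fold flips the kernel relative to fold_with_transponed_kernel cannot be recovered from this listing, so treat this as one possible reading:

 //sketch only: back fold assumed identical to the full zero-padded fold above
 public static float[,] back_fold(float[,] error, float[,] kernel,
                                  int errW, int errH, int kW, int kH)
 {
     return fold_with_transponed_kernel(error, kernel, errW, errH, kW, kH);
 }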
 public void correct_weights()
 {
     foreach (var input in inputs)
     {
          //kernel gradient: fold the layer input with this map's error
          float[,] folderr = ConvFuncs.fold_with_transponed_kernel(input, error, w, h, outputwidth, outputheight);
         for (int j = 0; j < h; j++)
         {
             for (int i = 0; i < w; i++)
             {
                  weights[i, j] += folderr[i, j]; //folderr is the w x h kernel gradient
             }
         }
     }
 }
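
correct_weights applies the raw kernel gradient with no learning rate. A hypothetical variant with an explicit rate eta (eta is not part of the original listing); the minus sign assumes error stores dLoss/du, so it would flip under a target-minus-output convention:

 //hypothetical: gradient-descent step scaled by a learning rate
 public void correct_weights(float eta)
 {
     foreach (var input in inputs)
     {
         float[,] folderr = ConvFuncs.fold_with_transponed_kernel(input, error, w, h, outputwidth, outputheight);
         for (int j = 0; j < h; j++)
             for (int i = 0; i < w; i++)
                 weights[i, j] -= eta * folderr[i, j];
     }
 }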