// Builds a deconvolution (transposed-convolution) feature map from an
// existing convolution feature map, sharing that map's kernel.
// Output dimensions use full-boundary mode: conv output size + kernel size - 1.
public DeconvolutionFeatureMap(float[,] input, ConvolutionFeatureMap base_conv_fm)
{
    this.w = base_conv_fm.w;
    this.h = base_conv_fm.h;
    //boundaries effect: full-mode output grows by (kernel size - 1)
    this.outputwidth = base_conv_fm.w + base_conv_fm.outputwidth - 1;
    this.outputheight = base_conv_fm.h + base_conv_fm.outputheight - 1;
    this.input = input;
    // Share (alias, not copy) the kernel with the base convolution map, so
    // weight updates there are visible here. FIX: removed the dead
    // `new float[w, h]` allocation that was immediately overwritten.
    this.weights = base_conv_fm.weights;
    this.output = new float[outputwidth, outputheight];
    this.non_activated_stage = new float[outputwidth, outputheight];
    this.deriv_non_activated_stage = new float[outputwidth, outputheight];
}
//back-propagate error from the single connected convolutional map of the next layer
public void get_map_error_from_convolution(ConvolutionFeatureMap next_l_fm)
{
    //W transp*sigma_prev*f_derived(ul)
    this.error = new float[outputwidth, outputheight];
    //1) back-fold (deconvolve) the next layer's error through its weights
    float[,] folded = ConvFuncs.back_fold(
        next_l_fm.error, next_l_fm.weights,
        next_l_fm.outputwidth, next_l_fm.outputheight,
        next_l_fm.w, next_l_fm.h);
    //2) gate each cell by the activation derivative and accumulate the bias gradient
    //   (same row-major traversal as before so the float sum into b is unchanged)
    for (int row = 0; row < outputheight; row++)
    {
        for (int col = 0; col < outputwidth; col++)
        {
            float cellError = ActFuncs.f_act_linear_deriv(non_activated_stage[col, row]) * folded[col, row];
            error[col, row] = cellError;
            b += cellError;
        }
    }
}
// Stacks a convolution layer on top of an existing convolution layer.
// Each new feature map is wired one-to-one to the corresponding map of the
// previous layer. Map size shrinks by (kernel - 1) in "valid" boundary mode.
public ConvolutionLayer(ConvolutionLayer prev_convolution, int k_w, int k_h)
{
    //default value
    this.next_l_type = "convolution";
    this.feature_maps_number = prev_convolution.feature_maps_number;
    this.bj = new float[this.feature_maps_number];
    //biases start at a small constant (0.01)
    for (int j = 0; j < this.feature_maps_number; j++)
    {
        bj[j] = 0.01f;
    }
    this.feature_maps = new List<ConvolutionFeatureMap>();
    this.errors = new List<float[,]>();
    this.kwidth = k_w;
    this.kheight = k_h;
    //if filter<map =>"valid" boundary mode, else - full-mode
    this.map_width = prev_convolution.map_width;
    this.map_height = prev_convolution.map_height;
    // FIX: original wrote `this.map_width = -(kwidth - 1);`, discarding the
    // previous layer's width and yielding a negative size; shrink by
    // (kernel - 1) exactly as the height branch below does.
    if (this.map_width > kwidth)
    {
        this.map_width -= (kwidth - 1);
    }
    if (this.map_height > kheight)
    {
        this.map_height -= (kheight - 1);
    }
    //create kernels with randomly initialized weights
    for (int k = 0; k < feature_maps_number; k++)
    {
        ConvolutionFeatureMap fm = new ConvolutionFeatureMap(kwidth, kheight, map_width, map_height);
        MatrixOperations.init_matrix_random(fm.weights, kwidth, kheight);
        feature_maps.Add(fm);
        errors.Add(new float[map_width, map_height]);
    }
    //link with previous layer: one-to-one connection (map i feeds map i)
    for (int i = 0; i < this.feature_maps_number; i++)
    {
        //link prev layer's output as this map's input
        this.feature_maps[i].add_input_full_connection(prev_convolution.feature_maps[i].output);
    }
}
// Creates a fresh convolution layer of f_maps_number feature maps over an
// input of map_w x map_h with a k_w x k_h kernel. Output size follows
// "valid" mode (map - kernel + 1) when the kernel fits inside the map,
// otherwise full mode (map + kernel - 1).
public ConvolutionLayer(int f_maps_number, int k_w, int k_h, int map_w, int map_h)
{
    //default value
    this.next_l_type = "subsampling";
    this.feature_maps_number = f_maps_number;
    this.bj = new float[f_maps_number];
    //biases start at a small constant (0.01)
    for (int j = 0; j < f_maps_number; j++)
    {
        bj[j] = 0.01f;
    }
    this.feature_maps = new List<ConvolutionFeatureMap>();
    this.errors = new List<float[,]>();
    this.kwidth = k_w;
    this.kheight = k_h;
    //if filter<map =>"valid" boundary mode, else - full-mode
    if (map_w > kwidth)
    {
        this.map_width = map_w - kwidth + 1;
    }
    else
    {
        // FIX: the full-mode branch promised by the comment above was
        // missing, leaving map_width at 0 and producing empty maps.
        this.map_width = map_w + kwidth - 1;
    }
    if (map_h > kheight)
    {
        this.map_height = map_h - kheight + 1;
    }
    else
    {
        // FIX: same missing full-mode branch for the height.
        this.map_height = map_h + kheight - 1;
    }
    //create kernels with randomly initialized weights
    for (int k = 0; k < feature_maps_number; k++)
    {
        ConvolutionFeatureMap fm = new ConvolutionFeatureMap(kwidth, kheight, map_width, map_height);
        MatrixOperations.init_matrix_random(fm.weights, kwidth, kheight);
        feature_maps.Add(fm);
        errors.Add(new float[map_width, map_height]);
    }
}