void create_network() {
  // input variables: the source image, the transformation parameters, and the target (transformed) image
  imageVariable = Util.inputVariable(input_shape, "image");
  transformationVariable = Util.inputVariable(extra_input_shape, "transformation");
  transformedImageVariable = Util.inputVariable(input_shape, "transformed_image");

  network = create_transforming_autoencoder(num_capsules, input_shape, extra_input_shape, recognizer_dim, generator_dim);
  Logging.log_number_of_parameters(network, show_filters: false);

  // loss: mean squared error between the network output and the target image,
  // normalized by the number of output elements
  var mse_normalizing_factor = C.Constant.Scalar(C.DataType.Float, 1.0 / network.Output.Shape.TotalSize, computeDevice);
  var squared_error = CC.SquaredError(network.Output, transformedImageVariable);
  var mse = CC.ElementTimes(squared_error, mse_normalizing_factor);
  loss_function = mse;
  eval_function = mse;

  learner = CC.AdamLearner(
    new C.ParameterVector(network.Parameters().ToArray()),
    new C.TrainingParameterScheduleDouble(learning_rate * batch_size, (uint)batch_size),
    new C.TrainingParameterScheduleDouble(0.9),
    true,
    new C.TrainingParameterScheduleDouble(0.99));
  trainer = CC.CreateTrainer(network, loss_function, new C.LearnerVector(new C.Learner[] { learner }));
  evaluator = CC.CreateEvaluator(eval_function);
}
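// A minimal sketch of how the trainer built above might be fed one minibatch. The method
// and parameter names (train_minibatch, imageBatch, transformationBatch, targetBatch) are
// illustrative assumptions, not part of the original code; it assumes each batch is a flat
// float array laid out sample-by-sample, and uses the stock CNTK C# calls Value.CreateBatch
// and Trainer.TrainMinibatch.
double train_minibatch(float[] imageBatch, float[] transformationBatch, float[] targetBatch) {
  var feed = new System.Collections.Generic.Dictionary<C.Variable, C.Value>() {
    { imageVariable, C.Value.CreateBatch<float>(imageVariable.Shape, imageBatch, computeDevice) },
    { transformationVariable, C.Value.CreateBatch<float>(transformationVariable.Shape, transformationBatch, computeDevice) },
    { transformedImageVariable, C.Value.CreateBatch<float>(transformedImageVariable.Shape, targetBatch, computeDevice) }
  };
  trainer.TrainMinibatch(feed, false, computeDevice);   // false: this minibatch does not end a sweep
  return trainer.PreviousMinibatchLossAverage();        // average loss over the minibatch just processed
}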
void create_network() {
  imageVariable = Util.inputVariable(input_shape, "image");

  // encoder: initial 9x9 convolution, primary capsules, and the 10 digit capsules
  var conv1 = Layers.Convolution2D(
    imageVariable, 256, new int[] { 9, 9 }, computeDevice,
    use_padding: false, activation: CC.ReLU, name: "conv1");
  var primarycaps = create_primary_cap(
    conv1, dim_capsule: 8, n_channels: 32,
    kernel_size: new int[] { 9, 9 }, strides: new int[] { 2, 2 }, pad: false);
  var digitcaps = create_capsule_layer(
    primarycaps, num_capsule: 10, dim_capsule: 16, routings: routings, name: "digitcaps");
  var out_caps = get_length_and_remove_last_dimension(digitcaps, name: "capsnet");

  // decoder: mask the digit capsules (by the true label during training, by the
  // longest capsule during evaluation) and reconstruct the input image
  categoricalLabel = Util.inputVariable(new int[] { 10 }, "label");
  var masked_by_y = get_mask_and_infer_from_last_dimension(digitcaps, CC.Combine(new C.VariableVector() { categoricalLabel }));
  var masked = get_mask_and_infer_from_last_dimension(digitcaps, null);
  var decoder = create_decoder(masked.Output.Shape.Dimensions.ToArray());
  var decoder_output_training = Model.invoke_model(decoder, new C.Variable[] { masked_by_y });
  var decoder_output_evaluation = Model.invoke_model(decoder, new C.Variable[] { masked });

  network = CC.Combine(new C.VariableVector() { out_caps, decoder_output_training }, "overall_training_network");
  Logging.log_number_of_parameters(network);

  // first component of the loss: the margin loss on the digit-capsule lengths
  var y_true = categoricalLabel;
  var y_pred = out_caps;
  var digit_loss = CC.Plus(
    CC.ElementTimes(y_true, CC.Square(CC.ElementMax(DC(0), CC.Minus(DC(0.9), y_pred), ""))),
    CC.ElementTimes(DC(0.5), CC.ElementTimes(CC.Minus(DC(1), y_true), CC.Square(CC.ElementMax(DC(0), CC.Minus(y_pred, DC(0.1)), "")))));
  digit_loss = CC.ReduceSum(digit_loss, C.Axis.AllStaticAxes());

  // second component of the loss: the decoder's per-pixel reconstruction error
  var num_pixels_at_output = Util.np_prod(decoder_output_training.Output.Shape.Dimensions.ToArray());
  var squared_error = CC.SquaredError(decoder_output_training, imageVariable);
  var image_mse = CC.ElementDivide(squared_error, DC(num_pixels_at_output));

  loss_function = CC.Plus(digit_loss, CC.ElementTimes(DC(0.35), image_mse));
  eval_function = CC.ClassificationError(y_pred, y_true);

  learner = CC.AdamLearner(
    new C.ParameterVector(network.Parameters().ToArray()),
    new C.TrainingParameterScheduleDouble(0.001 * batch_size, (uint)batch_size),
    new C.TrainingParameterScheduleDouble(0.9),
    true,
    new C.TrainingParameterScheduleDouble(0.99));
  trainer = CC.CreateTrainer(network, loss_function, eval_function, new C.LearnerVector(new C.Learner[] { learner }));
  evaluator = CC.CreateEvaluator(eval_function);
}
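// The CapsNet code above relies on a small helper DC(...) for scalar constants that is not
// shown here. A plausible sketch, assuming it simply wraps C.Constant.Scalar with the float
// data type and the current compute device (mirroring the explicit mse_normalizing_factor
// constant built in the transforming-autoencoder network above):
C.Constant DC(double value) {
  return C.Constant.Scalar(C.DataType.Float, value, computeDevice);
}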