        public void Initialize()
        {
            GraphEnvironment.SetBaseUri("http://localhost:7474/");

            ModelBuilder.Add(new OrderConfiguration());
            ModelBuilder.Add(new OrderItemConfiguration());
            ModelBuilder.Add(new ProductConfiguration());
        }
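The configuration classes passed to Add are not shown on this page. As a minimal, self-contained sketch of the registration pattern (illustrative names only, not the library's actual ModelBuilder), a builder like this typically keys each configuration by its entity type:

using System;
using System.Collections.Generic;

// Illustrative sketch only -- not the library's real ModelBuilder.
public interface IEntityConfiguration
{
    Type EntityType { get; }
}

public class Order
{
    public int Id { get; set; }
}

public class OrderConfiguration : IEntityConfiguration
{
    public Type EntityType => typeof(Order);
}

public static class ModelBuilderSketch
{
    // Last registration for a given entity type wins.
    static readonly Dictionary<Type, IEntityConfiguration> configurations =
        new Dictionary<Type, IEntityConfiguration>();

    public static void Add(IEntityConfiguration configuration) =>
        configurations[configuration.EntityType] = configuration;
}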
Example 2
 public void Initialize()
 {
     GraphEnvironment.SetBaseUri("http://localhost:7474/");
     ModelBuilder.Add(new MyClassConfig());
     ModelBuilder.Add(new ManagerConfig());
     // Add the assembly to the builder by passing the assembly's file path.
     ModelBuilder.AddAssembly((new System.Uri(System.Reflection.Assembly.GetExecutingAssembly().CodeBase)).AbsolutePath);
 }
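AddAssembly presumably lets the builder discover configuration types by reflection instead of registering each one by hand. A self-contained sketch of that idea (hypothetical helper; the library's real scanning logic is not shown here):

using System;
using System.Linq;
using System.Reflection;

// Hypothetical reflection-based discovery in the spirit of AddAssembly above.
public static class ConfigurationScanner
{
    public static object[] DiscoverConfigurations(string assemblyPath)
    {
        Assembly assembly = Assembly.LoadFrom(assemblyPath);
        return assembly.GetTypes()
                       .Where(t => t.IsClass && !t.IsAbstract && t.Name.EndsWith("Config"))
                       .Select(t => Activator.CreateInstance(t))
                       .ToArray();
    }
}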
Example 3
        public ActionResult New(Applicant applicant)
        {
            if (!ModelState.IsValid)
            {
                return View();
            }

            ModelBuilder.Add(applicant);
            return RedirectToAction("List");
        }
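ModelState.IsValid is driven by the validation attributes on the bound Applicant model, which is not shown on this page. A minimal version that would make the action re-render the view on bad input might look like this (the property names are assumptions):

using System.ComponentModel.DataAnnotations;

// Hypothetical Applicant model; the real class is not shown on this page.
public class Applicant
{
    public int Id { get; set; }

    [Required]
    [StringLength(100)]
    public string Name { get; set; }

    [EmailAddress]
    public string Email { get; set; }
}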
Example 4
    public Model BuildMLAgentsModel
    (
        int hidden_features = 256,
        int hidden_layers   = 3,
        int in_features     = 2,
        int out_features    = 1,
        float omega_0       = 30.0f,
        string input_name   = "vector_observation",
        string output_name  = "continuous_actions")
    {
        var layerCount = weight.Count;

        // build model
        var net = new ModelBuilder();
        var ctx = new Stack<object>();

        var Omega0    = net.Const("omega_0", new Tensor(1, 1, new float[] { omega_0 }));
        var N__C2C__N = new[] { 3, 1, 2, 0 }; // transpose weights by swapping N and C channels

        object x = net.Input(input_name, 1, in_features);

        // forward
        for (int i = 0; i < layerCount; ++i)
        {
            var isLastLayer = i == layerCount - 1;
            var w_name = weight[i].name; var b_name = bias[i].name;
            var w = net.Const(w_name, parameters[w_name]);
            var b = net.Const(b_name, parameters[b_name]);

            ctx.Push(x);
            x = net.MatMul($"mm{i}", x, net.Transpose($"{w}.T", w, N__C2C__N));
            x = net.Add($"bias{i}", new[] { x, b });
            if (!isLastLayer)
            {
                ctx.Push(x);
                x = net.Mul($"sin_premul{i}_omega", new[] { x, Omega0 });
                x = net.Sin($"sin{i}", x);
            }
        }
        object output = x;

        net.Output(net.Identity(output_name, output));
        net.Output(net.Const("version_number", new Tensor(1, 1, new float[] { 2.0f })));
        net.Output(net.Const("continuous_action_output_shape", new Tensor(1, 1, new float[] { out_features })));
        net.Output(net.Const("memory_size", new Tensor(1, 1, new float[] { 0 })));

        return net.model;
    }
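Assuming the Unity Barracuda runtime (where ModelBuilder, Model and Tensor come from), the returned model can be executed with a worker. A minimal inference sketch, with the observation sized to match in_features:

using Unity.Barracuda;

// Minimal sketch of running the model built above (assumes Barracuda's IWorker API).
public static class SirenInference
{
    public static float[] Run(Model model, float[] observation)
    {
        using (var worker = WorkerFactory.CreateWorker(WorkerFactory.Type.Auto, model))
        using (var input = new Tensor(1, observation.Length, observation))
        {
            worker.Execute(input);
            // PeekOutput returns a tensor owned by the worker, so it is not disposed here.
            var output = worker.PeekOutput("continuous_actions");
            return output.ToReadOnlyArray();
        }
    }
}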
Example 5
        public static bool ConvertLSTM(Layer layer, ModelBuilder net, IOps ops)
        {
            // LSTM
            // TODO need to transpose before when dealing with batches?
            var transposedInput = net.Transpose($"Transpose_for_{layer.name}", layer.inputs[0], new[] { 3, 1, 2, 0 });

            layer.inputs[0] = transposedInput.name;

            //    - it = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
            //    - ft = f(Xt*Wf + Ht_1*Rf + Wbf + Rbf)
            //    - ct = g(Xt*Wc + Ht_1*Rc + Wbc + Rbc), c means j in our formula
            //    - Ct =   ft . Ct_1 + it . ct
            //    - ot = f(Xt*Wo + Ht_1*Ro + Wbo + Rbo)
            //    - Ht =   ot . h(Ct)

            var W = layer.DataSetToTensor(0);
            var R = layer.DataSetToTensor(1);
            var B = layer.DataSetToTensor(2);

            // gate order [iofj]

            var w_i = ops.StridedSlice(W, new[] { 0, 0, 0, 0 }, new[] { W.batch, 1, 1, W.channels / 4 }, new[] { 1, 1, 1, 1 });
            var w_o = ops.StridedSlice(W, new[] { 0, 0, 0, W.channels / 4 }, new[] { W.batch, 1, 1, 2 * W.channels / 4 }, new[] { 1, 1, 1, 1 });
            var w_f = ops.StridedSlice(W, new[] { 0, 0, 0, 2 * W.channels / 4 }, new[] { W.batch, 1, 1, 3 * W.channels / 4 }, new[] { 1, 1, 1, 1 });
            var w_j = ops.StridedSlice(W, new[] { 0, 0, 0, 3 * W.channels / 4 }, new[] { W.batch, 1, 1, 4 * W.channels / 4 }, new[] { 1, 1, 1, 1 });

            var r_i = ops.StridedSlice(R, new[] { 0, 0, 0, 0 }, new[] { R.batch, 1, 1, R.channels / 4 }, new[] { 1, 1, 1, 1 });
            var r_o = ops.StridedSlice(R, new[] { 0, 0, 0, R.channels / 4 }, new[] { R.batch, 1, 1, 2 * R.channels / 4 }, new[] { 1, 1, 1, 1 });
            var r_f = ops.StridedSlice(R, new[] { 0, 0, 0, 2 * R.channels / 4 }, new[] { R.batch, 1, 1, 3 * R.channels / 4 }, new[] { 1, 1, 1, 1 });
            var r_j = ops.StridedSlice(R, new[] { 0, 0, 0, 3 * R.channels / 4 }, new[] { R.batch, 1, 1, 4 * R.channels / 4 }, new[] { 1, 1, 1, 1 });

            var wb_i = ops.StridedSlice(B, new[] { 0, 0, 0, 0 }, new[] { 1, 1, 1, B.channels / 8 }, new[] { 1, 1, 1, 1 });
            var wb_o = ops.StridedSlice(B, new[] { 0, 0, 0, B.channels / 8 }, new[] { 1, 1, 1, 2 * B.channels / 8 }, new[] { 1, 1, 1, 1 });
            var wb_f = ops.StridedSlice(B, new[] { 0, 0, 0, 2 * B.channels / 8 }, new[] { 1, 1, 1, 3 * B.channels / 8 }, new[] { 1, 1, 1, 1 });
            var wb_j = ops.StridedSlice(B, new[] { 0, 0, 0, 3 * B.channels / 8 }, new[] { 1, 1, 1, 4 * B.channels / 8 }, new[] { 1, 1, 1, 1 });

            var rb_i = ops.StridedSlice(B, new[] { 0, 0, 0, 4 * B.channels / 8 }, new[] { 1, 1, 1, 5 * B.channels / 8 }, new[] { 1, 1, 1, 1 });
            var rb_o = ops.StridedSlice(B, new[] { 0, 0, 0, 5 * B.channels / 8 }, new[] { 1, 1, 1, 6 * B.channels / 8 }, new[] { 1, 1, 1, 1 });
            var rb_f = ops.StridedSlice(B, new[] { 0, 0, 0, 6 * B.channels / 8 }, new[] { 1, 1, 1, 7 * B.channels / 8 }, new[] { 1, 1, 1, 1 });
            var rb_j = ops.StridedSlice(B, new[] { 0, 0, 0, 7 * B.channels / 8 }, new[] { 1, 1, 1, 8 * B.channels / 8 }, new[] { 1, 1, 1, 1 });


            var memSize = r_i.flatHeight;

            var baseLSTMName = layer.outputs[3];
            var initial_h    = $"{baseLSTMName}_h";
            var initial_c    = $"{baseLSTMName}_c";

            var baseLSTMOutputName = layer.outputs[4];
            var output_h           = $"{baseLSTMOutputName}_h";
            var output_c           = $"{baseLSTMOutputName}_c";

            var i_mad_w = net.Dense($"{layer.name}_bc_i_mad_w", layer.inputs[0], w_i, wb_i);
            var i_mad_r = net.Dense($"{layer.name}_bc_i_mad_r", initial_h, r_i, rb_i);
            var i_mad   = net.Add($"{layer.name}_bc_i_mad", new[] { i_mad_w, i_mad_r });

            var j_mad_w = net.Dense($"{layer.name}_bc_j_mad_w", layer.inputs[0], w_j, wb_j);
            var j_mad_r = net.Dense($"{layer.name}_bc_j_mad_r", initial_h, r_j, rb_j);
            var j_mad   = net.Add($"{layer.name}_bc_j_mad", new[] { j_mad_w, j_mad_r });

            var f_mad_w = net.Dense($"{layer.name}_bc_f_mad_w", layer.inputs[0], w_f, wb_f);
            var f_mad_r = net.Dense($"{layer.name}_bc_f_mad_r", initial_h, r_f, rb_f);
            var f_mad   = net.Add($"{layer.name}_bc_f_mad", new[] { f_mad_w, f_mad_r });

            var o_mad_w = net.Dense($"{layer.name}_bc_o_mad_w", layer.inputs[0], w_o, wb_o);
            var o_mad_r = net.Dense($"{layer.name}_bc_o_mad_r", initial_h, r_o, rb_o);
            var o_mad   = net.Add($"{layer.name}_bc_o_mad", new[] { o_mad_w, o_mad_r });

            var i = net.Sigmoid($"{layer.name}_bc_i_sigmoid", i_mad);
            var j = net.Tanh($"{layer.name}_bc_j_tanh", j_mad);
            var f = net.Sigmoid($"{layer.name}_bc_f_sigmoid", f_mad);
            var o = net.Sigmoid($"{layer.name}_bc_o_sigmoid", o_mad);

            var state_c_mul  = net.Mul($"{layer.name}_bc_state_c_mul", new[] { initial_c, f.name });
            var i_j_mul      = net.Mul($"{layer.name}_bc_i_j_mul", new[] { i, j });
            var state_c      = net.Add(output_c, new[] { state_c_mul, i_j_mul });
            var state_c_tanh = net.Tanh($"{layer.name}_bc_state_c_tanh", state_c);
            var state_h      = net.Mul(output_h, new[] { o, state_c_tanh });

            net.Identity(layer.outputs[0], state_h);
            net.Identity(layer.outputs[1], state_h);
            net.Identity(layer.outputs[2], state_c);

            net.Memory(initial_c, state_c, new TensorShape(-1, 1, 1, memSize));
            net.Memory(initial_h, state_h, new TensorShape(-1, 1, 1, memSize));

            return false;
        }
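For reference, these are the gate equations the function assembles, written out cleanly (sigma is the sigmoid f above, tanh plays both g and h, and the ONNX c gate is named j in the code):

\begin{aligned}
i_t &= \sigma(x_t W_i + h_{t-1} R_i + b_{Wi} + b_{Ri}) \\
f_t &= \sigma(x_t W_f + h_{t-1} R_f + b_{Wf} + b_{Rf}) \\
o_t &= \sigma(x_t W_o + h_{t-1} R_o + b_{Wo} + b_{Ro}) \\
j_t &= \tanh(x_t W_j + h_{t-1} R_j + b_{Wj} + b_{Rj}) \\
c_t &= f_t \odot c_{t-1} + i_t \odot j_t \\
h_t &= o_t \odot \tanh(c_t)
\end{aligned}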
Example 6
 protected override void OnModelCreating(ModelBuilder modelBuilder)
 {
     modelBuilder.Add();
 }
Example 7
    public SirenModel(
        int batch,
        bool biasOutputAndTarget = false,
        int hidden_features      = 256,
        int hidden_layers        = 3,
        int in_features          = 2,
        int out_features         = 1,
        float omega_0            = 30.0f,
        string input_name        = "input",
        string output_name       = "output",
        bool useAdam             = true,
        bool trainableBias       = true)
    {
        // model tensors
        weight.Add(new Tensor(hidden_features, in_features));
        bias.Add(new Tensor(1, hidden_features));
        for (int i = 0; i < hidden_layers; ++i)
        {
            weight.Add(new Tensor(hidden_features, hidden_features));
            bias.Add(new Tensor(1, hidden_features));
        }
        weight.Add(new Tensor(out_features, hidden_features));
        bias.Add(new Tensor(1, out_features));
        var layerCount = weight.Count;

        // initialize tensors
        {
            var w = weight[0];
            var b = bias[0];
            InitUniform(ref w, -1.0f / in_features, 1.0f / in_features);
            InitUniform(ref b, -1.0f / in_features, 1.0f / in_features);
        }
        for (int i = 1; i < layerCount; ++i)
        {
            var w = weight[i]; var b = bias[i];
            InitUniform(ref w, -Mathf.Sqrt(6.0f / hidden_features) / omega_0, Mathf.Sqrt(6.0f / hidden_features) / omega_0);
            InitUniform(ref b, -Mathf.Sqrt(6.0f / hidden_features) / omega_0, Mathf.Sqrt(6.0f / hidden_features) / omega_0);
        }

        // build model
        var net = new ModelBuilder();
        var ctx = new Stack<object>();

        // setup tensors as trainable parameters, constants & inputs
        AddToTrainableParametersAndAssignIndexedName(net, weight, "w");
        AddToTrainableParametersAndAssignIndexedName(net, bias, "b");
        if (useAdam)
        {
            beta1t = new Tensor(new TensorShape(1, 1), new float[1] { 1 });
            beta2t = new Tensor(new TensorShape(1, 1), new float[1] { 1 });
            moment_w   = CreateMoments(weight);
            moment_b   = CreateMoments(bias);
            velocity_w = CreateMoments(weight);
            velocity_b = CreateMoments(bias);
            AddToTrainableParametersAndAssignIndexedName(net, beta1t, "beta1t");
            AddToTrainableParametersAndAssignIndexedName(net, beta2t, "beta2t");
            AddToTrainableParametersAndAssignIndexedName(net, moment_w, "moment_w");
            AddToTrainableParametersAndAssignIndexedName(net, velocity_w, "velocity_w");

            if (trainableBias)
            {
                AddToTrainableParametersAndAssignIndexedName(net, moment_b, "moment_b");
                AddToTrainableParametersAndAssignIndexedName(net, velocity_b, "velocity_b");
            }
        }

        var Omega0         = net.Const("omega_0", new Tensor(1, 1, new float[] { omega_0 }));
        var One            = net.Const("one", new Tensor(1, 1, new float[] { 1f }));
        var Double         = net.Const("two", new Tensor(1, 1, new float[] { 2f }));
        var Half           = net.Const("half", new Tensor(1, 1, new float[] { 0.5f }));
        var InvBatchDouble = net.Const("invBatchDouble", new Tensor(1, 1, new float[] { 2.0f / batch }));
        var Batch          = net.Const("batch", new Tensor(1, 1, new float[] { batch }));
        var N__C2C__N      = new[] { 3, 1, 2, 0 }; // transpose weights by swapping N and C channels

        object lr      = net.Input("lr", 1, 1);
        object beta1   = net.Input("beta1", 1, 1);
        object beta2   = net.Input("beta2", 1, 1);
        object epsilon = net.Input("epsilon", 1, 1);
        object x       = net.Input(input_name, batch, in_features);

        // forward
        for (int i = 0; i < layerCount; ++i)
        {
            var isLastLayer = i == layerCount - 1;
            var w = weight[i].name; var b = bias[i].name;

            ctx.Push(x);
            x = net.MatMul($"mm{i}", x, net.Transpose($"{w}.T", w, N__C2C__N));
            x = net.Add($"bias{i}", new[] { x, b });
            if (!isLastLayer)
            {
                ctx.Push(x);
                x = net.Mul($"sin_premul{i}_omega", new[] { x, Omega0 });
                x = net.Sin($"sin{i}", x);
            }
        }
        object output = x;
        object target = net.Input("target", batch, out_features);

        if (biasOutputAndTarget)
        {
            output = net.Mul("output_mul_0.5", new[] { output, Half });
            output = net.Add("output_add_0.5", new[] { output, Half });

            target = net.Mul("target_mul_2", new[] { target, Double });
            target = net.Sub("target_sub_1", new[] { target, One });
        }
        net.Output(net.Identity(output_name, output));

        // loss
        var error = net.Sub("error", new[] { x, target });

        net.Output(net.Reduce(Layer.Type.ReduceMean, "loss",
                              net.Mul("error_sq", new[] { error, error }), axis: 0));
        object grad_output = net.Mul("loss_grad", new[] { error, InvBatchDouble });

        // backward
        if (useAdam)
        {
            object b1t      = beta1t.name;
            object beta1tp1 = net.Mul($"new_{b1t}", new[] { beta1, b1t });
            net.Output(beta1tp1);
            //object nbeta1tp1 = net.Sub("nbeta1tp1", new[] { One, beta1tp1 });

            object b2t      = beta2t.name;
            object beta2tp1 = net.Mul($"new_{b2t}", new[] { beta2, b2t });
            net.Output(beta2tp1);
            //object nbeta2tp1 = net.Sub("nbeta2tp1", new[] { One, beta2tp1 });

            object nbeta1t = net.Sub("nbeta1t", new[] { One, beta1tp1 });
            object nbeta2t = net.Sub("nbeta2t", new[] { One, beta2tp1 });
            object nbeta1  = net.Sub("nbeta1", new[] { One, beta1 });
            object nbeta2  = net.Sub("nbeta2", new[] { One, beta2 });

            for (int i = layerCount - 1; i >= 0; --i)
            {
                var isLastLayer = i == layerCount - 1;
                var w = weight[i].name; var b = bias[i].name;

                if (!isLastLayer)
                {
                    var input = ctx.Pop();
                    input       = net.Mul($"sin_grad_premul{i}_omega", new[] { input, Omega0 });
                    input       = net.Cos($"sin_grad_cos{i}", input);
                    grad_output =
                        net.Mul($"sin_grad{i}", new[] { grad_output, input, Omega0 });
                }

                // weights
                object grad_w = net.MatMul($"grad_{w}",
                                           net.Transpose($"grad_output{i}.T", grad_output, N__C2C__N), ctx.Pop());
                object grad_w2 = net.Mul($"grad_{w}2", new[] { grad_w, grad_w });

                object mom_w = moment_w[i].name;
                mom_w  = net.Mul($"m_moment0_{w}", new[] { beta1, mom_w });
                grad_w = net.Mul($"m_grad0_{grad_w}", new[] { nbeta1, grad_w });
                mom_w  = net.Add($"new_moment_{w}", new[] { mom_w, grad_w });
                net.Output(mom_w);

                mom_w = net.Div($"m_moment1_{w}", new[] { mom_w, nbeta1t });

                object vel_w = velocity_w[i].name;
                vel_w   = net.Mul($"m_velocity0_{w}", new[] { beta2, vel_w });
                grad_w2 = net.Mul($"m_grad0_{grad_w2}", new[] { nbeta2, grad_w2 });
                vel_w   = net.Add($"new_velocity_{w}", new[] { vel_w, grad_w2 });
                net.Output(vel_w);

                vel_w = net.Div($"m_velocity1_{w}", new[] { vel_w, nbeta2t });
                vel_w = net.Sqrt($"m_velocity2_{w}", vel_w);
                vel_w = net.Add($"m_velocity3_{w}", new[] { vel_w, epsilon });

                object etaw = net.Div($"etaw0_{w}", new[] { mom_w, vel_w });
                etaw = net.Mul($"etaw1_{w}", new[] { lr, etaw });
                net.Output(net.Sub($"new_{w}", new[] { w, etaw }));


                if (trainableBias)
                {
                    // bias
                    object grad_b  = net.Reduce(Layer.Type.ReduceSum, $"grad_{b}", grad_output, axis: 0);
                    object grad_b2 = net.Mul($"grad_{b}2", new[] { grad_b, grad_b });


                    object mom_b = moment_b[i].name;
                    mom_b  = net.Mul($"m_moment0_{b}", new[] { beta1, mom_b });
                    grad_b = net.Mul($"m_grad0_{grad_b}", new[] { nbeta1, grad_b });
                    mom_b  = net.Add($"new_moment_{b}", new[] { mom_b, grad_b });
                    net.Output(mom_b);

                    mom_b = net.Div($"m_moment1_{b}", new[] { mom_b, nbeta1t });

                    object vel_b = velocity_b[i].name;
                    vel_b   = net.Mul($"m_velocity0_{b}", new[] { beta2, vel_b });
                    grad_b2 = net.Mul($"m_grad0_{grad_b2}", new[] { nbeta2, grad_b2 });
                    vel_b   = net.Add($"new_velocity_{b}", new[] { vel_b, grad_b2 });
                    net.Output(vel_b);

                    vel_b = net.Div($"m_velocity1_{b}", new[] { vel_b, nbeta2t });
                    vel_b = net.Sqrt($"m_velocity2_{b}", vel_b);
                    vel_b = net.Add($"m_velocity3_{b}", new[] { vel_b, epsilon });

                    object etab = net.Div($"etab0_{b}", new[] { mom_b, vel_b });
                    etab = net.Mul($"etab1_{b}", new[] { lr, etab });

                    net.Output(net.Sub($"new_{b}", new[] { b, etab }));
                }
                else
                {
                    net.Output(net.Identity($"new_{b}", b));
                }

                if (i > 0)
                {
                    grad_output = net.MatMul($"grad_output{i - 1}", grad_output, w);
                }
            }
        }
        else
        {
            for (int i = layerCount - 1; i >= 0; --i)
            {
                var isLastLayer = i == layerCount - 1;
                var w = weight[i].name; var b = bias[i].name;

                if (!isLastLayer)
                {
                    var input = ctx.Pop();
                    input       = net.Mul($"sin_grad_premul{i}_omega", new[] { input, Omega0 });
                    input       = net.Cos($"sin_grad_cos{i}", input);
                    grad_output =
                        net.Mul($"sin_grad{i}", new[] { grad_output, input, Omega0 });
                }

                object grad_w = net.MatMul($"grad_{w}",
                                           net.Transpose($"grad_output{i}.T", grad_output, N__C2C__N), ctx.Pop());
                grad_w = net.Mul($"lr_grad_{w}", new[] { lr, grad_w });
                net.Output(net.Sub($"new_{w}", new[] { w, grad_w }));

                if (trainableBias)
                {
                    object grad_b = net.Reduce(Layer.Type.ReduceSum, $"grad_{b}", grad_output, axis: 0);
                    grad_b = net.Mul($"lr_grad_{b}", new[] { lr, grad_b });
                    net.Output(net.Sub($"new_{b}", new[] { b, grad_b }));
                }
                else
                {
                    net.Output(net.Identity($"new_{b}", b));
                }

                if (i > 0)
                {
                    grad_output = net.MatMul($"grad_output{i - 1}", grad_output, w);
                }
            }
        }

        model = net.model;
        Debug.Log(model);
    }
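The useAdam branch implements the standard bias-corrected Adam update, with beta1t and beta2t accumulating the running powers of the decay rates; the else branch reduces to plain SGD, w_t = w_{t-1} - lr * g_t. In the usual notation:

\begin{aligned}
m_t &= \beta_1 m_{t-1} + (1-\beta_1)\,g_t, &
v_t &= \beta_2 v_{t-1} + (1-\beta_2)\,g_t^2 \\
\hat{m}_t &= \frac{m_t}{1-\beta_1^t}, &
\hat{v}_t &= \frac{v_t}{1-\beta_2^t} \\
w_t &= w_{t-1} - \mathrm{lr}\,\frac{\hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon}
\end{aligned}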