Code Example #1
    /// <summary>
    ///     Computes the probability of choosing each action from a stack of frames, and updates the value-function estimate
    /// </summary>
    /// <param name="inputTensor">
    ///     Input tensor of shape HxWxM
    /// </param>

    public double[,] Predict(double[,,] inputTensor)
    {
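        // Two depthwise-separable blocks: a depthwise convolution, a 1x1 pointwise convolution, then ReLU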
        double[,,] dw_h_conv1 = NN_Utils.DepthWiseConv(inputTensor, W_DW_conv1, W_conv1_size[4]);
        double[,,] h_conv1    = NN_Utils.PointWiseConv(dw_h_conv1, PW_conv1);
        NN_Utils.ReLU(ref h_conv1);

        double[,,] dw_h_conv2 = NN_Utils.DepthWiseConv(h_conv1, W_DW_conv2, W_conv2_size[4]);
        double[,,] h_conv2    = NN_Utils.PointWiseConv(dw_h_conv2, PW_conv2);
        NN_Utils.ReLU(ref h_conv2);

        double[,] h_conv3_flat = NN_Utils.Flatten(h_conv2);

        double[,] h_fc1         = new double[1, W_fc1_size[1]];
        double[,] action_output = new double[1, W_fc2_size[1]];

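        // Fully connected head: two GEMMs with a ReLU in between; softmax turns the output into action probabilities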
        NN_Utils.GEMM(h_conv3_flat, W_fc_1, ref h_fc1);
        NN_Utils.ReLU(ref h_fc1);
        NN_Utils.GEMM(h_fc1, W_fc_2, ref action_output);
        NN_Utils.Softmax(ref action_output);

        // Accumulate the hidden fully connected activations into the value-function estimate
        Value_function = 0d;
        for (int i = 0; i < h_fc1.GetLength(0); i++)
        {
            for (int j = 0; j < h_fc1.GetLength(1); j++)
            {
                Value_function += h_fc1[i, j];
            }
        }

        return(action_output);
    }
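For context, a minimal usage sketch of Predict. The PolicyNetwork type name and the 84x84x4 frame-stack dimensions are assumptions for illustration; what is grounded in these examples is that Predict takes an HxWxM tensor, returns the softmax action distribution, and updates the value estimate as a side effect.

    // Hypothetical caller; every name except Predict, Init and NN_Utils.Argmax is illustrative.
    var net = new PolicyNetwork();
    net.Init();

    double[,,] frames = new double[84, 84, 4]; // assumed HxWxM stack of frames
    double[,] actionProbs = net.Predict(frames);
    int greedyAction = NN_Utils.Argmax(actionProbs);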
Code Example #2
File: GEMMDense.cs  Project: gislersoft/TalesOfEtrya
    public override _Tensor[] ForwardPass(_Tensor[] inputs)
    {
        results[0] = (NN_Utils.GEMM(kernel, inputs[0])) + bias;
        activationFunction.Activate(ref results[0]);

        return(results);
    }
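NN_Utils.GEMM itself is not shown in these examples. On plain 2D arrays (the overload used in Example #1) it amounts to a standard matrix product written into a preallocated buffer; a minimal sketch, assuming row-major double[,] operands with compatible shapes:

    // Sketch only: C = A * B; the project's actual GEMM may be blocked or vectorized.
    public static void GEMM(double[,] A, double[,] B, ref double[,] C)
    {
        int m = A.GetLength(0), k = A.GetLength(1), n = B.GetLength(1);
        for (int i = 0; i < m; i++)
        {
            for (int j = 0; j < n; j++)
            {
                double sum = 0d;
                for (int p = 0; p < k; p++)
                {
                    sum += A[i, p] * B[p, j]; // dot product of row i of A with column j of B
                }
                C[i, j] = sum;
            }
        }
    }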
Code Example #3
    /// <summary>
    /// Initializes the Neural Network given the inputs above
    /// <para>
    ///     The DW convolution blocks are separated into 2 parts
    /// </para>
    /// <para>
    ///     DW Convolution: Given an input tensor of shape HxWxM (3D),
    ///     the DW convolution layer creates M kernels of shape KxK (one per input channel), resulting in a KxKxM tensor (3D).
    ///     It outputs a H'xW'xM tensor (3D)
    /// </para>
    /// <para>
    ///     Pointwise convolution: Given the H'xW'xM output of its corresponding DW layer,
    ///     the PW convolution creates N 1x1 kernels of depth M, resulting in a 1x1xMxN tensor (4D).
    ///     It outputs a H'xW'xN tensor (3D)
    /// </para>
    /// </summary>

    public void Init()
    {
        W_DW_conv1 = new double[W_conv1_size[0], W_conv1_size[1], W_conv1_size[2]];
        PW_conv1   = new double[1, 1, W_conv1_size[2], W_conv1_size[3]];

        W_DW_conv2 = new double[W_conv2_size[0], W_conv2_size[1], W_conv2_size[2]];
        PW_conv2   = new double[1, 1, W_conv2_size[2], W_conv2_size[3]];

        W_DW_conv3 = new double[W_conv3_size[0], W_conv3_size[1], W_conv3_size[2]];
        PW_conv3   = new double[1, 1, W_conv3_size[2], W_conv3_size[3]];

        W_fc_1 = new double[W_fc1_size[0], W_fc1_size[1]];
        W_fc_2 = new double[W_fc2_size[0], W_fc2_size[1]];

        NN_Utils.RandomInit(ref W_DW_conv1);
        NN_Utils.RandomInit(ref W_DW_conv2);
        NN_Utils.RandomInit(ref W_DW_conv3);

        NN_Utils.RandomInit(ref PW_conv1);
        NN_Utils.RandomInit(ref PW_conv2);
        NN_Utils.RandomInit(ref PW_conv3);

        NN_Utils.RandomInit(ref W_fc_1);
        NN_Utils.RandomInit(ref W_fc_2);

        gradients = 0;

        isInit = true;
    }
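To make the shape bookkeeping concrete, a worked example with invented numbers: suppose W_conv1_size = {8, 8, 4, 16, 4}, i.e. K = 8, M = 4 input channels, N = 16 output channels, and (assuming index 4 holds the stride, as its use in Example #1 suggests) stride 4. Init then allocates an 8x8x4 depthwise kernel stack and a 1x1x4x16 pointwise tensor. For an 84x84x4 input and a valid (unpadded) convolution, H' = W' = (84 - 8) / 4 + 1 = 20, so the depthwise stage emits a 20x20x4 tensor and the pointwise stage a 20x20x16 tensor.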
Code Example #4
    public void RunFullTest(_Tensor[] initTensors)
    {
        if (isInit)
        {
            _Tensor[] layerResult = convLayers[0].ForwardPass(initTensors);

            for (int i = 1; i < convLayers.Length; i++)
            {
                layerResult = convLayers[i].ForwardPass(layerResult);
            }

            flatten = NN_Utils.AppendToVector(layerResult);

            layerResult = denseLayers[0].ForwardPass(new _Tensor[1] {
                flatten
            });
            for (int k = 1; k < denseLayers.Length; k++)
            {
                layerResult = denseLayers[k].ForwardPass(layerResult);
            }

            layerResult[0].Shape();
            layerResult[0].Print();
        }
    }
Code Example #5
File: ConvLayer.cs  Project: gislersoft/TalesOfEtrya
    public override _Tensor[] ForwardPass(_Tensor[] inputs)
    {
        for (int i = 0; i < kernels.Length; i++)
        {
            results[i] = NN_Utils.Convolve(inputs, kernels[i], padding, stride) + bias;
        }

        return(results);
    }
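NN_Utils.Convolve is not shown; conceptually, each kernel is slid across the input maps and a dot product is taken at every position. A minimal single-channel sketch of the core loop, assuming a square KxK kernel, no padding, and plain double[,] data (the project's _Tensor version with padding will differ):

    // Sketch only: valid (unpadded) 2D cross-correlation with a given stride.
    public static double[,] Convolve2D(double[,] input, double[,] kernel, int stride)
    {
        int H = input.GetLength(0), W = input.GetLength(1);
        int K = kernel.GetLength(0);
        int outH = (H - K) / stride + 1;
        int outW = (W - K) / stride + 1;
        var output = new double[outH, outW];

        for (int y = 0; y < outH; y++)
        {
            for (int x = 0; x < outW; x++)
            {
                double sum = 0d;
                for (int ky = 0; ky < K; ky++)
                {
                    for (int kx = 0; kx < K; kx++)
                    {
                        sum += input[y * stride + ky, x * stride + kx] * kernel[ky, kx];
                    }
                }
                output[y, x] = sum;
            }
        }
        return output;
    }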
Code Example #6
    public _Tensor ForwardPass(_Tensor[] initTensors)
    {
        if (initTensors.Length == 0)
        {
            return(new _Tensor());
        }

        //Build the layers from the first input's shapes if this is the first call
        if (!isInit)
        {
            InitializeNN(initTensors);
        }

        _Tensor[] layerResult = convLayers[0].ForwardPass(initTensors);

        for (int i = 1; i < convLayers.Length; i++)
        {
            layerResult = convLayers[i].ForwardPass(layerResult);
        }

        flatten = NN_Utils.AppendToVector(layerResult);

        layerResult = denseLayers[0].ForwardPass(new _Tensor[1] {
            flatten
        });
        for (int k = 1; k < denseLayers.Length; k++)
        {
            layerResult = denseLayers[k].ForwardPass(layerResult);
        }

        return(layerResult[0]);
    }
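Note the lazy initialization: the first call pays the one-time cost of InitializeNN, and every layer's shape is inferred from the first batch of tensors rather than declared up front. The tradeoff is a slower first inference, which matters if that call happens inside a latency-sensitive game loop.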
Code Example #7
    public override int GetAction()
    {
        if (UnityEngine.Random.Range(0f, 1f) < epsilon)
        {
            return(UnityEngine.Random.Range(0, Enum.GetNames(typeof(_AradisActions)).Length));
        }
        else
        {
            var input = screenInput.GetInputTensor();

            var result = threadNeuralNetwork.Predict(input);

            return(NN_Utils.Argmax(result));
        }
    }
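This is a standard epsilon-greedy policy: with probability epsilon a uniformly random action is chosen (exploration), otherwise the action with the highest predicted probability (exploitation). NN_Utils.Argmax is not shown; a minimal sketch consistent with its use here, assuming Predict returns a 1xN row of action scores:

    // Sketch only: index of the largest entry in a 1xN row vector.
    public static int Argmax(double[,] row)
    {
        int best = 0;
        for (int j = 1; j < row.GetLength(1); j++)
        {
            if (row[0, j] > row[0, best])
            {
                best = j;
            }
        }
        return best;
    }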
Code Example #8
    public void TestInitNNDENSE(_Tensor[] initTensors)
    {
        flatten = NN_Utils.AppendToVector(initTensors);

        //Now, we start initializing the dense layers
        denseLayers[0].InitLayer(new _Tensor[1] {
            flatten
        });
        //We perform the same operations as above
        //input_i = results_(i-1)
        for (int i = 1; i < denseLayers.Length; i++)
        {
            denseLayers[i].InitLayer(denseLayers[i - 1].results);
        }

        isInit = true;
    }
Code Example #9
    public void RunGEMMDenseTest(_Tensor[] initTensors)
    {
        //float startTime = Time.realtimeSinceStartup;
        if (isInit)
        {
            flatten = NN_Utils.AppendToVector(initTensors);
            _Tensor[] layerResult = gemmLayers[0].ForwardPass(new _Tensor[1] {
                flatten
            });
            for (int k = 1; k < gemmLayers.Length; k++)
            {
                layerResult = gemmLayers[k].ForwardPass(layerResult);
            }
        }
        //float finishTime = Time.realtimeSinceStartup;

        //Debug.Log("GEMM Dense test time: " + (finishTime - startTime));
    }
Code Example #10
    public void TestInitNN(_Tensor[] initTensors)
    {
        //Start initializing the first layer
        convLayers[0].InitLayer(initTensors);
        //Each subsequent layer is initialized with the previous layer's results, since those are the i-th layer's input
        //input_i = results_(i-1)
        for (int i = 1; i < convLayers.Length; i++)
        {
            convLayers[i].InitLayer(convLayers[i - 1].results);
        }

        int lastConvIndex = convLayers.Length - 1;

        //To start the dense layers, we first flatten the 3D tensor coming from the last conv layer.
        flatten = NN_Utils.AppendToVector(convLayers[lastConvIndex].results);

        //We first check if we have dense layers
        if (denseLayers.Length == 0)
        {
            denseLayers = new DenseLayer[1] {
                new DenseLayer()
            };
            denseLayers[0].activationFunction = new Softmax();
        }

        //Now, we start initializing the dense layers
        denseLayers[0].InitLayer(new _Tensor[1] {
            flatten
        });
        //We perform the same operations as above
        //input_i = results_(i-1)
        for (int i = 1; i < denseLayers.Length; i++)
        {
            denseLayers[i].InitLayer(denseLayers[i - 1].results);
        }

        isInit = true;
    }
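The pattern here is the same shape inference as in Example #6: only the first layer ever sees the raw input shape, and each InitLayer sizes its weights from the previous layer's results tensors. The Softmax fallback guarantees the network always ends in a probability distribution even when no dense layers were configured.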
Code Example #11
    // Update is called once per frame
    void Update()
    {
        if (Input.touchCount == 1)
        {
            float startTime = Time.realtimeSinceStartup;

            //Time the hand-rolled managed matrix multiply
            NN_Utils.Matmul(inputTensor, weightTensor);

            float endTime = Time.realtimeSinceStartup;

            stdTime = endTime - startTime;
        }
        if (Input.touchCount == 2)
        {
            float initTime = Time.realtimeSinceStartup;

            //Time the same product through ALGLIB's optimized rmatrixgemm
            double[,] result = new double[1, columns];
            alglib.rmatrixgemm(1, columns, inputSize, 1, input, 0, 0, 0, weights, 0, 0, 0, 1, ref result, 0, 0);

            float endTime = Time.realtimeSinceStartup;

            gemmTime = endTime - initTime;
        }
    }
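A note on the measurement: Time.realtimeSinceStartup is a single-precision float, so very short operations can round to nearly zero and the two timings become hard to compare. System.Diagnostics.Stopwatch is the usual higher-resolution alternative; a sketch of the same measurement for the first branch:

    // Sketch only: Stopwatch-based timing of the managed matrix multiply.
    var sw = System.Diagnostics.Stopwatch.StartNew();
    NN_Utils.Matmul(inputTensor, weightTensor);
    sw.Stop();
    stdTime = (float)sw.Elapsed.TotalSeconds;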