Example #1
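Backward pass through graph.Add: the sum tensor's weights (all 3.0, since the inputs are filled with 1 and 2) are copied into its gradients, and after Backward() each input is expected to receive a gradient of 3.0 at position (1, 1).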
    public void TestAddTensorTensorBP()
    {
        TensorAllocator.InitDevices(ProcessorTypeEnums.CPU, new int[] { 0 });

        var graph = new ComputeGraphTensor(new WeightTensorFactory(), 0, true);

        var tensorA = new WeightTensor(new long[2] { 2, 2 }, 1, 0, name: "tensorA", isTrainable: true);
        var tensorB = new WeightTensor(new long[2] { 2, 2 }, 2, 0, name: "tensorB", isTrainable: true);

        var tensorSum = graph.Add(tensorA, tensorB);

        tensorSum.CopyWeightsToGradients(tensorSum);

        graph.Backward();

        float gA = tensorA.GetGradientAt(new long[] { 1, 1 });
        float gB = tensorB.GetGradientAt(new long[] { 1, 1 });

        Assert.IsTrue(gA == 3.0f);
        Assert.IsTrue(gB == 3.0f);
    }
Example #2
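Ops.AtomicAdd accumulates tensorB into tensorA in place, so the element sum of the updated tensorA should equal the combined sums of the two original tensors (compared after rounding to 5 decimal places).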
    public void TestAtomicAdd()
    {
        int batchSize = 5;
        int vocabSize = 20;

        TensorAllocator.InitDevices(ProcessorTypeEnums.CPU, new int[] { 0 });
        var graph = new ComputeGraphTensor(new WeightTensorFactory(), 0, true);

        var tensorA = BuildRandomTensor(shape: new long[2] { batchSize, vocabSize }, name: "tensorA", isTrainable: true);
        var   tensorAWeights = tensorA.ToWeightArray();
        float sumA           = tensorAWeights.Sum();

        var tensorB = BuildRandomTensor(shape: new long[2] { batchSize, vocabSize }, name: "tensorB", isTrainable: true);
        var   tensorBWeights = tensorB.ToWeightArray();
        float sumB           = tensorBWeights.Sum();

        float sum = sumA + sumB;

        Ops.AtomicAdd(tensorA.TWeight, tensorB.TWeight);
        var   tensorSumWeights = tensorA.ToWeightArray();
        float sum2             = tensorSumWeights.Sum();

        Assert.IsTrue(Math.Round(sum, 5) == Math.Round(sum2, 5));
    }
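Several of the tests above and below (Examples #2, #3, #6 and #7) call BuildRandomTensor and BuildRandomLabelTensor, which are not included in this listing. The sketch below is only a guess at what such helpers could look like, reconstructed from the call sites: it reuses the WeightTensor constructor pattern shown in the other examples and assumes a SetWeightArray(float[]) method exists as the counterpart of the ToWeightArray() call used above; the s_rnd field and the method bodies are illustrative, not the project's actual helpers, and a using System; directive is assumed for Random.

    // Hypothetical helpers, reconstructed from the call sites in the examples; not the original implementation.
    private static readonly Random s_rnd = new Random(42);

    private WeightTensor BuildRandomTensor(long[] shape, string name, bool isTrainable)
    {
        // Same constructor pattern as the other examples: constant initial value, device 0.
        var tensor = new WeightTensor(shape, 1, 0, name: name, isTrainable: isTrainable);

        // Overwrite the constant initialization with random values in [0, 1).
        // Assumption: SetWeightArray(float[]) is the inverse of ToWeightArray().
        var weights = new float[(int)(shape[0] * shape[1])];
        for (int i = 0; i < weights.Length; i++)
        {
            weights[i] = (float)s_rnd.NextDouble();
        }
        tensor.SetWeightArray(weights);

        return tensor;
    }

    private WeightTensor BuildRandomLabelTensor(int batchSize, int vocabSize, string name)
    {
        // One gold label index per row, stored as a float in a [batchSize, 1] tensor,
        // matching the tensorIdx.GetWeightAt(new long[] { i, 0 }) reads in Example #7.
        var labels = new WeightTensor(new long[2] { batchSize, 1 }, 1, 0, name: name, isTrainable: false);

        var indices = new float[batchSize];
        for (int i = 0; i < batchSize; i++)
        {
            indices[i] = s_rnd.Next(vocabSize);
        }
        labels.SetWeightArray(indices);

        return labels;
    }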
Example #3
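graph.Sum over dimension 1 reduces the tensor but must preserve the grand total of its elements, which is checked against a plain .NET Sum() over the weight array.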
    public void TestSum()
    {
        int batchSize = 5;
        int vocabSize = 20;

        TensorAllocator.InitDevices(ProcessorTypeEnums.CPU, new int[] { 0 });
        var graph = new ComputeGraphTensor(new WeightTensorFactory(), 0, true);

        var tensorA = BuildRandomTensor(shape: new long[2] { batchSize, vocabSize }, name: "tensorA", isTrainable: true);
        var   tensorAWeights = tensorA.ToWeightArray();
        float sum1           = tensorAWeights.Sum();

        var   tensorSum        = graph.Sum(tensorA, 1);
        var   tensorSumWeights = tensorSum.ToWeightArray();
        float sum2             = tensorSumWeights.Sum();

        sum1 = (float)Math.Round(sum1, 5);
        sum2 = (float)Math.Round(sum2, 5);

        Logger.WriteLine($"sum from .net core = '{sum1}', sum from sum operator = '{sum2}'");

        Assert.IsTrue(sum1 == sum2);
    }
Example #4
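Factory method that picks the compute graph implementation for a device: ComputeGraphMKL for CPU_MKL, ComputeGraphTensor for GPU_CUDA, and the default ComputeGraph otherwise.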
        private IComputeGraph CreateComputGraph(int deviceIdIdx, bool needBack = true)
        {
            IComputeGraph g;

            if (m_archType == ArchTypeEnums.CPU_MKL)
            {
                g = new ComputeGraphMKL(m_weightFactory[deviceIdIdx], needBack);
            }
            else if (m_archType == ArchTypeEnums.GPU_CUDA)
            {
                g = new ComputeGraphTensor(m_weightFactory[deviceIdIdx], m_deviceIds[deviceIdIdx], needBack);
            }
            else
            {
                g = new ComputeGraph(m_weightFactory[deviceIdIdx], needBack);
            }

            return g;
        }
Example #5
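Forward pass of graph.Add: two 2x2 tensors filled with 1 and 2 should produce a sum of 3.0 at position (1, 1).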
    public void TestAddTensorTensor()
    {
        TensorAllocator.InitDevices(ProcessorTypeEnums.CPU, new int[] { 0 });

        var graph = new ComputeGraphTensor(new WeightTensorFactory(), 0, true);

        var tensorA = new WeightTensor(new long[2] { 2, 2 }, 1, 0, name: "tensorA", isTrainable: true);
        var tensorB = new WeightTensor(new long[2] { 2, 2 }, 2, 0, name: "tensorB", isTrainable: true);

        var tensorSum = graph.Add(tensorA, tensorB);

        float v = tensorSum.GetWeightAt(new long[] { 1, 1 });

        Assert.IsTrue(v == 3.0f);
    }
Example #6
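Verifies that Mul by -1.0 followed by Add(100.0) is equivalent to Sub(100.0, x), both in the forward values and in the gradients that flow back through Softmax and CrossEntropyLoss.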
    public void TestAddSubGradients()
    {
        int batchSize = 5;
        int vocabSize = 20;

        TensorAllocator.InitDevices(ProcessorTypeEnums.CPU, new int[] { 0 });

        var graph = new ComputeGraphTensor(new WeightTensorFactory(), 0, true);

        var tensorA = new WeightTensor(new long[2] { batchSize, vocabSize }, 1, 0, name: "tensorA", isTrainable: true);
        var tensorB = new WeightTensor(new long[2] { batchSize, vocabSize }, 1, 0, name: "tensorB", isTrainable: true);
        var tensorIdx = BuildRandomLabelTensor(batchSize, vocabSize, "tensorIdx");

        var tensorANeg    = graph.Mul(tensorA, -1.0f);
        var tensorANegSum = graph.Add(tensorANeg, 100.0f);
        var tensorSub     = graph.Sub(100.0f, tensorB);

        float v1 = tensorANegSum.GetWeightAt(new long[] { 1, 1 });
        float v2 = tensorSub.GetWeightAt(new long[] { 1, 1 });

        Assert.IsTrue(v1 == v2);

        var softmax1 = graph.Softmax(tensorANegSum);
        var softmax2 = graph.Softmax(tensorSub);

        graph.CrossEntropyLoss(softmax1, tensorIdx);
        graph.CrossEntropyLoss(softmax2, tensorIdx);

        graph.Backward();

        float gA = tensorA.GetGradientAt(new long[] { 1, 1 });
        float gB = tensorB.GetGradientAt(new long[] { 1, 1 });

        Assert.IsTrue(gA == gB);
    }
Example #7
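Checks the cross-entropy gradient against the softmax output: for each logit the gradient should equal the softmax probability, minus 1.0 in the gold label column given by tensorIdx.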
    public void TestCrossEntropyLoss()
    {
        int batchSize = 5;
        int vocabSize = 20;

        TensorAllocator.InitDevices(ProcessorTypeEnums.CPU, new int[] { 0 });
        var graph = new ComputeGraphTensor(new WeightTensorFactory(), 0, true);

        var tensorA = BuildRandomTensor(shape: new long[2] { batchSize, vocabSize }, name: "tensorA", isTrainable: true);
        var tensorIdx = BuildRandomLabelTensor(batchSize, vocabSize, "tensorIdx");

        var probs = graph.Softmax(tensorA);

        float[] softmaxWeights = probs.ToWeightArray();
        graph.CrossEntropyLoss(probs, tensorIdx);

        graph.Backward();

        // Check if gradients are correct
        for (int i = 0; i < batchSize; i++)
        {
            for (int j = 0; j < vocabSize; j++)
            {
                float softmaxWeight = softmaxWeights[i * vocabSize + j];
                float tensorAGrad   = tensorA.GetGradientAt(new long[] { i, j });

                if (tensorIdx.GetWeightAt(new long[] { i, 0 }) != j)
                {
                    Assert.IsTrue(Math.Round(tensorAGrad, 5) == Math.Round(softmaxWeight, 5));
                }
                else
                {
                    Assert.IsTrue(Math.Round(tensorAGrad, 5) == Math.Round(softmaxWeight - 1.0f, 5));
                }
            }
        }
    }
Example #8
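Creates a child ComputeGraphTensor that shares the parent's weight factory, device id and backprop state; the commented-out block used to wire the sub-graph into the (currently disabled) network visualization.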
        public IComputeGraph CreateSubGraph(string name)
        {
            ComputeGraphTensor subGraph = new ComputeGraphTensor(m_weightTensorFactory, m_deviceId, m_needsBackprop, m_backprop, isSubGraph: true);

            //if (m_visNeuralNetwork)
            //{
            //    // Create parameters for neural network visualization
            //    subGraph.m_opsViz = m_opsViz;
            //    subGraph.m_setEdges = m_setEdges;
            //    subGraph.m_name2SubGraph = m_name2SubGraph;
            //    if (m_name2SubGraph.ContainsKey(name) == false)
            //    {
            //        int index = name.LastIndexOf(".");
            //        subGraph.m_subGraph = new Subgraph(name)
            //        {
            //            LabelText = name.Substring(index + 1)
            //        };

            //        m_name2SubGraph.Add(name, subGraph.m_subGraph);

            //        if (m_subGraph == null)
            //        {
            //            m_opsViz.RootSubgraph.AddSubgraph(subGraph.m_subGraph);
            //        }
            //        else
            //        {
            //            m_subGraph.AddSubgraph(subGraph.m_subGraph);
            //        }
            //    }
            //    else
            //    {
            //        subGraph.m_subGraph = m_name2SubGraph[name];
            //    }
            //}

            return subGraph;
        }