Example #1
        public void GenerateVectorObservation()
        {
            var inputTensor = new TensorProxy
            {
                shape = new long[] { 2, 3 }
            };
            const int batchSize  = 4;
            var       agentInfos = GetFakeAgents();
            var       alloc      = new TensorCachingAllocator();
            var       generator  = new VectorObservationGenerator(alloc);

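            // Observations from sensors 0, 1 and 2 are concatenated into one row per agent.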
            generator.AddSensorIndex(0);
            generator.AddSensorIndex(1);
            generator.AddSensorIndex(2);
            var agent0 = agentInfos[0];
            var agent1 = agentInfos[1];
            var inputs = new List <AgentInfoSensorsPair>
            {
                new AgentInfoSensorsPair {
                    agentInfo = agent0._Info, sensors = agent0.sensors
                },
                new AgentInfoSensorsPair {
                    agentInfo = agent1._Info, sensors = agent1.sensors
                },
            };

            generator.Generate(inputTensor, batchSize, inputs);
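            // Row 0 holds agent 0's concatenated observations, row 1 holds agent 1's.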
            Assert.IsNotNull(inputTensor.data);
            Assert.AreEqual(inputTensor.data[0, 0], 1);
            Assert.AreEqual(inputTensor.data[0, 2], 3);
            Assert.AreEqual(inputTensor.data[1, 0], 4);
            Assert.AreEqual(inputTensor.data[1, 2], 6);
            alloc.Dispose();
        }
Example #2
        public void ApplyDiscreteActionOutput()
        {
            var inputTensor = new TensorProxy()
            {
                shape = new long[] { 2, 5 },
                data  = new Tensor(
                    2,
                    5,
                    new[] { 0.5f, 22.5f, 0.1f, 5f, 1f, 4f, 5f, 6f, 7f, 8f })
            };
            var agentInfos = GetFakeAgentInfos();
            var alloc      = new TensorCachingAllocator();
            var applier    = new DiscreteActionOutputApplier(new[] { 2, 3 }, 0, alloc);

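            // Five logits per agent, split across branches of size 2 and 3; Apply turns them into one discrete action per branch.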
            applier.Apply(inputTensor, agentInfos);
            var agents = agentInfos;

            var agent = agents[0] as TestAgent;

            Assert.NotNull(agent);
            var action = agent.GetAction();

            Assert.AreEqual(action.vectorActions[0], 1);
            Assert.AreEqual(action.vectorActions[1], 1);

            agent = agents[1] as TestAgent;
            Assert.NotNull(agent);
            action = agent.GetAction();
            Assert.AreEqual(action.vectorActions[0], 1);
            Assert.AreEqual(action.vectorActions[1], 2);
            alloc.Dispose();
        }
Example #3
        public void ApplyDiscreteActionOutput()
        {
            var inputTensor = new TensorProxy()
            {
                shape = new long[] { 2, 5 },
                data  = new Tensor(
                    2,
                    5,
                    new[] { 0.5f, 22.5f, 0.1f, 5f, 1f, 4f, 5f, 6f, 7f, 8f })
            };
            var alloc   = new TensorCachingAllocator();
            var applier = new DiscreteActionOutputApplier(new[] { 2, 3 }, 0, alloc);

            var action0   = new AgentAction();
            var action1   = new AgentAction();
            var callbacks = new List <AgentIdActionPair>()
            {
                new AgentIdActionPair {
                    agentId = 0, action = (a) => action0 = a
                },
                new AgentIdActionPair {
                    agentId = 1, action = (a) => action1 = a
                }
            };

            applier.Apply(inputTensor, callbacks);

            Assert.AreEqual(action0.vectorActions[0], 1);
            Assert.AreEqual(action0.vectorActions[1], 1);

            Assert.AreEqual(action1.vectorActions[0], 1);
            Assert.AreEqual(action1.vectorActions[1], 2);
            alloc.Dispose();
        }
Example #4
        public void ApplyDiscreteActionOutput()
        {
            var actionSpec  = ActionSpec.MakeDiscrete(2, 3);
            var inputTensor = new TensorProxy()
            {
                shape = new long[] { 2, 2 },
                data  = new Tensor(
                    2,
                    2,
                    new[] { 1f, 1f, 1f, 2f }),
            };
            var alloc   = new TensorCachingAllocator();
            var applier = new DiscreteActionOutputApplier(actionSpec, 0, alloc);

            var agentIds = new List <int>()
            {
                0, 1
            };
            // Dictionary from AgentId to Action
            var actionDict = new Dictionary <int, ActionBuffers>()
            {
                { 0, ActionBuffers.Empty }, { 1, ActionBuffers.Empty }
            };


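            // The 2x2 tensor already holds one discrete action per branch; Apply copies it into each agent's ActionBuffers.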
            applier.Apply(inputTensor, agentIds, actionDict);

            Assert.AreEqual(actionDict[0].DiscreteActions[0], 1);
            Assert.AreEqual(actionDict[0].DiscreteActions[1], 1);

            Assert.AreEqual(actionDict[1].DiscreteActions[0], 1);
            Assert.AreEqual(actionDict[1].DiscreteActions[1], 2);
            alloc.Dispose();
        }
Example #5
    public void TensorCachingAllocatorTest()
    {
        ReferenceComputeOps gpuOps;

        Debug.Log(ComputeShaderSingleton.Instance);
        gpuOps = new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);

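        // TensorCachingAllocator pools tensor memory so allocations can be reused.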
        TensorCachingAllocator tca = new TensorCachingAllocator();

        int[]  shape = new[] { 2, 3, 5, 1 };
        Tensor X     = tca.Alloc(new TensorShape(shape));
        Tensor W     = tca.Alloc(new TensorShape(15, 7));

        X[0] = 3;
        W[0] = 5;
        Debug.Log($"X HxW:{X.flatHeight} {X.flatWidth}");
        Debug.Log($"W HxW:{W.flatHeight} {W.flatWidth}");
        Tensor Y = gpuOps.MatMul(X, false, W, false);

        Debug.Log($"Y HxW:{Y.flatHeight} {Y.flatWidth}");
        Debug.Log(X.data.GetType());
        tca.Dispose();
        gpuOps.ResetAllocator(false);
        Debug.Assert(true); // Just getting here is good enough
    }
Example #6
        public void GenerateVectorObservation()
        {
            var inputTensor = new TensorProxy
            {
                shape = new long[] { 2, 4 }
            };
            const int batchSize  = 4;
            var       agentInfos = GetFakeAgents(ObservableAttributeOptions.ExamineAll);
            var       alloc      = new TensorCachingAllocator();
            var       generator  = new ObservationGenerator(alloc);

            generator.AddSensorIndex(0); // ObservableAttribute (size 1)
            generator.AddSensorIndex(1); // TestSensor (size 0)
            generator.AddSensorIndex(2); // TestSensor (size 0)
            generator.AddSensorIndex(3); // VectorSensor (size 3)
            var agent0 = agentInfos[0];
            var agent1 = agentInfos[1];
            var inputs = new List <AgentInfoSensorsPair>
            {
                new AgentInfoSensorsPair {
                    agentInfo = agent0._Info, sensors = agent0.sensors
                },
                new AgentInfoSensorsPair {
                    agentInfo = agent1._Info, sensors = agent1.sensors
                },
            };

            generator.Generate(inputTensor, batchSize, inputs);
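            // Column 0 is the ObservableAttribute value; columns 1-3 hold the VectorSensor observations.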
            Assert.IsNotNull(inputTensor.data);
            Assert.AreEqual(inputTensor.data[0, 1], 1);
            Assert.AreEqual(inputTensor.data[0, 3], 3);
            Assert.AreEqual(inputTensor.data[1, 1], 4);
            Assert.AreEqual(inputTensor.data[1, 3], 6);
            alloc.Dispose();
        }
Example #7
    public void MLP_Shape()
    {
        TensorCachingAllocator tca = new TensorCachingAllocator();
        var shape = new MultiLayerPerception.Shape {
            inputSize  = 2,
            outputSize = 3,
            hiddenSize = 5
        };
        MultiLayerPerception mlp = new MultiLayerPerception(shape);
        IWorker worker           = WorkerFactory.CreateWorker(mlp.model, WorkerFactory.Device.GPU);
        Tensor  input            = tca.Alloc(new TensorShape(1, 1, 1, shape.inputSize));

        for (int i = 0; i < shape.inputSize; i++)
        {
            input[i] = i;
        }
        IWorker ex = worker.Execute(input);

        ex.FlushSchedule(true);
        Tensor output = ex.PeekOutput();

        for (int i = 0; i < shape.outputSize; i++)
        {
            Debug.Log($"output[{i}] = {output[i]}");
        }
        tca.Dispose();
        ex.Dispose();
        worker.Dispose();
        Debug.Assert(true);
    }
Example #8
        public void GenerateActionMaskInput()
        {
            var inputTensor = new TensorProxy
            {
                shape     = new long[] { 2, 5 },
                valueType = TensorProxy.TensorType.FloatingPoint
            };
            const int batchSize  = 4;
            var       agentInfos = GetFakeAgents();
            var       alloc      = new TensorCachingAllocator();
            var       generator  = new ActionMaskInputGenerator(alloc);

            var agent0 = agentInfos[0];
            var agent1 = agentInfos[1];
            var inputs = new List <AgentInfoSensorsPair>
            {
                new AgentInfoSensorsPair {
                    agentInfo = agent0._Info, sensors = agent0.sensors
                },
                new AgentInfoSensorsPair {
                    agentInfo = agent1._Info, sensors = agent1.sensors
                },
            };

            generator.Generate(inputTensor, batchSize, inputs);
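            // 1 marks an allowed action, 0 a masked one; agent 1's first action is masked.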
            Assert.IsNotNull(inputTensor.data);
            Assert.AreEqual(inputTensor.data[0, 0], 1);
            Assert.AreEqual(inputTensor.data[0, 4], 1);
            Assert.AreEqual(inputTensor.data[1, 0], 0);
            Assert.AreEqual(inputTensor.data[1, 4], 1);
            alloc.Dispose();
        }
Example #9
        public void GeneratePreviousActionInput()
        {
            var inputTensor = new TensorProxy
            {
                shape     = new long[] { 2, 2 },
                valueType = TensorProxy.TensorType.Integer
            };
            const int batchSize  = 4;
            var       agentInfos = GetFakeAgents();
            var       alloc      = new TensorCachingAllocator();
            var       generator  = new PreviousActionInputGenerator(alloc);
            var       agent0     = agentInfos[0];
            var       agent1     = agentInfos[1];
            var       inputs     = new List <AgentInfoSensorsPair>
            {
                new AgentInfoSensorsPair {
                    agentInfo = agent0.Info, sensors = agent0.sensors
                },
                new AgentInfoSensorsPair {
                    agentInfo = agent1.Info, sensors = agent1.sensors
                },
            };

            generator.Generate(inputTensor, batchSize, inputs);
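            // Each row holds an agent's previous discrete actions: {1, 2} and {3, 4}.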
            Assert.IsNotNull(inputTensor.data);
            Assert.AreEqual(inputTensor.data[0, 0], 1);
            Assert.AreEqual(inputTensor.data[0, 1], 2);
            Assert.AreEqual(inputTensor.data[1, 0], 3);
            Assert.AreEqual(inputTensor.data[1, 1], 4);
            alloc.Dispose();
        }
Example #10
        public void ApplyDiscreteActionOutput()
        {
            var inputTensor = new TensorProxy()
            {
                shape = new long[] { 2, 5 },
                data = new Tensor(
                    2,
                    5,
                    new[] { 0.5f, 22.5f, 0.1f, 5f, 1f, 4f, 5f, 6f, 7f, 8f })
            };
            var alloc = new TensorCachingAllocator();
            var applier = new DiscreteActionOutputApplier(new[] { 2, 3 }, 0, alloc);

            var agentIds = new List<int>() { 0, 1 };
            // Dictionary from AgentId to Action
            var actionDict = new Dictionary<int, float[]>() { { 0, null }, { 1, null } };


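            // Each agent's entry starts as null and is filled in by Apply.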
            applier.Apply(inputTensor, agentIds, actionDict);

            Assert.AreEqual(actionDict[0][0], 1);
            Assert.AreEqual(actionDict[0][1], 1);

            Assert.AreEqual(actionDict[1][0], 1);
            Assert.AreEqual(actionDict[1][1], 2);
            alloc.Dispose();
        }
Example #11
        public void Construction()
        {
            var bp              = new BrainParameters();
            var alloc           = new TensorCachingAllocator();
            var tensorGenerator = new TensorApplier(bp, 0, alloc);

            Assert.IsNotNull(tensorGenerator);
            alloc.Dispose();
        }
Example #12
        public void Construction()
        {
            var actionSpec = new ActionSpec();
            var alloc = new TensorCachingAllocator();
            var mem = new Dictionary<int, List<float>>();
            var tensorGenerator = new TensorApplier(actionSpec, 0, alloc, mem);
            Assert.IsNotNull(tensorGenerator);
            alloc.Dispose();
        }
Example #13
        public void Construction()
        {
            var bp              = new BrainParameters();
            var alloc           = new TensorCachingAllocator();
            var mem             = new Dictionary <int, List <float> >();
            var tensorGenerator = new TensorApplier(bp, 0, alloc, mem);

            Assert.IsNotNull(tensorGenerator);
            alloc.Dispose();
        }
Example #14
    public void OnDisable()
    {
        foreach (var moveContext in _currentGeneration)
        {
            moveContext.Terminate();
        }

        _currentGeneration = null;
        TensorAllocator.Dispose();
        TensorAllocator = null;
    }
Example #15
        public void GenerateBatchSize()
        {
            var inputTensor = new TensorProxy();
            var alloc       = new TensorCachingAllocator();
            var batchSize   = 4;
            var generator   = new BatchSizeGenerator(alloc);

            generator.Generate(inputTensor, batchSize, null);
            Assert.IsNotNull(inputTensor.Data);
            Assert.AreEqual(inputTensor.Data[0], batchSize);
            alloc.Dispose();
        }
Example #16
        public void GenerateSequenceLength()
        {
            var       inputTensor = new TensorProxy();
            var       alloc       = new TensorCachingAllocator();
            const int batchSize   = 4;
            var       generator   = new SequenceLengthGenerator(alloc);

            generator.Generate(inputTensor, batchSize, null);
            Assert.IsNotNull(inputTensor.data);
            Assert.AreEqual(inputTensor.data[0], 1);
            alloc.Dispose();
        }
Example #17
        public void TestResizeTensor(int dimension)
        {
            var alloc    = new TensorCachingAllocator();
            var height   = 64;
            var width    = 84;
            var channels = 3;

            // Set shape to {1, ..., height, width, channels}
            // For 8D, the ... are all 1's
            var shape = new long[dimension];

            for (var i = 0; i < dimension; i++)
            {
                shape[i] = 1;
            }

            shape[dimension - 3] = height;
            shape[dimension - 2] = width;
            shape[dimension - 1] = channels;

            var intShape = new int[dimension];

            for (var i = 0; i < dimension; i++)
            {
                intShape[i] = (int)shape[i];
            }

            var tensorProxy = new TensorProxy
            {
                valueType = TensorProxy.TensorType.Integer,
                data      = new Tensor(intShape),
                shape     = shape,
            };

            // These should be invariant after the resize.
            Assert.AreEqual(height, tensorProxy.data.shape.height);
            Assert.AreEqual(width, tensorProxy.data.shape.width);
            Assert.AreEqual(channels, tensorProxy.data.shape.channels);

            TensorUtils.ResizeTensor(tensorProxy, 42, alloc);

            Assert.AreEqual(height, tensorProxy.shape[dimension - 3]);
            Assert.AreEqual(width, tensorProxy.shape[dimension - 2]);
            Assert.AreEqual(channels, tensorProxy.shape[dimension - 1]);

            Assert.AreEqual(height, tensorProxy.data.shape.height);
            Assert.AreEqual(width, tensorProxy.data.shape.width);
            Assert.AreEqual(channels, tensorProxy.data.shape.channels);

            alloc.Dispose();
        }
Example #18
        public void ApplyHybridActionOutput()
        {
            var actionSpec            = new ActionSpec(3, new[] { 2, 3 });
            var continuousInputTensor = new TensorProxy()
            {
                shape = new long[] { 2, 3 },
                data  = new Tensor(2, 3, new float[] { 1, 2, 3, 4, 5, 6 })
            };
            var discreteInputTensor = new TensorProxy()
            {
                shape = new long[] { 2, 2 },
                data  = new Tensor(
                    2,
                    2,
                    new[] { 1f, 1f, 1f, 2f }),
            };
            var continuousApplier = new ContinuousActionOutputApplier(actionSpec);
            var alloc             = new TensorCachingAllocator();
            var discreteApplier   = new DiscreteActionOutputApplier(actionSpec, 0, alloc);

            var agentIds = new List <int>()
            {
                0, 1
            };
            // Dictionary from AgentId to Action
            var actionDict = new Dictionary <int, ActionBuffers>()
            {
                { 0, ActionBuffers.Empty }, { 1, ActionBuffers.Empty }
            };


            continuousApplier.Apply(continuousInputTensor, agentIds, actionDict);
            discreteApplier.Apply(discreteInputTensor, agentIds, actionDict);

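            // Continuous actions come from the first tensor, discrete actions from the 2x2 tensor.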
            Assert.AreEqual(actionDict[0].ContinuousActions[0], 1);
            Assert.AreEqual(actionDict[0].ContinuousActions[1], 2);
            Assert.AreEqual(actionDict[0].ContinuousActions[2], 3);
            Assert.AreEqual(actionDict[0].DiscreteActions[0], 1);
            Assert.AreEqual(actionDict[0].DiscreteActions[1], 1);

            Assert.AreEqual(actionDict[1].ContinuousActions[0], 4);
            Assert.AreEqual(actionDict[1].ContinuousActions[1], 5);
            Assert.AreEqual(actionDict[1].ContinuousActions[2], 6);
            Assert.AreEqual(actionDict[1].DiscreteActions[0], 1);
            Assert.AreEqual(actionDict[1].DiscreteActions[1], 2);
            alloc.Dispose();
        }
Example #19
    public void ModelBuilderTest()
    {
        TensorCachingAllocator tca = new TensorCachingAllocator();
        ModelBuilder           mb  = new ModelBuilder();

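        // Hand-build a tiny network: Input -> Dense -> Identity -> Dense -> Identity.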
        Model.Input inputLayer = mb.Input("Input", new int[] { -1, 1, 1, 1 });
        Layer       prevLayer  = null;

        prevLayer            = mb.Dense(MultiLayerPerception.LayerNames.Hidden, inputLayer, tca.Alloc(new TensorShape(1, 1)), tca.Alloc(new TensorShape(1, 1)));
        prevLayer.weights[0] = 1;
        prevLayer.weights[1] = 1;
        Debug.Log(prevLayer.weights.Length + ": " + string.Join(",", prevLayer.weights));
        for (int i = 0; i < prevLayer.datasets.Length; i++)
        {
            Debug.Log(prevLayer.datasets[i].name + ":" + prevLayer.datasets[i].offset);
        }
        prevLayer = mb.Identity("hiddenAct", prevLayer);
        Debug.Log(prevLayer.weights.Length + ": " + string.Join(",", prevLayer.weights));
        prevLayer            = mb.Dense("output", prevLayer, tca.Alloc(new TensorShape(1, 1)), tca.Alloc(new TensorShape(1, 1)));
        prevLayer.weights[0] = 3;
        prevLayer.weights[1] = 5;
        Debug.Log(prevLayer.weights.Length + ": " + string.Join(",", prevLayer.weights));
        prevLayer = mb.Identity("outputActive", prevLayer);
        Debug.Log(prevLayer.weights.Length + ": " + string.Join(",", prevLayer.weights));
        mb.Output(prevLayer);
        IWorker worker = WorkerFactory.CreateWorker(mb.model, WorkerFactory.Device.GPU);
        Tensor  input  = tca.Alloc(new TensorShape(4, 1, 1, 1));

        for (int i = 0; i < 4; i++)
        {
            input[i] = i;
        }
        IWorker ex = worker.Execute(input);

        ex.FlushSchedule(true);
        Tensor output = ex.PeekOutput();

        for (int i = 0; i < 4; i++)
        {
            Debug.Log($"output[{i}] = {output[i]}");
        }
        tca.Dispose();
        ex.Dispose();
        worker.Dispose();
        Debug.Assert(true); // Just getting here is good enough
    }
Example #20
        public void GenerateRecurrentInput()
        {
            var inputTensor = new TensorProxy()
            {
                Shape = new long[] { 2, 5 }
            };
            var batchSize  = 4;
            var agentInfos = GetFakeAgentInfos();
            var alloc      = new TensorCachingAllocator();
            var generator  = new RecurrentInputGenerator(alloc);

            generator.Generate(inputTensor, batchSize, agentInfos);
            Assert.IsNotNull(inputTensor.Data);
            Assert.AreEqual(inputTensor.Data[0, 0], 0);
            Assert.AreEqual(inputTensor.Data[0, 4], 0);
            Assert.AreEqual(inputTensor.Data[1, 0], 1);
            Assert.AreEqual(inputTensor.Data[1, 4], 0);
            alloc.Dispose();
        }
Example #21
        public void GenerateVectorObservation()
        {
            var inputTensor = new TensorProxy()
            {
                Shape = new long[] { 2, 3 }
            };
            var batchSize  = 4;
            var agentInfos = GetFakeAgentInfos();
            var alloc      = new TensorCachingAllocator();
            var generator  = new VectorObservationGenerator(alloc);

            generator.Generate(inputTensor, batchSize, agentInfos);
            Assert.IsNotNull(inputTensor.Data);
            Assert.AreEqual(inputTensor.Data[0, 0], 1);
            Assert.AreEqual(inputTensor.Data[0, 2], 3);
            Assert.AreEqual(inputTensor.Data[1, 0], 4);
            Assert.AreEqual(inputTensor.Data[1, 2], 6);
            alloc.Dispose();
        }
Example #22
        public MultiLayerPerception(Shape shape, Layer.FusedActivation activation = Layer.FusedActivation.Relu)
        {
            _shape = shape;
            ModelBuilder mb = new ModelBuilder();

            m_cache = new float[_shape.WeightCount];
            { // Build the model
                TensorCachingAllocator tca = new TensorCachingAllocator();
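                // tca supplies the weight and bias tensors for the two Dense layers below.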
                string prevLayerName       = "[ERROR]NOT_INITIALIZED";
                prevLayerName = mb.Input(LayerNames.Input, new int[] { -1, 1, 1, _shape.inputSize }).name;
                prevLayerName = mb.Dense(LayerNames.Hidden, prevLayerName, tca.Alloc(new TensorShape(_shape.inputSize, _shape.hiddenSize)), tca.Alloc(new TensorShape(1, _shape.hiddenSize))).name;
                prevLayerName = MBActivationByName(ref mb, LayerNames.HiddenActive, prevLayerName, activation).name;
                prevLayerName = mb.Dense(LayerNames.Output, prevLayerName, tca.Alloc(new TensorShape(_shape.hiddenSize, _shape.outputSize)), tca.Alloc(new TensorShape(1, _shape.outputSize))).name;
                prevLayerName = MBActivationByName(ref mb, LayerNames.OutputActive, prevLayerName, activation).name;
                tca.Dispose();
                Debug.Assert(prevLayerName == mb.Output(prevLayerName));
                model = mb.model;
            }
            PrepareCache();
        }
Example #23
        public void GeneratePreviousActionInput()
        {
            var inputTensor = new TensorProxy()
            {
                Shape     = new long[] { 2, 2 },
                ValueType = TensorProxy.TensorType.Integer
            };
            var batchSize  = 4;
            var agentInfos = GetFakeAgentInfos();
            var alloc      = new TensorCachingAllocator();
            var generator  = new PreviousActionInputGenerator(alloc);

            generator.Generate(inputTensor, batchSize, agentInfos);
            Assert.IsNotNull(inputTensor.Data);
            Assert.AreEqual(inputTensor.Data[0, 0], 1);
            Assert.AreEqual(inputTensor.Data[0, 1], 2);
            Assert.AreEqual(inputTensor.Data[1, 0], 3);
            Assert.AreEqual(inputTensor.Data[1, 1], 4);
            alloc.Dispose();
        }
Example #24
        public void GenerateActionMaskInput()
        {
            var inputTensor = new TensorProxy()
            {
                Shape     = new long[] { 2, 5 },
                ValueType = TensorProxy.TensorType.FloatingPoint
            };
            var batchSize  = 4;
            var agentInfos = GetFakeAgentInfos();
            var alloc      = new TensorCachingAllocator();
            var generator  = new ActionMaskInputGenerator(alloc);

            generator.Generate(inputTensor, batchSize, agentInfos);
            Assert.IsNotNull(inputTensor.Data);
            Assert.AreEqual(inputTensor.Data[0, 0], 1);
            Assert.AreEqual(inputTensor.Data[0, 4], 1);
            Assert.AreEqual(inputTensor.Data[1, 0], 0);
            Assert.AreEqual(inputTensor.Data[1, 4], 1);
            alloc.Dispose();
        }
Example #25
    public void TensorFlattenTest()
    {
        ReferenceComputeOps gpuOps;

        Debug.Log(ComputeShaderSingleton.Instance);
        gpuOps = new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);

        TensorCachingAllocator tca = new TensorCachingAllocator();

        int[]  shape = new[] { 2, 2, 3, 4 };
        Tensor X     = tca.Alloc(new TensorShape(shape));

        for (int idx = 0; idx < new TensorShape(shape).length; idx++)
        {
            X[idx] = idx;
        }
        Debug.Log($"X HxW:{X.flatHeight} {X.flatWidth}");
        Debug.Log($"{X[0, 0]} {X[1, 0]}");
        Debug.Log($"{X[0, 0, 0, 0]} {X[0, 1, 0, 0]}");
        Debug.Log($"{X[0, 0, 0, 0]} {X[0, 0, 1, 0]}");
        Debug.Log($"{X[0, 0, 0, 0]} {X[0, 0, 0, 1]}");
        tca.Dispose();
        Debug.Assert(true); // Just getting here is good enough
    }
Example #26
    public void Start()
    {
        TensorAllocator = new TensorCachingAllocator();
        _pickParent     = MutationManager.Inst.DefaultGenerator;
        Debug.Log(_simParams);
    }
Example #27
        public void TestResizeTensor(int dimension)
        {
            if (dimension == 8)
            {
                // Barracuda 1.0.x doesn't support 8D tensors
                // Barracuda 1.1.x does but it initially broke ML-Agents support
                // Unfortunately, the PackageInfo methods don't exist in earlier versions of the editor,
                // so just skip that variant of the test then.
                // It's unlikely, but possible that we'll upgrade to a newer dependency of Barracuda,
                // in which case we should make sure this test is run then.
#if UNITY_2019_3_OR_NEWER
                var packageInfo = UnityEditor.PackageManager.PackageInfo.FindForAssembly(typeof(Tensor).Assembly);
                Assert.AreEqual("com.unity.barracuda", packageInfo.name);
                var barracuda8DSupport       = new Version(1, 1, 0);
                var strippedBarracudaVersion = packageInfo.version.Replace("-preview", "");
                var version = new Version(strippedBarracudaVersion);
                if (version <= barracuda8DSupport)
                {
                    return;
                }
#else
                return;
#endif
            }
            var alloc    = new TensorCachingAllocator();
            var height   = 64;
            var width    = 84;
            var channels = 3;

            // Set shape to {1, ..., height, width, channels}
            // For 8D, the ... are all 1's
            var shape = new long[dimension];
            for (var i = 0; i < dimension; i++)
            {
                shape[i] = 1;
            }

            shape[dimension - 3] = height;
            shape[dimension - 2] = width;
            shape[dimension - 1] = channels;

            var intShape = new int[dimension];
            for (var i = 0; i < dimension; i++)
            {
                intShape[i] = (int)shape[i];
            }

            var tensorProxy = new TensorProxy
            {
                valueType = TensorProxy.TensorType.Integer,
                data      = new Tensor(intShape),
                shape     = shape,
            };

            // These should be invariant after the resize.
            Assert.AreEqual(height, tensorProxy.data.shape.height);
            Assert.AreEqual(width, tensorProxy.data.shape.width);
            Assert.AreEqual(channels, tensorProxy.data.shape.channels);

            TensorUtils.ResizeTensor(tensorProxy, 42, alloc);

            Assert.AreEqual(height, tensorProxy.shape[dimension - 3]);
            Assert.AreEqual(width, tensorProxy.shape[dimension - 2]);
            Assert.AreEqual(channels, tensorProxy.shape[dimension - 1]);

            Assert.AreEqual(height, tensorProxy.data.shape.height);
            Assert.AreEqual(width, tensorProxy.data.shape.width);
            Assert.AreEqual(channels, tensorProxy.data.shape.channels);

            alloc.Dispose();
        }
Example #28
    public void MLP_Calc()
    {
        TensorCachingAllocator tca = new TensorCachingAllocator();
        var shape = new MultiLayerPerception.Shape {
            inputSize  = 2,
            outputSize = 3,
            hiddenSize = 2
        };
        MultiLayerPerception mlp = new MultiLayerPerception(shape);
        int layerCnt             = 0;

        foreach (Layer layer in mlp.model.layers)
        {
            layerCnt++;
            for (int iWB = 0; iWB < layer.weights.Length; iWB++)
            {
                layer.weights[iWB] = iWB * layerCnt;
            }

            if (layer.datasets.Length == 2)
            {
                Debug.Log($"" +
                          $"{layer.name} " +
                          $"({layer.weights.Length}: W{layer.datasets[0].length} + B{layer.datasets[1].length}): " +
                          $"<{string.Join(", ", layer.weights)}>");
            }
        }

        string  HiddenLayer = MultiLayerPerception.LayerNames.Hidden;
        IWorker worker      = WorkerFactory.CreateWorker(mlp.model, new string[] { HiddenLayer }, WorkerFactory.Device.GPU);
        Tensor  inTensor    = tca.Alloc(new TensorShape(1, 1, 1, shape.inputSize));

        for (int i = 0; i < shape.inputSize; i++)
        {
            inTensor[i] = i;
            Debug.Log($"input[{i}] = {inTensor[i]}");
        }
        IWorker ex = worker.Execute(inTensor);

        ex.FlushSchedule(true);


        Tensor hTensor = ex.PeekOutput(HiddenLayer);

        Debug.Assert(hTensor.length == shape.hiddenSize);
        for (int i = 0; i < hTensor.length; i++)
        {
            Debug.Log($"hidden1[{i}] = {hTensor[i]}");
        }
        Tensor output = ex.PeekOutput();

        Debug.Assert(output.length == shape.outputSize);
        for (int i = 0; i < output.length; i++)
        {
            Debug.Log($"output[{i}] = {output[i]}");
        }

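        // Recompute each hidden activation by hand (weights * inputs + bias) and compare with the worker's output.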
        for (int iHNode = 0; iHNode < shape.hiddenSize; iHNode++)
        {
            string str = "";
            float  sum = 0;
            for (int iINode = 0; iINode < shape.inputSize; iINode++)
            {
                float w = mlp.GetWeight(HiddenLayer, iINode, iHNode);
                str += $"{w} * {inTensor[iINode]} + ";
                sum += w * inTensor[iINode];
            }

            float b = mlp.GetBias(HiddenLayer, iHNode);
            str += $"{b}";
            sum += b;
            str += $"= {hTensor[iHNode]} ({sum})";
            Debug.Assert(Mathf.Approximately(sum, hTensor[iHNode]));
            Debug.Log(str);
        }
        tca.Dispose();
        ex.Dispose();
        worker.Dispose();
        Debug.Assert(true);
    }