public void TensorCachingAllocatorTest()
{
    // Reference GPU ops backed by the built-in compute kernels.
    ReferenceComputeOps gpuOps;
    Debug.Log(ComputeShaderSingleton.Instance);
    gpuOps = new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);

    TensorCachingAllocator tca = new TensorCachingAllocator();
    int[] shape = new[] { 2, 3, 5, 1 };
    Tensor X = tca.Alloc(new TensorShape(shape)); // flattens to 2 x 15
    Tensor W = tca.Alloc(new TensorShape(15, 7));
    X[0] = 3;
    W[0] = 5;
    Debug.Log($"X HxW:{X.flatHeight} {X.flatWidth}");
    Debug.Log($"W HxW:{W.flatHeight} {W.flatWidth}");

    // (2 x 15) * (15 x 7) => (2 x 7)
    Tensor Y = gpuOps.MatMul(X, false, W, false);
    Debug.Log($"Y HxW:{Y.flatHeight} {Y.flatWidth}");
    Debug.Log(X.data.GetType());

    tca.Dispose();
    gpuOps.ResetAllocator(false);
    Debug.Assert(true); // Just getting here is good enough
}
public void MLP_Shape()
{
    TensorCachingAllocator tca = new TensorCachingAllocator();
    var shape = new MultiLayerPerception.Shape { inputSize = 2, outputSize = 3, hiddenSize = 5 };
    MultiLayerPerception mlp = new MultiLayerPerception(shape);
    IWorker worker = WorkerFactory.CreateWorker(mlp.model, WorkerFactory.Device.GPU);

    // One batch, inputSize values in the channel dimension.
    Tensor input = tca.Alloc(new TensorShape(1, 1, 1, shape.inputSize));
    for (int i = 0; i < shape.inputSize; i++)
    {
        input[i] = i;
    }

    IWorker ex = worker.Execute(input);
    ex.FlushSchedule(true); // block until execution completes
    Tensor output = ex.PeekOutput();
    for (int i = 0; i < shape.outputSize; i++)
    {
        Debug.Log($"output[{i}] = {output[i]}");
    }

    tca.Dispose();
    ex.Dispose();
    worker.Dispose();
    Debug.Assert(true); // Just getting here is good enough
}
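// MultiLayerPerception.Shape is used above and in the constructor later in this section,
// but its definition is not shown here. A minimal sketch, assuming WeightCount simply
// totals the weights and biases of the two Dense layers allocated in the constructor;
// the property body is illustrative, not the original implementation.
public struct Shape
{
    public int inputSize;
    public int hiddenSize;
    public int outputSize;

    // hidden dense (in*hidden + hidden biases) plus output dense (hidden*out + out biases)
    public int WeightCount =>
        inputSize * hiddenSize + hiddenSize +
        hiddenSize * outputSize + outputSize;
}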
public void ModelBuilderTest()
{
    TensorCachingAllocator tca = new TensorCachingAllocator();
    ModelBuilder mb = new ModelBuilder();
    Model.Input inputLayer = mb.Input("Input", new int[] { -1, 1, 1, 1 });
    Layer prevLayer = null;

    // Dense layer with a 1x1 weight tensor and a 1x1 bias tensor.
    prevLayer = mb.Dense(MultiLayerPerception.LayerNames.Hidden, inputLayer, tca.Alloc(new TensorShape(1, 1)), tca.Alloc(new TensorShape(1, 1)));
    // layer.weights concatenates the datasets: [0] = weight, [1] = bias.
    prevLayer.weights[0] = 1;
    prevLayer.weights[1] = 1;
    Debug.Log(prevLayer.weights.Length + ": " + string.Join(",", prevLayer.weights));
    for (int i = 0; i < prevLayer.datasets.Length; i++)
    {
        Debug.Log(prevLayer.datasets[i].name + ":" + prevLayer.datasets[i].offset);
    }

    prevLayer = mb.Identity("hiddenAct", prevLayer);
    Debug.Log(prevLayer.weights.Length + ": " + string.Join(",", prevLayer.weights));

    prevLayer = mb.Dense("output", prevLayer, tca.Alloc(new TensorShape(1, 1)), tca.Alloc(new TensorShape(1, 1)));
    prevLayer.weights[0] = 3;
    prevLayer.weights[1] = 5;
    Debug.Log(prevLayer.weights.Length + ": " + string.Join(",", prevLayer.weights));

    prevLayer = mb.Identity("outputActive", prevLayer);
    Debug.Log(prevLayer.weights.Length + ": " + string.Join(",", prevLayer.weights));
    mb.Output(prevLayer);

    IWorker worker = WorkerFactory.CreateWorker(mb.model, WorkerFactory.Device.GPU);
    Tensor input = tca.Alloc(new TensorShape(4, 1, 1, 1)); // batch of 4 single-value inputs
    for (int i = 0; i < 4; i++)
    {
        input[i] = i;
    }

    IWorker ex = worker.Execute(input);
    ex.FlushSchedule(true);
    Tensor output = ex.PeekOutput();
    for (int i = 0; i < 4; i++)
    {
        Debug.Log($"output[{i}] = {output[i]}");
    }

    tca.Dispose();
    ex.Dispose();
    worker.Dispose();
    Debug.Assert(true); // Just getting here is good enough
}
public MultiLayerPerception(Shape shape, Layer.FusedActivation activation = Layer.FusedActivation.Relu)
{
    _shape = shape;
    ModelBuilder mb = new ModelBuilder();
    m_cache = new float[_shape.WeightCount];
    {
        // Build the model: input -> dense -> activation -> dense -> activation.
        TensorCachingAllocator tca = new TensorCachingAllocator();
        string prevLayerName = "[ERROR]NOT_INITIALIZED";
        prevLayerName = mb.Input(LayerNames.Input, new int[] { -1, 1, 1, _shape.inputSize }).name;
        prevLayerName = mb.Dense(LayerNames.Hidden, prevLayerName, tca.Alloc(new TensorShape(_shape.inputSize, _shape.hiddenSize)), tca.Alloc(new TensorShape(1, _shape.hiddenSize))).name;
        prevLayerName = MBActivationByName(ref mb, LayerNames.HiddenActive, prevLayerName, activation).name;
        prevLayerName = mb.Dense(LayerNames.Output, prevLayerName, tca.Alloc(new TensorShape(_shape.hiddenSize, _shape.outputSize)), tca.Alloc(new TensorShape(1, _shape.outputSize))).name;
        prevLayerName = MBActivationByName(ref mb, LayerNames.OutputActive, prevLayerName, activation).name;
        tca.Dispose();
        Debug.Assert(prevLayerName == mb.Output(prevLayerName));
        model = mb.model;
    }
    PrepareCache();
}
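// MBActivationByName is called in the constructor above but not defined in this section.
// A minimal sketch of what it presumably does, assuming Barracuda's ModelBuilder exposes
// the usual per-activation helpers (Relu, Sigmoid, Tanh, Identity); the switch arms and
// the fallback are illustrative, not the original implementation.
private static Layer MBActivationByName(ref ModelBuilder mb, string name, string input, Layer.FusedActivation activation)
{
    switch (activation)
    {
        case Layer.FusedActivation.Relu:
            return mb.Relu(name, input);
        case Layer.FusedActivation.Sigmoid:
            return mb.Sigmoid(name, input);
        case Layer.FusedActivation.Tanh:
            return mb.Tanh(name, input);
        default:
            return mb.Identity(name, input); // no (or unsupported) activation: pass through
    }
}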
public void TensorFlattenTest()
{
    ReferenceComputeOps gpuOps;
    Debug.Log(ComputeShaderSingleton.Instance);
    gpuOps = new ReferenceComputeOps(ComputeShaderSingleton.Instance.referenceKernels);

    TensorCachingAllocator tca = new TensorCachingAllocator();
    int[] shape = new[] { 2, 2, 3, 4 };
    Tensor X = tca.Alloc(new TensorShape(shape));
    for (int idx = 0; idx < new TensorShape(shape).length; idx++)
    {
        X[idx] = idx;
    }

    Debug.Log($"X HxW:{X.flatHeight} {X.flatWidth}");
    // Compare flattened 2D indexing against full NHWC indexing, stepping one axis at a time.
    Debug.Log($"{X[0, 0]} {X[1, 0]}");
    Debug.Log($"{X[0, 0, 0, 0]} {X[0, 1, 0, 0]}");
    Debug.Log($"{X[0, 0, 0, 0]} {X[0, 0, 1, 0]}");
    Debug.Log($"{X[0, 0, 0, 0]} {X[0, 0, 0, 1]}");

    tca.Dispose();
    Debug.Assert(true); // Just getting here is good enough
}
public void MLP_Calc()
{
    TensorCachingAllocator tca = new TensorCachingAllocator();
    var shape = new MultiLayerPerception.Shape { inputSize = 2, outputSize = 3, hiddenSize = 2 };
    MultiLayerPerception mlp = new MultiLayerPerception(shape);

    // Fill every layer's weights and biases with deterministic values so the result can be checked by hand.
    int layerCnt = 0;
    foreach (Layer layer in mlp.model.layers)
    {
        layerCnt++;
        for (int iWB = 0; iWB < layer.weights.Length; iWB++)
        {
            layer.weights[iWB] = iWB * layerCnt;
        }
        if (layer.datasets.Length == 2)
        {
            Debug.Log($"" +
                $"{layer.name} " +
                $"({layer.weights.Length}: W{layer.datasets[0].length} + B{layer.datasets[1].length}): " +
                $"<{string.Join(", ", layer.weights)}>");
        }
    }

    // Expose the hidden Dense layer as an additional output so it can be inspected.
    string HiddenLayer = MultiLayerPerception.LayerNames.Hidden;
    IWorker worker = WorkerFactory.CreateWorker(mlp.model, new string[] { HiddenLayer }, WorkerFactory.Device.GPU);

    Tensor inTensor = tca.Alloc(new TensorShape(1, 1, 1, shape.inputSize));
    for (int i = 0; i < shape.inputSize; i++)
    {
        inTensor[i] = i;
        Debug.Log($"input[{i}] = {inTensor[i]}");
    }

    IWorker ex = worker.Execute(inTensor);
    ex.FlushSchedule(true);

    Tensor hTensor = ex.PeekOutput(HiddenLayer);
    Debug.Assert(hTensor.length == shape.hiddenSize);
    for (int i = 0; i < hTensor.length; i++)
    {
        Debug.Log($"hidden1[{i}] = {hTensor[i]}");
    }

    Tensor output = ex.PeekOutput();
    Debug.Assert(output.length == shape.outputSize);
    for (int i = 0; i < output.length; i++)
    {
        Debug.Log($"output[{i}] = {output[i]}");
    }

    // Verify each hidden node by hand: sum(w[i,j] * input[i]) + b[j] should match the worker's output.
    for (int iHNode = 0; iHNode < shape.hiddenSize; iHNode++)
    {
        string str = "";
        float sum = 0;
        for (int iINode = 0; iINode < shape.inputSize; iINode++)
        {
            float w = mlp.GetWeight(HiddenLayer, iINode, iHNode);
            str += $"{w} * {inTensor[iINode]} + ";
            sum += w * inTensor[iINode];
        }
        float b = mlp.GetBias(HiddenLayer, iHNode);
        str += $"{b}";
        sum += b;
        str += $" = {hTensor[iHNode]} ({sum})";
        Debug.Assert(Mathf.Approximately(sum, hTensor[iHNode]));
        Debug.Log(str);
    }

    tca.Dispose();
    ex.Dispose();
    worker.Dispose();
    Debug.Assert(true); // Just getting here is good enough
}
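// GetWeight and GetBias are used by MLP_Calc above but not shown in this section. A minimal
// sketch of the presumed indexing, assuming the Dense layer stores its (inputSize x nodeCount)
// weight matrix as datasets[0] and its bias vector as datasets[1] inside layer.weights, with
// the node index varying fastest (as the TensorShape used in the constructor suggests); the
// method bodies are illustrative, not the original implementation.
public float GetWeight(string layerName, int inputIndex, int nodeIndex)
{
    Layer layer = model.layers.Find(l => l.name == layerName);
    int nodeCount = layer.datasets[0].shape.channels; // hidden/output node count
    return layer.weights[(int)layer.datasets[0].offset + inputIndex * nodeCount + nodeIndex];
}

public float GetBias(string layerName, int nodeIndex)
{
    Layer layer = model.layers.Find(l => l.name == layerName);
    return layer.weights[(int)layer.datasets[1].offset + nodeIndex];
}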