Ejemplo n.º 1
0
        /// <summary>
        /// Reconstructs a <c>Tensor</c> from its serialized protobuf representation.
        /// </summary>
        /// <param name="proto">Serialized tensor: element type, raw data bytes and shape.</param>
        /// <returns>A new contiguous CPU tensor holding a copy of the proto's data.</returns>
        /// <exception cref="NotSupportedException">The proto's data type has no DType mapping.</exception>
        public static unsafe Tensor Deserialize(TensorProto proto)
        {
            // TryGetValue avoids the double lookup of Keys.Contains + indexer.
            if (!_dataTypeToDType.TryGetValue(proto.Type, out var dtype))
            {
                throw new NotSupportedException($"Tensors don't support '{proto.Type}' data type");
            }

            var allocator = new CpuAllocator();
            var storage   = (CpuStorage)allocator.Allocate(dtype, proto.Data.Length / dtype.Size());

            var bytes = proto.Data.ToByteArray();

            // Pin the managed byte array so its address is stable while copying into storage.
            fixed(byte *p = bytes)
            {
                storage.CopyToStorage(0, (IntPtr)p, bytes.Length);
            }

            // Rebuild the shape and a contiguous (row-major) stride layout for it.
            var sizes   = proto.Shape.Select(i => (long)i).ToArray();
            var strides = TensorDimensionHelpers.GetContiguousStride(sizes);

            return new Tensor(sizes, strides, storage, 0);
        }
Ejemplo n.º 2
0
        public void TestDiv()
        {
            // Build a 4-D tensor of known values and divide it elementwise by a scalar.
            var shape     = new long[] { 60, 48, 64, 3 };
            var totalSize = 60 * 48 * 64 * 3;
            var source    = Enumerable.Range(1, totalSize).Select(n => n + 0.1f).ToArray();

            var allocator = new CpuAllocator();
            var dividend  = new Tensor(allocator, DType.Float32, shape);

            dividend.CopyFrom(source);
            var quotient = Ops.Div(null, dividend, 255f);

            // Every element of the result must equal the matching input divided by 255.
            for (var a = 0; a < shape[0]; ++a)
            {
                for (var b = 0; b < shape[1]; ++b)
                {
                    for (var c = 0; c < shape[2]; ++c)
                    {
                        for (var d = 0; d < shape[3]; ++d)
                        {
                            Assert.AreEqual(dividend.GetElementAsFloat(a, b, c, d) / 255, quotient.GetElementAsFloat(a, b, c, d), 0.0001);
                        }
                    }
                }
            }
        }
Ejemplo n.º 3
0
        public static IAllocator Allocator(int deviceId)
        {
            // Non-GPU builds share a single lazily-created CPU allocator in slot 0.
            if (architectureType != ProcessorTypeEnums.GPU)
            {
                if (allocator[0] == null)
                {
                    allocator[0] = new CpuAllocator();
                }

                return allocator[0];
            }

            // GPU builds keep one lazily-created CUDA allocator per device slot.
            var index = GetDeviceIdIndex(deviceId);

            if (allocator[index] == null)
            {
                allocator[index] = new CudaAllocator(context, deviceId);
            }

            return allocator[index];
        }
Ejemplo n.º 4
0
        public void FillByte()
        {
            // Filling a single-element UInt8 array with a float value should read back unchanged.
            var allocator = new CpuAllocator();
            var target    = new NDArray(allocator, DType.UInt8, 1);

            const float fillValue = 97f;

            Ops.Fill(target, fillValue);

            Assert.AreEqual(fillValue, target.GetElementAsFloat(0));
        }
Ejemplo n.º 5
0
        /// <summary>
        /// Round-trips a single float value through an NDArray of the given element type.
        /// </summary>
        /// <param name="type">Element type of the array under test.</param>
        private void RunSetGet(DType type)
        {
            var allocator = new CpuAllocator();
            // BUG FIX: the array is now created with the requested element type;
            // previously it was hard-coded to DType.Float32, silently ignoring the parameter.
            var a = new NDArray(allocator, type, 1);

            var value = 123.0f;

            a.SetElementAsFloat(value, 0);

            Assert.AreEqual(value, a.GetElementAsFloat(0));
        }
Ejemplo n.º 6
0
        private void RunCopy(Array srcData, DType destType)
        {
            // Copy an arbitrary source array into a destination of a (possibly different)
            // element type and verify every element survives the conversion.
            var allocator   = new CpuAllocator();
            var source      = NDArray.FromArray(allocator, srcData);
            var destination = new NDArray(allocator, destType, source.Shape);

            Ops.Copy(destination, source);

            for (var index = 0; index < srcData.Length; ++index)
            {
                var expected = Convert.ToSingle(srcData.GetValue(index));
                Assert.AreEqual(expected, destination.GetElementAsFloat(index));
            }
        }
Ejemplo n.º 7
0
        /// <summary>
        /// Verifies that copying a Float32 tensor into an Int32 tensor truncates each element.
        /// </summary>
        public void TestCopy()
        {
            var shape     = new long[] { 60, 48, 64, 3 };
            var totalSize = 60 * 48 * 64 * 3;
            var data      = Enumerable.Range(1, totalSize).Select(i => (float)i + 0.1f).ToArray();

            var allocator = new CpuAllocator();
            var t1        = new Tensor(allocator, DType.Float32, shape);

            t1.CopyFrom(data);
            var t2 = new Tensor(allocator, DType.Int32, shape);

            Ops.Copy(t2, t1);

            // FIX: Assert.AreEqual takes (expected, actual); the original swapped them,
            // which yields misleading "expected X but was Y" failure messages.
            Assert.AreEqual(DType.Int32, t2.ElementType);

            Assert.AreEqual(1.1f, t1.GetElementAsFloat(0, 0, 0, 0), float.Epsilon);
            Assert.AreEqual(1f, t2.GetElementAsFloat(0, 0, 0, 0), float.Epsilon);
        }
Ejemplo n.º 8
0
        /// <summary>
        /// Selects the global allocator backend (CPU or CUDA).
        /// </summary>
        /// <param name="deviceType">Backend to activate.</param>
        /// <param name="gpuId">CUDA device id; ignored for the CPU backend.</param>
        /// <exception cref="ArgumentOutOfRangeException">Unrecognized backend value.</exception>
        public static void SetBackend(Backend deviceType, int gpuId = 0)
        {
            switch (deviceType)
            {
            case Backend.CPU:
                Current = new CpuAllocator();
                break;

            case Backend.CUDA:
                // Precompile missing kernels and drop stale PTX before first use.
                var cudaContext = new TSCudaContext();
                cudaContext.Precompile(Console.Write);
                cudaContext.CleanUnusedPTX();
                Current = new CudaAllocator(cudaContext, gpuId);
                IsCuda  = true;
                break;

            default:
                // FIX: an unknown value was previously ignored silently, leaving
                // Current unset and deferring the failure to first allocation.
                throw new ArgumentOutOfRangeException(nameof(deviceType), deviceType, "Unrecognized backend");
            }
        }
Ejemplo n.º 9
0
        public static IAllocator Allocator(int deviceId)
        {
            // Lazily create one allocator per device slot: GPU builds get a CUDA
            // allocator for the device, everything else falls back to the CPU allocator.
            var slot = GetDeviceIdIndex(deviceId);

            if (m_allocator[slot] == null)
            {
                m_allocator[slot] = m_archType == ProcessorTypeEnums.GPU
                    ? (IAllocator)new CudaAllocator(m_cudaContext, deviceId)
                    : new CpuAllocator();
            }

            return m_allocator[slot];
        }
Ejemplo n.º 10
0
 /// <summary>Creates a CUDA random source backed by a CPU allocator and a CPU RNG.</summary>
 public CudaRandom()
 {
     this.cpuAllocator = new CpuAllocator();
     this.cpuRandom    = new CpuRandom();
 }
Ejemplo n.º 11
0
 /// <summary>
 /// Initializes a new instance of the <see cref="CudaRandom"/> class.
 /// </summary>
 /// <summary>
 /// Initializes a new instance of the <see cref="CudaRandom"/> class, wiring up
 /// the CPU allocator and CPU random source it delegates to.
 /// </summary>
 public CudaRandom()
 {
     cpuAllocator = new CpuAllocator();
     cpuRandom    = new CpuRandom();
 }
Ejemplo n.º 12
0
        // End of configuraion options
        //##########################################################################



        /// <summary>
        /// Entry point: sets up the TensorSharp allocator (CPU or CUDA per AccMode),
        /// loads the MNIST data sets, builds the configured model and loss, then
        /// alternates training and evaluation for 50 epochs.
        /// </summary>
        static void Main(string[] args)
        {
            // Init TensorSharp

            IAllocator allocator = null;

            if (AccMode == AccelMode.Cpu)
            {
                allocator = new CpuAllocator();
            }
            else
            {
                // CUDA path: precompile kernels and clear stale PTX before first use.
                var cudaContext = new TSCudaContext();
                cudaContext.Precompile(Console.Write);
                cudaContext.CleanUnusedPTX();
                allocator = new CudaAllocator(cudaContext, 0);
            }

            var random = new SeedSource(42); // set seed to a known value - we do this to make the training repeatable



            // Load data

            if (string.IsNullOrEmpty(MnistFolder))
            {
                throw new ApplicationException("MnistFolder should be set to the path containing the MNIST data set");
            }

            Console.WriteLine("loading data sets");
            DataSet trainingSet, testingSet;

            // SimpleTimer logs the elapsed time when disposed at the end of the block.
            using (new SimpleTimer("data set loading done in {0}ms"))
            {
                MnistDataSetBuilder.BuildDataSets(allocator, MnistFolder, TRAINING_SIZE, TESTING_SIZE, out trainingSet, out testingSet);
            }


            // Construct the model, loss function and optimizer

            int numInputs = MnistParser.ImageSize * MnistParser.ImageSize;

            Sequential model;
            ICriterion criterion;
            bool       useTargetClasses;

            // cuDNN is only requested in Cudnn acceleration mode.
            var useCudnn = AccMode == AccelMode.Cudnn;

            // Each builder returns the model, its loss criterion, and whether targets
            // are class indices (useTargetClasses) rather than one-hot vectors.
            switch (MType)
            {
            case ModelType.MLP: ModelBuilder.BuildMLP(allocator, random, BatchSize, useCudnn, out model, out criterion, out useTargetClasses); break;

            case ModelType.MLPSoftmax: ModelBuilder.BuildMLPSoftmax(allocator, random, BatchSize, useCudnn, out model, out criterion, out useTargetClasses); break;

            case ModelType.Cnn: ModelBuilder.BuildCnn(allocator, random, BatchSize, useCudnn, out model, out criterion, out useTargetClasses); break;

            default: throw new InvalidOperationException("Unrecognized model type " + MType);
            }

            var optim = new SgdOptimizer(sgdConfig);


            // Train the model

            // Fixed 50-epoch schedule: train one pass, then evaluate on the test set.
            for (int i = 0; i < 50; ++i)
            {
                TrainEpoch(model, criterion, optim, trainingSet, numInputs, useTargetClasses);
                EvaluateModel(model, testingSet, numInputs);
            }
        }