Example #1
        // Lazily create and cache one CudaAllocator per GPU device; on CPU,
        // fall back to a single cached CpuAllocator in slot 0.
        public static IAllocator Allocator(int deviceId)
        {
            if (architectureType == ProcessorTypeEnums.GPU)
            {
                var index = GetDeviceIdIndex(deviceId);
                return allocator[index] ?? (allocator[index] = new CudaAllocator(context, deviceId));
            }

            return allocator[0] ?? (allocator[0] = new CpuAllocator());
        }
Example #2
        public static IAllocator Allocator(int deviceId)
        {
            int idx = GetDeviceIdIndex(deviceId);

            if (allocator[idx] == null)
            {
                allocator[idx] = new CudaAllocator(cudaContext, deviceId);
            }

            return allocator[idx];
        }
Example #3
        public static IAllocator Allocator(int deviceId)
        {
            if (m_archType == ArchTypeEnums.GPU)
            {
                int idx = GetDeviceIdIndex(deviceId);
                if (allocator[idx] == null)
                {
                    allocator[idx] = new CudaAllocator(cudaContext, deviceId);
                }

                return allocator[idx];
            }
            else
            {
                return new CpuAllocator();
            }
        }
Example #4
        public static void SetBackend(Backend deviceType, int gpuId = 0)
        {
            switch (deviceType)
            {
            case Backend.CPU:
                Current = new CpuAllocator();
                break;

            case Backend.CUDA:
                var cudaContext = new TSCudaContext();
                cudaContext.Precompile(Console.Write);
                cudaContext.CleanUnusedPTX();
                Current = new CudaAllocator(cudaContext, gpuId);
                IsCuda  = true;
                break;

            default:
                break;
            }
        }
Example #5
        public static IAllocator Allocator(int deviceId)
        {
            int idx = GetDeviceIdIndex(deviceId);

            if (m_archType == ProcessorTypeEnums.GPU)
            {
                if (m_allocator[idx] == null)
                {
                    m_allocator[idx] = new CudaAllocator(m_cudaContext, deviceId);
                }
            }
            else
            {
                if (m_allocator[idx] == null)
                {
                    m_allocator[idx] = new CpuAllocator();
                }
            }

            return m_allocator[idx];
        }
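Examples #1 through #5 all implement the same idea: keep one allocator per device and create it lazily on first request. Below is a small self-contained sketch of that pattern with the check-then-set wrapped in a lock, since the snippets above perform it unsynchronized. The field names, the s_useGpu flag, the array size, and the trivial GetDeviceIdIndex mapping are assumptions made for this sketch, not code from the repositories above; the namespaces follow the usual TensorSharp layout (TensorSharp.Cpu, TensorSharp.CUDA).

using System;
using TensorSharp;
using TensorSharp.Cpu;
using TensorSharp.CUDA;

// Illustrative sketch only: a lazily initialized, per-device allocator cache.
static class AllocatorCacheSketch
{
    static readonly object s_lock = new object();
    static readonly IAllocator[] s_allocators = new IAllocator[8];
    static TSCudaContext s_cudaContext;
    static readonly bool s_useGpu = false;

    // Simplest possible device-id-to-slot mapping for this sketch.
    static int GetDeviceIdIndex(int deviceId) => s_useGpu ? deviceId : 0;

    public static IAllocator Allocator(int deviceId)
    {
        int idx = GetDeviceIdIndex(deviceId);

        lock (s_lock)   // the examples above use an unsynchronized check-then-set
        {
            if (s_allocators[idx] == null)
            {
                if (s_useGpu)
                {
                    // Create the CUDA context only when a GPU allocator is first needed.
                    s_cudaContext = s_cudaContext ?? new TSCudaContext();
                    s_allocators[idx] = new CudaAllocator(s_cudaContext, deviceId);
                }
                else
                {
                    s_allocators[idx] = new CpuAllocator();
                }
            }

            return s_allocators[idx];
        }
    }
}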
Example #6
        // End of configuration options
        //##########################################################################



        static void Main(string[] args)
        {
            // Init TensorSharp

            IAllocator allocator = null;

            if (AccMode == AccelMode.Cpu)
            {
                allocator = new CpuAllocator();
            }
            else
            {
                var cudaContext = new TSCudaContext();
                cudaContext.Precompile(Console.Write);
                cudaContext.CleanUnusedPTX();
                allocator = new CudaAllocator(cudaContext, 0);
            }

            var random = new SeedSource(42); // set seed to a known value - we do this to make the training repeatable



            // Load data

            if (string.IsNullOrEmpty(MnistFolder))
            {
                throw new ApplicationException("MnistFolder should be set to the path containing the MNIST data set");
            }

            Console.WriteLine("loading data sets");
            DataSet trainingSet, testingSet;

            using (new SimpleTimer("data set loading done in {0}ms"))
            {
                MnistDataSetBuilder.BuildDataSets(allocator, MnistFolder, TRAINING_SIZE, TESTING_SIZE, out trainingSet, out testingSet);
            }


            // Construct the model, loss function and optimizer

            int numInputs = MnistParser.ImageSize * MnistParser.ImageSize;

            Sequential model;
            ICriterion criterion;
            bool       useTargetClasses;

            var useCudnn = AccMode == AccelMode.Cudnn;

            switch (MType)
            {
            case ModelType.MLP: ModelBuilder.BuildMLP(allocator, random, BatchSize, useCudnn, out model, out criterion, out useTargetClasses); break;

            case ModelType.MLPSoftmax: ModelBuilder.BuildMLPSoftmax(allocator, random, BatchSize, useCudnn, out model, out criterion, out useTargetClasses); break;

            case ModelType.Cnn: ModelBuilder.BuildCnn(allocator, random, BatchSize, useCudnn, out model, out criterion, out useTargetClasses); break;

            default: throw new InvalidOperationException("Unrecognized model type " + MType);
            }

            var optim = new SgdOptimizer(sgdConfig);


            // Train the model

            for (int i = 0; i < 50; ++i)
            {
                TrainEpoch(model, criterion, optim, trainingSet, numInputs, useTargetClasses);
                EvaluateModel(model, testingSet, numInputs);
            }
        }
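For context, the allocators returned by these snippets are what TensorSharp tensors are allocated against. A minimal usage sketch follows; it assumes TensorSharp's Tensor(IAllocator, DType, params long[]) constructor and long[] Sizes property, so adjust it to the API of the TensorSharp version you actually use.

using System;
using TensorSharp;
using TensorSharp.Cpu;

// Illustrative sketch only: shows what the allocators above are consumed for.
static class TensorUsageSketch
{
    static void Run()
    {
        // CPU allocator, as in the CPU branch of example #6.
        IAllocator allocator = new CpuAllocator();

        // A 2 x 3 float tensor whose storage comes from the chosen allocator
        // (constructor signature assumed, see the note above).
        using (var t = new Tensor(allocator, DType.Float32, 2, 3))
        {
            Console.WriteLine(string.Join(" x ", t.Sizes)); // prints "2 x 3"
        }
    }
}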