Code Example #1
        public static void UseGpu(int gpuId = 0, bool cudnn = false)
        {
            var cudaContext = new TSCudaContext();

            // Precompile the CUDA kernels and discard PTX that is no longer referenced
            cudaContext.Precompile(Console.Write);
            cudaContext.CleanUnusedPTX();

            // Route subsequent tensor allocations to the selected GPU
            Device   = new CudaAllocator(cudaContext, gpuId);
            UseCudnn = cudnn;
            UseCuda  = true;
        }
Code Example #2
        public static void InitDevices(int[] ids)
        {
            deviceIds = ids;

            foreach (var id in deviceIds)
            {
                Logger.WriteLine($"Initialize device '{id}'");
            }

            cudaContext = new TSCudaContext(deviceIds);
            cudaContext.Precompile(Console.Write);
            cudaContext.CleanUnusedPTX();

            allocator = new IAllocator[deviceIds.Length];
        }
Code Example #3
        public static void InitDevices(ProcessorTypeEnums archType, int[] ids, float memoryUsageRatio = 0.9f, string[] compilerOptions = null)
        {
            m_archType  = archType;
            m_deviceIds = ids;
            m_allocator = new IAllocator[m_deviceIds.Length];

            if (m_archType == ProcessorTypeEnums.GPU)
            {
                foreach (int id in m_deviceIds)
                {
                    Logger.WriteLine($"Initialize device '{id}'");
                }

                m_cudaContext = new TSCudaContext(m_deviceIds, memoryUsageRatio, compilerOptions);
                m_cudaContext.Precompile(Console.Write);
                m_cudaContext.CleanUnusedPTX();
            }
        }
Code Example #4
File: DeviceManager.cs  Project: SciSharp/TensorSharp
        public static void SetBackend(Backend deviceType, int gpuId = 0)
        {
            switch (deviceType)
            {
            case Backend.CPU:
                Current = new CpuAllocator();
                break;

            case Backend.CUDA:
                var cudaContext = new TSCudaContext();
                cudaContext.Precompile(Console.Write);
                cudaContext.CleanUnusedPTX();
                Current = new CudaAllocator(cudaContext, gpuId);
                IsCuda  = true;
                break;

            default:
                break;
            }
        }
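
For reference, a minimal call site for the SetBackend overload above might look like the sketch below. The Backend.CPU and Backend.CUDA values come straight from the switch statement; the static class name DeviceManager is only assumed from the file name and may differ in the actual source.

        // Usage sketch (class name assumed from DeviceManager.cs):
        DeviceManager.SetBackend(Backend.CUDA, gpuId: 0);   // run on GPU 0
        DeviceManager.SetBackend(Backend.CPU);              // or fall back to the CPU allocator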
Code Example #5
        public static void InitDevices(ProcessorTypeEnums archType, int[] ids)
        {
            m_archType = archType;
            if (m_archType == ProcessorTypeEnums.GPU)
            {
                m_deviceIds = ids;

                foreach (var id in m_deviceIds)
                {
                    Logger.WriteLine($"Initialize device '{id}'");
                }

                m_cudaContext = new TSCudaContext(m_deviceIds);
                m_cudaContext.Precompile(Console.Write);
                m_cudaContext.CleanUnusedPTX();

                m_allocator = new IAllocator[m_deviceIds.Length];
            }
            else
            {
                m_allocator = new IAllocator[1];
            }
        }
Code Example #6
        public static void InitDevices(ProcessorTypeEnums archType, int[] ids, float memoryUsageRatio = 0.9f, string[] compilerOptions = null)
        {
            architectureType = archType;

            if (architectureType == ProcessorTypeEnums.GPU)
            {
                deviceIds = ids;

                foreach (var id in deviceIds)
                {
                    Logger.WriteLine($"Initialize device '{id}'");
                }

                context = new TSCudaContext(deviceIds, memoryUsageRatio, compilerOptions);
                context.Precompile(Console.Write);
                context.CleanUnusedPTX();

                allocator = new IAllocator[deviceIds.Length];
            }
            else
            {
                allocator = new IAllocator[1];
            }
        }
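
The InitDevices overloads above all follow the same pattern, so one hedged usage sketch covers them. ProcessorTypeEnums.GPU and the memoryUsageRatio parameter are taken from the signatures shown; the static class name TensorAllocator and the device ids are assumptions made purely for illustration.

        // Usage sketch (declaring class name is assumed; only the GPU enum member appears in the examples above):
        TensorAllocator.InitDevices(ProcessorTypeEnums.GPU, new[] { 0, 1 }, memoryUsageRatio: 0.95f);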
Code Example #7
File: Program.cs  Project: SciSharp/TensorSharp
        // End of configuration options
        //##########################################################################



        static void Main(string[] args)
        {
            // Init TensorSharp

            IAllocator allocator = null;

            if (AccMode == AccelMode.Cpu)
            {
                allocator = new CpuAllocator();
            }
            else
            {
                var cudaContext = new TSCudaContext();
                cudaContext.Precompile(Console.Write);
                cudaContext.CleanUnusedPTX();
                allocator = new CudaAllocator(cudaContext, 0);
            }

            var random = new SeedSource(42); // set seed to a known value - we do this to make the training repeatable



            // Load data

            if (string.IsNullOrEmpty(MnistFolder))
            {
                throw new ApplicationException("MnistFolder should be set to the path containing the MNIST data set");
            }

            Console.WriteLine("loading data sets");
            DataSet trainingSet, testingSet;

            using (new SimpleTimer("data set loading done in {0}ms"))
            {
                MnistDataSetBuilder.BuildDataSets(allocator, MnistFolder, TRAINING_SIZE, TESTING_SIZE, out trainingSet, out testingSet);
            }


            // Construct the model, loss function and optimizer

            int numInputs = MnistParser.ImageSize * MnistParser.ImageSize;

            Sequential model;
            ICriterion criterion;
            bool       useTargetClasses;

            var useCudnn = AccMode == AccelMode.Cudnn;

            switch (MType)
            {
            case ModelType.MLP: ModelBuilder.BuildMLP(allocator, random, BatchSize, useCudnn, out model, out criterion, out useTargetClasses); break;

            case ModelType.MLPSoftmax: ModelBuilder.BuildMLPSoftmax(allocator, random, BatchSize, useCudnn, out model, out criterion, out useTargetClasses); break;

            case ModelType.Cnn: ModelBuilder.BuildCnn(allocator, random, BatchSize, useCudnn, out model, out criterion, out useTargetClasses); break;

            default: throw new InvalidOperationException("Unrecognized model type " + MType);
            }

            var optim = new SgdOptimizer(sgdConfig);


            // Train the model

            for (int i = 0; i < 50; ++i)
            {
                TrainEpoch(model, criterion, optim, trainingSet, numInputs, useTargetClasses);
                EvaluateModel(model, testingSet, numInputs);
            }
        }