Example No. 1
        public static float[] AllocateGPUArray(GPUModule module, int size)
        {
            var res = module.Gpu.Allocate<float>(size);
            ClearGpuArray(module, res, size);
            return res;
        }
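A minimal usage sketch of the allocation helper above, assuming the statics are in scope and a GPUModule is created and initialized as in Example No. 28; the buffer size is arbitrary, for illustration only.

 // Hypothetical usage; 1024 is an arbitrary size.
 var gpuModule = new GPUModule();
 gpuModule.InitGPU();
 float[] deviceFloats = AllocateGPUArray(gpuModule, 1024);    // allocated and zero-filled on the device
 int[] deviceInts = AllocateGPUArrayInt(gpuModule, 1024);     // integer variant, see Example No. 41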
Example No. 2
        public Layer(GPUModule gpuModule, Layer previousLayer = null, int size = 0, string id = "", int miniBatchSize = Int32.MinValue)
        {
            if (previousLayer != null) MinibatchSize = previousLayer.MinibatchSize;
            if (miniBatchSize != Int32.MinValue) MinibatchSize = miniBatchSize;

            LayerIndex = IdCounter++;
            Id = id;
            if (String.IsNullOrEmpty(Id))
            {
                Id = "ID" + LayerIndex.ToString().PadLeft(2, '0');
            }

            _gpuModule = gpuModule;
            _gpu = _gpuModule.Gpu;
            PreviousLayer = previousLayer;
            if (size != 0)
            {
                this.Size = size;
                AddArray(ArrayName.Outputs, MinibatchSize, this.Size);                
            }

            if ((previousLayer != null) && (size > 0))
            {
                AddArray(ArrayName.Gradients, MinibatchSize, size);
            }
        }
Example No. 3
 public SoftMaxCostLayer(GPUModule gpuModule, FullyConnectedLayer previousLayer, DataLayer labelLayer, string id = "") : base(gpuModule, previousLayer, labelLayer, 0, id)
 {
     this.Size = previousLayer.Size;
     AddArray(ArrayName.CorrectlyPredictedLabels, MinibatchSize, 1);
     AddArray(ArrayName.Outputs, MinibatchSize, this.Size);
     _fullyConnectedLayer = previousLayer;
 }
Example No. 4
 public SoftMaxCostLayer(GPUModule gpuModule, FullyConnectedLayer previousLayer, DataLayer labelLayer, string id = "") : base(gpuModule, previousLayer, labelLayer, 0, id)
 {
     this.Size = previousLayer.Size;
     AddArray(ArrayName.CorrectlyPredictedLabels, MinibatchSize, 1);
     AddArray(ArrayName.Outputs, MinibatchSize, this.Size);
     _fullyConnectedLayer = previousLayer;
 }
Example No. 5
 public static void ClearGpuArray(GPUModule module, float[] gpuArray, int size)
 {
     //var array = new float[size];
     //Array.Clear(array, 0, array.Length);
     //gpu.CopyToDevice(array, gpuArray);
     module.FillArrayRaw(gpuArray, size, 0f);
 }
Example No. 6
 public CpuGpuArray(GPUModule gpuModule, int size)
 {
     _gpuModule = gpuModule;
     CPUArray = new float[size];
     GPUArray = AllocateGPUArray(_gpuModule, size);
     RowCount = 1;
     ColCount = size;
 }
Example No. 7
 public CpuGpuArray(GPUModule gpuModule, int size)
 {
     _gpuModule = gpuModule;
     CPUArray   = new float[size];
     GPUArray   = AllocateGPUArray(_gpuModule, size);
     RowCount   = 1;
     ColCount   = size;
 }
Example No. 8
 public CpuGpuArray(CpuGpuArray array, int rows, int cols)
 {
     _gpuModule = array._gpuModule;
     CPUArray   = array.CPUArray;
     GPUArray   = array.GPUArray;
     RowCount   = rows;
     ColCount   = cols;
 }
Example No. 9
 public CpuGpuArray(CpuGpuArray array, int rows, int cols)
 {
     _gpuModule = array._gpuModule;
     CPUArray = array.CPUArray;
     GPUArray = array.GPUArray;
     RowCount = rows;
     ColCount = cols;
 }
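The CpuGpuArray constructors above compose naturally: the flat constructor allocates matching host and device buffers, and the view constructor reuses them with a different shape. A brief sketch, assuming an initialized GPUModule as in Example No. 28; the shapes are made up for illustration.

 // Hypothetical shapes.
 var flat = new CpuGpuArray(gpuModule, 12);   // 1 x 12: host array plus zero-filled GPU array
 var view = new CpuGpuArray(flat, 3, 4);      // 3 x 4 view over the same CPUArray/GPUArray buffers
 // Only RowCount and ColCount differ between flat and view.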
Example No. 10
 public OneHotRecordProvider(GPUModule gpuModule, List<OneHotRecordReadOnly> records, string id = "", bool shuffleEveryEpoch = false)
 {
     Id = id;
     _records = records;
     GpuModule = gpuModule;
     _shuffle = shuffleEveryEpoch;
     if (_shuffle) _records.Shuffle();
 }
Example No. 11
 public ActivationLayer(GPUModule gpuModule, Layer previousLayer, string id = "", bool createOutputs = true) : base(gpuModule, previousLayer, 0, id)
 {
     if (createOutputs)
     {
         this.Size = previousLayer.Size;
         AddArray(ArrayName.Outputs, previousLayer.MinibatchSize, this.Size);
         AddArray(ArrayName.Gradients, previousLayer.MinibatchSize, this.Size);
     }
 }
Example No. 12
 public ActivationLayer(GPUModule gpuModule, Layer previousLayer, string id = "", bool createOutputs = true) : base(gpuModule, previousLayer, 0, id)
 {
     if (createOutputs)
     {
         this.Size = previousLayer.Size;
         AddArray(ArrayName.Outputs, previousLayer.MinibatchSize, this.Size);
         AddArray(ArrayName.Gradients, previousLayer.MinibatchSize, this.Size);
     }
 }
Example No. 13
 public MaxoutLayer(GPUModule gpuModule, Layer previousLayer, int groupSize = 2, string id = "") : base(gpuModule, previousLayer, id, createOutputs: false)
 {
     GroupSize = groupSize;
     if (previousLayer.Size % GroupSize != 0) throw new ArgumentException("Invalid groupsize");
     this.Size = previousLayer.Size / GroupSize;
     AddArray(ArrayName.Outputs, previousLayer.MinibatchSize, this.Size);
     AddArray(ArrayName.Gradients, previousLayer.MinibatchSize, this.Size);
     AddIntArray(ArrayName.Winners, previousLayer.MinibatchSize, this.Size);
 }
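The group-size check above means a maxout layer always shrinks its input by a factor of GroupSize. A quick restatement of the arithmetic with made-up sizes (not taken from the original source):

 // Hypothetical sizes restating the constructor's check.
 int previousSize = 512;
 int groupSize = 2;
 if (previousSize % groupSize != 0) throw new ArgumentException("Invalid groupsize");
 int maxoutSize = previousSize / groupSize; // 256 outputs, one winner index per group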
Example No. 14
 public void CopyToHost(GPUModule gpuModule)
 {
     if (!this.IsGPUData)
     {
         throw new Exception("Not gpu anabled");
     }
     gpuModule.Gpu.CopyFromDevice(this.GpuInputs, this.Inputs);
     gpuModule.Gpu.CopyFromDevice(this.GpuLabels, this.Labels);
 }
Example No. 15
 public DataLayer(GPUModule gpuModule, int size, int batchesPerLoad = 1, int miniBatchSize = 128, int sparseDataSize = 0) : base(gpuModule, size: 0, miniBatchSize: miniBatchSize)
 {
     this.Size = size;
     this.IsSparse = sparseDataSize != 0;
     this.SparseDataSize = sparseDataSize;
     if (!IsSparse)
     {
         AddArray(ArrayName.Outputs, batchesPerLoad * MinibatchSize, size);
     }
 }
Example No. 16
 public DataLayer(GPUModule gpuModule, int size, int batchesPerLoad = 1, int miniBatchSize = 128, int sparseDataSize = 0) : base(gpuModule, size: 0, miniBatchSize: miniBatchSize)
 {
     this.Size           = size;
     this.IsSparse       = sparseDataSize != 0;
     this.SparseDataSize = sparseDataSize;
     if (!IsSparse)
     {
         AddArray(ArrayName.Outputs, batchesPerLoad * MinibatchSize, size);
     }
 }
Example No. 17
 public CpuGpuArray(GPUModule gpuModule, float[] gpuArray, int rows, int cols, bool createCpuData = true)
 {
     _gpuModule = gpuModule;
     if (createCpuData)
     {
         CPUArray = new float[rows * cols];
     }
     GPUArray = gpuArray;
     RowCount = rows;
     ColCount = cols;
 }
Example No. 18
 public CpuGpuMatrixSparse(GPUModule module, int dataSize, List<float[]> values, List<int[]> indices, int targetColCount)
 {
     _gpuModule = module;
     if (values.Count != indices.Count) throw new ArgumentException();
     RowCount = values.Count;
     ColCount = targetColCount;
     DataSize = dataSize;
     NonZeroCount = values.Sum(x => x.Length);
     InitArrays(DataSize, RowCount);
     SetData(values, indices);
 }
Example No. 19
 public CpuGpuArray(GPUModule gpuModule, float[] gpuArray, int rows, int cols, bool createCpuData = true)
 {
     _gpuModule = gpuModule;
     if (createCpuData)
     {
         CPUArray = new float[rows * cols];
     }
     GPUArray = gpuArray;
     RowCount = rows;
     ColCount = cols;
 }
Example No. 20
 public MaxoutLayer(GPUModule gpuModule, Layer previousLayer, int groupSize = 2, string id = "") : base(gpuModule, previousLayer, id, createOutputs: false)
 {
     GroupSize = groupSize;
     if (previousLayer.Size % GroupSize != 0)
     {
         throw new ArgumentException("Invalid groupsize");
     }
     this.Size = previousLayer.Size / GroupSize;
     AddArray(ArrayName.Outputs, previousLayer.MinibatchSize, this.Size);
     AddArray(ArrayName.Gradients, previousLayer.MinibatchSize, this.Size);
     AddIntArray(ArrayName.Winners, previousLayer.MinibatchSize, this.Size);
 }
Example No. 21
 public CpuGpuMatrixSparse(GPUModule module, int dataSize, List<float[]> values, List<int[]> indices, int targetColCount)
 {
     _gpuModule = module;
     if (values.Count != indices.Count)
     {
         throw new ArgumentException();
     }
     RowCount     = values.Count;
     ColCount     = targetColCount;
     DataSize     = dataSize;
     NonZeroCount = values.Sum(x => x.Length);
     InitArrays(DataSize, RowCount);
     SetData(values, indices);
 }
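A sketch of how the sparse constructor above might be called. The assumption that dataSize is the capacity reserved for non-zero values (and so must cover values.Sum(x => x.Length)) is mine, not stated in the source; module stands for an already initialized GPUModule.

 // Hypothetical input: two sparse rows over 8 columns.
 var values = new List<float[]> { new[] { 1f, 2f }, new[] { 3f } };
 var indices = new List<int[]> { new[] { 0, 5 }, new[] { 2 } };
 int nonZeros = values.Sum(x => x.Length);    // 3 non-zero entries in total
 var sparse = new CpuGpuMatrixSparse(module, nonZeros, values, indices, targetColCount: 8);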
Example No. 22
        public FullyConnectedLayer(GPUModule gpuModule, Layer previousLayer, int size, string id = "") : base(gpuModule, previousLayer, size, id)
        {
            if (previousLayer != null)
            {
                AddArray(ArrayName.WeightUpdates, InputSize, this.Size);
                AddArray(ArrayName.LastWeightUpdates, InputSize, this.Size);
                AddArray(ArrayName.Weights, InputSize, this.Size);
                AddArray(ArrayName.BiasWeights, this.Size);
                AddArray(ArrayName.BiasWeightUpdates, this.Size);
                AddArray(ArrayName.LastBiasWeightUpdates, this.Size);

                var biasMultipliers = AddArray(ArrayName.BiasMultiplier, this.MinibatchSize, 1);
                biasMultipliers.FillCPU(1f);
            }
        }
Example No. 23
        public FullyConnectedLayer(GPUModule gpuModule, Layer previousLayer, int size, string id = "") : base(gpuModule, previousLayer, size, id)
        {
            if (previousLayer != null)
            {
                AddArray(ArrayName.WeightUpdates, InputSize, this.Size);
                AddArray(ArrayName.LastWeightUpdates, InputSize, this.Size);
                AddArray(ArrayName.Weights, InputSize, this.Size);
                AddArray(ArrayName.BiasWeights, this.Size);
                AddArray(ArrayName.BiasWeightUpdates, this.Size);
                AddArray(ArrayName.LastBiasWeightUpdates, this.Size);

                var biasMultipliers = AddArray(ArrayName.BiasMultiplier, this.MinibatchSize, 1);
                biasMultipliers.FillCPU(1f);
            }
        }
Example No. 24
        public static Network CreateNetworkRelu(GPUModule module, int minibatchSize)
        {
            var net = new Network(module, minibatchSize: minibatchSize);
            net.AddInputLayer(Constants.TOTAL_VALUE_COUNT, sparseDataSize: minibatchSize * RawRecord.FEATURE_COUNT * 2);
            net.AddLabelLayer(1);
            var fc1 = net.AddFullyConnectedLayer(128, "FC1");
            fc1.Weights.InitValuesUniformCPU(0.1f);

            fc1.L2Regularization = 0.00001f;
            fc1.RegularizationRatio = 10;
            net.AddReluLayer("REL1");

            var fc2 = net.AddFullyConnectedLayer(256, "FC2");
            fc2.Weights.InitValuesUniformCPU(0.1f);
            net.AddReluLayer("REL2");
            net.AddDropoutLayer();

            var sm = net.AddSoftmaxLayer(2, "SMAX");
            sm.Weights.InitValuesUniformCPU(0.1f);
            return net;
        }
Example No. 25
 public static void ClearGpuArray(GPUModule module, float[] gpuArray, int size)
 {
     //var array = new float[size];
     //Array.Clear(array, 0, array.Length);
     //gpu.CopyToDevice(array, gpuArray);
     module.FillArrayRaw(gpuArray, size, 0f);
 }
Example No. 26
 public CpuGpuArray(GPUModule gpuModule, int rows, int cols)
     : this(gpuModule, rows * cols)
 {
     RowCount = rows;
     ColCount = cols;
 }
Example No. 27
        public static void Train(GPUModule module, List<OneHotRecordReadOnly> allTrainRecords, Network net, float learnRate = 0.02f, float momentum = 0.5f, int epochsBeforeMergeHoldout = 30, int totalEpochs = 50, string tmpDir = null)
        {
            // use roughly last day for validation
            var trainCount = allTrainRecords.Count;
            var holdoutCount = trainCount / 7;
            trainCount = trainCount - holdoutCount;
            var holdoutRecords = allTrainRecords.Skip(trainCount).ToList();
            holdoutRecords.Shuffle();
            var trainRecords = allTrainRecords.Take(trainCount).ToList();

            var trainProvider = new OneHotRecordProvider(module, trainRecords, "train", shuffleEveryEpoch: true);
            //var trainProvider = new ClicksProvider(module, TRAINSET_BIN_PATH, "train");
            trainProvider._currentEpoch = 0;
            var holdoutProvider = new OneHotRecordProvider(module, holdoutRecords, "test");// new ClicksProvider(module, TESTSET_BIN_PATH, "test");
            //var testProvider = new ClicksProvider(module, TESTSET_BIN_PATH, "test");
            holdoutProvider._currentEpoch = 0;

            var trainer = new CriteoTrainer(net, trainProvider, holdoutProvider);
            trainer.Train(learnRate, momentum, epocsBeforeReport: 40, epocsBeforeMergeHoldout: epochsBeforeMergeHoldout, totalEpochs: totalEpochs, workDir: tmpDir);
        }
Example No. 28
        static void Main(string[] args)
        {
            var dataDir = Directory.GetCurrentDirectory();
            var csvTrainPath = Path.Combine(dataDir, "train.csv");
            var csvTestPath = Path.Combine(dataDir, "test.csv");
            var binTrainPath = Path.Combine(dataDir, "train_bin.bin");
            var binTestPath = Path.Combine(dataDir, "test_bin.bin");
            var recodedTrainPath = Path.Combine(dataDir, "train_recoded.bin");
            var recodedTestPath = Path.Combine(dataDir, "test_recoded.bin");
            var oneHotTrainPath = Path.Combine(dataDir, "train_onehot.bin");
            var oneHotTestPath = Path.Combine(dataDir, "test_onehot.bin");
            var scaledTrainPath = Path.Combine(dataDir, "train_scaled.bin");
            var scaledTestPath = Path.Combine(dataDir, "test_scaled.bin");

            Constants.HASH_SPACE_SIZE = 32768 * 2; // Like 'b' in vowpal but much smaller. We have less space on the GPU and we need to multiply the space by the number of nodes in the 1st layer.
                                                   // When you change this value you need to preprocess again.
            Constants.InitOneHotIndices();

            // *** Remove processed files to reprocess ***

            // First process the CSV data into "zipped binary data", useful when we have to reprocess: faster and more compact.
            if (!File.Exists(binTrainPath))  PreprocessingRawValues.ConvertCSVToBinary(csvTrainPath, binTrainPath);
            if (!File.Exists(binTestPath))  PreprocessingRawValues.ConvertCSVToBinary(csvTestPath, binTestPath);

            // Recode categorical values. MISSING = missing, TRAINNOTTEST = in trainset, not testset, TESTNOTTRAIN = in testset, not trainset
            // LOWFREQUENCY = When a value occurs below a certain threshold, it is recoded to this value.
            if ((!File.Exists(recodedTrainPath)) || (!File.Exists(recodedTestPath))) 
            {
                var frequencyFilter = Constants.FREQUENCY_FILTER_AGGRESSIVE; // Vary for ensembling. Medium or mild results in more feature values = more GPU memory usage, potentially better accuracy but also potentially overfitting. Make sure you also increase HASH_SIZE
                PreprocessingRawValues.RecodeCategoricalValues(binTrainPath, binTestPath, recodedTrainPath, recodedTestPath, frequencyFilter);
            }

            // Now One-Hot encode the raw records. (actually it one-hot encodes the categories with few values and hashes the categories with many values)
            // This is probably way too complicated. Perhaps we could hash everything. Even the numeric values.
            var encodeMissingValues = true;  // vary for ensembling
            var logTransformNumerics = true; // vary for ensembling
            var encodeTestNotTrainAs = Constants.VALUE_MISSING; // vary for ensembling

            if ((!File.Exists(oneHotTrainPath)) || (!File.Exists(oneHotTestPath)))
            {
                PreprocessingRawToOneHot.ConvertRawToOneHot(recodedTrainPath, recodedTestPath, oneHotTrainPath, oneHotTestPath, encodeMissingValues, encodeTestNotTrainAs, logTransformNumerics);
            }

            // Now scale the numeric values. This leads to faster convergence..
            if ((!File.Exists(scaledTrainPath)) || (!File.Exists(scaledTestPath)))
            {
                PreprocessingScale.ScaleNumericValues(oneHotTrainPath, oneHotTestPath, scaledTrainPath, scaledTestPath);
            }
            
            // We create an "ensemble" of a relunet and a maxout net.
            var gpuModule = new GPUModule();
            gpuModule.InitGPU();
            var learnRate = 0.03f; // 0.01 - 0.04 also worked fine for me, 0.04 was the fastest.
            var momentum = 0.5f; // Did not play with this much since 1st layer is without momentum for performance reasons.
            var epochsBeforeMergeHoldout = 15; // When do we add the holdout set to the trainset (no more validation information after this)
            var totalEpochs = 20; // How many epochs to train. Usually I saw no improvement after 40.
                        
            var trainRecords = OneHotRecordReadOnly.LoadBinary(scaledTrainPath);

            // Train a maxout network (~LB 0.4556)
            var maxoutNet = CriteoNet.CreateNetworkMaxout(gpuModule, Constants.MINIBATCH_SIZE); // Example network that worked fine
            Train(gpuModule, trainRecords, maxoutNet, learnRate, momentum, epochsBeforeMergeHoldout, totalEpochs, tmpDir: dataDir);
            maxoutNet.SaveWeightsAndParams(dataDir, "maxoutnet_done");
            maxoutNet.Free();

            // Train a relu network (~LB 0.4555)
            var reluNet = CriteoNet.CreateNetworkRelu(gpuModule, Constants.MINIBATCH_SIZE); // Example network that worked fine
            Train(gpuModule, trainRecords, reluNet, learnRate, momentum, epochsBeforeMergeHoldout, totalEpochs, tmpDir: dataDir);
            reluNet.SaveWeightsAndParams(dataDir, "relunet_done");
            reluNet.Free();


            // Create the maxout submission (~LB 0.456, train longer for better scores)
            var submissionMaxoutNet = CriteoNet.CreateNetworkMaxout(gpuModule, Constants.MINIBATCH_SIZE); // Example network that worked fine
            var submissionMaxoutPath = Path.Combine(dataDir, "submissionMaxout.csv");
            submissionMaxoutNet.LoadStructureWeightsAndParams(dataDir, "maxoutnet_done");
            MakeSubmission(submissionMaxoutNet, scaledTestPath, submissionMaxoutPath);

            // Create the relu submission (~LB 0.455, train longer for better scores)
            var submissionReluNet = CriteoNet.CreateNetworkRelu(gpuModule, Constants.MINIBATCH_SIZE); // Example network that worked fine
            var submissionReluPath = Path.Combine(dataDir, "submissionRelu.csv");
            submissionReluNet.LoadStructureWeightsAndParams(dataDir, "relunet_done");
            MakeSubmission(submissionReluNet, scaledTestPath, submissionReluPath);

            // Now make a combined submission (~LB 0.45267)
            var submissionCombinedPath = Path.Combine(dataDir, "submissionCombined.csv");
            CombineSubmission(submissionCombinedPath, new string[] { submissionReluPath, submissionMaxoutPath });

            Console.WriteLine("Done press enter");
            Console.ReadLine();
        }
Example No. 29
 public CostLayer(GPUModule gpuModule, Layer previousLayer, DataLayer groundTruthLayer, int size, string id = "") : base(gpuModule, previousLayer, size, id)
 {
     GroundTruthLayer = groundTruthLayer;
 }
Example No. 30
 public DropoutLayer(GPUModule gpuModule, Layer previousLayer, string id = "") : base(gpuModule, previousLayer, previousLayer.Size, id)
 {
     AddArray(ArrayName.DropoutMask, MinibatchSize, this.Size);
 }
Example No. 31
 public TanhLayer(GPUModule gpuModule, Layer previousLayer, string id = "") : base(gpuModule, previousLayer, id)
 {
 }
Example No. 32
 public CostLayer(GPUModule gpuModule, Layer previousLayer, DataLayer groundTruthLayer, int size, string id = "") : base(gpuModule, previousLayer, size, id)
 {
     GroundTruthLayer = groundTruthLayer;
 }
Example No. 33
 public TanhLayer(GPUModule gpuModule, Layer previousLayer, string id = "") : base(gpuModule, previousLayer, id)
 {
 }
Example No. 34
 public static void ClearGpuArray(GPUModule module, int[] gpuArray, int size)
 {
     module.FillArrayRawInt(gpuArray, size, 0);
 }
Example No. 35
 public static void ClearGpuArray(GPUModule module, int[] gpuArray, int size)
 {
     module.FillArrayRawInt(gpuArray, size, 0);
 }
Example No. 36
 public CpuGpuMatrixSparse(GPUModule module, int dataSize, int rowCount, int colCount)
 {
     _gpuModule = module;
     ColCount   = colCount;
     InitArrays(dataSize, rowCount);
 }
Example No. 37
 public CpuGpuMatrixSparse(GPUModule module, int dataSize, int rowCount, int colCount)
 {
     _gpuModule = module;
     ColCount = colCount;
     InitArrays(dataSize, rowCount);
 }
Example No. 38
 public CpuGpuArray(GPUModule gpuModule, int rows, int cols)
     : this(gpuModule, rows * cols)
 {
     RowCount = rows;
     ColCount = cols;
 }
Example No. 39
 public void CopyToHost(GPUModule gpuModule)
 {
     if (!this.IsGPUData) throw new Exception("Not gpu anabled");
     gpuModule.Gpu.CopyFromDevice(this.GpuInputs, this.Inputs);
     gpuModule.Gpu.CopyFromDevice(this.GpuLabels, this.Labels);
 }
Example No. 40
 public Network(GPUModule gpuModule, int minibatchSize = 128)
 {
     _minibatchSize = minibatchSize;
     _gpuModule     = gpuModule;
     Timer          = Stopwatch.StartNew();
 }
Example No. 41
 public static int[] AllocateGPUArrayInt(GPUModule module, int size)
 {
     var res = module.Gpu.Allocate<int>(size);
     ClearGpuArray(module, res, size);
     return res;
 }
Example No. 42
 public static float[] AllocateGPUArray(GPUModule module, int size)
 {
     var res = module.Gpu.Allocate<float>(size);
     ClearGpuArray(module, res, size);
     return res;
 }
Example No. 43
 public Network(GPUModule gpuModule, int minibatchSize = 128)
 {
     _minibatchSize = minibatchSize;
     _gpuModule = gpuModule;
     Timer = Stopwatch.StartNew();
 }