Example #1
 public void Matrix_WeightAdd_EX_Full(CudaPieceFloat gpu_floats_a, CudaPieceFloat gpu_floats_b, CudaPieceInt inver_neg_index,
                                      CudaPieceInt inver_neg_value, int nTrial, int BATCHSIZE, int batchsize, int dimension, CudaPieceFloat mweight,
                                      int start, int keep)
 {
     BasicMathlib.Matrix_WeightAdd_EX_Full(gpu_floats_a.MemPtr, gpu_floats_b.MemPtr, inver_neg_index.MemPtr,
                                           inver_neg_value.MemPtr, nTrial, BATCHSIZE, batchsize, dimension, mweight.MemPtr, start, keep);
 }
Example #2
 public BatchSample_Input(int MAX_BATCH_SIZE, int MAXSEQUENCE_PERBATCH, int MAXELEMENTS_PERBATCH)
 {
     sample_Idx = new CudaPieceInt(MAX_BATCH_SIZE, true, true);
     seg_Idx    = new CudaPieceInt(MAXSEQUENCE_PERBATCH, true, true);
     seg_Margin = new CudaPieceInt(MAXSEQUENCE_PERBATCH, true, true);
     seg_Len    = new CudaPieceFloat(MAXSEQUENCE_PERBATCH, true, true);
     fea_Idx    = new CudaPieceInt(MAXELEMENTS_PERBATCH, true, true);
     fea_Value  = new CudaPieceFloat(MAXELEMENTS_PERBATCH, true, true);
 }
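For orientation, here is a hedged sketch of how these three capacities relate; the numeric values are placeholders chosen for illustration and are not taken from the original configuration. Each batch holds up to MAX_BATCH_SIZE samples, each sample is split into segments (seg_Idx/seg_Margin/seg_Len), and each segment contributes sparse (index, value) feature entries (fea_Idx/fea_Value).

 // Placeholder capacities for illustration only; real values would come from the training configuration.
 var batch = new BatchSample_Input(
     1024,       // MAX_BATCH_SIZE: maximum samples per batch
     16 * 1024,  // MAXSEQUENCE_PERBATCH: maximum segments across the whole batch
     64 * 1024); // MAXELEMENTS_PERBATCH: maximum sparse feature elements across the whole batch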
Example #3
 /// <summary>
 /// Allocates the per-layer output and error-derivative buffers sized by the layer model.
 /// </summary>
 /// <param name="layerModel">The layer model whose Number determines the buffer dimensions</param>
 /// <param name="isValueNeeded">To save GPU memory, when no errors are needed, we should not allocate the error piece. This usually happens on the input layer</param>
 public NeuralLayerData(NeuralLayer layerModel, bool isValueNeeded)
 {
     LayerModel = layerModel;
     if (isValueNeeded)
     {
         output     = new CudaPieceFloat(ParameterSetting.BATCH_SIZE * Number, true, true);
         errorDeriv = new CudaPieceFloat(ParameterSetting.BATCH_SIZE * Number, true, true);
     }
 }
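As a usage sketch of the rule described in the comment above, the input layer can skip buffer allocation while hidden and output layers request it. The layer variables below are assumptions for illustration and do not appear in the original examples.

 // Hypothetical layers; inputLayer and hiddenLayer are assumed NeuralLayer instances.
 var inputData  = new NeuralLayerData(inputLayer, false);  // input layer: no output/error buffers allocated
 var hiddenData = new NeuralLayerData(hiddenLayer, true);  // hidden layer: BATCH_SIZE * Number floats each for output and errorDeriv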
Example #4
 public DNN_BatchTrain_CG_HS(DNN dnn)
     : base(dnn)
 {
     parameters = new CudaPieceFloat(Num, true, true);
     direction  = new CudaPieceFloat(Num, true, true);
     for (int i = 0; i < GradHistory; i++)
     {
         grad_list.Add(new CudaPieceFloat(Num, true, true));
     }
 }
Example #5
 public void Matrix_Multipy(CudaPieceFloat input, CudaPieceFloat weight, CudaPieceFloat output, int batchsize, int inputDimension, int outputDimension, int inverse)
 {
     // Dispatch to the cuBLAS-backed multiply when enabled; otherwise fall back to the custom CUDA kernel.
     if (ParameterSetting.CuBlasEnable)
     {
         Cudalib.CUBLAS_Matrix_Multipy(input.CudaPtr, weight.CudaPtr, output.CudaPtr, batchsize, inputDimension, outputDimension, inverse);
     }
     else
     {
         Cudalib.Matrix_Multipy(input.CudaPtr, weight.CudaPtr, output.CudaPtr, batchsize, inputDimension, outputDimension, inverse);
     }
 }
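A minimal call sketch follows. It assumes that passing 0 for the inverse flag requests the plain (non-transposed) product; the flag's exact meaning is not shown in these examples, and the variable names are placeholders.

 // Forward projection sketch: output (batchsize x outputDimension) from input (batchsize x inputDimension)
 // times weight (inputDimension x outputDimension). All buffers are assumed to be allocated CudaPieceFloat instances.
 mathLib.Matrix_Multipy(layerInput, linkWeight, layerOutput, batchsize, inputDim, outputDim, 0);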
Example #6
        public NeuralLink(NeuralLayer layer_in, NeuralLayer layer_out, A_Func af, float hidBias, float weightSigma, N_Type nt, int win_size, bool backupOnly)
        {
            Neural_In  = layer_in;
            Neural_Out = layer_out;
            Nt        = nt;
            N_Winsize = win_size;

            Af              = af;
            initHidBias     = hidBias;
            initWeightSigma = weightSigma;

            weight = new CudaPieceFloat(Neural_In.Number * Neural_Out.Number * N_Winsize, true, !backupOnly);
            bias   = new CudaPieceFloat(Neural_Out.Number, true, !backupOnly);
        }
Example #7
        public NeuralLinkData(NeuralLink neuralLink)
        {
            neuralLinkModel = neuralLink;

            if (neuralLinkModel.Nt == N_Type.Convolution_layer)
            {
                layerPoolingOutput = new CudaPieceFloat(PairInputStream.MAXSEGMENT_BATCH * neuralLinkModel.Neural_Out.Number, false, true);

                layerMaxPooling_Index = new CudaPieceInt(ParameterSetting.BATCH_SIZE * neuralLinkModel.Neural_Out.Number, false, true);
            }

            weightDeriv = new CudaPieceFloat(neuralLinkModel.Neural_In.Number * neuralLinkModel.Neural_Out.Number * neuralLinkModel.N_Winsize, false, true);
            biasDeriv   = new CudaPieceFloat(neuralLinkModel.Neural_Out.Number, false, true);

            weightUpdate = new CudaPieceFloat(neuralLinkModel.Neural_In.Number * neuralLinkModel.Neural_Out.Number * neuralLinkModel.N_Winsize, false, true);
            biasUpdate   = new CudaPieceFloat(neuralLinkModel.Neural_Out.Number, false, true);
        }
Example #8
        void Init(DNN dnn_query, DNN dnn_doc)
        {
            dnn_model_query = new DNNRun(dnn_query);
            dnn_model_doc   = new DNNRun(dnn_doc);

            Pos_QD_Pair_TOP = new Layer_Output_Deriv_QD_PairTOP(dnn_query, dnn_doc);
            Neg_QD_Pair_TOP = new Layer_Output_Deriv_QD_PairTOP_Full(dnn_query, dnn_doc, ParameterSetting.NTRIAL);

            alphaCudaPiece = new CudaPieceFloat(ParameterSetting.BATCH_SIZE * (ParameterSetting.NTRIAL + 1), true, true);
            distCudaPiece  = new CudaPieceFloat(ParameterSetting.BATCH_SIZE * (ParameterSetting.NTRIAL + 1), true, true);

            GPU_negative_index_Array       = new CudaPieceInt(ParameterSetting.NTRIAL * ParameterSetting.BATCH_SIZE, true, true);
            GPU_Inver_negative_index_Array = new CudaPieceInt(ParameterSetting.NTRIAL * ParameterSetting.BATCH_SIZE, true, true);
            GPU_Inver_negative_value_Array = new CudaPieceInt(ParameterSetting.NTRIAL * ParameterSetting.BATCH_SIZE, true, true);

            if (ParameterSetting.PSEUDO_RANDOM)
            {
                neg_random = new Random(ParameterSetting.RANDOM_SEED);
            }
        }
Example #9
 public void SoftMax(CudaPieceFloat a, CudaPieceFloat b, int labelDim, int batchsize, float gamma)
 {
     BasicMathlib.SoftMax(a.MemPtr, b.MemPtr, labelDim, batchsize, gamma);
 }
Example #10
 public void Cosine_Similarity_SubSpace(CudaPieceFloat a, CudaPieceFloat b, CudaPieceFloat c, int labelDim, int BATCHSIZE, int batchsize, int subspaceDim, float eps)
 {
     BasicMathlib.Cosine_Similarity_SubSpace(a.MemPtr, b.MemPtr, c.MemPtr, labelDim, BATCHSIZE, batchsize, subspaceDim, eps);
 }
Example #11
 public void Matrix_Aggragate(CudaPieceFloat a, CudaPieceFloat b, int batchsize, int m)
 {
     BasicMathlib.Matrix_Aggragate(a.MemPtr, b.MemPtr, batchsize, m);
 }
Example #12
 public void Zero(CudaPieceFloat matrix, int size)
 {
     // Clears the entire host buffer; this CPU implementation ignores the size argument.
     Array.Clear(matrix.MemPtr, 0, matrix.MemPtr.Length);
 }
Example #13
 public void Calculate_Alpha_PAIRRANK(CudaPieceFloat alpha, int nTrailPlus1, int BATCH_SIZE, int batchsize, float GAMMA)
 {
     BasicMathlib.Calculate_Alpha_PAIRRANK(alpha.MemPtr, nTrailPlus1, BATCH_SIZE, batchsize, GAMMA);
 }
Example #14
 public void Derive_Cosine_Rectified_EX(CudaPieceFloat srcTopLayerOutput, CudaPieceFloat tgtTopLayerOutput, CudaPieceInt GPU_negative_index, CudaPieceFloat srcTopLayerOutputDeriv, CudaPieceFloat tgtTopLayerOutputDeriv, int batchsize, int outputLayerSize, float eps)
 {
     BasicMathlib.Derive_Cosine_Rectified_EX(srcTopLayerOutput.MemPtr, tgtTopLayerOutput.MemPtr, GPU_negative_index.MemPtr, srcTopLayerOutputDeriv.MemPtr, tgtTopLayerOutputDeriv.MemPtr, batchsize, outputLayerSize, eps);
 }
Example #15
 public void Deriv_InnerProduct(CudaPieceFloat q, CudaPieceFloat d, CudaPieceFloat dcq, CudaPieceFloat dcd, CudaPieceFloat alpha, int act_type, int batchsize, int Dim, float gamma, float eps)
 {
     BasicMathlib.Deriv_InnerProduct(q.MemPtr, d.MemPtr, dcq.MemPtr, dcd.MemPtr, alpha.MemPtr, act_type, batchsize, Dim, gamma, eps);
 }
Example #16
 public void Matrix_Product(CudaPieceFloat lowerOutput, CudaPieceFloat upperOutputErrorDeriv, CudaPieceFloat weightDeriv, int batchsize, int inputDimension, int outputDimension)
 {
     BasicMathlib.Matrix_Product(lowerOutput.MemPtr, upperOutputErrorDeriv.MemPtr, weightDeriv.MemPtr,
                                 batchsize, inputDimension, outputDimension);
 }
Example #17
 public void Convolution_Sparse_Matrix_Product_INTEX(CudaPieceFloat upperOutputErrorDeriv, CudaPieceInt layerMaxPooling_Index, BatchSample_Input input_batch, int winSize, int batchsize, int outputDimension, CudaPieceFloat weightDeriv, int inputDimension)
 {
     BasicMathlib.Convolution_Sparse_Matrix_Product_INTEX(upperOutputErrorDeriv.MemPtr, layerMaxPooling_Index.MemPtr, input_batch.Seg_Idx_Mem, input_batch.Seg_Margin_Mem, input_batch.segsize, winSize,
                                                          batchsize, outputDimension, input_batch.Fea_Idx_Mem, input_batch.Fea_Value_Mem, weightDeriv.MemPtr, inputDimension);
 }
Example #18
 public void SEQ_Sparse_Matrix_Transpose_Multiply_INTEX(BatchSample_Input input_batch, CudaPieceFloat weightDeriv, CudaPieceFloat upperOutputErrorDeriv, int inputDimension, int outputDimension, int winSize)
 {
     BasicMathlib.SEQ_Sparse_Matrix_Transpose_Multiply_INTEX(input_batch.Sample_Idx_Mem, input_batch.batchsize, input_batch.Seg_Idx_Mem, input_batch.Seg_Margin_Mem, input_batch.Seg_Len_Mem, input_batch.segsize, input_batch.Fea_Idx_Mem, input_batch.Fea_Value_Mem, input_batch.elementsize,
                                                             weightDeriv.MemPtr, upperOutputErrorDeriv.MemPtr, inputDimension, outputDimension, winSize);
 }
Example #19
 public void Deriv_Rectified(CudaPieceFloat errorDeriv, CudaPieceFloat output, int batchsize, int inputDimension)
 {
     BasicMathlib.Deriv_Rectified(errorDeriv.MemPtr, output.MemPtr, batchsize, inputDimension);
 }
Example #20
 public void Matrix_WeightAdd_EX(CudaPieceFloat result, CudaPieceFloat addTerm, CudaPieceInt GPU_Inver_negative_index, CudaPieceInt GPU_Inver_negative_value, int batchsize, int outputLayerSize, CudaPieceFloat mweight, int start, int keep)
 {
     BasicMathlib.Matrix_WeightAdd_EX(result.MemPtr, addTerm.MemPtr, GPU_Inver_negative_index.MemPtr, GPU_Inver_negative_value.MemPtr, batchsize, outputLayerSize, mweight.MemPtr, start, keep);
 }
Example #21
 public void Matrix_WeightAdd(CudaPieceFloat result, CudaPieceFloat addTerm, int batchsize, int outputLayerSize, CudaPieceFloat mweight, int start, int keep)
 {
     BasicMathlib.Matrix_WeightAdd(result.MemPtr, addTerm.MemPtr, batchsize, outputLayerSize, mweight.MemPtr, start, keep);
 }
Example #22
 public void Deriv_Cosine_Subspace(CudaPieceFloat q, CudaPieceFloat d, CudaPieceFloat dcq, CudaPieceFloat dcd, CudaPieceFloat alpha, int act_type, int batchsize, int labelDim, int subspaceDim, float gamma, float eps)
 {
     BasicMathlib.Deriv_Cosine_Subspace(q.MemPtr, d.MemPtr, dcq.MemPtr, dcd.MemPtr, alpha.MemPtr, act_type, batchsize, labelDim, subspaceDim, gamma, eps);
 }
Example #23
 public void InnerProduct_Similarity(CudaPieceFloat a, CudaPieceFloat b, CudaPieceFloat c, int batchsize, int dimension)
 {
     BasicMathlib.InnerProduct_Similarity(a.MemPtr, b.MemPtr, c.MemPtr, batchsize, dimension);
 }
Example #24
 public void FillOut_Dist_NCE(CudaPieceFloat dist, CudaPieceInt GPU_negative_index, int nTrailPlus1, int BATCH_SIZE, int mIndex, int batchsize)
 {
     BasicMathlib.FillOut_Dist_NCE(dist.MemPtr, GPU_negative_index.MemPtr, nTrailPlus1, BATCH_SIZE, mIndex, batchsize);
 }
Example #25
 public void Matrix_Add_OFFSET(CudaPieceFloat a, int offset_a, CudaPieceFloat b, int offset_b, int len, float mweight)
 {
     BasicMathlib.Matrix_Add_OFFSET(a.MemPtr, offset_a, b.MemPtr, offset_b, len, mweight);
 }
Example #26
 public void Derive_Cosine_Linear(CudaPieceFloat srcTopLayerOutput, CudaPieceFloat tgtTopLayerOutput, CudaPieceFloat srcTopLayerOutputDeriv, CudaPieceFloat tgtTopLayerOutputDeriv, int batchsize, int outputLayerSize, float eps)
 {
     BasicMathlib.Derive_Cosine_Linear(srcTopLayerOutput.MemPtr, tgtTopLayerOutput.MemPtr, srcTopLayerOutputDeriv.MemPtr, tgtTopLayerOutputDeriv.MemPtr, batchsize, outputLayerSize, eps);
 }
Example #27
 public Layer_Output_Deriv_QD_PairTOP_Full(DNN query_dnn_model, DNN doc_dnn_model, int nTrail)
 {
     cuda_layer_Deriv_Q = new CudaPieceFloat(nTrail * query_dnn_model.OutputLayerSize * ParameterSetting.BATCH_SIZE, false, true);
     cuda_layer_Deriv_D = new CudaPieceFloat(nTrail * doc_dnn_model.OutputLayerSize * ParameterSetting.BATCH_SIZE, false, true);
 }
Example #28
 public void Matrix_Add(CudaPieceFloat matrix, CudaPieceFloat updates, int inputDimension, int outputDimnsion, float learning_rate)
 {
     BasicMathlib.Matrix_Add(matrix.MemPtr, updates.MemPtr, inputDimension, outputDimnsion, learning_rate);
 }
Example #29
 public void Scale_Matrix(CudaPieceFloat matrix, int inputDimension, int outputDimnsion, float momentum)
 {
     BasicMathlib.Scale_Matrix(matrix.MemPtr, inputDimension, outputDimnsion, momentum);
 }
Example #30
 public void Sparse2Dense_Matrix(BatchSample_Input data, CudaPieceFloat matrix, int batchsize, int outputDimension)
 {
     BasicMathlib.Sparse2Dense_Matrix(data.Seg_Idx_Mem, data.Fea_Idx_Mem, data.Fea_Value_Mem, matrix.MemPtr, batchsize, outputDimension);
 }