Example #1
0
 /// <summary>
 /// Per-layer training buffers: the forward output values and (optionally) the
 /// error derivatives, each sized for a full batch.
 /// </summary>
 /// <param name="layerModel">The layer this data block belongs to.</param>
 /// <param name="isValueNeeded">To save GPU memory, when no errors are needed, we should not allocate error piece. This usually happens on the input layer</param>
 public NeuralLayerData(NeuralLayer layerModel, bool isValueNeeded)
 {
     LayerModel = layerModel;
     if (!isValueNeeded)
     {
         return;
     }

     // One float per (sample, neuron) pair across the whole batch.
     int bufferLen = ParameterSetting.BATCH_SIZE * Number;
     output     = new CudaPieceFloat(bufferLen, true, true);
     errorDeriv = new CudaPieceFloat(bufferLen, true, true);
 }
Example #2
0
        /// <summary>
        /// Builds a link between two neural layers and allocates its weight and bias pieces.
        /// </summary>
        /// <param name="layer_in">Source layer of the link.</param>
        /// <param name="layer_out">Target layer of the link.</param>
        /// <param name="af">Activation function applied by this link.</param>
        /// <param name="hidBias">Initial hidden-bias value.</param>
        /// <param name="weightSigma">Sigma used for weight initialization.</param>
        /// <param name="nt">Link architecture type.</param>
        /// <param name="win_size">Window size; scales the weight matrix length.</param>
        /// <param name="backupOnly">When true, the second allocation flag on the CUDA pieces is
        /// suppressed (NOTE(review): presumably skips the device-side buffer — confirm against CudaPieceFloat).</param>
        public NeuralLink(NeuralLayer layer_in, NeuralLayer layer_out, A_Func af, float hidBias, float weightSigma, N_Type nt, int win_size, bool backupOnly)
        {
            Neural_In       = layer_in;
            Neural_Out      = layer_out;
            Nt              = nt;
            N_Winsize       = win_size;
            Af              = af;
            initHidBias     = hidBias;
            initWeightSigma = weightSigma;

            bool allocFlag = !backupOnly;
            weight = new CudaPieceFloat(Neural_In.Number * Neural_Out.Number * N_Winsize, true, allocFlag);
            bias   = new CudaPieceFloat(Neural_Out.Number, true, allocFlag);
        }
Example #3
0
        /// <summary>
        /// Builds a feed-forward DNN: an input layer of size featureSize followed by one
        /// layer per entry of layerDim, with a NeuralLink joining each adjacent pair.
        /// </summary>
        /// <param name="featureSize">Dimension of the input layer.</param>
        /// <param name="layerDim">Dimension of each subsequent layer; also fixes the link count.</param>
        /// <param name="activation">Per-link activation function id (cast to A_Func).</param>
        /// <param name="sigma">Per-link weight-initialization sigma.</param>
        /// <param name="arch">Per-link architecture id (cast to N_Type).</param>
        /// <param name="wind">Per-link window size.</param>
        /// <param name="backupOnly">Forwarded to each NeuralLink's allocation.</param>
        public DNN(int featureSize, int[] layerDim, int[] activation, float[] sigma, int[] arch, int[] wind, bool backupOnly)
        {
            // Layer 0 is the input; layers 1..layerDim.Length are built from layerDim.
            neurallayers.Add(new NeuralLayer(featureSize));
            foreach (int dim in layerDim)
            {
                neurallayers.Add(new NeuralLayer(dim));
            }

            // One link per adjacent layer pair; hidden bias is initialized to 0.
            for (int linkIdx = 0; linkIdx < layerDim.Length; linkIdx++)
            {
                neurallinks.Add(new NeuralLink(neurallayers[linkIdx], neurallayers[linkIdx + 1],
                                               (A_Func)activation[linkIdx], 0, sigma[linkIdx],
                                               (N_Type)arch[linkIdx], wind[linkIdx], backupOnly));
            }
        }
Example #4
0
        /// <summary>
        /// Loads a DNN model from a binary file: layer sizes, link metadata, then the
        /// weight and bias values of every link. Finally pushes the loaded host-side
        /// arrays to the GPU via CopyIntoCuda().
        /// </summary>
        /// <param name="fileName">Path of the model file to read.</param>
        /// <param name="allocateStructureFromEmpty">True will init DNN structure and allocate new space; False will only load data from file</param>
        public void Model_Load(string fileName, bool allocateStructureFromEmpty)
        {
            // using-statements guarantee the stream and reader are released even when a
            // read throws on a truncated/corrupt file (the original only closed them on
            // the happy path, leaking the file handle on any exception).
            using (FileStream mstream = new FileStream(fileName, FileMode.Open, FileAccess.Read))
            using (BinaryReader mreader = new BinaryReader(mstream))
            {
                // ---- Layer sizes ----
                List<int> layer_info = new List<int>();
                int mlayer_num = mreader.ReadInt32();

                for (int i = 0; i < mlayer_num; i++)
                {
                    layer_info.Add(mreader.ReadInt32());
                }
                if (allocateStructureFromEmpty)
                {
                    for (int i = 0; i < layer_info.Count; i++)
                    {
                        neurallayers.Add(new NeuralLayer(layer_info[i]));
                    }
                }

                // ---- Link metadata ----
                int mlink_num = mreader.ReadInt32();

                for (int i = 0; i < mlink_num; i++)
                {
                    int   in_num          = mreader.ReadInt32();
                    int   out_num         = mreader.ReadInt32();
                    float inithidbias     = mreader.ReadSingle();
                    float initweightsigma = mreader.ReadSingle();

                    NeuralLink link = null;
                    if (ParameterSetting.LoadModelOldFormat)
                    {
                        if (allocateStructureFromEmpty)
                        {
                            // For back-compatibility only: the old model format does not
                            // store window size, activation/net type, or pooling.
                            link = new NeuralLink(neurallayers[i], neurallayers[i + 1], A_Func.Tanh, 0, initweightsigma, N_Type.Fully_Connected, 1, false);
                        }
                    }
                    else
                    {
                        // This is the eventually favorable loading format. These reads must
                        // happen even when allocateStructureFromEmpty is false, to keep the
                        // stream position correct.
                        int mws = mreader.ReadInt32();
                        // Decompose an Int32 whose higher 16 bits store the activation
                        // function and lower 16 bits store the network type. For backward
                        // compatibility, Af = tanh is stored as 0, linear as 1, rectified
                        // as 2 (see Int2A_Func).
                        int       afAndNt = mreader.ReadInt32();
                        A_Func    aF      = Int2A_Func(afAndNt >> 16);
                        N_Type    mnt     = (N_Type)(afAndNt & ((1 << 16) - 1));
                        P_Pooling mp      = (P_Pooling)mreader.ReadInt32(); // read to advance the stream; value currently unused
                        if (allocateStructureFromEmpty)
                        {
                            link = new NeuralLink(neurallayers[i], neurallayers[i + 1], aF, 0, initweightsigma, mnt, mws, false);
                        }
                    }
                    if (allocateStructureFromEmpty)
                    {
                        neurallinks.Add(link);
                    }
                }

                // ---- Weight and bias values ----
                for (int i = 0; i < mlink_num; i++)
                {
                    int weight_len = mreader.ReadInt32();
                    if (weight_len != neurallinks[i].Back_Weight.Length)
                    {
                        // NOTE(review): best-effort console diagnostics kept as-is; the
                        // blocking ReadLine pauses so the mismatch is visible before the
                        // subsequent reads go out of sync.
                        Console.WriteLine("Loading Model Weight Error!  " + weight_len.ToString() + " " + neurallinks[i].Back_Weight.Length.ToString());
                        Console.ReadLine();
                    }
                    for (int m = 0; m < weight_len; m++)
                    {
                        neurallinks[i].Back_Weight[m] = mreader.ReadSingle();
                    }
                    int bias_len = mreader.ReadInt32();
                    if (bias_len != neurallinks[i].Back_Bias.Length)
                    {
                        Console.WriteLine("Loading Model Bias Error!  " + bias_len.ToString() + " " + neurallinks[i].Back_Bias.Length.ToString());
                        Console.ReadLine();
                    }
                    for (int m = 0; m < bias_len; m++)
                    {
                        neurallinks[i].Back_Bias[m] = mreader.ReadSingle();
                    }
                }
            }
            CopyIntoCuda();
        }