Example #1
0
    /// <summary>
    /// Unity lifecycle hook: caches the sprite renderer, picks an initial
    /// destination for flying enemies, resolves the local network player and
    /// starts the repeating AI timers.
    /// </summary>
    void Start()
    {
        enemysprite = this.GetComponent<SpriteRenderer>();
        if (isFly)
        {
            Destination = FindClosestPlayer();
        }

        // NOTE(review): tag is "Play", not the conventional "Player" — confirm
        // this matches the tags actually defined in the project.
        GameObject[] NetPlayers = GameObject.FindGameObjectsWithTag("Play");

        if (!isOffline)
        {
            // FIX: the original branched on !isServer and isServer with
            // byte-identical bodies, so the server check was redundant —
            // in both cases we want the local player's NetPlayer component.
            foreach (GameObject NP in NetPlayers)
            {
                if (NP.GetComponent<NetworkIdentity>().isLocalPlayer)
                {
                    NetPlayer = NP.GetComponent<NetPlayer>();
                }
            }
        }
        healthNow = health;
        prevLoc   = transform.position;
        // AI tick timers: direction changes every 0.2s, position history every 0.1s
        InvokeRepeating("ChangeDirection", 0f, 0.2f);
        InvokeRepeating("RecordPosition", 0f, 0.1f);
    }
Example #2
0
        /// <summary>
        /// Predicts the raw class value for a single input state.
        /// </summary>
        /// <param name="state">Normalized input feature vector.</param>
        /// <returns>The raw value mapped from the highest-scoring action.</returns>
        public int Predict(float[] state)
        {
            var (bestAction, _) = ChooseAction(state);
            return ActionToRawValue(NP.Argmax(bestAction));
        }
        /// <summary>
        /// End-to-end smoke test of DQN image classification: trains a tiny
        /// model, checks that the loss drops, then classifies one sample pixel.
        /// </summary>
        public void ClassificationByDQN()
        {
            double lastLoss = 1.0;
            // raster layers holding the input features and the training labels
            GRasterLayer featureLayer = new GRasterLayer(featureFullFilename);
            GRasterLayer labelLayer   = new GRasterLayer(trainFullFilename);
            // environment the agent explores
            IEnv env = new ImageClassifyEnv(featureLayer, labelLayer);
            DQN  dqn = new DQN(env);

            // Only 10 epochs so the test stays fast — far too few for real use.
            dqn.SetParameters(10, 0);
            // capture the most recent loss value reported during training
            dqn.OnLearningLossEventHandler += (double loss, double totalReward, double accuracy, double progress, string epochesTime) => { lastLoss = loss; };
            dqn.Learn();
            // the final training loss is expected to be below 1
            Assert.IsTrue(lastLoss < 1.0);

            // apply the trained DQN to a single pixel of the feature layer
            IRasterLayerCursorTool cursor = new GRasterLayerCursorTool();
            cursor.Visit(featureLayer);
            double[] state         = cursor.PickNormalValue(50, 50);
            double[] action        = dqn.ChooseAction(state).action;
            int      landCoverType = dqn.ActionToRawValue(NP.Argmax(action));

            // The concrete class is unstable with so few epochs, so only the
            // value range is asserted here.
            Assert.IsTrue(landCoverType >= 0);
        }
        /// <summary>
        /// Creates a parcel (and its invoice) from the posted form values and
        /// persists both to the database.
        /// </summary>
        /// <param name="input">Form collection with parcel/invoice fields.</param>
        public void Create(FormCollection input)
        {
            using (DataContext db = new DataContext())
            {
                // Parse form fields (still throws FormatException on malformed
                // input, matching the previous behavior).
                int      id_carrier  = Int32.Parse(input["Parcel.Carrier.Name"]);
                string   track       = input["Parcel.Track_number"];
                string   invoice_num = input["Invoice_number"];
                DateTime date        = DateTime.Parse(input["Date"]);
                string   responsible = input["Responsible"];

                var        carrier    = db.Carriers.FirstOrDefault(c => c.Id == id_carrier);
                RootObject novaPochta = NP.GetDataParcel(track);
                Parcel     parcel     = new Parcel()
                {
                    Track_number = track, Carrier = carrier
                };
                // FIX: FirstOrDefault may return null for an unknown carrier id;
                // the old code dereferenced parcel.Carrier.Name unconditionally
                // and crashed with a NullReferenceException.
                if (parcel.Carrier != null && parcel.Carrier.Name == "Nova Poshta")
                {
                    parcel = Parcel.RefreshParcel(parcel, novaPochta);
                }

                db.Parcels.Add(parcel);
                db.SaveChanges();
                db.Invoices.Add(new Invoice()
                {
                    Invoice_number = invoice_num, Date = date, Responsible = responsible, Parcel = parcel
                });
                db.SaveChanges();
            }
        }
Example #5
0
        /// <summary>
        /// Picks one element of the list uniformly at random.
        /// </summary>
        /// <typeparam name="T">Element type.</typeparam>
        /// <param name="list">Source list; assumed non-empty.</param>
        /// <returns>A randomly chosen element.</returns>
        public static T RandomTake <T>(this List <T> list)
        {
            return list[NP.Random(list.Count)];
        }
Example #6
0
 /// <summary>
 /// Removes one random element once the list has reached the given capacity.
 /// </summary>
 /// <typeparam name="T">Element type.</typeparam>
 /// <param name="list">List to trim.</param>
 /// <param name="capacity">Size threshold that triggers the removal.</param>
 public static void RandomRemove <T>(this List <T> list, int capacity)
 {
     if (list.Count < capacity)
     {
         return;
     }
     list.RemoveAt(NP.Random(list.Count));
 }
Example #7
0
        /// <summary>
        /// Ad-hoc smoke test: builds a noun phrase from sample BuNaMo entries
        /// and prints it.
        /// </summary>
        public static void ShortTest()
        {
            var noun      = new Noun(@"C:\MBM\michmech\BuNaMo\noun\Gael_masc1.xml");
            var adjective = new Adjective(@"C:\MBM\michmech\BuNaMo\adjective\Gaelach_adj1.xml");
            Console.WriteLine(new NP(noun, adjective).print());
        }
Example #8
0
 /// <summary>
 /// Resets the state machine to its initial state.
 /// </summary>
 public void Reset()
 {
     // Reset the neural preprocessor when one is attached (NP may be null).
     NP?.Reset();
     // Reset the readout layer.
     RL.Reset();
 }
Example #9
0
        //Properties

        //Methods
        /// <summary>
        /// Sets State Machine internal state to its initial state
        /// </summary>
        public void Reset()
        {
            // Hard-reset the neural preprocessor.
            // NOTE(review): unlike the other Reset overload, this assumes NP is
            // non-null — confirm that invariant holds here.
            NP.Reset(true);
            // Drop the readout layer entirely; it must be retrained.
            RL = null;
        }
Example #10
0
 /// <summary>
 /// Background job that trains a CNN on a labelled raster and then classifies
 /// the whole feature raster, writing the result to a grayscale PNG in .\tmp.
 /// The work runs on the thread stored in _t (started elsewhere).
 /// </summary>
 /// <param name="featureRasterLayer">Raster with the input features.</param>
 /// <param name="labelRasterLayer">Raster with the training labels.</param>
 /// <param name="epochs">Number of training iterations.</param>
 /// <param name="model">Model selector — currently unused in this body.</param>
 /// <param name="width">Sample window width.</param>
 /// <param name="height">Sample window height.</param>
 /// <param name="channel">Number of input channels.</param>
 public JobCNNClassify(GRasterLayer featureRasterLayer, GRasterLayer labelRasterLayer, int epochs, int model, int width, int height, int channel)
 {
     _t = new Thread(() => {
         ImageClassifyEnv env = new ImageClassifyEnv(featureRasterLayer, labelRasterLayer);
         CNN cnn = new CNN(new int[] { channel, width, height }, env.ActionNum);
         //training
         Summary = "模型训练中";
         for (int i = 0; i < epochs; i++)
         {
             int batchSize       = cnn.BatchSize;
             var(states, labels) = env.RandomEval(batchSize);
             double[][] inputX   = new double[batchSize][];
             for (int j = 0; j < batchSize; j++)
             {
                 inputX[j] = states[j];
             }
             double loss = cnn.Train(inputX, labels);
             Process     = (double)i / epochs;
         }
         //classify
         Summary = "分类应用中";
         IRasterLayerCursorTool pRasterLayerCursorTool = new GRasterLayerCursorTool();
         pRasterLayerCursorTool.Visit(featureRasterLayer);
         //GDI graph
         Bitmap classificationBitmap = new Bitmap(featureRasterLayer.XSize, featureRasterLayer.YSize);
         // FIX: dispose the Graphics object; the original leaked it.
         using (Graphics g = Graphics.FromImage(classificationBitmap))
         {
             int seed        = 0;
             int totalPixels = featureRasterLayer.XSize * featureRasterLayer.YSize;
             // classify every pixel with the trained CNN and paint it gray
             for (int i = 0; i < featureRasterLayer.XSize; i++)
             {
                 for (int j = 0; j < featureRasterLayer.YSize; j++)
                 {
                     //get normalized input raw value
                     double[] normal = pRasterLayerCursorTool.PickNormalValue(i, j);
                     double[] action = cnn.Predict(normal);
                     //convert action to raw byte value
                     int gray = env.RandomSeedKeys[NP.Argmax(action)];
                     Color c  = Color.FromArgb(gray, gray, gray);
                     // FIX: the old loop allocated an unused Pen and never
                     // disposed the SolidBrush — a GDI handle leak per pixel.
                     using (SolidBrush brush = new SolidBrush(c))
                     {
                         g.FillRectangle(brush, new Rectangle(i, j, 1, 1));
                     }
                     //report progress
                     Process = (double)(seed++) / totalPixels;
                 }
             }
         }
         // save the result into .\tmp with a timestamp-based name
         string fullFileName = Directory.GetCurrentDirectory() + @"\tmp\" + DateTime.Now.ToFileTimeUtc() + ".png";
         classificationBitmap.Save(fullFileName);
         //complete
         Summary  = "CNN训练分类完成";
         Complete = true;
         OnTaskComplete?.Invoke(Name, fullFileName);
     });
 }
        /// <summary>
        /// Builds the employee salary increment report rows: increments joined to
        /// employees, with left joins to the employee's last service detail,
        /// number prefix, designation, location and department, filtered by the
        /// optional parameters and the logged-in company.
        /// </summary>
        /// <param name="DateFrom">Optional lower bound for the increment date.</param>
        /// <param name="DateTo">Optional upper bound for the increment date.</param>
        /// <param name="DepartmentID">Optional department filter.</param>
        /// <param name="DesignationID">Optional designation filter.</param>
        /// <param name="LocationID">Optional location filter.</param>
        /// <param name="EmployementTypeID">Optional employment-type filter; 3 matches types 1 or 2.</param>
        /// <returns>The materialized list of report rows.</returns>
        public List <EmployeeSalaryIncrementReportModel> GetReportData(DateTime?DateFrom, DateTime?DateTo, int?DepartmentID, int?DesignationID, int?LocationID, int?EmployementTypeID)
        {
            using (dbVisionEntities db = new dbVisionEntities())
            {
                // join ... into X / from y in X.DefaultIfEmpty() => LEFT OUTER JOIN
                var ReportData = (from sl in db.tblEmployeeSalaryIncrements
                                  join r in db.tblEmployees on sl.EmployeeID equals r.EmployeeID

                                  join pe in db.tblEmployeeServiceDetails on r.EmployeeLastServiceDetailID equals pe.EmployeeServiceDetailID into PE
                                  from ep in PE.DefaultIfEmpty()

                                  join np in db.tblEmployeeNoPrefixes on r.EmployeeNoPrefixID equals np.EmployeeNoPrefixID into NP
                                  from pn in NP.DefaultIfEmpty()

                                  join qe in db.tblEmployeeDesignations on ep.EmployeeDesignationID equals qe.EmployeeDesignationID into QE
                                  from eq in QE.DefaultIfEmpty()

                                  join lc in db.tblLocations on ep.LocationID equals lc.LocationID into LC
                                  from L in LC.DefaultIfEmpty()

                                  join jdep in db.tblEmployeeDepartments on ep.EmployeeDepartmentID equals jdep.EmployeeDepartmentID into gdep
                                  from dep in gdep.DefaultIfEmpty()

                                  // NOTE(review): DateFrom's null-check pairs with a DateTo
                                  // comparison (and vice versa) — looks swapped; confirm the
                                  // intended date-window semantics before changing.
                                  where ((DateFrom == null || sl.EmployeeSalaryIncrementDate <= DateTo) &&
                                         (DateTo == null || sl.EmployeeSalaryIncrementDate == null || sl.EmployeeSalaryIncrementDate >= DateFrom)) &&
                                  r.CompanyID == Model.CommonProperties.LoginInfo.LoggedInCompany.CompanyID &&
                                  (DesignationID == null || ep.EmployeeDesignationID == DesignationID.Value) &&
                                  (LocationID == null || ep.LocationID == LocationID.Value) &&
                                  (DepartmentID == null || ep.EmployeeDepartmentID == DepartmentID.Value) &&
                                  (EmployementTypeID == null || (ep != null && (ep.EmploymentType == EmployementTypeID || (EmployementTypeID == 3 && (ep.EmploymentType == 1 || ep.EmploymentType == 2))))) &&
                                  // rstate == 2 marks deleted/inactive rows — presumably; verify
                                  r.rstate != 2

                                  select new EmployeeSalaryIncrementReportModel()
                {
                    EmployeeSalaryIncrementID = (sl != null ? sl.EmployeeSalaryIncrementID : 0),
                    EmployeeSalaryIncrementNo = (sl != null ? sl.EmployeeSalaryIncrementNo : 0),

                    EmployeeNoPrefix = (pn != null ? pn.EmployeeNoPrefixName : null),
                    PriviousBasicSalary = sl.LastIncAmount,
                    EmployeeNo = r.EmployeeNo,
                    EmployeeName = (r != null ? r.EmployeeFirstName + " " + r.EmployeeLastName : ""),
                    LastIncDate = sl.LastIncDate,
                    CurrentBasicSalary = (sl != null ? sl.CurrentBasicSalary : 0),
                    CurrentIncDate = sl.EmployeeSalaryIncrementDate,
                    NewBasicSalary = sl.NewBasicSalary,
                    IncrementAmount = (sl != null ? sl.IncrementAmount : 0),
                    IncrementPercentage = (sl != null ? sl.IncrementPercentage : 0),
                    Remarks = sl.Remarks,
                    Designation = eq.EmployeeDesignationName,
                    Department = dep.EmployeeDepartmentName,
                    Location = L.LocationName,
                    EmployementTypeID = (eEmploymentType)ep.EmploymentType,
                }).ToList();

                return(ReportData);
            }
        }
Example #12
0
        /// <summary>
        /// Draws a random training sample: a pixel location belonging to a
        /// randomly chosen raw-value class, plus that class as a one-hot vector.
        /// </summary>
        /// <returns>Pixel coordinates and the one-hot class vector.</returns>
        private (int x, int y, double[] classIndex) RandomAccessMemory()
        {
            // pick one of the known raw values at random
            int rawValueIndex = NP.Random(_randomSeedKeys);
            // take an arbitrary remembered point of that class
            Point samplePoint = _memory[rawValueIndex].RandomTake();
            // encode the class position as a one-hot action vector
            double[] classIndex = NP.ToOneHot(Array.IndexOf(_randomSeedKeys, rawValueIndex), ActionNum);
            return (samplePoint.X, samplePoint.Y, classIndex);
        }
Example #13
0
 /// <summary>
 /// Used in SubjectNounList to ensure all NPs are in base form (singular but no determiner)
 /// </summary>
 /// <param name="np">Noun phrase to normalize.</param>
 /// <returns>False when the NP is plural (cannot be base-formed), true otherwise.</returns>
 public static bool ForceBaseForm(NP np)
 {
     np.ForceCommonNoun = true;
     bool canSingularize = np.Number != Number.Plural;
     if (canSingularize)
     {
         np.Number = Number.Singular;
     }
     return canSingularize;
 }
Example #14
0
 /// <summary>
 /// Prepares input for regression stage of State Machine training.
 /// All input patterns are processed by internal reservoirs and the corresponding network predictors are recorded.
 /// </summary>
 /// <param name="patternBundle">The bundle containing known sample input patterns and desired output vectors.</param>
 /// <param name="informativeCallback">Function to be called after each processed input.</param>
 /// <param name="userObject">The user object to be passed to informativeCallback.</param>
 public RegressionInput PrepareRegressionData(PatternBundle patternBundle,
                                              NeuralPreprocessor.PredictorsCollectionCallbackDelegate informativeCallback = null,
                                              Object userObject = null
                                              )
 {
     // Run the preprocessor over the bundle, then package its statistics and
     // topology counts alongside the collected predictors.
     var preprocessedBundle = NP.InitializeAndPreprocessBundle(patternBundle, informativeCallback, userObject);
     return new RegressionInput(preprocessedBundle,
                                NP.CollectStatatistics(),
                                NP.NumOfNeurons,
                                NP.NumOfInternalSynapses
                                );
 }
Example #15
0
        /// <summary>
        /// Builds a network input vector: the state features followed by a
        /// one-hot action slot fixed to action 0.
        /// </summary>
        /// <param name="state">Feature values to copy into the input.</param>
        /// <returns>Concatenated feature + action vector.</returns>
        private double[] MakeInput(double[] state)
        {
            double[] input = new double[_featuresNumber + _actionsNumber];
            // features occupy the front of the vector ...
            Array.ConstrainedCopy(state, 0, input, 0, _featuresNumber);
            // ... and the one-hot action block sits immediately after them
            Array.ConstrainedCopy(NP.ToOneHot(0, _actionsNumber), 0, input, _featuresNumber, _actionsNumber);
            return input;
        }
Example #16
0
        /// <summary>
        /// Samples a batch of elements from the list with replacement.
        /// </summary>
        /// <typeparam name="T">Element type.</typeparam>
        /// <param name="list">Source list; assumed non-empty.</param>
        /// <param name="limitSize">Number of elements to draw.</param>
        /// <returns>List of randomly drawn elements (duplicates possible).</returns>
        public static List <T> RandomTakeBatch <T>(this List <T> list, int limitSize = 200)
        {
            int count = list.Count;
            List <T> batch = new List <T>();
            for (int i = 0; i < limitSize; i++)
            {
                batch.Add(list[NP.Random(count)]);
            }
            return batch;
        }
Example #17
0
 /// <summary>
 /// Compute function for a patterned input feeding.
 /// Processes given input pattern and computes the output.
 /// </summary>
 /// <param name="inputPattern">Input pattern</param>
 /// <returns>Computed output values</returns>
 /// <exception cref="InvalidOperationException">
 /// Thrown when feeding is configured as continuous or the readout layer is untrained.
 /// </exception>
 public double[] Compute(List <double[]> inputPattern)
 {
     // FIX: throw the specific InvalidOperationException instead of the
     // reserved base Exception type (CA2201); existing catch(Exception)
     // handlers still catch it.
     if (_settings.NeuralPreprocessorConfig.InputConfig.FeedingType == NeuralPreprocessor.InputFeedingType.Continuous)
     {
         throw new InvalidOperationException("This version of Compute function is not useable for continuous input feeding.");
     }
     if (RL == null)
     {
         throw new InvalidOperationException("Readout layer is not trained.");
     }
     //Compute and return output
     return RL.Compute(NP.Preprocess(inputPattern));
 }
Example #18
0
 /// <summary>
 /// Compute function for a continuous input feeding.
 /// Processes given input values and computes (predicts) the output.
 /// </summary>
 /// <param name="inputVector">Input values</param>
 /// <returns>Computed output values</returns>
 /// <exception cref="InvalidOperationException">
 /// Thrown when feeding is configured as patterned or the readout layer is untrained.
 /// </exception>
 public double[] Compute(double[] inputVector)
 {
     // FIX: throw the specific InvalidOperationException instead of the
     // reserved base Exception type (CA2201); existing catch(Exception)
     // handlers still catch it. Also fixed the "fuction" doc typo.
     if (_settings.NeuralPreprocessorConfig.InputConfig.FeedingType == CommonEnums.InputFeedingType.Patterned)
     {
         throw new InvalidOperationException("This version of Compute function is not useable for patterned input feeding.");
     }
     if (RL == null)
     {
         throw new InvalidOperationException("Readout layer is not trained.");
     }
     //Compute and return output
     return RL.Compute(NP.Preprocess(inputVector));
 }
Example #19
0
 /// <summary>
 /// Assigns <paramref name="value"/> to every factor property whose name is
 /// semantically close to <paramref name="subject"/> (embedding cosine
 /// similarity above CONFIDENCE).
 /// </summary>
 /// <param name="matchProperty">Unused in this body; kept for interface compatibility.</param>
 /// <param name="subject">Subject text compared against the property names.</param>
 /// <param name="value">Value to write into matching properties.</param>
 /// <param name="fa">Scenario factor whose properties are scanned.</param>
 public void MergeValue(string matchProperty, string subject, string value, IScenarioFactor fa)
 {
     // FIX: the subject embedding does not depend on the loop variable —
     // hoist it instead of recomputing it for every property.
     float[] subjectVector = _net.ToFloat(subject);
     foreach (string propertyName in fa.Properties)
     {
         float[] propertyVector = _net.ToFloat(propertyName.ToLower());
         // set the value when the similarity passes the threshold
         if (NP.Cosine(subjectVector, propertyVector) > CONFIDENCE)
         {
             fa.SetPerperty(propertyName, value);
         }
     }
 }
Example #20
0
 /// <summary>
 /// Trains the network on one minibatch and reports the evaluation average.
 /// </summary>
 /// <param name="inputs">Batch of input vectors.</param>
 /// <param name="outputs">Batch of expected output vectors.</param>
 /// <returns>The previous minibatch evaluation average.</returns>
 public double Train(float[][] inputs, float[][] outputs)
 {
     //ensure that data is destroyed after use
     using (Value inputsValue = Value.CreateBatch(inputVariable.Shape, NP.ToOneDimensional(inputs), device))
     using (Value outputsValue = Value.CreateBatch(outputVariable.Shape, NP.ToOneDimensional(outputs), device))
     {
         traindEpochs++;
         var miniBatch = new Dictionary <Variable, Value>
         {
             { inputVariable, inputsValue },
             { outputVariable, outputsValue }
         };
         trainer.TrainMinibatch(miniBatch, false, device);
         return trainer.PreviousMinibatchEvaluationAverage();
     }
 }
Example #21
0
        /// <summary>
        /// Prepares input for regression stage of State Machine training.
        /// All input vectors are processed by internal reservoirs and the corresponding network predictors are recorded.
        /// </summary>
        /// <param name="vectorBundle">The bundle containing known sample input and desired output vectors (in time order).</param>
        /// <param name="informativeCallback">Function to be called after each processed input.</param>
        /// <param name="userObject">The user object to be passed to informativeCallback.</param>
        public RegressionInput PrepareRegressionData(VectorBundle vectorBundle,
                                                     NeuralPreprocessor.PredictorsCollectionCallbackDelegate informativeCallback = null,
                                                     Object userObject = null
                                                     )
        {
            // Preprocess the whole bundle first, derive the predictor switches
            // from the resulting input vectors, then package everything.
            VectorBundle preprocessedData = NP.InitializeAndPreprocessBundle(vectorBundle, informativeCallback, userObject);
            InitPredictorsGeneralSwitches(preprocessedData.InputVectorCollection);
            return new RegressionInput(preprocessedData,
                                       NP.CollectStatatistics(),
                                       NP.NumOfNeurons,
                                       NP.NumOfInternalSynapses,
                                       NumOfUnusedPredictors
                                       );
        }
Example #22
0
        /// <summary>
        /// Epsilon-greedy policy: with probability epsilon take a random action,
        /// otherwise take the best action predicted for the state.
        /// </summary>
        /// <param name="step">Current training step (drives epsilon decay).</param>
        /// <param name="state">Current environment state.</param>
        /// <returns>The chosen action and its Q value (0 for random actions).</returns>
        public (double[] action, double q) EpsilonGreedy(int step, double[] state)
        {
            // epsilon decays over the first 90% of the configured epochs
            int totalEpochs = Convert.ToInt32(_epoches * 0.9);
            var epsilon     = EpsilonCalcute(step, eps_total: totalEpochs);

            if (NP.Random() < epsilon)
            {
                return (_env.RandomAction(), 0);
            }
            var (action, q) = ChooseAction(state);
            return (action, q);
        }
Example #23
0
        /// <summary>
        /// Estimates classification accuracy on a random evaluation batch.
        /// </summary>
        /// <returns>Fraction of correctly predicted labels.</returns>
        private double Accuracy()
        {
            // size of the evaluation batch
            const int evalSize = 100;
            var (states, rawLabels) = _env.RandomEval(evalSize);

            // predict an action for every sampled state
            double[][] predictions = new double[evalSize][];
            for (int i = 0; i < evalSize; i++)
            {
                predictions[i] = ChooseAction(states[i]).action;
            }
            // compare predictions against the raw labels
            return NP.CalcuteAccuracy(predictions, rawLabels);
        }
Example #24
0
 /// <summary>
 /// Advances the environment by one step.
 /// </summary>
 /// <param name="action">use null to reset environment,else use one-hot vector</param>
 /// <returns>The next observation and the reward for the supplied action.</returns>
 public (double[] state, double reward) Step(double[] action)
 {
     if (action == null)
     {
         // reset: remember the old position, move to a new random sample and
         // return the observation at the old position with zero reward
         var (_c_x, _c_y, _c_classIndex) = (_current_x, _current_y, _current_classindex);
         (_current_x, _current_y, _current_classindex) = RandomAccessMemory();
         return (_pGRasterLayerCursorTool.PickNormalValue(_c_x, _c_y), 0.0);
     }
     // +1 when the chosen class matches the current one, otherwise -1
     double reward = NP.Argmax(action) == NP.Argmax(_current_classindex) ? 1.0 : -1.0;
     (_current_x, _current_y, _current_classindex) = RandomAccessMemory();
     return (_pGRasterLayerCursorTool.PickNormalValue(_current_x, _current_y), reward);
 }
Example #25
0
        /// <summary>
        /// Scores every candidate action for the given state with the actor
        /// network and returns the best one together with its predicted Q value.
        /// </summary>
        /// <param name="state">Feature vector describing the current state.</param>
        /// <returns>The best action vector and its predicted Q value.</returns>
        public (double[] action, double q) ChooseAction(double[] state)
        {
            // shared input buffer: [features | candidate action], reused per candidate
            double[] input = new double[_featuresNumber + _actionsNumber];
            // candidate action vector -> predicted Q value
            // (keys are array references; reference equality is what makes this work)
            Dictionary <double[], double> predicts = new Dictionary <double[], double>();

            //1.create the candidate-action dictionary
            if (_env.IsSingleAction)//env.singleAction == true
            {
                // one one-hot candidate per action
                for (int i = 0; i < _actionsNumber; i++)
                {
                    predicts.Add(NP.ToOneHot(i, _actionsNumber), -1.0);
                }
            }
            else//2.env.singleAction == false
            {
                // every non-empty subset of actions, encoded via the binary
                // representation of i (bit k -> action k); starts at 1 to skip
                // the all-zero action
                for (int i = 1; i < Math.Pow(2, _actionsNumber); i++)
                {
                    char[]   strOnehot    = Convert.ToString(i, 2).PadLeft(_actionsNumber, '0').ToCharArray();
                    double[] doubleOnehot = new double[_actionsNumber];
                    for (int index = 0; index < _actionsNumber; index++)
                    {
                        // reverse so that bit k of i maps to slot k of the vector
                        doubleOnehot[_actionsNumber - index - 1] = Convert.ToDouble(strOnehot[index].ToString());
                    }
                    predicts.Add(doubleOnehot, -1.0);
                }
            }
            List <double[]> keyCollection = predicts.Keys.ToList();

            //2.score each candidate action with the actor network
            for (int i = 0; i < keyCollection.Count; i++)
            {
                double[] key    = keyCollection[i];
                int      offset = 0;
                Array.ConstrainedCopy(state, 0, input, offset, _featuresNumber);
                offset += _featuresNumber;
                Array.ConstrainedCopy(key, 0, input, offset, _actionsNumber);
                offset += _actionsNumber;
                double[] preditOutput = _actorNet.Predict(input);
                predicts[key] = preditOutput[0];
            }
            //3.sort by predicted Q, descending, and take the best entry
            var target = predicts.OrderByDescending(p => p.Value).ToDictionary(p => p.Key, o => o.Value).First();

            //return the winning action and its Q value
            return(target.Key, target.Value);
        }
 /// <summary>
 /// Advances the environment by one step.
 /// action direction :
 /// -----------------------------------------------------
 /// *    0  |  1  |  2
 /// * -----------------------
 /// *    7  |  X  |  3
 /// * -----------------------
 /// *    6  |  5  |  4
 /// </summary>
 /// <param name="action">null resets the environment; otherwise a one-hot direction vector</param>
 /// <returns>The next observation and the reward for the supplied action.</returns>
 public (double[] state, double reward) Step(double[] action)
 {
     if (action == null)
     {
         // reset: remember the old position, resample, and return the
         // observation at the old position with zero reward
         var (_c_x, _c_y, _c_action) = (_current_x, _current_y, _current_action);
         (_current_x, _current_y, _current_action) = RandomAccessMemory();
         return (_pRasterLayerCursorTool.PickRagneNormalValue(_c_x, _c_y, _maskx, _masky), 0);
     }
     // +1 when the action matches the expected direction, otherwise -1
     double reward = NP.Equal(action, _current_action) ? 1.0 : -1.0;
     (_current_x, _current_y, _current_action) = RandomAccessMemory();
     return (_pRasterLayerCursorTool.PickRagneNormalValue(_current_x, _current_y, _maskx, _masky), reward);
 }
Example #27
0
 /// <summary>
 /// Preprocesses the data and computes the readout layer.
 /// </summary>
 /// <param name="inputVector">The input vector.</param>
 /// <param name="readoutData">The detailed data computed by the readout layer.</param>
 /// <returns>The computed output values in the natural form.</returns>
 public double[] Compute(double[] inputVector, out ReadoutLayer.ReadoutData readoutData)
 {
     if (!RL.Trained)
     {
         throw new InvalidOperationException($"Readout layer is not trained.");
     }
     // With no neural preprocessor attached the input bypasses preprocessing
     // and goes straight to the readout layer.
     double[] readoutInput = NP == null ? inputVector : NP.Preprocess(inputVector);
     return RL.Compute(readoutInput, out readoutData);
 }
 /// <summary>
 /// DQN classify task: trains a DQN on a labelled raster and then classifies
 /// the whole feature raster, writing the result to a grayscale PNG in .\tmp.
 /// The work runs on the thread stored in _t (started elsewhere).
 /// </summary>
 /// <param name="featureRasterLayer">Raster with the input features.</param>
 /// <param name="labelRasterLayer">Raster with the training labels.</param>
 /// <param name="epochs">Number of training epochs.</param>
 public JobDQNClassify(GRasterLayer featureRasterLayer, GRasterLayer labelRasterLayer, int epochs = 3000)
 {
     _t = new Thread(() =>
     {
         ImageClassifyEnv env = new ImageClassifyEnv(featureRasterLayer, labelRasterLayer);
         _dqn = new DQN(env);
         _dqn.SetParameters(epochs: epochs, gamma: _gamma);
         _dqn.OnLearningLossEventHandler += _dqn_OnLearningLossEventHandler;
         //training
         Summary = "模型训练中";
         _dqn.Learn();
         //classification
         Summary = "分类应用中";
         IRasterLayerCursorTool pRasterLayerCursorTool = new GRasterLayerCursorTool();
         pRasterLayerCursorTool.Visit(featureRasterLayer);
         Bitmap classificationBitmap = new Bitmap(featureRasterLayer.XSize, featureRasterLayer.YSize);
         // FIX: dispose the Graphics object; the original leaked it.
         using (Graphics g = Graphics.FromImage(classificationBitmap))
         {
             int seed        = 0;
             int totalPixels = featureRasterLayer.XSize * featureRasterLayer.YSize;
             for (int i = 0; i < featureRasterLayer.XSize; i++)
             {
                 for (int j = 0; j < featureRasterLayer.YSize; j++)
                 {
                     //get normalized input raw value
                     double[] normal = pRasterLayerCursorTool.PickNormalValue(i, j);
                     var(action, q)  = _dqn.ChooseAction(normal);
                     //convert action to raw byte value
                     int gray = _dqn.ActionToRawValue(NP.Argmax(action));
                     Color c  = Color.FromArgb(gray, gray, gray);
                     // FIX: the old loop allocated an unused Pen and never
                     // disposed the SolidBrush — a GDI handle leak per pixel.
                     using (SolidBrush brush = new SolidBrush(c))
                     {
                         g.FillRectangle(brush, new Rectangle(i, j, 1, 1));
                     }
                     //report progress
                     Process = (double)(seed++) / totalPixels;
                 }
             }
         }
         //save result
         string fullFileName = Directory.GetCurrentDirectory() + @"\tmp\" + DateTime.Now.ToFileTimeUtc() + ".png";
         classificationBitmap.Save(fullFileName);
         //complete
         Summary  = "DQN训练分类完成";
         Complete = true;
         OnTaskComplete?.Invoke(Name, fullFileName);
     });
 }
Example #29
0
        /// <summary>
        /// this function calculates the normalized power
        /// </summary>
        /// <returns>returns NP based on a power value</returns>
        public string CalculateNormalizedPower()
        {
            // The original duplicated the same parse/average/4th-power/4th-root
            // pipeline three times; it is factored into one local helper here,
            // preserving the exact arithmetic and window bounds.
            // NOTE(review): requires at least 1801 entries in `power`, and uses
            // culture-sensitive Double.Parse exactly as before — confirm both.
            string[] arrpower = power.ToArray();

            // Averages arrpower[first..last], raises to the 4th power, scales
            // by 10 and takes the 4th root (two nested square roots).
            double WindowValue(int first, int last)
            {
                List <double> window = new List <double>();
                for (int counter = first; counter <= last; counter++)
                {
                    window.Add(Double.Parse(arrpower[counter]));
                }
                double avg = Average(window);
                return Math.Sqrt(Math.Sqrt(Math.Pow(avg, 4) * 10));
            }

            double np1 = WindowValue(0, 600);
            double np2 = WindowValue(601, 1200);
            double np3 = WindowValue(1201, 1800);

            // mean of the three window values, rounded to two decimals
            NP = Math.Round((np1 + np2 + np3) / 3, 2);
            return NP.ToString();
        }
        /// <summary>
        /// Picks a random remembered pixel and builds a combined one-hot vector
        /// of the directions whose neighbouring pixel shares its raw value.
        /// Neighbour index layout (8 = the pixel itself):
        /// -----------------------------------------------------
        /// *    0  |  1  |  2
        /// * -----------------------
        /// *    7  |  8  |  3
        /// * -----------------------
        /// *    6  |  5  |  4
        /// </summary>
        /// <returns>The point's coordinates and its combined direction vector.</returns>
        private (int x, int y, double[] actions) RandomAccessMemory()
        {
            // pick a random raw-value class and a random remembered point of it
            int   rawValueIndex = NP.Random(_randomSeedKeys);
            Point pt            = _memory[rawValueIndex].RandomTake();

            // accumulator for the valid movement directions
            double[] actions = new double[ActionNum];
            // the eight neighbours of pt, listed in the index order shown above
            List <Point> points = new List <Point>()
            {
                new Point(pt.X - 1, pt.Y - 1), //(-1,-1)
                new Point(pt.X, pt.Y - 1),     //(0,-1)
                new Point(pt.X + 1, pt.Y - 1), //(1,-1)
                new Point(pt.X + 1, pt.Y),     //(1,0)
                new Point(pt.X + 1, pt.Y + 1), //(1,1)
                new Point(pt.X, pt.Y + 1),     //(0,1)
                new Point(pt.X - 1, pt.Y + 1), //(-1,1)
                new Point(pt.X - 1, pt.Y),     //(-1,0)
            };

            //search next point
            for (int pointIndex = 0; pointIndex < ActionNum; pointIndex++)
            {
                Point p = points[pointIndex];
                // skip neighbours that fall outside the raster bounds
                if (p.X >= _limit_x || p.X < 0 || p.Y >= _limit_y || p.Y < 0)
                {
                    continue;
                }
                // merge this direction into the action vector when the
                // neighbour belongs to the same raw-value class
                if (_queryTable[p.X, p.Y] == rawValueIndex)
                {
                    actions.CombineOneHot(NP.ToOneHot(pointIndex, ActionNum));
                }
            }
            // remember every distinct action vector seen so far
            if (!_existActions.Exists(p => NP.Equal(p, actions)))
            {
                _existActions.Add(actions);
            }
            // point coordinates plus its combined direction vector
            return(pt.X, pt.Y, actions);
        }