コード例 #1
0
ファイル: GeneticApprox.cs プロジェクト: CDMMKY/fuzzy_core
        /// <summary>
        /// Prepares the genetic algorithm: caches the configuration, snapshots the
        /// current rule base and error, and wires the init/crossover/selection
        /// delegates according to the configuration.
        /// </summary>
        /// <param name="conf">Algorithm configuration (expected to be a GeneticConf).</param>
        public virtual void Init(ILearnAlgorithmConf conf)
        {
            // Cache configuration and capture the pre-learning state of the system.
            currentConf     = conf as GeneticConf;
            fullFuzzySystem = result;
            step            = 0;
            errorAfter      = 0;
            errorBefore     = result.approxLearnSamples(result.RulesDatabaseSet[0]);
            backUp          = result.RulesDatabaseSet[0];

            // Initialization strategy: local by default, global when configured.
            initFunc = currentConf.GENCTypeInit == GeneticConf.Alg_Init_Type.Глобальный
                ? new initFuncType(globalInit)
                : new initFuncType(localInit);

            // Crossover operator: unified by default, multi-point when configured.
            crossoverFunc = currentConf.GENCTypeCrossover == GeneticConf.Alg_Crossover_Type.Многоточечный
                ? new crossoverFuncType(pointsCrossover)
                : new crossoverFuncType(unifiedCrossover);

            // Selection operator: roulette by default, otherwise random or elite.
            switch (currentConf.GENCTypeSelection)
            {
                case GeneticConf.Alg_Selection_Type.Случайный:
                    selectionFunc = new selectionFuncType(randomSelection);
                    break;
                case GeneticConf.Alg_Selection_Type.Элитарный:
                    selectionFunc = new selectionFuncType(eliteSelection);
                    break;
                default:
                    selectionFunc = new selectionFuncType(rouletteSelection);
                    break;
            }
            fullInit(); // Performs the actual population initialization.
        }
コード例 #2
0
        /// <summary>
        /// Tunes the fuzzy system with the bacterial foraging (BFO) runner and keeps
        /// the better of the original and the tuned knowledge base.
        /// </summary>
        /// <param name="Approximate">Fuzzy system to tune; must already contain at least one rule base.</param>
        /// <param name="conf">Algorithm configuration; must be a BacteryAlgorithmConfig.</param>
        /// <returns>The tuned (or restored) fuzzy system.</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approximate, ILearnAlgorithmConf conf)
        {
            BacteryAlgorithmConfig Config = conf as BacteryAlgorithmConfig;
            if (Config == null)
            {
                // Fail fast instead of a NullReferenceException on the next line.
                throw new ArgumentException("Ожидалась конфигурация типа BacteryAlgorithmConfig", nameof(conf));
            }

            sendBactery    = Config.BFOCountSolution;
            interPSOtoSend = Config.BFOCountIteration;
            result         = Approximate;

            if (result.RulesDatabaseSet.Count < 1)
            {
                throw new InvalidDataException("Нечеткая система не проинициализированна");
            }

            // Remember the current base and its error so we can roll back if BFO makes it worse.
            KnowlegeBaseTSARules backSave = new KnowlegeBaseTSARules(result.RulesDatabaseSet[0]);
            double backResult = result.approxLearnSamples(result.RulesDatabaseSet[0]);

            savetoUFS(result.RulesDatabaseSet, 0, 0, 0);
            BacteryRunner();

            KnowlegeBaseTSARules[] solutions = sortSolution(loadDatabase());
            if (solutions.Length < 1)
            {
                // Nothing was produced — restore the saved base.
                result.RulesDatabaseSet[0] = backSave;
                return result;
            }

            result.RulesDatabaseSet[0] = solutions[0];
            double newResult = result.approxLearnSamples(result.RulesDatabaseSet[0]);

            // Lower error is better; roll back when the tuned base is worse.
            if (newResult > backResult)
            {
                result.RulesDatabaseSet[0] = backSave;
            }

            result.RulesDatabaseSet[0].TermsSet.Trim();
            return result;
        }
コード例 #3
0
        /// <summary>
        /// Recomputes the consequents of every rule with recursive least squares (LSM)
        /// and commits the rebuilt base only when it lowers the learn-sample error.
        /// </summary>
        /// <param name="Approximate">Fuzzy system to tune; must already contain a rule base.</param>
        /// <param name="conf">Unused by this algorithm.</param>
        /// <returns>The fuzzy system, possibly with an improved first knowledge base.</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approximate, ILearnAlgorithmConf conf)
        {
            if (Approximate.RulesDatabaseSet.Count == 0)
            {
                throw new InvalidOperationException("Нечеткая система не была корректно инициализированна");
            }
            KnowlegeBaseTSARules newBase = new KnowlegeBaseTSARules(Approximate.RulesDatabaseSet[0]);

            double result_before = Approximate.approxLearnSamples(newBase);

            foreach (TSARule Rule in newBase.RulesDatabase)
            {
                double[] coefficient = null;
                double   Value       = LSMWeghtReqursiveSimple.EvaluteConsiquent(Approximate, Rule.ListTermsInRule, out coefficient);
                // Write the recalculated consequents back into the rule.
                // (The original assigned IndependentConstantConsequent twice; duplicate removed.)
                Rule.IndependentConstantConsequent = Value;
                Rule.RegressionConstantConsequent  = coefficient;
            }

            double result_after = Approximate.approxLearnSamples(newBase);

            // Lower error is better — only commit the rebuilt base when it improves.
            if (result_before > result_after)
            {
                Approximate.RulesDatabaseSet[0] = newBase;
            }
            GC.Collect(); // NOTE(review): explicit GC is usually unnecessary; kept to preserve existing behavior.
            Approximate.RulesDatabaseSet[0].TermsSet.Trim();
            return (Approximate);
        }
コード例 #4
0
ファイル: Base_ACO.cs プロジェクト: CDMMKY/fuzzy_core
        /// <summary>
        /// Last step
        /// </summary>
        /// <summary>
        /// Last step: commits the candidate solution when it beats the current base,
        /// then discards every knowledge base except the first one.
        /// </summary>
        protected virtual void prepareFinalFuzzySystem()
        {
            double baselineError  = result.approxLearnSamples(result.RulesDatabaseSet[0]);
            double candidateError = result.approxLearnSamples(newSolution);

            // Lower error wins.
            if (candidateError < baselineError)
            {
                result.RulesDatabaseSet[0] = newSolution;
            }
            result.RulesDatabaseSet.RemoveRange(1, result.RulesDatabaseSet.Count - 1);
        }
コード例 #5
0
        /// <summary>
        /// Generates candidate systems with shrunken per-variable term counts,
        /// scores every variant on the learn samples and keeps the best one.
        /// </summary>
        /// <param name="Approximate">Fuzzy system to (re)generate rules for.</param>
        /// <param name="config">Must be a TermShrinkAndRotateConf (cast below).</param>
        /// <returns>The system holding only the best generated knowledge base.</returns>
        public override TSAFuzzySystem Generate(TSAFuzzySystem Approximate, IGeneratorConf config)
        {
            TSAFuzzySystem result = Approximate;

            if (result.RulesDatabaseSet.Count == 0)
            {
                AbstractNotSafeGenerator tempGen = new GeneratorRulesEveryoneWithEveryone();
                result = tempGen.Generate(result, config);
                GC.Collect();
            }

            // Cast once instead of four separate casts.
            TermShrinkAndRotateConf Config = (TermShrinkAndRotateConf)config;
            count_shrink = Config.TSARCShrinkVars;
            size_shrink  = Config.TSARCShrinkTerm;
            type_func    = Config.IEWEFuncType;
            count_slices = Config.IEWECountSlice;

            List<int> Varians_of_run_system = new List<int>();

            for (int i = 0; i < Approximate.CountFeatures; i++)
            {
                int count_terms_for_var = Approximate.RulesDatabaseSet[0].TermsSet.FindAll(x => x.NumVar == i).Count;
                // The first count_shrink variables lose size_shrink terms each.
                if (i < count_shrink)
                {
                    Varians_of_run_system.Add(count_terms_for_var - size_shrink);
                }
                else
                {
                    Varians_of_run_system.Add(count_terms_for_var);
                }
            }

            Varians_of_run_system.Sort();
            TypeTermFuncEnum type_of_term = Approximate.RulesDatabaseSet[0].TermsSet[0].TermFuncType;

            Generate_all_variant_in_pool(Varians_of_run_system);

            // NOTE(review): the loop mutates Approximate but scores result — they are the same
            // reference unless tempGen.Generate above returned a new instance; verify.
            for (int i = 0; i < Pull_of_systems.Count; i++)
            {
                Approximate.RulesDatabaseSet.Clear();

                GeneratorRulesEveryoneWithEveryone.InitRulesEveryoneWithEveryone(Approximate, type_of_term, Pull_of_systems[i].ToArray());
                Systems_ready_to_test.Add(Approximate.RulesDatabaseSet[0]);
                errors_of_systems.Add(result.approxLearnSamples(result.RulesDatabaseSet[0]));
            }

            // Keep only the candidate with the lowest error.
            int best_index = errors_of_systems.IndexOf(errors_of_systems.Min());

            result.RulesDatabaseSet.Clear();
            result.RulesDatabaseSet.Add(Systems_ready_to_test[best_index]);
            Console.WriteLine(Pull_of_systems.Count());

            GC.Collect();
//            result.UnlaidProtectionFix();
            result.RulesDatabaseSet[0].TermsSet.Trim();
            return (result);
        }
コード例 #6
0
ファイル: GeneticApprox.cs プロジェクト: CDMMKY/fuzzy_core
        /// <summary>
        /// Elite selection: scores every child, sorts children by ascending error and
        /// keeps the best populationMassive-sized subset as the next population.
        /// </summary>
        public void eliteSelection()
        {
            int childCount = childrenMassive.Count();
            double[] errors = new double[childCount];

            // Temporarily register each child in the system to score it
            // (index i + 1 because slot 0 holds the base rule set).
            for (int idx = 0; idx < childCount; idx++)
            {
                fullFuzzySystem.RulesDatabaseSet.Add(childrenMassive[idx]);
                fullFuzzySystem.UnlaidProtectionFix(childrenMassive[idx]);

                errors[idx] = fullFuzzySystem.approxLearnSamples(fullFuzzySystem.RulesDatabaseSet[idx + 1]);
            }

            // Sort children by error (keys) and take the best ones for the new population.
            Array.Sort(errors, childrenMassive);
            populationMassive = childrenMassive.ToList().GetRange(0, populationMassive.Count()).ToArray();

            // Deregister the temporarily added children.
            fullFuzzySystem.RulesDatabaseSet.RemoveRange(1, childrenMassive.Count());
        }
コード例 #7
0
        /// <summary>
        /// Generates candidate rule bases with some rules switched off, scores each
        /// variant on the learn samples and keeps only the best one.
        /// </summary>
        /// <param name="Approximate">Fuzzy system to (re)generate rules for.</param>
        /// <param name="config">Must be a RullesShrinkConf (cast below).</param>
        /// <returns>The system holding only the best generated knowledge base.</returns>
        public override TSAFuzzySystem Generate(TSAFuzzySystem Approximate, IGeneratorConf config)
        {
            start_add_rules = Approximate.RulesDatabaseSet.Count;
            TSAFuzzySystem result = Approximate;

            if (result.RulesDatabaseSet.Count == 0)
            {
                AbstractNotSafeGenerator tempGen = new GeneratorRulesEveryoneWithEveryone();
                result = tempGen.Generate(result, config);

                GC.Collect();
            }

            // Cast once instead of six separate casts.
            RullesShrinkConf Config = (RullesShrinkConf)config;
            Request_count_rules = Config.RSCCountRules;
            max_count_rules     = Config.RSCMaxRules;
            count_slices        = Config.IEWECountSlice;
            min_count_rules     = Config.RSCMinRules;
            type_term           = Config.IEWEFuncType;

            // Rule mask: 1 = keep the rule, 0 = switched off.
            int         count_of_swith_off    = Config.RSCMaxRules - Request_count_rules;
            List<byte> Varians_of_run_system = new List<byte>();

            for (int i = 0; i < Approximate.RulesDatabaseSet[0].RulesDatabase.Count; i++)
            {
                Varians_of_run_system.Add(1);
            }
            for (int i = 0; i < count_of_swith_off; i++)
            {
                Varians_of_run_system[i] = 0;
            }
            Generate_all_variant_in_pool(Varians_of_run_system);
            for (int i = 0; i < Pull_of_systems.Count; i++)
            {
                KnowlegeBaseTSARules temp_rules = new KnowlegeBaseTSARules(result.RulesDatabaseSet[0], Pull_of_systems[i]);
                temp_rules.TrimTerms();

                result.RulesDatabaseSet.Add(temp_rules);
                result.UnlaidProtectionFix(result.RulesDatabaseSet[start_add_rules + i]);
                errors_of_systems.Add(result.approxLearnSamples(result.RulesDatabaseSet[start_add_rules + i]));
            }

            // Keep only the candidate with the lowest error.
            int best_index            = errors_of_systems.IndexOf(errors_of_systems.Min());
            KnowlegeBaseTSARules best = result.RulesDatabaseSet[start_add_rules + best_index];

            result.RulesDatabaseSet.Clear();
            result.RulesDatabaseSet.Add(best);
            Console.WriteLine(Pull_of_systems.Count());

            GC.Collect();
//            result.UnlaidProtectionFix();
            result.RulesDatabaseSet[0].TermsSet.Trim();
            return (result);
        }
コード例 #8
0
        /// <summary>
        /// Rebuilds the rule base with shrunken term counts for the first count_shrink
        /// variables, scores every generated variant and keeps the best one.
        /// </summary>
        /// <param name="Approximate">Fuzzy system to tune; must already contain a rule base.</param>
        /// <param name="config">Must be an OptimizeTermShrinkAndRotateConf.</param>
        /// <returns>The system holding only the best knowledge base.</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approximate, ILearnAlgorithmConf config)
        {
            TSAFuzzySystem result = Approximate;

            if (result.RulesDatabaseSet.Count == 0)
            {
                throw new System.FormatException("Что то не то с входными данными");
            }
            OptimizeTermShrinkAndRotateConf Config = config as OptimizeTermShrinkAndRotateConf;
            if (Config == null)
            {
                // Fail fast instead of a NullReferenceException on the next line.
                throw new ArgumentException("Ожидалась конфигурация типа OptimizeTermShrinkAndRotateConf", nameof(config));
            }

            count_shrink = Config.OTSARCountShrinkVars;
            size_shrink  = Config.OTSARCountShrinkTerm;

            List<int> Varians_of_run_system = new List<int>();

            for (int i = 0; i < Approximate.CountFeatures; i++)
            {
                int count_terms_for_var = Approximate.RulesDatabaseSet[0].TermsSet.FindAll(x => x.NumVar == i).Count;
                // The first count_shrink variables lose size_shrink terms each.
                if (i < count_shrink)
                {
                    Varians_of_run_system.Add(count_terms_for_var - size_shrink);
                }
                else
                {
                    Varians_of_run_system.Add(count_terms_for_var);
                }
            }

            Varians_of_run_system.Sort();
            TypeTermFuncEnum type_of_term = Approximate.RulesDatabaseSet[0].TermsSet[0].TermFuncType;

            Generate_all_variant_in_pool(Varians_of_run_system);

            for (int i = 0; i < Pull_of_systems.Count; i++)
            {
                Approximate.RulesDatabaseSet.Clear();

                GeneratorRulesEveryoneWithEveryone.InitRulesEveryoneWithEveryone(result, type_of_term, Pull_of_systems[i].ToArray());
                Systems_ready_to_test.Add(Approximate.RulesDatabaseSet[0]);
                errors_of_systems.Add(result.approxLearnSamples(result.RulesDatabaseSet[0]));
            }

            // Keep only the candidate with the lowest error.
            int best_index = errors_of_systems.IndexOf(errors_of_systems.Min());

            result.RulesDatabaseSet.Clear();
            result.RulesDatabaseSet.Add(Systems_ready_to_test[best_index]);
            Console.WriteLine(Pull_of_systems.Count());

            result.RulesDatabaseSet[0].TermsSet.Trim();
//            result.UnlaidProtectionFix();
            return (result);
        }
コード例 #9
0
 /// <summary>
 /// Returns a copy of <paramref name="Source"/> sorted by ascending learn-sample error
 /// (best first). <paramref name="Source"/> itself is left unmodified.
 /// </summary>
 /// <param name="Source">Knowledge bases to rank.</param>
 /// <param name="Approx">Fuzzy system used to score each knowledge base.</param>
 /// <returns>A new array sorted by error.</returns>
 public static KnowlegeBaseTSARules[] SortRules(this KnowlegeBaseTSARules[] Source, TSAFuzzySystem Approx)
 {
     // Use Length instead of LINQ Count() on arrays — no enumerator needed.
     double[] keys = new double[Source.Length];
     KnowlegeBaseTSARules[] tempSol = (KnowlegeBaseTSARules[])Source.Clone();
     for (int i = 0; i < Source.Length; i++)
     {
         keys[i] = Approx.approxLearnSamples(Source[i]);
     }
     // Sort the copies by their error keys.
     Array.Sort(keys, tempSol);
     return (tempSol);
 }
コード例 #10
0
        /// <summary>
        /// One PSO iteration: updates each particle's velocity and position (term
        /// parameters and rule consequents), re-scores the particle and refreshes the
        /// personal (Pi) and global (Pg) bests.
        /// </summary>
        /// <param name="result">Fuzzy system used to score candidate knowledge bases.</param>
        public virtual void oneIterate(TSAFuzzySystem result)
        {
            for (int j = 0; j < count_particle; j++)
            {
                // Adaptive inertia: sigmoid of the recent error change.
                w = 1 / (1 + Math.Exp(-(Errors[j] - OldErrors[j]) / 0.01));
                for (int k = 0; k < X[j].TermsSet.Count; k++)
                {
                    for (int q = 0; q < X[j].TermsSet[k].CountParams; q++)
                    {
                        double bp = Pi[j].TermsSet[k].Parametrs[q];
                        // Velocity update: inertia + cognitive (personal best) + social (global best).
                        V[j].TermsSet[k].Parametrs[q] = V[j].TermsSet[k].Parametrs[q] * w + c1 * rnd.NextDouble() * (bp - X[j].TermsSet[k].Parametrs[q]) +
                                                        c2 * rnd.NextDouble() * (Pg.TermsSet[k].Parametrs[q] - X[j].TermsSet[k].Parametrs[q]);
                        X[j].TermsSet[k].Parametrs[q] += V[j].TermsSet[k].Parametrs[q];
                    }
                }
                // Same velocity/position update applied to the rule consequents.
                double[] bf  = new double[V[j].all_conq_of_rules.Length];
                double[] bfw = new double[V[j].all_conq_of_rules.Length];
                for (int k = 0; k < V[j].all_conq_of_rules.Length; k++)
                {
                    bfw[k] = V[j].all_conq_of_rules[k] * w + c1 * rnd.NextDouble() * (Pi[j].all_conq_of_rules[k] - X[j].all_conq_of_rules[k]) +
                             c2 * rnd.NextDouble() * (Pg.all_conq_of_rules[k] - X[j].all_conq_of_rules[k]);
                    double sw = X[j].all_conq_of_rules[k] + bfw[k];
                    bf[k] = sw;
                }
                X[j].all_conq_of_rules = bf;
                V[j].all_conq_of_rules = bfw;
                // Score the moved particle: the base is added temporarily and removed right after.
                double newError = 0;
                result.RulesDatabaseSet.Add(X[j]);
                int  temp_index = result.RulesDatabaseSet.Count - 1;
                bool success    = true;
                try
                {
                    newError = result.approxLearnSamples(result.RulesDatabaseSet[temp_index]);
                }
                catch (Exception)
                {
                    // Evaluation failed — treat this move as unsuccessful.
                    success = false;
                }
                result.RulesDatabaseSet.RemoveAt(temp_index);
                if (success && (newError < Errors[j]))
                {
                    // Particle improved: remember the previous error and update its personal best.
                    OldErrors[j] = Errors[j];
                    Errors[j]    = newError;

                    Pi[j] = new KnowlegeBaseTSARules(X[j]);
                }
                // NOTE(review): when evaluation throws, newError stays 0 and can still win
                // this comparison — confirm whether this branch should also require `success`.
                if (minError > newError)
                {
                    minError = newError;
                    Pg       = new KnowlegeBaseTSARules(X[j]);
                }
            }
        }
コード例 #11
0
        /// <summary>
        /// For every input variable with enough terms, tries all ways of cutting
        /// count_shrink terms out of its term set and keeps the variant with the
        /// lowest learn-sample error.
        /// </summary>
        /// <param name="Approximate">Fuzzy system to tune; must already contain a rule base.</param>
        /// <param name="config">Must be an OptimizeTermShrinkHardCoreConf.</param>
        /// <returns>The system with the best cut installed as its first knowledge base.</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approximate, ILearnAlgorithmConf config)
        {
            theFuzzySystem = Approximate;
            if (theFuzzySystem.RulesDatabaseSet.Count == 0)
            {
                throw new System.FormatException("Что то не то с входными данными");
            }
            OptimizeTermShrinkHardCoreConf Config = config as OptimizeTermShrinkHardCoreConf;
            if (Config == null)
            {
                // Fail fast instead of a NullReferenceException on the next line.
                throw new ArgumentException("Ожидалась конфигурация типа OptimizeTermShrinkHardCoreConf", nameof(config));
            }

            count_shrink = Config.OTSHCCountShrinkTerm;

            for (int i = 0; i < Approximate.CountFeatures; i++)
            {
                int count_terms_for_var = Approximate.RulesDatabaseSet[0].TermsSet.FindAll(x => x.NumVar == i).Count;

                if (count_terms_for_var >= count_shrink)
                {
                    // Build a 0/1 mask: the first count_shrink terms are cut (0), the rest kept (1).
                    int        shrinkcounter  = count_shrink;
                    List<int> Varians_of_cut = new List<int>();
                    for (int j = 0; j < count_terms_for_var; j++)
                    {
                        Varians_of_cut.Add(shrinkcounter > 0 ? 0 : 1);
                        shrinkcounter--;
                    }
                    Generate_all_variant_in_pool(Varians_of_cut);

                    // Score every permutation of the cut for this variable.
                    for (int j = 0; j < Pull_of_systems.Count; j++)
                    {
                        KnowlegeBaseTSARules current = MakeCut(Approximate.RulesDatabaseSet[0], Pull_of_systems[j], i);
                        Systems_ready_to_test.Add(current);
                        errors_of_systems.Add(theFuzzySystem.approxLearnSamples(current));
                    }
                    Pull_of_systems.Clear();
                }
            }

            if (errors_of_systems.Count == 0)
            {
                // Guard: otherwise Min() below throws an opaque "sequence contains no elements".
                throw new InvalidOperationException("Ни одна переменная не содержит достаточно термов для усечения");
            }

            int best_index = errors_of_systems.IndexOf(errors_of_systems.Min());

            theFuzzySystem.RulesDatabaseSet[0] = Systems_ready_to_test[best_index];

            return (theFuzzySystem);
        }
コード例 #12
0
        /// <summary>
        /// Tries every way of switching off count_Shrink_rule rules and keeps the
        /// rule-base variant with the lowest learn-sample error.
        /// </summary>
        /// <param name="Approximate">Fuzzy system to tune; must already contain a rule base.</param>
        /// <param name="config">Must be an OptimizeRullesShrinkConf.</param>
        /// <returns>The system holding only the best knowledge base.</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approximate, ILearnAlgorithmConf config)
        {
            start_add_rules = Approximate.RulesDatabaseSet.Count;
            TSAFuzzySystem result = Approximate;

            if (result.RulesDatabaseSet.Count == 0)
            {
                throw new System.FormatException("Что то не то с входными данными");
            }

            OptimizeRullesShrinkConf Config = config as OptimizeRullesShrinkConf;
            if (Config == null)
            {
                // Fail fast instead of a NullReferenceException on the next line.
                throw new ArgumentException("Ожидалась конфигурация типа OptimizeRullesShrinkConf", nameof(config));
            }

            count_Shrink_rule = Config.ORSCCountShrinkRules;

            // Rule mask: 1 = keep the rule, 0 = switched off.
            int         count_of_swith_off    = count_Shrink_rule;
            List<byte> Varians_of_run_system = new List<byte>();

            for (int i = 0; i < Approximate.RulesDatabaseSet[0].RulesDatabase.Count; i++)
            {
                Varians_of_run_system.Add(1);
            }
            for (int i = 0; i < count_of_swith_off; i++)
            {
                Varians_of_run_system[i] = 0;
            }
            Generate_all_variant_in_pool(Varians_of_run_system);
            for (int i = 0; i < Pull_of_systems.Count; i++)
            {
                KnowlegeBaseTSARules temp_rules = new KnowlegeBaseTSARules(result.RulesDatabaseSet[0], Pull_of_systems[i]);
                temp_rules.TrimTerms();

                result.RulesDatabaseSet.Add(temp_rules);
                result.UnlaidProtectionFix(result.RulesDatabaseSet[start_add_rules + i]);
                errors_of_systems.Add(result.approxLearnSamples(result.RulesDatabaseSet[start_add_rules + i]));
            }

            // Keep only the candidate with the lowest error.
            int best_index            = errors_of_systems.IndexOf(errors_of_systems.Min());
            KnowlegeBaseTSARules best = result.RulesDatabaseSet[start_add_rules + best_index];

            result.RulesDatabaseSet.Clear();
            result.RulesDatabaseSet.Add(best);
            Console.WriteLine(Pull_of_systems.Count());

            result.RulesDatabaseSet[0].TermsSet.Trim();
//            result.UnlaidProtectionFix();
            return (result);
        }
コード例 #13
0
ファイル: Base_ACO.cs プロジェクト: CDMMKY/fuzzy_core
        /// <summary>
        /// Step 1
        /// </summary>
        /// <param name="Classifier"></param>
        /// <param name="config"></param>

        /// <summary>
        /// Step 1: reads the ACO parameters from the configuration and prepares the
        /// colony state plus the initial candidate solution.
        /// </summary>
        /// <param name="Approx">Fuzzy system being optimized.</param>
        /// <param name="config">ACO search parameters.</param>
        protected virtual void init(TSAFuzzySystem Approx, ACOSearchConf config)
        {
            // Algorithm parameters.
            ACO_iterationCount       = config.ACOCountIteration;
            ACO_antCount             = config.ACOCountAnt;
            ACO_decisionArchiveCount = config.ACODescisionArchiveSize;
            ACO_q  = config.ACOQ;
            ACO_xi = config.ACOXi;

            // Working state: one colony per term of the base rule set.
            result      = Approx;
            colonyCount = result.RulesDatabaseSet[0].TermsSet.Count;
            colonyList  = new List<Colony>();
            newSolution = new KnowlegeBaseTSARules(result.RulesDatabaseSet[0]);

            // Error of the untouched base — the reference point for later comparisons.
            baseError = result.approxLearnSamples(newSolution);
        }
コード例 #14
0
        /// <summary>
        /// Returns a copy of <paramref name="Source"/> sorted by ascending learn-sample error.
        /// Each candidate is temporarily swapped into slot 0 for scoring; the original
        /// base is restored before returning.
        /// </summary>
        protected KnowlegeBaseTSARules[] sortSolution(KnowlegeBaseTSARules[] Source)
        {
            KnowlegeBaseTSARules savedBase = result.RulesDatabaseSet[0];

            int count = Source.Count();
            double[] errors = new double[count];
            KnowlegeBaseTSARules[] sorted = Source.Clone() as KnowlegeBaseTSARules[];

            for (int idx = 0; idx < count; idx++)
            {
                result.RulesDatabaseSet[0] = Source[idx];
                errors[idx] = result.approxLearnSamples(result.RulesDatabaseSet[0]);
            }

            // Sort the copies by their error keys.
            Array.Sort(errors, sorted);

            result.RulesDatabaseSet[0] = savedBase;
            return (sorted);
        }
コード例 #15
0
 /// <summary>
 /// Prepares the PSO population: every particle starts as a copy of the base rule set,
 /// with personal bests initialised to the start position and velocities seeded
 /// (zero for particle 0, random for the rest).
 /// </summary>
 /// <param name="result">Fuzzy system whose first knowledge base seeds the swarm.</param>
 protected void preIterate(TSAFuzzySystem result)
 {
     for (int i = 0; i < count_particle; i++)
     {
         // Each particle starts at the current base; its initial error is the base error.
         KnowlegeBaseTSARules temp_c_Rule = new KnowlegeBaseTSARules(result.RulesDatabaseSet[0]);
         X[i]         = temp_c_Rule;
         Errors[i]    = result.approxLearnSamples(result.RulesDatabaseSet[0]);
         OldErrors[i] = Errors[i];
         Pi[i]        = new KnowlegeBaseTSARules(X[i]);
         V[i]         = new KnowlegeBaseTSARules(X[i]);
         // Seed term-parameter velocities: zero for the first particle, uniform [-0.5, 0.5) otherwise.
         for (int j = 0; j < V[i].TermsSet.Count; j++)
         {
             for (int k = 0; k < Term.CountParamsinSelectedTermType(V[i].TermsSet[j].TermFuncType); k++)
             {
                 if (i == 0)
                 {
                     V[i].TermsSet[j].Parametrs[k] = 0;
                 }
                 else
                 {
                     V[i].TermsSet[j].Parametrs[k] = rnd.NextDouble() - 0.5;
                 }
             }
             // NOTE(review): this consequent-velocity block sits INSIDE the TermsSet loop,
             // so it is re-run (and re-randomised) once per term — confirm it was not
             // intended to run once per particle, after the loop.
             double[] bf = new double[V[i].all_conq_of_rules.Length];
             for (int k = 0; k < V[i].all_conq_of_rules.Length; k++)
             {
                 if (i == 0)
                 {
                     bf[k] = V[i].all_conq_of_rules[k];
                 }
                 else
                 {
                     bf[k] = GaussRandom.Random_gaussian(rand, V[i].all_conq_of_rules[k], V[i].all_conq_of_rules[k] * 0.01);
                 }
             }
             V[i].all_conq_of_rules = bf;
         }
     }
     // Global best starts at the base rule set with the base error.
     Pg       = new KnowlegeBaseTSARules(result.RulesDatabaseSet[0]);
     minError = Errors[0];
 }
コード例 #16
0
        /// <summary>
        /// Measures the accuracy of a TSA fuzzy system (RMSE, MSE and MSE/2 on the learn
        /// and test samples), appends the measurements to the result lists and returns
        /// a human-readable report.
        /// </summary>
        /// <param name="FS">System to measure; expected to be a TSAFuzzySystem.</param>
        /// <returns>The formatted accuracy report, or an "unavailable" message.</returns>
        private string ErrorInfoTSA(IFuzzySystem FS)
        {
            TSAFuzzySystem IFS = FS as TSAFuzzySystem;

            if (IFS.RulesDatabaseSet.Count < 1)
            {
                return("Точность нечеткой системы недоступна");
            }

            // RMSE on learn/test samples, then the derived MSE and MSE/2 variants.
            double learnRmse = IFS.approxLearnSamples(IFS.RulesDatabaseSet[0]);
            double testRmse  = IFS.approxTestSamples(IFS.RulesDatabaseSet[0]);
            approxLearnResult.Add(learnRmse);
            approxTestResult.Add(testRmse);

            double learnMse = IFS.RMSEtoMSEforLearn(learnRmse);
            double testMse  = IFS.RMSEtoMSEforTest(testRmse);
            approxLearnResultMSE.Add(learnMse);
            approxTestResultMSE.Add(testMse);

            double learnMseDiv2 = IFS.RMSEtoMSEdiv2forLearn(learnRmse);
            double testMseDiv2  = IFS.RMSEtoMSEdiv2forTest(testRmse);
            approxLearnResultMSEdiv2.Add(learnMseDiv2);
            approxTestResultMSEdiv2.Add(testMseDiv2);

            return("Точностью на обучающей выборке(RSME)  " + learnRmse.ToString() + " , Точность на тестовой выборке(RMSE)  " + testRmse.ToString() + " " + Environment.NewLine +
                   "Точностью на обучающей выборке(MSE)  " + learnMse.ToString() + " , Точность на тестовой выборке(MSE)  " + testMse.ToString() + " " + Environment.NewLine +
                   "Точностью на обучающей выборке(MSE/2)  " + learnMseDiv2.ToString() + " , Точность на тестовой выборке(MSE/2)  " + testMseDiv2.ToString() + " " + Environment.NewLine);
        }
コード例 #17
0
        /// <summary>
        /// Recursive least squares (RLS) tuning of the rule consequents. Builds the
        /// consequent hyper-vector B and covariance matrix P, iterates the RLS update
        /// over all learn samples, then inserts the better of the optimized and the
        /// untouched copy at position 0 of the rule-base list.
        /// </summary>
        /// <param name="Approximate">Fuzzy system to tune; its first knowledge base is used.</param>
        /// <param name="conf">Must be an RLSconfig (cast is unchecked).</param>
        /// <returns>The fuzzy system with the winning knowledge base inserted at index 0.</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approximate, ILearnAlgorithmConf conf)
        {
            TSAFuzzySystem result = Approximate;

            double errorBefore = result.approxLearnSamples(result.RulesDatabaseSet[0]);

            // Initial value for P's diagonal; a large value expresses low confidence
            // in the starting consequents.
            var a = 2000;

            #region Basic initialization
            var config = (RLSconfig)conf;
            numberOfIterations = config.NumberOfIterantions;
            lambda             = config.ForgettingFactor;

            // knowledgeBaseToOptimize receives the RLS result; kbToOptimize stays untouched.
            var knowledgeBaseToOptimize = new KnowlegeBaseTSARules(result.RulesDatabaseSet[0]);
            var kbToOptimize            = new KnowlegeBaseTSARules(result.RulesDatabaseSet[0]);

            R = knowledgeBaseToOptimize.RulesDatabase.Count;                                  // Number of rules
            m = result.LearnSamplesSet.CountSamples;                                          // Number of samples
            n = knowledgeBaseToOptimize.RulesDatabase[0].RegressionConstantConsequent.Length; // Number of variables
            #endregion

            #region x, y
            // Input vectors x and target outputs y from the learn samples.
            double[][] x = new double[m][];
            double[]   y = new double[m];
            for (int i = 0; i < m; i++)
            {
                x[i] = result.LearnSamplesSet.DataRows[i].InputAttributeValue;
                y[i] = result.LearnSamplesSet.DataRows[i].DoubleOutput;
            }
            #endregion

            #region B
            // B stacks, per rule: the independent consequent (row 0) and the n regression coefficients.
            double[][] consequents = new double[n + 1][];

            // B[0]
            consequents[0] = new double[R];
            for (int i = 0; i < R; i++)
            {
                consequents[0][i] = knowledgeBaseToOptimize.RulesDatabase[i].IndependentConstantConsequent;
            }

            // B[1..n+1]
            for (int i = 1; i < n + 1; i++)
            {
                consequents[i] = new double[R];
                for (int j = 0; j < R; j++)
                {
                    consequents[i][j] = knowledgeBaseToOptimize.RulesDatabase[j].RegressionConstantConsequent[i - 1];
                }
            }

            HyperVector B = new HyperVector(consequents);
            #endregion

            #region P
            // P starts as a * I (identity scaled by the confidence constant above).
            double[][] p = new double[n + 1][];
            for (int i = 0; i < n + 1; i++)
            {
                p[i] = new double[n + 1];
                for (int j = 0; j < n + 1; j++)
                {
                    p[i][j] = (i == j) ? a : 0;
                }
            }

            Matrix P = new Matrix(p);
            #endregion

            #region Xi
            // Firing strengths (Xi) of each rule per sample, and their "bold" vector form.
            dividerXi = new double[m];
            xiValue   = new double[m][];
            for (int i = 0; i < m; i++)
            {
                xiValue[i] = new double[R];
            }
            EvalXi(x, y, knowledgeBaseToOptimize);

            // XiBold
            xiBoldValue = new HyperVector[m];
            EvalXiBold(x);
            #endregion

            GC.Collect();

            #region The Cycle
            // RLS recursion over all samples, repeated numberOfIterations times:
            // P <- (P - P*xi*(xi'*P) / (lambda + xi'*P*xi)) / lambda;  B <- B + P*xi*(y - xi'*B).
            for (int i = 0; i < numberOfIterations; i++)
            {
                for (int j = 0; j < m; j++)
                {
                    var temp1 = -1d * P;
                    var temp3 = temp1 * xiBoldValue[j];

                    var temp5 = xiBoldValue[j] * P * xiBoldValue[j];
                    var temp6 = lambda + temp5;
                    var temp7 = temp3 * (1d / temp6);
                    var temp8 = xiBoldValue[j] * P;
                    var temp9 = temp7 ^ temp8;
                    P += temp9;
                    P /= lambda;

                    ////P = (P + (-1d * P * xiBoldValue[j] * (1d / (lambda + (xiBoldValue[j] * P * xiBoldValue[j]))) ^ xiBoldValue[j] * P)) / lambda;

                    B += P * xiBoldValue[j] * (y[j] - xiBoldValue[j] * B);
                }
            }
            #endregion

            #region Comparison
            // Write the estimated consequents from B back into the knowledge base.
            for (int i = 0; i < R; i++)
            {
                knowledgeBaseToOptimize.RulesDatabase[i].IndependentConstantConsequent = B.Elements[0].Elements[i];

                for (int j = 1; j < n + 1; j++)
                {
                    knowledgeBaseToOptimize.RulesDatabase[i].RegressionConstantConsequent[j - 1] = B.Elements[j].Elements[i]; // NOT WORKING!!!
                }
            }

            // Get the best knowledge base on the 1st place.
            // NOTE(review): errorAfter is computed on kbToOptimize — the UNMODIFIED copy —
            // not on knowledgeBaseToOptimize, which received the RLS consequents.
            // Confirm this comparison targets the intended base.
            double errorAfter = result.approxLearnSamples(kbToOptimize);

            if (errorAfter < errorBefore)
            {
                result.RulesDatabaseSet.Insert(0, knowledgeBaseToOptimize);
            }
            else
            {
                result.RulesDatabaseSet.Insert(0, kbToOptimize);
            }

            return(result);

            #endregion
        }
コード例 #18
0
        /// <summary>
        /// Tunes the membership-function parameters of a Takagi-Sugeno fuzzy
        /// system with an explosion/"shrapnel" metaheuristic: each iteration
        /// scatters candidate parameter vectors (shrapnel) around the current
        /// best solution, keeps the best candidate found so far, and every
        /// 1000 iterations refits the rule consequents with recursive
        /// weighted LSM (<see cref="RWLSMTakagiSugeno"/>).
        /// </summary>
        /// <param name="Approx">System to tune; its RulesDatabaseSet is grown and modified in place.</param>
        /// <param name="conf">Must be a <c>Param</c> instance carrying the algorithm settings (property names are in Russian).</param>
        /// <returns>The same system instance (<paramref name="Approx"/>) after tuning.</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approx, ILearnAlgorithmConf conf)
        {
            // Settings (translated): iteration count, shard/population count,
            // exploration factor, reduction coefficient, truncated features.
            iskl_prizn      = "";
            count_iteration = ((Param)conf).Количество_итераций;
            count_populate  = ((Param)conf).Число_осколков;
            exploration     = ((Param)conf).Фактор_исследования;
            reduce_koef     = ((Param)conf).Уменьшающий_коэффициент;
            priznaki_usech  = ((Param)conf).Усечённые_признаки;

            int    iter = 0, i, j, count_terms;
            int    count_cons;
            double RMSE_best, cosFi, RMSE_best2, MSEbefore, MSEafter;
            int    Nd, variables, k = 1, best = 0;

            string[] buf;
            buf = priznaki_usech.Split(' ');
            TSAFuzzySystem result = Approx;
            // Number of scalar parameters per term (e.g. 3 for triangular MFs).
            int            type   = Approx.RulesDatabaseSet[0].TermsSet[0].CountParams;

            // Total number of tunable scalars. The shrapnel/d/explosion arrays
            // below are indexed 1..Nd (1-based), hence the "+ 1" sizes.
            Nd = Approx.RulesDatabaseSet[0].TermsSet.Count * type;
            double[] X_best = new double[Nd + 1];
            double[,] X_pred    = new double[2, Nd + 1];
            double[,] direction = new double[count_populate, Nd + 1];
            double[,] d         = new double[count_populate, Nd + 1];
            double[,] explosion = new double[count_populate, Nd + 1];
            double[,] shrapnel  = new double[count_populate, Nd + 1];
            // Spread-angle factor between shards of one explosion.
            cosFi      = Math.Cos(2 * Math.PI / count_populate);
            RMSE_best  = Approx.approxLearnSamples(0);
            RMSE_best2 = Approx.approxLearnSamples(0);
            count_cons = Approx.RulesDatabaseSet[0].all_conq_of_rules.Count();
            double[] RMSE      = new double[count_populate];
            // NOTE(review): iter is 0 here, so RMSE_all is zero-length; it is
            // never written below — looks vestigial.
            double[] RMSE_all  = new double[iter];
            double[] RMSE_tst  = new double[count_populate];
            double[] RMSE2     = new double[count_populate];
            double[] RMSE_pred = new double[2];
            double[] cons_best = new double[count_cons];
            count_terms = Approx.RulesDatabaseSet[0].TermsSet.Count;
            variables   = Approx.LearnSamplesSet.CountVars;
            int[] terms = new int[variables];

            double[] X_best2 = new double[variables];
            double[,] d3      = new double[count_populate, variables];
            double[,] priznak = new double[count_populate, variables];
            for (i = 0; i < variables; i++)
            {
                priznak[0, i] = 1;
                X_best2[i]    = 1;
            }
            // Candidate rule bases: slot 0 of RulesDatabaseSet is the
            // incumbent, so only count_populate - 1 copies are appended
            // (total of count_populate bases).
            KnowlegeBaseTSARules[] X = new KnowlegeBaseTSARules[count_populate];
            for (int s = 0; s < count_populate - 1; s++)
            {
                X[s] = new KnowlegeBaseTSARules(Approx.RulesDatabaseSet[0]);
                Approx.RulesDatabaseSet.Add(X[s]);
            }


            // Disable the features listed (1-based indices, space separated)
            // in the "truncated features" setting.
            if (buf[0] != "")
            {
                for (k = 0; k < buf.Count(); k++)
                {
                    Approx.AcceptedFeatures[int.Parse(buf[k]) - 1] = false;
                    priznak[0, int.Parse(buf[k]) - 1] = 0;
                    iskl_prizn += buf[k] + " ";
                }
            }

            RMSE_best = Approx.approxLearnSamples(0);
            for (iter = 0; iter <= count_iteration; iter++)
            {
                best = 0;
                if (iter == 0)
                {
                    // First iteration: seed shrapnel[0,*] (and the two
                    // "previous best" snapshots X_pred) with the current
                    // term parameters.
                    k = 1;
                    for (int h = 0; h < count_terms; h++)
                    {
                        for (int p = 0; p < type; p++)
                        {
                            shrapnel[0, k] = Approx.RulesDatabaseSet[0].TermsSet[h].Parametrs[p];
                            X_best[k]      = shrapnel[0, k];
                            X_pred[0, k]   = shrapnel[0, k];
                            X_pred[1, k]   = shrapnel[0, k];
                            k++;
                        }
                    }
                    RMSE_pred[0] = Approx.approxLearnSamples(0);
                    RMSE_pred[1] = Approx.approxLearnSamples(0);
                    k            = 1;
                    // Initial step sizes: random value inside each input
                    // attribute's [Min, Max] range.
                    for (int h = 0; h < count_terms; h++)
                    {
                        for (int p = 0; p < type; p++)
                        {
                            d[0, k] = RandomNext(Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[h].NumberOfInputVar).Min, Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[h].NumberOfInputVar).Max);
                            k++;
                        }
                    }
                }
                for (i = 1; i <= Nd; i++)
                {
                    // Exploration phase: for the first `exploration`
                    // iterations, generate shrapnel by a random walk; the
                    // `generate:` label is a retry loop that regenerates a
                    // parameter until it satisfies the ordering/bounds
                    // constraints checked below.
                    if (exploration > iter)
                    {
                        for (j = 1; j < count_populate; j++)
                        {
                            int sum = 0, sum2 = 0;
generate:
                            sum++;
                            sum2++;
                            // distance formula fixed (translated from Russian)

                            d[j, i] = d[j - 1, i] * randn();

                            //double sluch = randn();
                            //if (sluch < 0) d[j, i] = d[j - 1, i] * (-1) * Math.Pow(sluch, 2);
                            //else d[j, i] = d[j - 1, i] * Math.Pow(sluch, 2);
                            explosion[j, i] = d[j, i] * cosFi;
                            // After 20 failed attempts, bias the candidate
                            // towards neighbouring parameters to escape the
                            // retry loop.
                            if (sum > 20)
                            {
                                if ((i + (type - 2)) % type == 0)
                                {
                                    shrapnel[j, i] = (shrapnel[0, i] + explosion[j, i]) + (shrapnel[j, i - 1] - shrapnel[j, i]);
                                    if (sum2 > 2)
                                    {
                                        shrapnel[j, i] = (shrapnel[0, i] + explosion[j, i]) + (shrapnel[j, i - type] - shrapnel[j, i]);
                                        sum            = 19;
                                    }
                                    if (sum2 > 3)
                                    {
                                        shrapnel[j, i] = (shrapnel[0, i] + explosion[j, i]) + (shrapnel[j, i - type] - shrapnel[j, i]) + (shrapnel[j, i - 1] - shrapnel[j, i]);
                                        sum            = 19;
                                        sum2           = 0;
                                    }
                                }
                                else
                                {
                                    shrapnel[j, i] = (shrapnel[0, i] + explosion[j, i]) + (shrapnel[j, i - 1] - shrapnel[j, i]);
                                    sum            = 19;
                                }
                            }
                            else
                            {
                                shrapnel[j, i] = shrapnel[0, i] + explosion[j, i];
                            }
                            // The first two parameters are pinned to the
                            // attribute minimum.
                            if ((i == 2) || (i == 1))
                            {
                                shrapnel[j, i] = Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumberOfInputVar).Min;
                            }

                            // Boundary handling: when parameter i sits on the
                            // edge between two input variables' term blocks,
                            // clamp it to that attribute's Min/Max and skip
                            // the ordering checks (goto exit).
                            if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumberOfInputVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 2) / type)].NumberOfInputVar)
                            {
                                shrapnel[j, i] = Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumberOfInputVar).Min; goto exit;
                            }

                            if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumberOfInputVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i - type) / type)].NumberOfInputVar)
                            {
                                shrapnel[j, i] = Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumberOfInputVar).Min; goto exit;
                            }
                            if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumberOfInputVar != (variables - 1))
                            {
                                if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumberOfInputVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i) / type)].NumberOfInputVar)
                                {
                                    shrapnel[j, i] = Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumberOfInputVar).Max; goto exit;
                                }
                                if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumberOfInputVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i + 1) / type)].NumberOfInputVar)
                                {
                                    shrapnel[j, i] = Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumberOfInputVar).Max; goto exit;
                                }
                            }
                            if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumberOfInputVar == (variables - 1))
                            {
                                // NOTE(review): the "* 3" here hard-codes
                                // type == 3 — TODO confirm for other MF types.
                                if ((i == (count_terms * 3 - 1)) || (i == (count_terms * 3)))
                                {
                                    shrapnel[j, i] = Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumberOfInputVar).Max;
                                }
                            }

                            // Ordering/bounds validation: parameters within a
                            // term must be non-decreasing and stay inside the
                            // attribute range; otherwise regenerate.
                            if (((i + (type - 2)) % type == 0) && (shrapnel[j, i] < shrapnel[j, i - 1]))
                            {
                                if (shrapnel[j, i] == Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumberOfInputVar).Min)
                                {
                                    i--;
                                }
                                goto generate;
                            }
                            if ((i % type == 0) && (shrapnel[j, i] < shrapnel[j, i - 1]))
                            {
                                goto generate;
                            }
                            if (i != 1)
                            {
                                if (((i - (type - 2)) % type == 0) && ((shrapnel[j, i] > shrapnel[j, i - 1]) || (shrapnel[j, i] > Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumberOfInputVar).Max) || (shrapnel[j, i] < Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumberOfInputVar).Min)))
                                {
                                    goto generate;
                                }
                            }
                            if (((i + (type - 2)) % type == 0) && ((shrapnel[j, i] < Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumberOfInputVar).Min) || (shrapnel[j, i] > Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumberOfInputVar).Max)))
                            {
                                goto generate;
                            }
exit:
                            // Cross-term check: a term's first parameter must
                            // not fall below the matching parameter of the
                            // previous term of the same variable.
                            if (i > type)
                            {
                                if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumberOfInputVar != 0)
                                {
                                    if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1 - type) / type)].NumberOfInputVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumberOfInputVar - 1)
                                    {
                                        if (((i + (type - 2)) % type == 0) && ((shrapnel[j, i] < shrapnel[j, i - type])))
                                        {
                                            goto generate;
                                        }
                                    }
                                }
                            }
                        }
                    }
                    else
                    {
                        // Exploitation phase — currently disabled. The
                        // commented-out original moved each shrapnel along a
                        // direction estimated from the two previous best
                        // solutions (X_pred / RMSE_pred), applied the same
                        // clamping/ordering checks as the exploration branch,
                        // and shrank the step d[j,i] by
                        // exp(iter / reduce_koef). See version-control
                        // history for the full listing.
                    }
                }

                // Write each candidate's generated parameters into its rule
                // base copy.
                for (int z = 0; z < count_populate; z++)
                {
                    k = 1;
                    for (int h = 0; h < count_terms; h++)
                    {
                        for (int p = 0; p < type; p++)
                        {
                            Approx.RulesDatabaseSet[z].TermsSet[h].Parametrs[p] = shrapnel[z, k];
                            k++;
                        }
                    }
                }
                // Evaluate all candidates; track the lowest learn-sample RMSE.
                for (j = 0; j < count_populate; j++)
                {
                    RMSE[j]     = Approx.approxLearnSamples(j);
                    RMSE_tst[j] = Approx.approxTestSamples(j);
                    if (RMSE[j] < RMSE_best)
                    {
                        RMSE_best = RMSE[j];
                        best      = j;
                    }
                }
                // Every 1000 iterations: refit the consequents of the best
                // candidate with recursive weighted LSM, keeping the result
                // only if it does not worsen the learn-sample error.
                if ((iter != 0) && (iter % 1000 == 0))
                {
                    RWLSMTakagiSugeno LSM = new RWLSMTakagiSugeno();
                    MSEbefore = RMSE[best];
                    KnowlegeBaseTSARules zeroSolution = new KnowlegeBaseTSARules(Approx.RulesDatabaseSet[0]);
                    Approx.RulesDatabaseSet[0] = new KnowlegeBaseTSARules(Approx.RulesDatabaseSet[best]);
                    KnowlegeBaseTSARules tempSolution = new KnowlegeBaseTSARules(Approx.RulesDatabaseSet[best]);
                    Approx   = LSM.TuneUpFuzzySystem(Approx, new NullConfForAll()) as TSAFuzzySystem;
                    MSEafter = Approx.approxLearnSamples(0);
                    if (MSEafter > MSEbefore)
                    {
                        // LSM made it worse — roll back the best candidate.
                        Approx.RulesDatabaseSet[0] = tempSolution;
                        RMSE2[best] = MSEbefore;
                    }
                    else
                    {
                        RMSE2[best] = MSEafter;
                        for (int p = 0; p < count_cons; p++)
                        {
                            cons_best[p] = Approx.RulesDatabaseSet[0].all_conq_of_rules[p];
                        }
                    }
                    if (RMSE2[best] < RMSE_best)
                    {
                        RMSE_best = RMSE2[best];
                    }
                    // Restore slot 0 and propagate the best consequents to
                    // every candidate rule base.
                    Approx.RulesDatabaseSet[best] = new KnowlegeBaseTSARules(Approx.RulesDatabaseSet[0]);
                    Approx.RulesDatabaseSet[0]    = new KnowlegeBaseTSARules(zeroSolution);
                    for (int z = 0; z < count_populate; z++)
                    {
                        for (int p = 0; p < count_cons; p++)
                        {
                            Approx.RulesDatabaseSet[z].RulesDatabase[p].IndependentConstantConsequent = cons_best[p];
                        }
                    }
                }
                // Promote the best shrapnel to slot 0 and, while still
                // exploring, re-randomize the step sizes d[0,*].
                k = 1;
                for (int h = 0; h < count_terms; h++)
                {
                    for (int p = 0; p < type; p++)
                    {
                        shrapnel[0, k] = shrapnel[best, k];
                        if (exploration > iter)
                        {
                            d[0, k] = RandomNext(Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[h].NumberOfInputVar).Min, Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[h].NumberOfInputVar).Max);
                        }
                        Approx.RulesDatabaseSet[0].TermsSet[h].Parametrs[p] = shrapnel[0, k];
                        k++;
                    }
                }

                // Keep a two-deep history of the best solutions
                // (X_pred / RMSE_pred), used by the (disabled) exploitation
                // phase; every 10th iteration the post-LSM error RMSE2 is
                // used instead of the raw RMSE.
                if (iter % 10 == 0)
                {
                    if (RMSE_pred[1] > RMSE2[best])
                    {
                        for (k = 1; k <= Nd; k++)
                        {
                            X_pred[0, k] = X_pred[1, k];
                            X_pred[1, k] = shrapnel[best, k];
                        }
                        RMSE_pred[0] = RMSE_pred[1];
                        RMSE_pred[1] = RMSE2[best];
                    }
                }
                else
                {
                    if (RMSE_pred[1] > RMSE[best])
                    {
                        for (k = 1; k <= Nd; k++)
                        {
                            X_pred[0, k] = X_pred[1, k];
                            X_pred[1, k] = shrapnel[best, k];
                        }
                        RMSE_pred[0] = RMSE_pred[1];
                        RMSE_pred[1] = RMSE[best];
                    }
                }
            }

            return(result);
        }
コード例 #19
0
        /// <summary>
        /// Local-search step for one cluster: perturbs every membership-function
        /// parameter of each cluster member by a random step and accepts the
        /// perturbed candidate only if it lowers the learn-sample error.
        /// </summary>
        /// <param name="cluster_index">Index into <c>groups</c> selecting which cluster's members to perturb.</param>
        private void OriginalOperator(int cluster_index)
        {
            int[] members = groups[cluster_index];
            NewPopulation = new KnowlegeBaseTSARules[members.Length];
            // Step size shrinks as the search progresses (iter / cur_iter are
            // class-level counters).
            double epsi_newstep = rand.NextDouble() * Math.Exp(1 - (iter / (iter - cur_iter + 1)));

            for (int i = 0; i < members.Length; i++)
            {
                // BUG FIX: the original stored a reference to the incumbent
                // here, so the perturbation below mutated Population[...] in
                // place and the acceptance test compared the object with
                // itself (never true) — the population was corrupted
                // unconditionally. Work on an independent deep copy instead.
                NewPopulation[i] = new KnowlegeBaseTSARules(Population[members[i]]);
            }
            for (int i = 0; i < members.Length; i++)
            {
                int number = members[i];

                // Perturb every parameter of every term of the candidate.
                for (int j = 0; j < NewPopulation[i].TermsSet.Count; j++)
                {
                    for (int k = 0; k < NewPopulation[i].TermsSet[j].Parametrs.Length; k++)
                    {
                        NewPopulation[i].TermsSet[j].Parametrs[k] += epsi_newstep * rand.NextDouble();
                    }
                }
                // Greedy acceptance: replace the incumbent only on improvement.
                if (result.approxLearnSamples(NewPopulation[i]) < result.approxLearnSamples(Population[number]))
                {
                    Population[number] = NewPopulation[i];
                }
            }
        }
コード例 #20
0
        /// <summary>
        /// Serializes all quality estimates of the system (learn/test RMSE and
        /// MSE when a test set exists, plus interpretability indices and
        /// complexity metrics) as an &lt;Estimates&gt; element.
        /// </summary>
        /// <param name="writer">Open XML writer positioned where the element should be emitted.</param>
        /// <param name="Approximate">System whose RulesDatabaseSet[0] is evaluated.</param>
        private static void writeAboutEstimates(XmlWriter writer, TSAFuzzySystem Approximate)
        {
            writer.WriteStartElement("Estimates");
            if (Approximate.TestSamplesSet != null)
            {
                // 18 index/complexity estimates + 4 error estimates.
                writer.WriteAttributeString("Count", XmlConvert.ToString(22));
                double learnRmse = Approximate.approxLearnSamples(Approximate.RulesDatabaseSet[0]);
                double testRmse  = Approximate.approxTestSamples(Approximate.RulesDatabaseSet[0]);
                writeEstimate(writer, "RMSE", XmlConvert.ToString(learnRmse), Approximate.LearnSamplesSet.FileName);
                writeEstimate(writer, "MSE", XmlConvert.ToString(Approximate.RMSEtoMSEforLearn(learnRmse)), Approximate.LearnSamplesSet.FileName);
                writeEstimate(writer, "RMSE", XmlConvert.ToString(testRmse), Approximate.TestSamplesSet.FileName);
                writeEstimate(writer, "MSE", XmlConvert.ToString(Approximate.RMSEtoMSEforTest(testRmse)), Approximate.TestSamplesSet.FileName);
            }
            else
            {
                // No test set: 18 index/complexity estimates + 2 error estimates.
                writer.WriteAttributeString("Count", XmlConvert.ToString(20));
                double learnRmse = Approximate.approxLearnSamples(Approximate.RulesDatabaseSet[0]);
                writeEstimate(writer, "RMSE", XmlConvert.ToString(learnRmse), Approximate.LearnSamplesSet.FileName);
                writeEstimate(writer, "MSE", XmlConvert.ToString(Approximate.RMSEtoMSEforLearn(learnRmse)), Approximate.LearnSamplesSet.FileName);
            }

            // Interpretability / complexity indices (no Table attribute).
            // Type strings are kept byte-for-byte from the original output
            // format, including historical misspellings.
            writeEstimate(writer, "GIBNormal", XmlConvert.ToString(Approximate.getGIBNormal()));
            writeEstimate(writer, "GIBSumStraigh", XmlConvert.ToString(Approximate.getGIBSumStrait()));
            writeEstimate(writer, "GIBSumReverse", XmlConvert.ToString(Approximate.getGIBSumReverse()));
            writeEstimate(writer, "GICNormal", XmlConvert.ToString(Approximate.getGICNormal()));
            writeEstimate(writer, "GICSumStraigh", XmlConvert.ToString(Approximate.getGICSumStraigth()));
            writeEstimate(writer, "GICSumReverse", XmlConvert.ToString(Approximate.getGICSumReverce()));
            writeEstimate(writer, "GISNormal", XmlConvert.ToString(Approximate.getGISNormal()));
            writeEstimate(writer, "GISSumStraigh", XmlConvert.ToString(Approximate.getGISSumStraigt()));
            writeEstimate(writer, "GISSumReverce", XmlConvert.ToString(Approximate.getGISSumReverce()));
            writeEstimate(writer, "LindisNormal", XmlConvert.ToString(Approximate.getLindisNormal()));
            writeEstimate(writer, "LindisSumStraigh", XmlConvert.ToString(Approximate.getLindisSumStraight()));
            writeEstimate(writer, "LindisSumReverse", XmlConvert.ToString(Approximate.getLindisSumReverse()));
            writeEstimate(writer, "NormalIndex", XmlConvert.ToString(Approximate.getNormalIndex()));
            writeEstimate(writer, "RealIndex", XmlConvert.ToString(Approximate.getIndexReal()));
            writeEstimate(writer, "SumStraigthIndex", XmlConvert.ToString(Approximate.getIndexSumStraigt()));
            writeEstimate(writer, "SumReverseIndex", XmlConvert.ToString(Approximate.getIndexSumReverse()));
            writeEstimate(writer, "ComplexitIt", XmlConvert.ToString(Approximate.getComplexit()));
            writeEstimate(writer, "CountRules", XmlConvert.ToString(Approximate.getRulesCount()));
            writer.WriteEndElement();
        }

        /// <summary>
        /// Emits one &lt;Estimate&gt; element with attributes in the fixed
        /// order Table (optional), Type, Value. The value is pre-converted to
        /// string by the caller so the exact XmlConvert overload is preserved.
        /// </summary>
        private static void writeEstimate(XmlWriter writer, string type, string value, string table = null)
        {
            writer.WriteStartElement("Estimate");
            if (table != null)
            {
                writer.WriteAttributeString("Table", table);
            }
            writer.WriteAttributeString("Type", type);
            writer.WriteAttributeString("Value", value);
            writer.WriteEndElement();
        }
コード例 #21
0
ファイル: Individ.cs プロジェクト: CDMMKY/fuzzy_core
 /// <summary>
 /// Recomputes this individual's fitness: repairs the chromosome's rule base via
 /// <c>UnlaidProtectionFix</c> and stores the learning-sample approximation error in <c>Error</c>.
 /// </summary>
 /// <param name="error_checker">Fuzzy system used to repair and evaluate the rule base.</param>
 public void calc_Error(TSAFuzzySystem error_checker)
 {
     var ruleBase = hrom_vector.Core_Check;
     error_checker.UnlaidProtectionFix(ruleBase);
     Error = error_checker.approxLearnSamples(ruleBase);
 }
コード例 #22
0
        /// <summary>
        /// Builds a pool of candidate term-count layouts (one entry per feature), generates a
        /// rule base for each layout, additionally tries the dynamically tuned variant, and
        /// installs the candidate with the lowest learning-sample error into the system.
        /// </summary>
        /// <param name="Approximate">System to initialise; its rule-base set is rewritten.</param>
        /// <param name="config">Expected to be a <see cref="DynamicTuneConfGenerator"/>.</param>
        /// <returns>The same system instance carrying the best-scoring rule base.</returns>
        public override TSAFuzzySystem Generate(TSAFuzzySystem Approximate, IGeneratorConf config)
        {
            TSAFuzzySystem result = Approximate;

            // Original code assigned Pull_of_systems twice in one statement; once is enough.
            Pull_of_systems       = new List <List <int> >();
            Systems_ready_to_test = new List <KnowlegeBaseTSARules>();
            errors_of_systems     = new List <double>();

            DynamicTuneConfGenerator config1 = config as DynamicTuneConfGenerator;

            type_func = config1.IEWOTypeFunc;

            List <int> Varians_of_run_system = new List <int>();

            // Base layout: one term per feature.
            List <int> allOne = new List <int>();

            for (int i = 0; i < Approximate.CountFeatures; i++)
            {
                allOne.Add(1);
            }
            Pull_of_systems.Add(new List <int>(allOne));

            // Raise term counts to 2 feature by feature (cumulatively, as in the original)
            // and enumerate every permutation of each layout into the pool.
            for (int j = 0; j < Approximate.CountFeatures - 1; j++)
            {
                Varians_of_run_system.Clear();
                allOne[j] = 2;
                Varians_of_run_system.AddRange(new List <int> (allOne));
                Varians_of_run_system.Sort();
                Generate_all_variant_in_pool(Varians_of_run_system);
            }
            allOne[Approximate.CountFeatures - 1] = 2;
            Pull_of_systems.Add(new List <int> (allOne));

            // Score every layout: rebuild the rule base and record its learning error.
            for (int i = 0; i < Pull_of_systems.Count; i++)
            {
                Approximate.RulesDatabaseSet.Clear();

                GeneratorRulesEveryoneWithEveryone.InitRulesEveryoneWithEveryone(Approximate, type_func, Pull_of_systems[i].ToArray());
                Systems_ready_to_test.Add(Approximate.RulesDatabaseSet[0]);
                errors_of_systems.Add(result.approxLearnSamples(result.RulesDatabaseSet[0]));
            }

            // One more candidate: the dynamically tuned system.
            DynamicTuneClass dt = new DynamicTuneClass();

            Approximate = dt.TuneUpFuzzySystem(Approximate, config1);
            Systems_ready_to_test.Add(Approximate.RulesDatabaseSet[0]);
            errors_of_systems.Add(result.approxLearnSamples(result.RulesDatabaseSet[0]));

            // Install the candidate with the smallest learning error.
            int best_index = errors_of_systems.IndexOf(errors_of_systems.Min());

            result.RulesDatabaseSet.Clear();
            result.RulesDatabaseSet.Add(Systems_ready_to_test[best_index]);

            // Record how many terms each input variable ended up with.
            count_slice_vars = new int[Approximate.CountFeatures];
            for (int i = 0; i < count_slice_vars.Length; i++)
            {
                count_slice_vars[i] = result.RulesDatabaseSet[0].TermsSet.Count(x => x.NumVar == i);
            }
            maxError  = config1.MaxError;
            TryCount  = config1.TryCount;
            RuleCount = result.RulesDatabaseSet[0].RulesDatabase.Count;
            Console.WriteLine(Pull_of_systems.Count);
            result.RulesDatabaseSet[0].TermsSet.Trim();
            GC.Collect(); // NOTE(review): explicit collection kept from the original; usually unnecessary.

            return(result);
        }
コード例 #23
0
ファイル: Takagi_approx.cs プロジェクト: CDMMKY/fuzzy_core
        /// <summary>
        /// Grenade-explosion-style metaheuristic that tunes the membership-term parameters of a
        /// Takagi-Sugeno system. Each "shrapnel" row is a candidate parameter vector (1-based
        /// index over all term parameters, Nd of them); new candidates are produced by scaled
        /// random "explosions" around the current best vector, with goto-based rejection
        /// sampling to keep parameters ordered and inside each variable's attribute range.
        /// Feature truncation (Усечённые_признаки) disables the listed 1-based features first.
        /// </summary>
        /// <param name="Approx">System to tune; RulesDatabaseSet[0] is the starting rule base.</param>
        /// <param name="conf">Expected to be a <c>Param</c> instance (Russian-named settings).</param>
        /// <returns>The tuned system (same instance as <paramref name="Approx"/>).</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approx, ILearnAlgorithmConf conf)
        {
            // Read algorithm settings from the Param configuration object.
            iskl_prizn      = "";
            count_iteration = ((Param)conf).Количество_итераций;
            count_populate  = ((Param)conf).Число_осколков;
            exploration     = ((Param)conf).Фактор_исследования;
            reduce_koef     = ((Param)conf).Уменьшающий_коэффициент;
            priznaki_usech  = ((Param)conf).Усечённые_признаки;

            int    iter = 0, i, j, count_terms;
            double cosFi;
            int    Nd, variables, k = 1, best = 0;

            string[] buf;
            buf = priznaki_usech.Split(' ');
            TSAFuzzySystem result = Approx;
            // type = number of parameters per membership term (2 or 3+ branches below).
            int            type   = Approx.RulesDatabaseSet[0].TermsSet[0].CountParams;

            // Nd = total count of tunable parameters (terms * params-per-term).
            Nd = Approx.RulesDatabaseSet[0].TermsSet.Count * type;
            double[] X_best = new double[Nd + 1];
            double[,] X_pred    = new double[2, Nd + 1];
            double[,] d         = new double[count_populate, Nd + 1];
            double[,] explosion = new double[count_populate, Nd + 1];
            double[,] shrapnel  = new double[count_populate, Nd + 1];
            cosFi = Math.Cos(2 * Math.PI / count_populate);
            double RMSE_best = Approx.approxLearnSamples(Approx.RulesDatabaseSet[0]);

            double[] RMSE      = new double[count_populate];
            // NOTE(review): RMSE2 is allocated here but never written in this method, so the
            // "iter % 10 == 0" branch below effectively compares RMSE_pred[1] against 0.
            double[] RMSE2     = new double[count_populate];
            double[] RMSE_pred = new double[2];
            count_terms = Approx.RulesDatabaseSet[0].TermsSet.Count;
            variables   = Approx.LearnSamplesSet.CountVars;
            // NOTE(review): X_best, X_best2 and priznak are written but appear to be unused
            // remnants of the feature-selection variant (see commented-out block below).
            double[] X_best2 = new double[variables];
            double[,] priznak = new double[count_populate, variables];
            for (i = 0; i < variables; i++)
            {
                priznak[0, i] = 1;
                X_best2[i]    = 1;
            }
            // Population of rule-base copies; each is appended to the database set so that
            // approxLearnSamples can evaluate it by index.
            KnowlegeBaseTSARules[] X = new KnowlegeBaseTSARules[count_populate];
            for (int s = 0; s < count_populate - 1; s++)
            {
                X[s] = new KnowlegeBaseTSARules(Approx.RulesDatabaseSet[0]);
                Approx.RulesDatabaseSet.Add(X[s]);
            }

            // Disable the features listed (1-based) in the truncation string, if any.
            if (buf[0] != "")
            {
                for (k = 0; k < buf.Count(); k++)
                {
                    Approx.AcceptedFeatures[int.Parse(buf[k]) - 1] = false;
                    priznak[0, int.Parse(buf[k]) - 1] = 0;
                    //iskl_prizn += buf[k] + " ";
                }
            }
            // Build the human-readable list of excluded (1-based) feature numbers.
            for (k = 0; k < variables; k++)
            {
                if (Approx.AcceptedFeatures[k] == false)
                {
                    iskl_prizn += (k + 1).ToString() + " ";
                }
            }
            RMSE_best2 = Approx.approxLearnSamples(Approx.RulesDatabaseSet[0]);
            //for (j = 0; j < count_populate; j++)
            //{
            //    for (int h = 0; h < variables; h++)
            //    {
            //        if (priznak[0, h] == 1) Approx.AcceptedFeatures[h] = true;
            //        else
            //        {
            //            Approx.AcceptedFeatures[h] = false;
            //            for (int h1 = 0; h1 < Approx.RulesDatabaseSet[0].RulesDatabase.Count(); h1++)
            //            {
            //                Approx.RulesDatabaseSet[j].RulesDatabase[h1].RegressionConstantConsequent[h] = 0;
            //            }
            //        }
            //    }
            //}
            countRules = Approx.RulesDatabaseSet[0].RulesDatabase.Count();
            RMSE_best  = Approx.approxLearnSamples(Approx.RulesDatabaseSet[0]);
            // Main optimization loop.
            for (iter = 0; iter <= count_iteration; iter++)
            {
                best = 0;
                if (iter == 0)
                {
                    // Seed shrapnel[0] / history vectors with the current term parameters
                    // (1-based flat index k over all term parameters).
                    k = 1;
                    for (int h = 0; h < count_terms; h++)
                    {
                        for (int p = 0; p < type; p++)
                        {
                            shrapnel[0, k] = Approx.RulesDatabaseSet[0].TermsSet[h].Parametrs[p];
                            X_best[k]      = shrapnel[0, k];
                            X_pred[0, k]   = shrapnel[0, k];
                            X_pred[1, k]   = shrapnel[0, k];
                            k++;
                        }
                    }
                    RMSE_pred[0] = Approx.approxLearnSamples(Approx.RulesDatabaseSet[0]);
                    RMSE_pred[1] = Approx.approxLearnSamples(Approx.RulesDatabaseSet[0]);
                    // Seed explosion distances with random values inside each variable's range.
                    k            = 1;
                    for (int h = 0; h < count_terms; h++)
                    {
                        for (int p = 0; p < type; p++)
                        {
                            d[0, k] = RandomNext(Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[h].NumVar].Min, Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[h].NumVar].Max);
                            k++;
                        }
                    }
                }
                // Generate new shrapnel: for each parameter index i and each population row j,
                // retry (via the "generate" label) until ordering and range constraints hold.
                for (i = 1; i <= Nd; i++)
                {
                    for (j = 1; j < count_populate; j++)
                    {
                        // sum / sum2 count rejection retries; after thresholds the update rule
                        // is altered to escape infeasible regions.
                        int sum = 0, sum2 = 0;
generate:
                        sum++;
                        sum2++;

                        // Shrink the previous row's distance by a Gaussian factor, then project.
                        d[j, i] = d[j - 1, i] * randn();

                        explosion[j, i] = d[j, i] * cosFi;
                        // Two-parameter terms: enforce ordering within a term and variable range.
                        if (type == 2)
                        {
                            if (sum > 20)
                            {
                                if ((i + 1) % type == 0)
                                {
                                    if (i != 1)
                                    {
                                        shrapnel[j, i] = (shrapnel[0, i] + explosion[j, i]) + (shrapnel[j, i] - shrapnel[j, i - 2]);
                                    }
                                }
                                if (sum2 > 1000)
                                {
                                    sum = 0; sum2 = 0;
                                }
                            }
                            else
                            {
                                shrapnel[j, i] = shrapnel[0, i] + explosion[j, i];
                            }
                            if (i != 1)
                            {
                                if (((i + 1) % 2 == 0) && (shrapnel[j, i] < shrapnel[j, i - 2]) && (Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumVar))
                                {
                                    goto generate;
                                }
                            }
                            if (((i + 1) % 2 == 0) && (shrapnel[j, i] < Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i) / type].NumVar].Min))
                            {
                                goto generate;
                            }
                            if (((i + 1) % 2 == 0) && (shrapnel[j, i] > Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i) / type].NumVar].Max))
                            {
                                goto generate;
                            }
                            if ((i % 2 == 0) && (shrapnel[j, i] < 0))
                            {
                                goto generate;
                            }
                        }
                        // Terms with 3+ parameters: similar retry logic with wider neighbor checks.
                        if (type != 2)
                        {
                            if (sum > 20)
                            {
                                if ((i + (type - 2)) % type == 0)
                                {
                                    shrapnel[j, i] = (shrapnel[0, i] + explosion[j, i]) + (shrapnel[j, i - 1] - shrapnel[j, i]);
                                    if (sum2 > 2)
                                    {
                                        shrapnel[j, i] = (shrapnel[0, i] + explosion[j, i]) + (shrapnel[j, i - type] - shrapnel[j, i]);
                                        sum            = 19;
                                    }
                                    if (sum2 > 3)
                                    {
                                        shrapnel[j, i] = (shrapnel[0, i] + explosion[j, i]) + (shrapnel[j, i - type] - shrapnel[j, i]) + (shrapnel[j, i - 1] - shrapnel[j, i]);
                                        sum            = 19;
                                        sum2           = 0;
                                    }
                                }
                                else
                                {
                                    shrapnel[j, i] = (shrapnel[0, i] + explosion[j, i]) + (shrapnel[j, i - 1] - shrapnel[j, i]);
                                    sum            = 19;
                                }
                            }
                            else
                            {
                                shrapnel[j, i] = shrapnel[0, i] + explosion[j, i];
                            }
                        }

                        if (type != 2)
                        {
                            // Pin boundary parameters of the first/last variable to the attribute
                            // Min/Max, and skip further checks (goto exit) at variable transitions.
                            if ((i == 2) || (i == 1))
                            {
                                shrapnel[j, i] = Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumVar].Min;
                            }

                            if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 2) / type)].NumVar)
                            {
                                shrapnel[j, i] = Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumVar].Min; goto exit;
                            }

                            if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i - type) / type)].NumVar)
                            {
                                shrapnel[j, i] = Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumVar].Min; goto exit;
                            }
                            if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumVar != (variables - 1))
                            {
                                if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i) / type)].NumVar)
                                {
                                    shrapnel[j, i] = Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumVar].Max; goto exit;
                                }
                                if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i + 1) / type)].NumVar)
                                {
                                    shrapnel[j, i] = Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumVar].Max; goto exit;
                                }
                            }
                            if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumVar == (variables - 1))
                            {
                                if ((i == (count_terms * 3 - 1)) || (i == (count_terms * 3)))
                                {
                                    shrapnel[j, i] = Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i - 1) / type].NumVar].Max;
                                }
                            }

                            // Reject candidates that violate within-term ordering or range limits.
                            if (((i + (type - 2)) % type == 0) && (shrapnel[j, i] < shrapnel[j, i - 1]))
                            {
                                if (shrapnel[j, i] == Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumVar].Min)
                                {
                                    i--;
                                }
                                goto generate;
                            }
                            if ((i % type == 0) && (shrapnel[j, i] < shrapnel[j, i - 1]))
                            {
                                goto generate;
                            }
                            if (i != 1)
                            {
                                if (((i - (type - 2)) % type == 0) && ((shrapnel[j, i] > shrapnel[j, i - 1]) || (shrapnel[j, i] > Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumVar].Max) || (shrapnel[j, i] < Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumVar].Min)))
                                {
                                    goto generate;
                                }
                            }
                            if (((i + (type - 2)) % type == 0) && ((shrapnel[j, i] < Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumVar].Min) || (shrapnel[j, i] > Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[(int)(i / type)].NumVar].Max)))
                            {
                                goto generate;
                            }
exit:
                            // Cross-variable monotonicity check for non-adjacent variables.
                            if (i > type)
                            {
                                if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumVar != 0)
                                {
                                    if (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1 - type) / type)].NumVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / type)].NumVar - 1)
                                    {
                                        if (((i + (type - 2)) % type == 0) && ((shrapnel[j, i] < shrapnel[j, i - type])))
                                        {
                                            goto generate;
                                        }
                                    }
                                }
                            }
                        }

                        //    else
                        //    {
                        //    if (i > 1)
                        //    {
                        //        if ((i%2!=0) && (shrapnel[j, i] < shrapnel[j, i - 2]) && (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / 2)].NumberOfInputVar == Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 2) / 2)].NumberOfInputVar))
                        //        {
                        //            goto generate;
                        //        }
                        //        if((i%2!=0) && (shrapnel[j, i] > Approx.LearnSamplesSet.InputAttribute(Approx.RulesDatabaseSet[0].TermsSet[(int)(i / 2)].NumberOfInputVar).Max) && (Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 1) / 2)].NumberOfInputVar != Approx.RulesDatabaseSet[0].TermsSet[(int)((i - 2) / 2)].NumberOfInputVar))
                        //        {
                        //            goto generate;
                        //        }
                        //    }
                        //}
                    }
                }

                // Copy each shrapnel row into its rule-base copy.
                for (int z = 0; z < count_populate; z++)
                {
                    k = 1;
                    for (int h = 0; h < count_terms; h++)
                    {
                        for (int p = 0; p < type; p++)
                        {
                            Approx.RulesDatabaseSet[z].TermsSet[h].Parametrs[p] = shrapnel[z, k];
                            k++;
                        }
                    }
                }
                // Evaluate the population and track the best candidate.
                for (j = 0; j < count_populate; j++)
                {
                    RMSE[j] = Approx.approxLearnSamples(Approx.RulesDatabaseSet[j]);
                    if (RMSE[j] < RMSE_best)
                    {
                        RMSE_best = RMSE[j];
                        best      = j;
                    }
                }

                // Promote the best row to index 0; while exploring, re-randomize distances.
                k = 1;
                for (int h = 0; h < count_terms; h++)
                {
                    for (int p = 0; p < type; p++)
                    {
                        shrapnel[0, k] = shrapnel[best, k];
                        if (exploration > iter)
                        {
                            d[0, k] = RandomNext(Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[h].NumVar].Min, Approx.LearnSamplesSet.InputAttributes[Approx.RulesDatabaseSet[0].TermsSet[h].NumVar].Max);
                        }
                        Approx.RulesDatabaseSet[0].TermsSet[h].Parametrs[p] = shrapnel[0, k];
                        k++;
                    }
                }

                // Every 10th iteration compares against RMSE2 (see NOTE above: never assigned,
                // so this branch compares against 0 — confirm whether that is intended).
                if (iter % 10 == 0)
                {
                    if (RMSE_pred[1] > RMSE2[best])
                    {
                        for (k = 1; k <= Nd; k++)
                        {
                            X_pred[0, k] = X_pred[1, k];
                            X_pred[1, k] = shrapnel[best, k];
                        }
                        RMSE_pred[0] = RMSE_pred[1];
                        RMSE_pred[1] = RMSE2[best];
                    }
                }
                else
                {
                    // Keep a two-deep history of the best parameter vectors and their errors.
                    if (RMSE_pred[1] > RMSE[best])
                    {
                        for (k = 1; k <= Nd; k++)
                        {
                            X_pred[0, k] = X_pred[1, k];
                            X_pred[1, k] = shrapnel[best, k];
                        }
                        RMSE_pred[0] = RMSE_pred[1];
                        RMSE_pred[1] = RMSE[best];
                    }
                }
            }

            // NOTE(review): X_pred / RMSE_pred history is never applied back to the system
            // before returning; the best parameters reach it via RulesDatabaseSet[0] above.
            return(result);
        }
コード例 #24
0
 /// <summary>
 /// Stores a snapshot of a rule base together with the learning and test errors
 /// that <paramref name="Checker"/> measures for it, tagged with the algorithm name.
 /// </summary>
 /// <param name="Checker">Fuzzy system used to evaluate the rule base.</param>
 /// <param name="SourceElem">Rule base to snapshot (deep-copied).</param>
 /// <param name="algName">Name of the algorithm that produced the rule base.</param>
 public TakagiSugenoElementofStorage(TSAFuzzySystem Checker, KnowlegeBaseTSARules SourceElem, string algName) : base(algName)
 {
     LearnError = Checker.approxLearnSamples(SourceElem);
     TestError  = Checker.approxTestSamples(SourceElem);
     element    = new KnowlegeBaseTSARules(SourceElem);
 }
コード例 #25
0
        /// <summary>
        /// Converts the raw errors in <c>Errors</c> into normalized selection weights in
        /// <c>mass</c>. Entries tied for the worst error (beyond the first) are regenerated
        /// by Gaussian perturbation of the base rule base until they improve, then all
        /// weights are mapped to [0, 1] (worst -> 0, best -> 1) and normalized to sum to 1.
        /// </summary>
        private void weight()
        {
            double sum   = 0;
            double worst = mass[0];
            double best  = mass[0];

            int[] index = new Int32[MCount];
            int   count = 0;

            // NOTE(review): the loop starts at i = 1, so mass[0] is never refreshed from
            // Errors[0] here — confirm the caller keeps mass[0] in sync.
            for (int i = 1; i < MCount; i++)
            {
                mass[i] = Errors[i];
                if (mass[i] > worst)
                {
                    worst = mass[i];
                }
                if (mass[i] < best)
                {
                    best = mass[i];
                }
            }
            // Collect the indices of every entry tied for the worst error.
            for (int i = 0; i < MCount; i++)
            {
                if (mass[i] == worst)
                {
                    count++;
                    index[count - 1] = i;
                }
            }
            // Regenerate all but the first worst-tied candidate; retry (i--) while the
            // replacement is still worse than the recorded worst error.
            if (count > 1)
            {
                for (int i = 1; i < count; i++)
                {
                    ///X[index[i]] = ;
                    int f = index[i];
                    KnowlegeBaseTSARules temp_c_Rule = new KnowlegeBaseTSARules(theFuzzySystem.RulesDatabaseSet[0]);
                    temp_c_Rule = new KnowlegeBaseTSARules(theFuzzySystem.RulesDatabaseSet[0]);
                    X[f]        = temp_c_Rule;
                    // Gaussian jitter around each term parameter, offset by a fraction of the
                    // variable's scatter.
                    for (int j = 0; j < X[f].TermsSet.Count; j++)
                    {
                        for (int k = 0; k < X[f].TermsSet[j].Parametrs.Count(); k++)
                        {
                            X[f].TermsSet[j].Parametrs[k] = GaussRandom.Random_gaussian(rand, X[f].TermsSet[j].Parametrs[k], 0.1 * (X[f].TermsSet[j].Parametrs[k])) + theFuzzySystem.LearnSamplesSet.InputAttributes[X[f].TermsSet[j].NumVar].Scatter * 0.05;
                        }
                    }
                    // Evaluate via a temporary append to the database set (MSE of learn RMSE).
                    theFuzzySystem.RulesDatabaseSet.Add(X[f]);
                    theFuzzySystem.UnlaidProtectionFix(theFuzzySystem.RulesDatabaseSet[theFuzzySystem.RulesDatabaseSet.Count - 1]);
                    Errors[f] = theFuzzySystem.RMSEtoMSEforLearn(theFuzzySystem.approxLearnSamples(theFuzzySystem.RulesDatabaseSet[theFuzzySystem.RulesDatabaseSet.Count - 1]));
                    X[f]      = theFuzzySystem.RulesDatabaseSet[theFuzzySystem.RulesDatabaseSet.Count - 1];
                    theFuzzySystem.RulesDatabaseSet.Remove(X[f]);
                    mass[f] = Errors[f];
                    if (mass[f] > worst)
                    {
                        i--;
                    }
                }
            }
            // Linear rescale: worst error -> 0, best error -> 1.
            // NOTE(review): if best == worst (all errors equal) this divides by zero and
            // yields NaN weights — confirm upstream guarantees a spread of errors.
            for (int i = 0; i < MCount; i++)
            {
                mass[i] = (mass[i] - worst) / (best - worst);
                sum     = sum + mass[i];
            }
            // Normalize weights so they sum to 1.
            for (int i = 0; i < MCount; i++)
            {
                mass[i] = mass[i] / sum;
            }
        }
コード例 #26
0
ファイル: CuckooApprox.cs プロジェクト: CDMMKY/fuzzy_core
        /// <summary>
        /// Cuckoo-search tuning of the fuzzy system's term parameters and rule consequents.
        /// Index 0 is the roaming cuckoo (Levy-flight mutations); indices 1..count_particle
        /// are the nests. Each iteration the cuckoo's egg replaces a random nest if better,
        /// then up to <c>m</c> worst nests are abandoned with probability (1 - <c>p</c>).
        /// </summary>
        /// <param name="Approx">System to tune; its first rule base is the starting point.</param>
        /// <param name="conf">Expected to be a <see cref="CuckooConf"/>.</param>
        /// <returns>The same system with the best rule base found placed at index 0.</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approx, ILearnAlgorithmConf conf)
        {
            TSAFuzzySystem result = Approx;

            count_iteration = ((CuckooConf)conf).CuckooCountIterate;
            count_particle  = ((CuckooConf)conf).CuckooPopulationSize;
            m = ((CuckooConf)conf).CuckooWorse;
            p = ((CuckooConf)conf).CuckooLifeChance;

            // Slot 0 = cuckoo, slots 1..count_particle = nests.
            KnowlegeBaseTSARules[] X = new KnowlegeBaseTSARules[count_particle + 1];
            double[] Errors          = new double[count_particle + 1];

            Random rnd  = new Random();
            int    best = 0;

            // Every slot starts as a copy of the current rule base with its current error.
            for (int i = 0; i < count_particle + 1; i++)
            {
                X[i]      = new KnowlegeBaseTSARules(result.RulesDatabaseSet[0]);
                Errors[i] = result.approxLearnSamples(result.RulesDatabaseSet[0]);
            }

            for (int i = 0; i < count_iteration; i++)
            {
                // Levy-flight step: perturb every term parameter of the cuckoo's copy...
                X[0] = new KnowlegeBaseTSARules(X[0]);
                for (int k = 0; k < X[0].TermsSet.Count; k++)
                {
                    for (int q = 0; q < X[0].TermsSet[k].CountParams; q++)
                    {
                        double b = (rnd.Next(1000, 2000) / Convert.ToDouble(1000));
                        X[0].TermsSet[k].Parametrs[q] = X[0].TermsSet[k].Parametrs[q] + Levi(BM(sigu(b)), BM(1.0), b);
                    }
                }

                // ...and every rule consequent.
                for (int k = 0; k < X[0].all_conq_of_rules.Length; k++)
                {
                    double b = (rnd.Next(1000, 2000) / Convert.ToDouble(1000));
                    X[0].all_conq_of_rules[k] = X[0].all_conq_of_rules[k] + Levi(BM(sigu(b)), BM(1.0), b);
                }

                // Evaluate the cuckoo by temporarily appending it to the database set.
                result.RulesDatabaseSet.Add(X[0]);
                int temp_index = result.RulesDatabaseSet.Count - 1;
                Errors[0] = result.approxLearnSamples(result.RulesDatabaseSet[temp_index]);
                result.RulesDatabaseSet.RemoveAt(temp_index);

                // Drop the egg into a random nest; keep whichever of the two is better.
                int s = rnd.Next(1, count_particle + 1);

                if (Errors[0] < Errors[s])
                {
                    X[s]      = X[0];
                    Errors[s] = Errors[0];
                }
                else
                {
                    X[0]      = X[s];
                    Errors[0] = Errors[s];
                }

                // Abandon up to m of the worst nests, each with probability (1 - p).
                for (int v = 0; v < m; v++)
                {
                    double max = Errors[1];
                    int    ind = 1;
                    for (int r = 2; r < count_particle + 1; r++)
                    {
                        if (Errors[r] > max)
                        {
                            max = Errors[r];
                            ind = r;
                        }
                    }
                    double h = (rnd.Next(1, 1000) / Convert.ToDouble(1000));
                    if (h > p)
                    {
                        // Rebuild the worst nest by uniform jitter around the cuckoo solution.
                        // NOTE(review): the consequent loop is nested inside the term loop here
                        // (re-randomized once per term), preserved from the original.
                        X[ind] = new KnowlegeBaseTSARules(X[ind]);
                        for (int j = 0; j < X[ind].TermsSet.Count; j++)
                        {
                            for (int k = 0; k < X[ind].TermsSet[j].CountParams; k++)
                            {
                                X[ind].TermsSet[j].Parametrs[k] = X[0].TermsSet[j].Parametrs[k] + (rnd.Next(-1000, 1000) / Convert.ToDouble(1000));
                            }
                            for (int k = 0; k < X[ind].all_conq_of_rules.Length; k++)
                            {
                                X[ind].all_conq_of_rules[k] = X[0].all_conq_of_rules[k] + (rnd.Next(1, 1000) / Convert.ToDouble(1000));
                            }
                        }
                        result.RulesDatabaseSet.Add(X[ind]);
                        temp_index  = result.RulesDatabaseSet.Count - 1;
                        Errors[ind] = result.approxLearnSamples(result.RulesDatabaseSet[temp_index]);
                        result.RulesDatabaseSet.RemoveAt(temp_index);
                    }
                }
            }

            // Select the best solution over ALL count_particle + 1 entries.
            // BUGFIX: the original loop condition was g < count_particle, which never
            // examined the last nest even though rnd.Next(1, count_particle + 1) can
            // place solutions there.
            double min = Errors[0];

            best = 0;
            for (int g = 1; g < count_particle + 1; g++)
            {
                if (Errors[g] < min)
                {
                    min  = Errors[g];
                    best = g;
                }
            }

            result.RulesDatabaseSet[0] = X[best];
            Approx.RulesDatabaseSet[0].TermsSet.Trim();
            return(result);
        }
コード例 #27
0
 /// <summary>
 /// Registers <paramref name="Solution"/> in the temporary pool, repairs it with
 /// <c>UnlaidProtectionFix</c>, and returns its learning-sample approximation error.
 /// </summary>
 /// <param name="Solution">Candidate rule base to evaluate.</param>
 /// <returns>Approximation error of the (repaired) candidate on the learning samples.</returns>
 public double CalcNewProfit(KnowlegeBaseTSARules Solution)
 {
     Tempory.Add(Solution);
     theFuzzySystem.UnlaidProtectionFix(Solution);
     double profit = theFuzzySystem.approxLearnSamples(Solution);
     return profit;
 }
コード例 #28
0
        /// <summary>
        /// Discrete feature-selection tuning stage: binary "priznak" masks decide which
        /// input variables stay enabled (<c>AcceptedFeatures</c>); consequent coefficients
        /// of disabled variables are zeroed. The best mask found over
        /// <c>iter_descrete</c> passes is applied to the system before returning.
        /// </summary>
        /// <param name="Approx">Fuzzy system to tune; rule base [0] is the starting point.</param>
        /// <param name="conf">Must be a <c>Param</c> instance carrying the algorithm settings.</param>
        /// <returns>The tuned system (same instance as <paramref name="Approx"/>).</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approx, ILearnAlgorithmConf conf)
        {
            // Read settings from the config (Russian property names: iteration count,
            // fragment count, exploration factor, reduction factor, truncated features,
            // discrete-algorithm iterations).
            iskl_prizn      = "";
            count_iteration = ((Param)conf).Количество_итераций;
            count_populate  = ((Param)conf).Число_осколков;
            exploration     = ((Param)conf).Фактор_исследования;
            reduce_koef     = ((Param)conf).Уменьшающий_коэффициент;
            priznaki_usech  = ((Param)conf).Усечённые_признаки;
            iter_descrete   = ((Param)conf).Итерации_дискр_алг;

            // Unused locals of the original (Nd, type, X_best, X_pred, direction, d,
            // explosion, shrapnel, RMSE, RMSE_all, RMSE_tst, RMSE_pred, RMSE_best,
            // count_terms, terms, count_iter) have been removed; none were read.
            int    iter = 0, iter2, i, j;
            int    count_best2 = 0, best_pred = 0;
            double cosFi, RMSE_best2;
            int    variables, k = 1, best2 = 0;
            TSAFuzzySystem result = Approx;

            cosFi      = Math.Cos(2 * Math.PI / count_populate);
            RMSE_best2 = Approx.approxLearnSamples(Approx.RulesDatabaseSet[0]);
            RMSE2      = new double[count_populate];
            variables  = Approx.LearnSamplesSet.CountVars;

            double[] X_best2 = new double[variables];
            double[,] d3      = new double[count_populate, variables];
            double[,] priznak = new double[count_populate, variables];
            // Start with every feature enabled in mask slot 0.
            for (i = 0; i < variables; i++)
            {
                priznak[0, i] = 1;
                X_best2[i]    = 1;
            }
            // Clone the base rule set into a population; slot 0 is the original base,
            // so only count_populate - 1 extra copies are appended.
            KnowlegeBaseTSARules[] X = new KnowlegeBaseTSARules[count_populate];
            for (int s = 0; s < count_populate - 1; s++)
            {
                X[s] = new KnowlegeBaseTSARules(Approx.RulesDatabaseSet[0]);
                Approx.RulesDatabaseSet.Add(X[s]);
            }

            // FIX: use one RNG for the whole run. The original constructed
            // `new Random()` inside the innermost loop; time-seeded instances created
            // in quick succession share a seed and produce identical sequences,
            // collapsing the stochastic binarization below.
            Random random = new Random();

            for (iter2 = 0; iter2 < iter_descrete; iter2++)
            {
                best2 = 0;
                // iter is never modified, so this seeding runs on every pass.
                if (iter == 0)
                {
                    for (k = 0; k < variables; k++)
                    {
                        d3[0, k] = RandomNext(Approx.LearnSamplesSet.InputAttributes[k].Min, Approx.LearnSamplesSet.InputAttributes[k].Max);
                    }
                }
                for (i = 0; i < variables; i++)
                {
                    for (j = 1; j < count_populate; j++)
                    {
                        // Propagate the continuous value down the population, then
                        // binarize it stochastically via the descret() transfer function.
                        d3[j, i]      = d3[j - 1, i] * randn();
                        priznak[j, i] = d3[j, i] * cosFi;

                        if (random.NextDouble() < descret(priznak[j, i]))
                        {
                            priznak[j, i] = 1;
                        }
                        else
                        {
                            priznak[j, i] = 0;
                        }
                    }
                }

                for (j = 1; j < count_populate; j++)
                {
                    for (int h = 0; h < variables; h++)
                    {
                        if (priznak[j, h] == 1)
                        {
                            Approx.AcceptedFeatures[h] = true;
                        }
                        else
                        {
                            Approx.AcceptedFeatures[h] = false;
                            // NOTE(review): the rule count is taken from base [0] while the
                            // consequents of base [j] are zeroed — confirm this index mix
                            // is intentional (the bases are clones, so counts likely match).
                            for (int h1 = 0; h1 < Approx.RulesDatabaseSet[0].RulesDatabase.Count(); h1++)
                            {
                                Approx.RulesDatabaseSet[j].RulesDatabase[h1].RegressionConstantConsequent[h] = 0;
                            }
                        }
                    }
                    // NOTE(review): error is always measured on rule base [0];
                    // presumably base [j] was intended — verify before changing.
                    RMSE2[j] = Approx.approxLearnSamples(Approx.RulesDatabaseSet[0]);
                    str     += RMSE2[j].ToString() + " ";
                    int count_features = 0;
                    for (k = 0; k < variables; k++)
                    {
                        if (priznak[j, k] == 1)
                        {
                            count_features++;
                        }
                    }
                    iskl_prizn2 += count_features.ToString() + " ";
                    if (RMSE2[j] < RMSE_best2)
                    {
                        RMSE_best2 = RMSE2[j];
                        best2      = j;
                    }
                    for (int h = 0; h < variables; h++)
                    {
                        X_best2[h] = priznak[best2, h];
                    }
                }
                // NOTE(review): best_pred is never updated, so this effectively counts
                // consecutive passes where the best stayed at index 0 — confirm it was
                // meant to track the previous pass's best index.
                if (best_pred == best2)
                {
                    count_best2++;
                }
                else
                {
                    count_best2 = 0;
                }
                // Promote the best mask to slot 0 as the seed for the next pass.
                for (k = 0; k < variables; k++)
                {
                    priznak[0, k] = priznak[best2, k];
                }
            }

            // Apply the winning mask and record excluded (1-based) feature numbers.
            for (k = 0; k < variables; k++)
            {
                if (priznak[best2, k] == 1)
                {
                    Approx.AcceptedFeatures[k] = true;
                }
                else
                {
                    Approx.AcceptedFeatures[k] = false;
                    iskl_prizn += (k + 1).ToString() + " ";
                }
            }

            return(result);
        }
コード例 #29
0
        /// <summary>
        /// Gravitational Search Algorithm (GSA) style tuning: maintains MCount candidate
        /// rule bases, perturbs them, and repeatedly evaluates/updates them via the
        /// field-level <c>algorithm()</c> step, keeping the best solution found.
        /// </summary>
        /// <param name="Approx">Fuzzy system to tune; rule base [0] is the starting point.</param>
        /// <param name="conf">Must be a <c>gsa_conf</c> instance with the algorithm settings.</param>
        /// <returns>The tuned system; rule base [0] is replaced only if improved.</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approx, ILearnAlgorithmConf conf) // optimization by the custom algorithm happens here
        {
            theFuzzySystem = Approx;

            // Config properties (Russian names): iteration count, particle count,
            // gravitational constant G0, damping coefficient alpha, small epsilon.
            iterMax = ((gsa_conf)conf).Количество_итераций;
            MCount  = ((gsa_conf)conf).Количество_частиц;
            G0      = ((gsa_conf)conf).Гравитационная_постоянная;
            alpha   = ((gsa_conf)conf).Коэффициент_уменьшения;
            epsilon = ((gsa_conf)conf).Малая_константа;
            X       = new KnowlegeBaseTSARules[MCount];
            Errors  = new double[MCount];
            mass    = new double[MCount];
            double ErrorBest;
            KnowlegeBaseTSARules BestSolution;
            double minValue;
            int    iminIndex;
            KnowlegeBaseTSARules temp_c_Rule = new KnowlegeBaseTSARules(theFuzzySystem.RulesDatabaseSet[0]);

            // Particle 0 is the unmodified original base; its error is the baseline.
            X[0]      = temp_c_Rule;
            Errors[0] = theFuzzySystem.RMSEtoMSEforLearn(theFuzzySystem.approxLearnSamples(X[0]));
            double ErrorZero = Errors[0];

            ErrorBest    = ErrorZero;
            BestSolution = temp_c_Rule;
            //number = X[0].TermsSet.Count * X[0].TermsSet[0].Parametrs.Count();

            // Per-particle pairwise distance components, velocities, pairwise
            // distances, and accelerations used by algorithm().
            R     = new double[MCount][, , ];
            speed = new double[MCount, X[0].TermsSet.Count, X[0].TermsSet[0].Parametrs.Count()];

            for (int i = 0; i < MCount; i++)
            {
                R[i] = new double[MCount, X[0].TermsSet.Count, X[0].TermsSet[0].Parametrs.Count()];
            }
            RR = new double[MCount, MCount];
            a  = new double[MCount, X[0].TermsSet.Count, X[0].TermsSet[0].Parametrs.Count()];

            // Seed particles 1..MCount-1 with Gaussian perturbations of the original
            // term parameters, plus a small scatter-proportional offset.
            for (int i = 1; i < MCount; i++)
            {
                temp_c_Rule = new KnowlegeBaseTSARules(theFuzzySystem.RulesDatabaseSet[0]);
                X[i]        = temp_c_Rule;
                for (int j = 0; j < X[i].TermsSet.Count; j++)
                {
                    for (int k = 0; k < X[i].TermsSet[j].Parametrs.Count(); k++)
                    {
                        X[i].TermsSet[j].Parametrs[k] = GaussRandom.Random_gaussian(rand, X[i].TermsSet[j].Parametrs[k], 0.1 * (X[i].TermsSet[j].Parametrs[k])) + theFuzzySystem.LearnSamplesSet.InputAttributes[X[i].TermsSet[j].NumVar].Scatter * 0.05;
                    }
                }
                // Evaluation protocol: temporarily add the base to the system's set,
                // sanitize it, score it, then remove it again.
                theFuzzySystem.RulesDatabaseSet.Add(X[i]);
                theFuzzySystem.UnlaidProtectionFix(theFuzzySystem.RulesDatabaseSet[theFuzzySystem.RulesDatabaseSet.Count - 1]);
                Errors[i] = theFuzzySystem.RMSEtoMSEforLearn(theFuzzySystem.approxLearnSamples(theFuzzySystem.RulesDatabaseSet[theFuzzySystem.RulesDatabaseSet.Count - 1]));

                X[i] = theFuzzySystem.RulesDatabaseSet[theFuzzySystem.RulesDatabaseSet.Count - 1];

                theFuzzySystem.RulesDatabaseSet.Remove(X[i]);
            }

            for (int iter = 0; iter < iterMax; iter++)
            {
                // Gravitational "constant" decays exponentially: g(t) = G(0)*e^(-a*t/T).
                G = G0 * Math.Pow(Math.E, ((-1) * alpha * iter / iterMax));

                /*  if (iter >= 100) {
                 *    Console.WriteLine("Wait");
                 * }*/
                // One GSA movement step over all particles (updates X in place).
                algorithm();
                // Re-score every particle using the same add/fix/score/remove protocol.
                for (int r = 0; r < MCount; r++)
                {
                    theFuzzySystem.RulesDatabaseSet.Add(X[r]);
                    theFuzzySystem.UnlaidProtectionFix(theFuzzySystem.RulesDatabaseSet[theFuzzySystem.RulesDatabaseSet.Count - 1]);
                    Errors[r] = theFuzzySystem.RMSEtoMSEforLearn(theFuzzySystem.approxLearnSamples(theFuzzySystem.RulesDatabaseSet[theFuzzySystem.RulesDatabaseSet.Count - 1]));
                    X[r]      = theFuzzySystem.RulesDatabaseSet[theFuzzySystem.RulesDatabaseSet.Count - 1];
                    theFuzzySystem.RulesDatabaseSet.Remove(X[r]);
                }
                // Track the best particle of this generation.
                minValue  = Errors.Min();
                iminIndex = Errors.ToList().IndexOf(minValue);
                if (minValue < ErrorBest)
                {
                    ErrorBest    = minValue;
                    BestSolution = new KnowlegeBaseTSARules(X[iminIndex]);
                }
            }

            // Commit the best solution only if it beats the original base's error.
            if (ErrorBest < ErrorZero)
            {
                theFuzzySystem.RulesDatabaseSet[0] = BestSolution;
            }

            return(theFuzzySystem);
        }
コード例 #30
0
ファイル: DynamicTune.cs プロジェクト: CDMMKY/fuzzy_core
        /// <summary>
        /// Dynamic structure tuning: while the learn error exceeds <c>MaxError</c>,
        /// finds the worst-error region of the input space, inserts a new term for the
        /// worst variable there, rewires/extends the affected rules (recomputing their
        /// consequents via LSM), and keeps the best knowledge base seen. Stops on
        /// error target, rule-count limit, or <c>TryCount</c> non-improving iterations.
        /// </summary>
        /// <param name="Approximate">Fuzzy system to tune; rule base [0] is the start.</param>
        /// <param name="conf">Must be a <c>DynamicTuneConf</c> (MaxError, RulesCount, TryCount).</param>
        /// <returns>The tuned system with the best knowledge base at index 0.</returns>
        public override TSAFuzzySystem TuneUpFuzzySystem(TSAFuzzySystem Approximate, ILearnAlgorithmConf conf) // + override
        {
            result = Approximate;


            // NOTE(review): Archive / ErrorsArchive are never used in this method.
            List <KnowlegeBaseTSARules> Archive = new List <KnowlegeBaseTSARules>();
            List <double> ErrorsArchive         = new List <double>();

            var config = (DynamicTuneConf)conf;

            maxError  = config.MaxError;
            RuleCount = config.RulesCount;
            TryCount  = config.TryCount;
            double error        = result.RMSEtoMSEdiv2forLearn(result.approxLearnSamples(result.RulesDatabaseSet[0]));
            var    kbToOptimize = new KnowlegeBaseTSARules(result.RulesDatabaseSet[0]);
            var    kbBest       = new KnowlegeBaseTSARules(kbToOptimize);
            double errorBefore  = Double.MaxValue;

            result.UnlaidProtectionFix(kbToOptimize);

            // Group the terms of each input variable into its own "input space".
            List <input_space> variable_spaces = new List <input_space>();

            for (int i = 0; i < result.LearnSamplesSet.InputAttributes.Count; i++)
            {
                List <Term> terms_of_variable = new List <Term>();
                terms_of_variable = kbToOptimize.TermsSet.Where(term => term.NumVar == i).ToList();
                variable_spaces.Add(new input_space(terms_of_variable, i));
            }

            int indexRegion = -1,
                indexVar    = -1,
                number_of_input_variables = variable_spaces.Count;

            int tryCount = 0;



            while (error > maxError)
            {
                if (Double.IsInfinity(error))
                {
                    throw new Exception("Something went wrong, error is Infinity, region: " + indexRegion);
                }
                if (Double.IsNaN(error))
                {
                    throw new Exception("Something went wrong, error is NaN, region: " + indexRegion);
                }

                // Build all candidate regions as the Cartesian product of each
                // variable's region sides.
                region_side[][] sides = new region_side[number_of_input_variables][];
                for (int i = 0; i < number_of_input_variables; i++)
                {
                    sides[i] = variable_spaces[i].get_region_sides();
                }
                var cartresult = CartesianProduct.Get(sides);

                List <region2> regions = new List <region2>();

                foreach (var x in cartresult)
                {
                    regions.Add(new region2(x.ToList(), result, variable_spaces));
                }

                // Pick the region with the largest error.
                // NOTE(review): the max is taken BEFORE NaN/Infinity entries are zeroed
                // below, so a NaN/Inf region error can still win — confirm the ordering.
                List <double> region_errors = regions.Select(x => x.region_error()).ToList();
                indexRegion = region_errors.IndexOf(region_errors.Max());

                for (int i = 0; i < region_errors.Count; i++)
                {
                    if (Double.IsNaN(region_errors[i]) || Double.IsInfinity(region_errors[i]) ||
                        Double.IsNegativeInfinity(region_errors[i]) || Double.IsPositiveInfinity(region_errors[i]))
                    {
                        region_errors[i] = 0;
                    }
                }

                // Within the chosen region, pick the variable with the largest error;
                // if all per-variable errors are equal, pick one at random.
                List <double> variable_errors = regions[indexRegion].variable_errors();
                bool          check1          = false;
                for (int i = 1; i < variable_errors.Count; i++)
                {
                    if (variable_errors[i - 1] != variable_errors[i])
                    {
                        check1 = true;
                        break;
                    }
                }
                if (!check1)
                {
                    indexVar = StaticRandom.Next(variable_errors.Count - 1);
                }
                else
                {
                    indexVar = variable_errors.IndexOf(variable_errors.Max());
                }

                // Insert a new term splitting the chosen region along the chosen variable.
                Term new_term = regions[indexRegion].new_term(indexVar);
                result.RulesDatabaseSet[0] = kbToOptimize;
                kbToOptimize.TermsSet.Add(new_term);

                // Rules (CHECK REFERENCE TYPES)
                int @var = indexVar;

                // Rules touching the left side get duplicated with the new term swapped
                // in; rules touching the right side only get their consequents refit.
                var rulesLeft = kbToOptimize.RulesDatabase.Where(
                    rule => rule.ListTermsInRule.Contains(regions[indexRegion].sides[indexVar].left)).ToList();
                var rulesRight = kbToOptimize.RulesDatabase.Where(
                    rule => rule.ListTermsInRule.Contains(regions[indexRegion].sides[indexVar].right)).ToList();
                for (int j = 0; j < rulesLeft.Count; j++)
                {
                    int[] order = new int[rulesLeft[j].ListTermsInRule.Count];
                    for (int k = 0; k < rulesLeft[j].ListTermsInRule.Count; k++)
                    {
                        Term temp_term = rulesLeft[j].ListTermsInRule[k];
                        if (temp_term == regions[indexRegion].sides[indexVar].left)
                        {
                            temp_term = new_term;
                        }
                        order[k] = kbToOptimize.TermsSet.FindIndex(x => x == temp_term);
                    }
                    // NOTE(review): consequents are copied from RulesDatabase[j], not
                    // from rulesLeft[j] — the indices only coincide when the filtered
                    // list aligns with the full list; verify this is intended.
                    double   temp_approx_Values = kbToOptimize.RulesDatabase[j].IndependentConstantConsequent;
                    double[] temp_approx_RegressionConstantConsequent =
                        kbToOptimize.RulesDatabase[j].RegressionConstantConsequent.Clone() as double[];
                    TSARule temp_rule = new TSARule(
                        kbToOptimize.TermsSet, order, temp_approx_Values, temp_approx_RegressionConstantConsequent);

                    // Fit the new rule's consequent via recursive least squares.
                    double[] dC = null;
                    temp_rule.IndependentConstantConsequent = LSMWeghtReqursiveSimple.EvaluteConsiquent(
                        result, temp_rule.ListTermsInRule.ToList(), out dC);
                    temp_rule.RegressionConstantConsequent = (double[])dC.Clone();

                    kbToOptimize.RulesDatabase.Add(temp_rule);



                    // Refit the original left-side rule's consequent as well.
                    rulesLeft[j].IndependentConstantConsequent = LSMWeghtReqursiveSimple.EvaluteConsiquent(
                        result, rulesLeft[j].ListTermsInRule.ToList(), out dC);
                    rulesLeft[j].RegressionConstantConsequent = (double[])dC.Clone();
                }

                foreach (var rule in rulesRight)
                {
                    double[] dC = null;
                    rule.IndependentConstantConsequent = LSMWeghtReqursiveSimple.EvaluteConsiquent(
                        result, rule.ListTermsInRule.ToList(), out dC);
                    rule.RegressionConstantConsequent = dC;
                }

                // Register the new term in its variable space, keeping terms ordered.
                variable_spaces[indexVar].terms.Add(new_term);
                variable_spaces[indexVar].terms.Sort(new CompararerByPick());

                // Re-evaluate the system's error
                error = result.RMSEtoMSEdiv2forLearn(result.ErrorLearnSamples(kbToOptimize));

                if ((kbToOptimize.RulesDatabase.Count > config.RulesCount))
                {
                    break;
                }

#if Console
                Console.WriteLine(error + " " + kbToOptimize.TermsSet.Count + " terms\n");
                for (int i = 0; i < variable_spaces.Count; i++)
                {
                    Console.WriteLine(variable_spaces[i].terms.Count + " термов по " + i + "му параметру\n");
                }
#endif
                result.RulesDatabaseSet[0] = kbToOptimize;
                // Get the best knowledge base on the 1st place
                if (error < errorBefore)
                {
                    kbBest      = new KnowlegeBaseTSARules(kbToOptimize);
                    errorBefore = error;
                    tryCount    = 0;
                }
                else
                {
                    tryCount++;
                }
                if (tryCount > TryCount)
                {
                    break;
                }
            }


            // Publish the best base found and expose final counters via the fields.
            result.RulesDatabaseSet[0] = kbBest;
            RuleCount = kbBest.RulesDatabase.Count;
            TryCount  = tryCount;

            return(result);
        }