/// <summary>
/// Parse the learning data to a string.
/// </summary>
/// <param name="exp">Learning data.</param>
/// <returns>String representation of the learning data.</returns>
public static string dump(MachineLearning.Learning.Regression.Learning exp)
{
    // Serialize every learned model and its learning history into a
    // simple XML-like layout: <learning> -> <subset> -> <LearningRound>.
    StringBuilder buffer = new StringBuilder("<learning>\n");
    foreach (FeatureSubsetSelection subset in exp.models)
    {
        buffer.Append("<subset>");
        foreach (LearningRound learningRound in subset.LearningHistory)
        {
            buffer.Append("<LearningRound>\n")
                  .Append(learningRound.ToString())
                  .Append("</LearningRound>\n");
        }
        buffer.Append("</subset>");
    }
    buffer.Append("</learning>\n");
    // Occurrences of HEADER (file-level constant) are stripped from the result,
    // as in the original implementation.
    return buffer.ToString().Replace(HEADER, "");
}
/// <summary>
/// Performs the functionality of one command. If no functionality is found for the command, the command is returned by this method.
/// </summary>
/// <param name="line">One command with its parameters.</param>
/// <returns>Returns an empty string if the command could be performed by the method. If the command could not be performed by the method, the original command is returned.</returns>
public string performOneCommand(string line)
{
    GlobalState.logInfo.logLine(COMMAND + line);

    // remove comment part of the line (the comment starts with an #)
    line = line.Split(new Char[] { '#' }, 2)[0];
    if (line.Length == 0)
        return "";

    // split line in command and parameters of the command
    string[] components = line.Split(new Char[] { ' ' }, 2);
    string command = components[0];
    string task = "";
    if (components.Length > 1)
        task = components[1];
    string[] taskAsParameter = task.Split(new Char[] { ' ' });

    switch (command.ToLower())
    {
        case COMMAND_START_ALLMEASUREMENTS:
            {
                InfluenceModel infMod = new InfluenceModel(GlobalState.varModel, GlobalState.currentNFP);

                // learn only from configurations that have a measured value for the current NFP
                List<Configuration> configurations_Learning = new List<Configuration>();
                foreach (Configuration config in GlobalState.allMeasurements.Configurations)
                {
                    if (config.nfpValues.ContainsKey(GlobalState.currentNFP))
                        configurations_Learning.Add(config);
                }

                if (configurations_Learning.Count == 0)
                {
                    GlobalState.logInfo.logLine("The learning set is empty! Cannot start learning!");
                    break;
                }

                GlobalState.logInfo.logLine("Learning: " + "NumberOfConfigurationsLearning:" + configurations_Learning.Count);

                // prepare the machine learning; learning and validation set are identical here
                exp = new MachineLearning.Learning.Regression.Learning(configurations_Learning, configurations_Learning);
                exp.metaModel = infMod;
                exp.mLsettings = this.mlSettings;
                exp.learn();
            }
            break;

        case COMMAND_TRUEMODEL:
            {
                // FIX: reader is now disposed even if ReadLine throws (previously Close() without try/finally)
                String model;
                using (StreamReader readModel = new StreamReader(task))
                {
                    model = readModel.ReadLine().Trim();
                }
                this.trueModel = new InfluenceFunction(model.Replace(',', '.'), GlobalState.varModel);
                NFProperty artificalProp = new NFProperty("artificial");
                GlobalState.currentNFP = artificalProp;
                //computeEvaluationDataSetBasedOnTrueModel();
            }
            break;

        case COMMAND_SUBSCRIPT:
            {
                FileInfo fi = new FileInfo(task);
                if (!fi.Exists)
                    throw new FileNotFoundException(@"Automation script not found. ", fi.ToString());

                // FIX: the script reader was never closed; dispose it deterministically
                using (StreamReader reader = fi.OpenText())
                {
                    // execute the sub-script with a fresh Commands instance sharing this learning state
                    Commands co = new Commands();
                    co.exp = this.exp;
                    while (!reader.EndOfStream)
                    {
                        String oneLine = reader.ReadLine().Trim();
                        co.performOneCommand(oneLine);
                    }
                }
            }
            break;

        case COMMAND_EVALUATION_SET:
            {
                GlobalState.evalutionSet.Configurations = ConfigurationReader.readConfigurations(task, GlobalState.varModel);
                GlobalState.logInfo.logLine("Evaluation set loaded.");
            }
            break;

        case COMMAND_CLEAR_GLOBAL:
            SPLConqueror_Core.GlobalState.clear();
            toSample.Clear();
            toSampleValidation.Clear();
            break;

        case COMMAND_CLEAR_SAMPLING:
            exp.clearSampling();
            toSample.Clear();
            toSampleValidation.Clear();
            break;

        case COMMAND_CLEAR_LEARNING:
            exp.clear();
            toSample.Clear();
            toSampleValidation.Clear();
            break;

        case COMMAND_LOAD_CONFIGURATIONS:
            // merge newly read configurations with the ones already loaded
            GlobalState.allMeasurements.Configurations = (GlobalState.allMeasurements.Configurations.Union(ConfigurationReader.readConfigurations(task, GlobalState.varModel))).ToList();
            GlobalState.logInfo.logLine(GlobalState.allMeasurements.Configurations.Count + " configurations loaded.");
            break;

        case COMMAND_SAMPLE_ALLBINARY:
            {
                if (taskAsParameter.Contains(COMMAND_VALIDATION))
                {
                    this.toSampleValidation.Add(SamplingStrategies.ALLBINARY);
                    this.exp.info.binarySamplings_Validation = "ALLBINARY";
                }
                else
                {
                    this.toSample.Add(SamplingStrategies.ALLBINARY);
                    this.exp.info.binarySamplings_Learning = "ALLBINARY";
                }
                break;
            }

        case COMMAND_ANALYZE_LEARNING:
            {
                //TODO: Analyzation is not supported in the case of bagging
                GlobalState.logInfo.logLine("Models:");
                // FIX: the identical analysis loop was duplicated in both branches; extracted to logModelAnalysis
                if (this.mlSettings.bagging)
                {
                    for (int i = 0; i < this.exp.models.Count; i++)
                    {
                        FeatureSubsetSelection learnedModel = exp.models[i];
                        if (learnedModel == null)
                        {
                            GlobalState.logError.logLine("Error... learning was not performed!");
                            break;
                        }
                        logModelAnalysis(learnedModel);
                    }
                }
                else
                {
                    FeatureSubsetSelection learnedModel = exp.models[0];
                    if (learnedModel == null)
                    {
                        GlobalState.logError.logLine("Error... learning was not performed!");
                        break;
                    }
                    logModelAnalysis(learnedModel);
                }
                break;
            }

        case COMMAND_EXERIMENTALDESIGN:
            performOneCommand_ExpDesign(task);
            break;

        case COMMAND_SAMPLING_OPTIONORDER:
            parseOptionOrder(task);
            break;

        case COMMAND_VARIABILITYMODEL:
            GlobalState.varModel = VariabilityModel.loadFromXML(task);
            if (GlobalState.varModel == null)
                GlobalState.logError.logLine("No variability model found at " + task);
            break;

        case COMMAND_SET_NFP:
            GlobalState.currentNFP = GlobalState.getOrCreateProperty(task.Trim());
            break;

        case COMMAND_SAMPLE_OPTIONWISE:
            if (taskAsParameter.Contains(COMMAND_VALIDATION))
            {
                this.toSampleValidation.Add(SamplingStrategies.OPTIONWISE);
                // FIX: label previously read "OPTIONSWISE" (typo), inconsistent with the strategy name
                this.exp.info.binarySamplings_Validation = "OPTIONWISE";
            }
            else
            {
                this.toSample.Add(SamplingStrategies.OPTIONWISE);
                this.exp.info.binarySamplings_Learning = "OPTIONWISE";
            }
            break;

        case COMMAND_LOG:
            // redirect info and error logging to the given location
            string location = task.Trim();
            GlobalState.logInfo.close();
            GlobalState.logInfo = new InfoLogger(location);
            GlobalState.logError.close();
            GlobalState.logError = new ErrorLogger(location + "_error");
            break;

        case COMMAND_SET_MLSETTING:
            this.mlSettings = ML_Settings.readSettings(task);
            break;

        case COMMAND_LOAD_MLSETTINGS:
            this.mlSettings = ML_Settings.readSettingsFromFile(task);
            break;

        case COMMAND_SAMPLE_PAIRWISE:
            if (taskAsParameter.Contains(COMMAND_VALIDATION))
            {
                this.toSampleValidation.Add(SamplingStrategies.PAIRWISE);
                this.exp.info.binarySamplings_Validation = "PAIRWISE";
            }
            else
            {
                this.toSample.Add(SamplingStrategies.PAIRWISE);
                this.exp.info.binarySamplings_Learning = "PAIRWISE";
            }
            break;

        case COMMAND_PRINT_MLSETTINGS:
            GlobalState.logInfo.logLine(this.mlSettings.ToString());
            break;

        case COMMAND_PRINT_CONFIGURATIONS:
            {
                var configs = ConfigurationBuilder.buildConfigs(GlobalState.varModel, this.toSample);
                string[] para = task.Split(new char[] { ' ' });
                // TODO very error prone.. (assumes exactly three parameters in the task string)
                ConfigurationPrinter printer = new ConfigurationPrinter(para[0], para[1], para[2], GlobalState.optionOrder);
                printer.print(configs);
                break;
            }

        case COMMAND_SAMPLE_BINARY_RANDOM:
            {
                string[] para = task.Split(new char[] { ' ' });
                ConfigurationBuilder.binaryThreshold = Convert.ToInt32(para[0]);
                ConfigurationBuilder.binaryModulu = Convert.ToInt32(para[1]);
                // FIX: removed unused local 'VariantGenerator vg = new VariantGenerator(null);' (result was never used)
                if (taskAsParameter.Contains(COMMAND_VALIDATION))
                {
                    this.toSampleValidation.Add(SamplingStrategies.BINARY_RANDOM);
                    this.exp.info.binarySamplings_Validation = "BINARY_RANDOM";
                }
                else
                {
                    this.toSample.Add(SamplingStrategies.BINARY_RANDOM);
                    this.exp.info.binarySamplings_Learning = "BINARY_RANDOM " + task;
                }
                break;
            }

        case COMMAND_START_LEARNING:
            {
                InfluenceModel infMod = new InfluenceModel(GlobalState.varModel, GlobalState.currentNFP);
                List<Configuration> configurationsLearning = buildSet(this.toSample);
                List<Configuration> configurationsValidation = buildSet(this.toSampleValidation);

                // fall back to the other set when one of them is empty
                if (configurationsLearning.Count == 0)
                {
                    configurationsLearning = configurationsValidation;
                }
                if (configurationsLearning.Count == 0)
                {
                    GlobalState.logInfo.logLine("The learning set is empty! Cannot start learning!");
                    break;
                }
                if (configurationsValidation.Count == 0)
                {
                    configurationsValidation = configurationsLearning;
                }

                GlobalState.logInfo.logLine("Learning: " + "NumberOfConfigurationsLearning:" + configurationsLearning.Count + " NumberOfConfigurationsValidation:" + configurationsValidation.Count);
                //+ " UnionNumberOfConfigurations:" + (configurationsLearning.Union(configurationsValidation)).Count()); too costly to compute

                // We have to reuse the list of models because of NotifyCollectionChangedEventHandlers that might be attached to the list of models.
                exp.models.Clear();
                var mod = exp.models;
                exp = new MachineLearning.Learning.Regression.Learning(configurationsLearning, configurationsValidation);
                exp.models = mod;

                exp.metaModel = infMod;
                exp.mLsettings = this.mlSettings;
                exp.learn();
                GlobalState.logInfo.logLine("Average model: \n" + exp.metaModel.printModelAsFunction());

                // evaluate against the evaluation set when available, otherwise against all measurements
                double relativeError = 0;
                if (GlobalState.evalutionSet.Configurations.Count > 0)
                {
                    relativeError = FeatureSubsetSelection.computeError(exp.metaModel, GlobalState.evalutionSet.Configurations, ML_Settings.LossFunction.RELATIVE);
                }
                else
                {
                    relativeError = FeatureSubsetSelection.computeError(exp.metaModel, GlobalState.allMeasurements.Configurations, ML_Settings.LossFunction.RELATIVE);
                }
                GlobalState.logInfo.logLine("Error :" + relativeError);
            }
            break;

        case COMMAND_SAMPLE_NEGATIVE_OPTIONWISE:
            // TODO there are two different variants in generating NegFW configurations.
            if (taskAsParameter.Contains(COMMAND_VALIDATION))
            {
                this.toSampleValidation.Add(SamplingStrategies.NEGATIVE_OPTIONWISE);
                this.exp.info.binarySamplings_Validation = "NEGATIVE_OPTIONWISE";
            }
            else
            {
                this.toSample.Add(SamplingStrategies.NEGATIVE_OPTIONWISE);
                this.exp.info.binarySamplings_Learning = "NEGATIVE_OPTIONWISE";
            }
            break;

        default:
            // unknown command: hand it back to the caller unchanged
            return command;
    }
    return "";
}

/// <summary>
/// Logs the termination reason and, for each learning round of the given model, the round
/// together with its relative error. The error is computed on the evaluation set when it is
/// non-empty, otherwise on all measured configurations.
/// </summary>
/// <param name="learnedModel">A learned model (non-null) whose learning history is analyzed.</param>
private void logModelAnalysis(FeatureSubsetSelection learnedModel)
{
    GlobalState.logInfo.logLine("Termination reason: " + learnedModel.LearningHistory.Last().terminationReason);
    foreach (LearningRound lr in learnedModel.LearningHistory)
    {
        // FIX: the unused 'relativeErro2r' locals that captured the return value were removed;
        // only the 'out' parameter is consumed
        double relativeError = 0;
        if (GlobalState.evalutionSet.Configurations.Count > 0)
        {
            learnedModel.computeError(lr.FeatureSet, GlobalState.evalutionSet.Configurations, out relativeError);
        }
        else
        {
            learnedModel.computeError(lr.FeatureSet, GlobalState.allMeasurements.Configurations, out relativeError);
        }
        GlobalState.logInfo.logLine(lr.ToString() + relativeError);
    }
}