/// <summary>
/// Computes the influence of all configuration options based on the measurements of the given result db.
/// It uses linear programming (simplex) and is an exact algorithm.
/// </summary>
/// <param name="nfp">The non-functional property for which the influences of configuration options are to be computed. If null, we use the property of the global model.</param>
/// <param name="infModel">The influence model containing options and interactions. The state of the model will be changed by the result of the process.</param>
/// <param name="db">The result database containing the measurements.</param>
/// <returns>A map of binary options to their computed influences.</returns>
public Dictionary<BinaryOption, double> computeOptionInfluences(NFProperty nfp, InfluenceModel infModel, ResultDB db) {
    List<BinaryOption> variables = infModel.Vm.BinaryOptions;
    List<double> results = new List<double>();
    List<List<BinaryOption>> configurations = new List<List<BinaryOption>>();
    foreach (Configuration c in db.Configurations) {
        configurations.Add(c.getBinaryOptions(BinaryOption.BinaryValue.Selected));
        if (nfp != null)
            results.Add(c.GetNFPValue(nfp));
        else
            results.Add(c.GetNFPValue());
    }
    List<String> errorEqs = new List<string>();
    Dictionary<String, double> faultRates = new Dictionary<string, double>();
    List<int> indexOfErrorMeasurements = new List<int>();
    Dictionary<String, double> featureValuesAsStrings = solve(variables, results, configurations, infModel.InteractionInfluence.Keys.ToList());
    foreach (String current in featureValuesAsStrings.Keys) {
        BinaryOption temp = infModel.Vm.getBinaryOption(current);
        this.featureValues[temp] = featureValuesAsStrings[current];
        InfluenceFunction influence = new InfluenceFunction(temp.Name + " + " + featureValuesAsStrings[current].ToString(), infModel.Vm);
        if (infModel.BinaryOptionsInfluence.Keys.Contains(temp))
            infModel.BinaryOptionsInfluence[temp] = influence;
        else
            infModel.BinaryOptionsInfluence.Add(temp, influence);
    }
    return this.featureValues;
}
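// Minimal usage sketch (not part of the solver): shows how the LP entry point above might be driven.
// Assumes a variability model, measurements, and the current NFP have already been loaded into
// GlobalState; 'lpSolver' stands for any ISolverLP implementation and is a hypothetical parameter.
public void exampleComputeOptionInfluences(ISolverLP lpSolver) {
    InfluenceModel infModel = new InfluenceModel(GlobalState.varModel, GlobalState.currentNFP);
    Dictionary<BinaryOption, double> influences = lpSolver.computeOptionInfluences(GlobalState.currentNFP, infModel, GlobalState.allMeasurements);
    foreach (KeyValuePair<BinaryOption, double> entry in influences)
        GlobalState.logInfo.logLine(entry.Key.Name + ": " + entry.Value);
}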
public void clear() {
    this.nbBaggings = 0;
    this.mLsettings = new ML_Settings();
    this.metaModel = null;
    this.models.Clear();
    clearSampling();
}
/// <summary>
/// Clears the global state. This method should be used after performing all experiments of one case study.
/// </summary>
public static void clear() {
    varModel = null;
    currentNFP = null;
    allMeasurements = new ResultDB();
    evalutionSet = new ResultDB();
    infModel = null;
    nfProperties = new Dictionary<string, NFProperty>();
    optionOrder = new List<ConfigurationOption>();
}
/// <summary>
/// This method searches for a corresponding method in the dynamically loaded assemblies and calls it if found.
/// For performance reasons, it prefers the Microsoft Solver Foundation implementation.
/// </summary>
/// <param name="nfp">The non-functional property for which the influences of configuration options are to be computed. If null, we use the property of the global model.</param>
/// <param name="infModel">The influence model containing options and interactions. The state of the model will be changed by the result of the process.</param>
/// <param name="db">The result database containing the measurements.</param>
/// <returns>A map of binary options to their computed influences.</returns>
public Dictionary<BinaryOption, double> computeOptionInfluences(NFProperty nfp, InfluenceModel infModel, ResultDB db) {
    foreach (Lazy<ISolverLP, ISolverType> solver in solvers) {
        if (solver.Metadata.SolverType.Equals("MSSolverFoundation"))
            return solver.Value.computeOptionInfluences(nfp, infModel, db);
    }
    // If the MS solver is not available, take any solver. Should be changed when supporting more than two solvers here.
    foreach (Lazy<ISolverLP, ISolverType> solver in solvers) {
        return solver.Value.computeOptionInfluences(nfp, infModel, db);
    }
    return null;
}
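// The fallback loop above returns on its first iteration, so the preference logic can be expressed
// more compactly with LINQ. A sketch with the same behavior, assuming 'solvers' is the imported
// solver list used above (System.Linq required for FirstOrDefault):
public Dictionary<BinaryOption, double> computeOptionInfluences_linqSketch(NFProperty nfp, InfluenceModel infModel, ResultDB db) {
    Lazy<ISolverLP, ISolverType> chosen =
        solvers.FirstOrDefault(s => s.Metadata.SolverType.Equals("MSSolverFoundation"))
        ?? solvers.FirstOrDefault();
    return chosen == null ? null : chosen.Value.computeOptionInfluences(nfp, infModel, db);
}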
/// <summary>
/// Performs the functionality of one command. If no functionality is found for the command, the command is returned by this method.
/// </summary>
/// <param name="line">One command with its parameters.</param>
/// <returns>Returns an empty string if the command could be performed by the method. If the command could not be performed by the method, the original command is returned.</returns>
public string performOneCommand(string line) {
    GlobalState.logInfo.logLine(COMMAND + line);
    // remove the comment part of the line (the comment starts with a '#')
    line = line.Split(new Char[] { '#' }, 2)[0];
    if (line.Length == 0)
        return "";
    // split the line into the command and the parameters of the command
    string[] components = line.Split(new Char[] { ' ' }, 2);
    string command = components[0];
    string task = "";
    if (components.Length > 1)
        task = components[1];
    string[] taskAsParameter = task.Split(new Char[] { ' ' });
    switch (command.ToLower()) {
        case COMMAND_START_ALLMEASUREMENTS: {
                InfluenceModel infMod = new InfluenceModel(GlobalState.varModel, GlobalState.currentNFP);
                List<Configuration> configurations_Learning = new List<Configuration>();
                foreach (Configuration config in GlobalState.allMeasurements.Configurations) {
                    if (config.nfpValues.ContainsKey(GlobalState.currentNFP))
                        configurations_Learning.Add(config);
                }
                if (configurations_Learning.Count == 0) {
                    GlobalState.logInfo.logLine("The learning set is empty! Cannot start learning!");
                    break;
                }
                GlobalState.logInfo.logLine("Learning: " + "NumberOfConfigurationsLearning:" + configurations_Learning.Count);
                // prepare the machine learning
                exp = new MachineLearning.Learning.Regression.Learning(configurations_Learning, configurations_Learning);
                exp.metaModel = infMod;
                exp.mLsettings = this.mlSettings;
                exp.learn();
            }
            break;
        case COMMAND_TRUEMODEL:
            StreamReader readModel = new StreamReader(task);
            String model = readModel.ReadLine().Trim();
            readModel.Close();
            this.trueModel = new InfluenceFunction(model.Replace(',', '.'), GlobalState.varModel);
            NFProperty artificalProp = new NFProperty("artificial");
            GlobalState.currentNFP = artificalProp;
            //computeEvaluationDataSetBasedOnTrueModel();
            break;
        case COMMAND_SUBSCRIPT: {
                FileInfo fi = new FileInfo(task);
                StreamReader reader = null;
                if (!fi.Exists)
                    throw new FileNotFoundException("Automation script not found.", fi.ToString());
                reader = fi.OpenText();
                Commands co = new Commands();
                co.exp = this.exp;
                while (!reader.EndOfStream) {
                    String oneLine = reader.ReadLine().Trim();
                    co.performOneCommand(oneLine);
                }
            }
            break;
        case COMMAND_EVALUATION_SET: {
                GlobalState.evalutionSet.Configurations = ConfigurationReader.readConfigurations(task, GlobalState.varModel);
                GlobalState.logInfo.logLine("Evaluation set loaded.");
            }
            break;
        case COMMAND_CLEAR_GLOBAL:
            SPLConqueror_Core.GlobalState.clear();
            toSample.Clear();
            toSampleValidation.Clear();
            break;
        case COMMAND_CLEAR_SAMPLING:
            exp.clearSampling();
            toSample.Clear();
            toSampleValidation.Clear();
            break;
        case COMMAND_CLEAR_LEARNING:
            exp.clear();
            toSample.Clear();
            toSampleValidation.Clear();
            break;
        case COMMAND_LOAD_CONFIGURATIONS:
            GlobalState.allMeasurements.Configurations = (GlobalState.allMeasurements.Configurations.Union(ConfigurationReader.readConfigurations(task, GlobalState.varModel))).ToList();
            GlobalState.logInfo.logLine(GlobalState.allMeasurements.Configurations.Count + " configurations loaded.");
            break;
        case COMMAND_SAMPLE_ALLBINARY: {
                if (taskAsParameter.Contains(COMMAND_VALIDATION)) {
                    this.toSampleValidation.Add(SamplingStrategies.ALLBINARY);
                    this.exp.info.binarySamplings_Validation = "ALLBINARY";
                } else {
                    this.toSample.Add(SamplingStrategies.ALLBINARY);
                    this.exp.info.binarySamplings_Learning = "ALLBINARY";
                }
                break;
            }
        case COMMAND_ANALYZE_LEARNING: {
                // TODO: Analysis is not supported in the case of bagging.
                GlobalState.logInfo.logLine("Models:");
                if (this.mlSettings.bagging) {
                    for (int i = 0; i < this.exp.models.Count; i++) {
                        FeatureSubsetSelection learnedModel = exp.models[i];
                        if (learnedModel == null) {
                            GlobalState.logError.logLine("Error... learning was not performed!");
                            break;
                        }
                        GlobalState.logInfo.logLine("Termination reason: " + learnedModel.LearningHistory.Last().terminationReason);
                        foreach (LearningRound lr in learnedModel.LearningHistory) {
                            double relativeError = 0;
                            if (GlobalState.evalutionSet.Configurations.Count > 0) {
                                learnedModel.computeError(lr.FeatureSet, GlobalState.evalutionSet.Configurations, out relativeError);
                            } else {
                                learnedModel.computeError(lr.FeatureSet, GlobalState.allMeasurements.Configurations, out relativeError);
                            }
                            GlobalState.logInfo.logLine(lr.ToString() + relativeError);
                        }
                    }
                } else {
                    FeatureSubsetSelection learnedModel = exp.models[0];
                    if (learnedModel == null) {
                        GlobalState.logError.logLine("Error... learning was not performed!");
                        break;
                    }
                    GlobalState.logInfo.logLine("Termination reason: " + learnedModel.LearningHistory.Last().terminationReason);
                    foreach (LearningRound lr in learnedModel.LearningHistory) {
                        double relativeError = 0;
                        if (GlobalState.evalutionSet.Configurations.Count > 0) {
                            learnedModel.computeError(lr.FeatureSet, GlobalState.evalutionSet.Configurations, out relativeError);
                        } else {
                            learnedModel.computeError(lr.FeatureSet, GlobalState.allMeasurements.Configurations, out relativeError);
                        }
                        GlobalState.logInfo.logLine(lr.ToString() + relativeError);
                    }
                }
                break;
            }
        case COMMAND_EXERIMENTALDESIGN:
            performOneCommand_ExpDesign(task);
            break;
        case COMMAND_SAMPLING_OPTIONORDER:
            parseOptionOrder(task);
            break;
        case COMMAND_VARIABILITYMODEL:
            GlobalState.varModel = VariabilityModel.loadFromXML(task);
            if (GlobalState.varModel == null)
                GlobalState.logError.logLine("No variability model found at " + task);
            break;
        case COMMAND_SET_NFP:
            GlobalState.currentNFP = GlobalState.getOrCreateProperty(task.Trim());
            break;
        case COMMAND_SAMPLE_OPTIONWISE:
            if (taskAsParameter.Contains(COMMAND_VALIDATION)) {
                this.toSampleValidation.Add(SamplingStrategies.OPTIONWISE);
                this.exp.info.binarySamplings_Validation = "OPTIONSWISE";
            } else {
                this.toSample.Add(SamplingStrategies.OPTIONWISE);
                this.exp.info.binarySamplings_Learning = "OPTIONSWISE";
            }
            break;
        case COMMAND_LOG:
            string location = task.Trim();
            GlobalState.logInfo.close();
            GlobalState.logInfo = new InfoLogger(location);
            GlobalState.logError.close();
            GlobalState.logError = new ErrorLogger(location + "_error");
            break;
        case COMMAND_SET_MLSETTING:
            this.mlSettings = ML_Settings.readSettings(task);
            break;
        case COMMAND_LOAD_MLSETTINGS:
            this.mlSettings = ML_Settings.readSettingsFromFile(task);
            break;
        case COMMAND_SAMPLE_PAIRWISE:
            if (taskAsParameter.Contains(COMMAND_VALIDATION)) {
                this.toSampleValidation.Add(SamplingStrategies.PAIRWISE);
                this.exp.info.binarySamplings_Validation = "PAIRWISE";
            } else {
                this.toSample.Add(SamplingStrategies.PAIRWISE);
                this.exp.info.binarySamplings_Learning = "PAIRWISE";
            }
            break;
        case COMMAND_PRINT_MLSETTINGS:
            GlobalState.logInfo.logLine(this.mlSettings.ToString());
            break;
        case COMMAND_PRINT_CONFIGURATIONS: {
                /* List<Dictionary<NumericOption, double>> numericSampling = exp.NumericSelection_Learning;
                   List<List<BinaryOption>> binarySampling = exp.BinarySelections_Learning;
                   List<Configuration> configurations = new List<Configuration>();
                   foreach (Dictionary<NumericOption, double> numeric in numericSampling) {
                       foreach (List<BinaryOption> binary in binarySampling) {
                           Configuration config = Configuration.getConfiguration(binary, numeric);
                           if (!configurations.Contains(config) && GlobalState.varModel.configurationIsValid(config)) {
                               configurations.Add(config);
                           }
                       }
                   } */
                var configs = ConfigurationBuilder.buildConfigs(GlobalState.varModel, this.toSample);
                string[] para = task.Split(new char[] { ' ' });
                // TODO very error prone..
                ConfigurationPrinter printer = new ConfigurationPrinter(para[0], para[1], para[2], GlobalState.optionOrder);
                printer.print(configs);
                break;
            }
        case COMMAND_SAMPLE_BINARY_RANDOM: {
                string[] para = task.Split(new char[] { ' ' });
                ConfigurationBuilder.binaryThreshold = Convert.ToInt32(para[0]);
                ConfigurationBuilder.binaryModulu = Convert.ToInt32(para[1]);
                VariantGenerator vg = new VariantGenerator(null);
                if (taskAsParameter.Contains(COMMAND_VALIDATION)) {
                    this.toSampleValidation.Add(SamplingStrategies.BINARY_RANDOM);
                    this.exp.info.binarySamplings_Validation = "BINARY_RANDOM";
                } else {
                    this.toSample.Add(SamplingStrategies.BINARY_RANDOM);
                    this.exp.info.binarySamplings_Learning = "BINARY_RANDOM " + task;
                }
                break;
            }
        case COMMAND_START_LEARNING: {
                InfluenceModel infMod = new InfluenceModel(GlobalState.varModel, GlobalState.currentNFP);
                List<Configuration> configurationsLearning = buildSet(this.toSample);
                List<Configuration> configurationsValidation = buildSet(this.toSampleValidation);
                if (configurationsLearning.Count == 0) {
                    configurationsLearning = configurationsValidation;
                }
                if (configurationsLearning.Count == 0) {
                    GlobalState.logInfo.logLine("The learning set is empty! Cannot start learning!");
                    break;
                }
                if (configurationsValidation.Count == 0) {
                    configurationsValidation = configurationsLearning;
                }
                GlobalState.logInfo.logLine("Learning: " + "NumberOfConfigurationsLearning:" + configurationsLearning.Count + " NumberOfConfigurationsValidation:" + configurationsValidation.Count);
                //+ " UnionNumberOfConfigurations:" + (configurationsLearning.Union(configurationsValidation)).Count()); too costly to compute
                // We have to reuse the list of models because of NotifyCollectionChangedEventHandlers that might be attached to the list of models.
                exp.models.Clear();
                var mod = exp.models;
                exp = new MachineLearning.Learning.Regression.Learning(configurationsLearning, configurationsValidation);
                exp.models = mod;
                exp.metaModel = infMod;
                exp.mLsettings = this.mlSettings;
                exp.learn();
                GlobalState.logInfo.logLine("Average model: \n" + exp.metaModel.printModelAsFunction());
                double relativeError = 0;
                if (GlobalState.evalutionSet.Configurations.Count > 0) {
                    relativeError = FeatureSubsetSelection.computeError(exp.metaModel, GlobalState.evalutionSet.Configurations, ML_Settings.LossFunction.RELATIVE);
                } else {
                    relativeError = FeatureSubsetSelection.computeError(exp.metaModel, GlobalState.allMeasurements.Configurations, ML_Settings.LossFunction.RELATIVE);
                }
                GlobalState.logInfo.logLine("Error :" + relativeError);
            }
            break;
        case COMMAND_SAMPLE_NEGATIVE_OPTIONWISE:
            // TODO there are two different variants in generating NegFW configurations.
            if (taskAsParameter.Contains(COMMAND_VALIDATION)) {
                this.toSampleValidation.Add(SamplingStrategies.NEGATIVE_OPTIONWISE);
                this.exp.info.binarySamplings_Validation = "NEGATIVE_OPTIONWISE";
            } else {
                this.toSample.Add(SamplingStrategies.NEGATIVE_OPTIONWISE);
                this.exp.info.binarySamplings_Learning = "NEGATIVE_OPTIONWISE";
            }
            break;
        default:
            return command;
    }
    return "";
}
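// Illustration of the command parsing at the top of performOneCommand: everything after '#' is a
// comment, the first token is the command (matched case-insensitively), and the remainder is the
// parameter string handed to the selected case. The input line here is a made-up example.
public void exampleCommandParsing() {
    string line = "someCommand arg1 arg2 # trailing comment";
    line = line.Split(new Char[] { '#' }, 2)[0];              // "someCommand arg1 arg2 "
    string[] components = line.Split(new Char[] { ' ' }, 2);  // { "someCommand", "arg1 arg2 " }
    string command = components[0];
    string task = components.Length > 1 ? components[1] : "";
    Console.WriteLine(command + " | " + task);
}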
/// <summary>
/// This method searches for a corresponding method in the dynamically loaded assemblies and calls it if found.
/// For performance reasons, it prefers the Microsoft Solver Foundation implementation.
/// </summary>
/// <param name="nfp">The non-functional property for which the influences of configuration options are to be computed. If null, we use the property of the global model.</param>
/// <param name="infModel">The influence model containing the variability model, all configuration options, and interactions.</param>
/// <param name="db">The result database containing the measurements.</param>
/// <param name="evaluateFeatureInteractionsOnly">Only interactions are learned.</param>
/// <param name="withDeviation">(Not used) We can specify whether learned influences must be greater than a certain value (e.g., greater than the measurement bias).</param>
/// <param name="deviation">(Not used) We can specify whether learned influences must be greater than a certain value (e.g., greater than the measurement bias).</param>
/// <returns>Returns the learned influences of each option in a map where the String key is the name of the option / interaction.</returns>
public Dictionary<string, double> computeOptionInfluences(NFProperty nfp, InfluenceModel infModel, ResultDB db, bool evaluateFeatureInteractionsOnly, bool withDeviation, double deviation) {
    foreach (Lazy<ISolverLP, ISolverType> solver in solvers) {
        if (solver.Metadata.SolverType.Equals("MSSolverFoundation"))
            return solver.Value.computeOptionInfluences(nfp, infModel, db, evaluateFeatureInteractionsOnly, withDeviation, deviation);
    }
    // If the MS solver is not available, take any solver. Should be changed when supporting more than two solvers here.
    foreach (Lazy<ISolverLP, ISolverType> solver in solvers) {
        return solver.Value.computeOptionInfluences(nfp, infModel, db, evaluateFeatureInteractionsOnly, withDeviation, deviation);
    }
    return null;
}
public void learn() {
    if (!hasNecessaryData())
        return;
    if (this.mLsettings.bagging) {
        // Determine the number of physical cores (Windows-only WMI query).
        int coreCount = 0;
        foreach (var item in new System.Management.ManagementObjectSearcher("Select NumberOfCores from Win32_Processor").Get()) {
            coreCount += int.Parse(item["NumberOfCores"].ToString());
        }
        createThreadPool(coreCount);
        this.nbBaggings = this.mLsettings.baggingNumbers;
        iCount = this.nbBaggings;
        Random rand = new Random();
        int nbOfConfigs = (testSet.Count * this.mLsettings.baggingTestDataFraction) / 100;
        for (int i = 0; i < nbBaggings; i++) {
            InfluenceModel infMod = new InfluenceModel(GlobalState.varModel, GlobalState.currentNFP);
            FeatureSubsetSelection sel = new FeatureSubsetSelection(infMod, this.mLsettings);
            this.models.Add(sel);
            // Draw a random selection of configuration indices for this bagging run.
            List<int> selection = new List<int>();
            for (int r = 0; r <= nbOfConfigs; r++) {
                selection.Add(rand.Next(nbOfConfigs));
            }
            // Selected configurations form the learning set; the remaining ones form the validation set.
            List<Configuration> newTestSet = new List<Configuration>();
            List<Configuration> newValidationSet = new List<Configuration>();
            for (int r = 0; r < testSet.Count; r++) {
                if (selection.Contains(r))
                    newTestSet.Add(testSet[r]);
                else
                    newValidationSet.Add(testSet[r]);
            }
            sel.setLearningSet(newTestSet);
            sel.setValidationSet(newValidationSet);
            Task task = EnqueueTask(() => sel.learn());
        }
        eventX.WaitOne(Timeout.Infinite, true);
        averageModels();
    } else {
        InfluenceModel infMod = new InfluenceModel(GlobalState.varModel, GlobalState.currentNFP);
        FeatureSubsetSelection sel = new FeatureSubsetSelection(infMod, this.mLsettings);
        this.models.Add(sel);
        sel.setLearningSet(testSet);
        sel.setValidationSet(this.validationSet);
        Stopwatch sw = new Stopwatch();
        sw.Start();
        sel.learn();
        sw.Stop();
        Console.WriteLine("Elapsed={0}", sw.Elapsed);
    }
}
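// The WMI query above ("Select NumberOfCores from Win32_Processor") requires System.Management and
// only works on Windows. A portable alternative sketch that sizes the thread pool by the number of
// logical processors instead of physical cores:
public int getWorkerCountPortable() {
    return Environment.ProcessorCount; // logical processors; a reasonable upper bound for the pool size
}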
/// <summary>
/// Adds the constants of the given influence model's features to the corresponding features of the meta-model,
/// creating entries that do not exist yet.
/// </summary>
private void updateInfluenceModel(InfluenceModel influenceModel) {
    foreach (BinaryOption bin in influenceModel.BinaryOptionsInfluence.Keys) {
        if (this.metaModel.BinaryOptionsInfluence.Keys.Contains(bin)) {
            ((Feature)this.metaModel.BinaryOptionsInfluence[bin]).Constant += ((Feature)influenceModel.BinaryOptionsInfluence[bin]).Constant;
        } else {
            this.metaModel.BinaryOptionsInfluence.Add(bin, ((Feature)influenceModel.BinaryOptionsInfluence[bin]));
        }
    }
    foreach (NumericOption num in influenceModel.NumericOptionsInfluence.Keys) {
        if (this.metaModel.NumericOptionsInfluence.Keys.Contains(num)) {
            ((Feature)this.metaModel.NumericOptionsInfluence[num]).Constant += ((Feature)influenceModel.NumericOptionsInfluence[num]).Constant;
        } else {
            this.metaModel.NumericOptionsInfluence.Add(num, ((Feature)influenceModel.NumericOptionsInfluence[num]));
        }
    }
    foreach (Interaction interact in influenceModel.InteractionInfluence.Keys) {
        if (this.metaModel.InteractionInfluence.Keys.Contains(interact)) {
            ((Feature)this.metaModel.InteractionInfluence[interact]).Constant += ((Feature)influenceModel.InteractionInfluence[interact]).Constant;
        } else {
            this.metaModel.InteractionInfluence.Add(interact, ((Feature)influenceModel.InteractionInfluence[interact]));
        }
    }
}
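// updateInfluenceModel only accumulates constants; presumably averageModels (called at the end of
// learn()) turns the sums into means. A hedged sketch of such a final averaging step, assuming the
// constants were summed over nbBaggings runs and that Feature exposes a writable Constant:
private void averageModelsSketch() {
    foreach (BinaryOption bin in this.metaModel.BinaryOptionsInfluence.Keys)
        ((Feature)this.metaModel.BinaryOptionsInfluence[bin]).Constant /= this.nbBaggings;
    foreach (NumericOption num in this.metaModel.NumericOptionsInfluence.Keys)
        ((Feature)this.metaModel.NumericOptionsInfluence[num]).Constant /= this.nbBaggings;
    foreach (Interaction interact in this.metaModel.InteractionInfluence.Keys)
        ((Feature)this.metaModel.InteractionInfluence[interact]).Constant /= this.nbBaggings;
}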
/// <summary>
/// Computes the influence of all configuration options and interactions based on the measurements of the given result db.
/// It uses linear programming (simplex) and is an exact algorithm.
/// </summary>
/// <param name="nfp">The non-functional property for which the influences of configuration options are to be computed. If null, we use the property of the global model.</param>
/// <param name="infModel">The influence model containing the variability model, all configuration options, and interactions.</param>
/// <param name="db">The result database containing the measurements.</param>
/// <param name="evaluateFeatureInteractionsOnly">Only interactions are learned.</param>
/// <param name="withDeviation">(Not used) We can specify whether learned influences must be greater than a certain value (e.g., greater than the measurement bias).</param>
/// <param name="deviation">(Not used) We can specify whether learned influences must be greater than a certain value (e.g., greater than the measurement bias).</param>
/// <returns>Returns the learned influences of each option in a map where the String key is the name of the option / interaction.</returns>
public Dictionary<String, double> computeOptionInfluences(NFProperty nfp, InfluenceModel infModel, ResultDB db, bool evaluateFeatureInteractionsOnly, bool withDeviation, double deviation) {
    // Initialization
    List<List<BinaryOption>> configurations = new List<List<BinaryOption>>();
    this.evaluateInteractionsOnly = evaluateFeatureInteractionsOnly;
    this.withStandardDeviation = withDeviation;
    this.standardDeviation = deviation;
    List<double> results = new List<double>();
    foreach (Configuration c in db.Configurations) {
        configurations.Add(c.getBinaryOptions(BinaryOption.BinaryValue.Selected));
        if (nfp != null)
            results.Add(c.GetNFPValue(nfp));
        else
            results.Add(c.GetNFPValue());
    }
    List<BinaryOption> variables = new List<BinaryOption>();
    Dictionary<String, double> featureValues = new Dictionary<string, double>();
    Dictionary<String, double> faultRates = new Dictionary<string, double>();
    List<int> indexOfErrorMeasurements = new List<int>();
    if (configurations.Count == 0)
        return null;
    // For the case that there is an empty base
    if (configurations.Count != 0) {
        if (configurations[0].Count == 0) {
            // Should never occur that we get a configuration with no option selected... at least the root must be there.
            BinaryOption root = infModel.Vm.Root;
            //Element baseElement = new Element("base_gen", infModel.getID(), infModel);
            //variables.Add(baseElement);
            //featureValues.Add(baseElement.getName(), 0);
            foreach (List<BinaryOption> config in configurations)
                if (!config.Contains(root))
                    config.Insert(0, root);
        }
    }
    // Building the variable list
    foreach (var elem in infModel.Vm.BinaryOptions) {
        variables.Add(elem);
        featureValues.Add(elem.Name, 0);
    }
    // First run
    featureValues = solve(variables, results, configurations, null);
    //if (evaluateFeatureInteractionsOnly == false) return featureValues;
    /* We might have some interactions here and cannot compute all values.
       1. identify options that are only present in these equations
       Dictionary<Element, int> featureCounter = new Dictionary<Element, int>();
       for (int i = 0; i < indexOfErrorMeasurements.Count; i++) { } */
    /* TODO: compute interactions from the deviations / errors of the LP results
       if (errorEqs != null) {
           foreach (string eq in errorEqs) {
               double value = Double.Parse(eq.Substring(eq.IndexOf("==") + 2));
               StringBuilder sb = new StringBuilder();
               List<Element> derivativeParents = new List<Element>();
               sb.Append("derivate_");
               string[] splittedEQ = eq.Split('+');
               foreach (string element in splittedEQ) {
                   string name = element;
                   if (name.Contains("==")) name = name.Substring(0, name.IndexOf("=="));
                   if (name.Contains("yp") && name.Contains("-yn")) continue;
                   // string featureName = name.Substring(0, name.IndexOf("_p-"));
                   Element elem = infModel.getElementByNameUnsafe(name);
                   if (elem == null) continue;
                   sb.Append("_" + name);
                   derivativeParents.Add(elem);
               }
               Element interaction = new Element(sb.ToString(), infModel.getID(), infModel);
               interaction.setType("derivative");
               interaction.addDerivativeParents(derivativeParents);
               infModel.addElement(interaction);
               this.featureValues.Add(interaction, value);
           }
       } */
    return featureValues;
}
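// Background on the constraint system solved above (a hedged description, not the solver's code):
// every measured configuration c with measurement y_c contributes one equation over the option
// influences, typically with slack variables for measurement error (the "yp"/"yn" terms in the
// commented-out parser), e.g.  sum(influence_o for o selected in c) + yp_c - yn_c == y_c,
// while the simplex minimizes the total slack. A tiny illustration of how one configuration maps
// to such a row; the row representation and variable names here are made up for illustration:
public Dictionary<string, double> exampleLpRow(List<BinaryOption> selectedOptions) {
    Dictionary<string, double> row = new Dictionary<string, double>();
    foreach (BinaryOption o in selectedOptions)
        row[o.Name] = 1.0;   // coefficient 1 for every selected option
    row["yp"] = 1.0;         // positive deviation (slack)
    row["yn"] = -1.0;        // negative deviation (slack)
    return row;              // the right-hand side of the equation is the measured NFP value
}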
/// <summary>
/// Performs the functionality of one command. If no functionality is found for the command, the command is returned by this method.
/// </summary>
/// <param name="line">One command with its parameters.</param>
/// <returns>Returns an empty string if the command could be performed by the method. If the command could not be performed by the method, the original command is returned.</returns>
public string performOneCommand(string line) {
    GlobalState.logInfo.log(COMMAND + line);
    // remove the comment part of the line (the comment starts with a '#')
    line = line.Split(new Char[] { '#' }, 2)[0];
    if (line.Length == 0)
        return "";
    // split the line into the command and the parameters of the command
    string[] components = line.Split(new Char[] { ' ' }, 2);
    string command = components[0];
    string task = "";
    if (components.Length > 1)
        task = components[1];
    string[] taskAsParameter = task.Split(new Char[] { ' ' });
    switch (command.ToLower()) {
        case COMMAND_TRUEMODEL:
            StreamReader readModel = new StreamReader(task);
            String model = readModel.ReadLine().Trim();
            readModel.Close();
            exp.TrueModel = new InfluenceFunction(model.Replace(',', '.'), GlobalState.varModel);
            NFProperty artificalProp = new NFProperty("artificial");
            GlobalState.currentNFP = artificalProp;
            computeEvaluationDataSetBasedOnTrueModel();
            break;
        case COMMAND_SUBSCRIPT: {
                FileInfo fi = new FileInfo(task);
                StreamReader reader = null;
                if (!fi.Exists)
                    throw new FileNotFoundException("Automation script not found.", fi.ToString());
                reader = fi.OpenText();
                Commands co = new Commands();
                co.exp = this.exp;
                while (!reader.EndOfStream) {
                    String oneLine = reader.ReadLine().Trim();
                    co.performOneCommand(oneLine);
                }
            }
            break;
        case COMMAND_EVALUATION_SET: {
                GlobalState.evalutionSet.Configurations = ConfigurationReader.readConfigurations(task, GlobalState.varModel);
                GlobalState.logInfo.log("Evaluation set loaded.");
            }
            break;
        case COMMAND_CLEAR_GLOBAL:
            SPLConqueror_Core.GlobalState.clear();
            break;
        case COMMAND_CLEAR_SAMPLING:
            exp.clearSampling();
            break;
        case COMMAND_CLEAR_LEARNING:
            exp.clear();
            break;
        case COMMAND_LOAD_CONFIGURATIONS:
            GlobalState.allMeasurements.Configurations = (GlobalState.allMeasurements.Configurations.Union(ConfigurationReader.readConfigurations(task, GlobalState.varModel))).ToList();
            GlobalState.logInfo.log(GlobalState.allMeasurements.Configurations.Count + " configurations loaded.");
            break;
        case COMMAND_SAMPLE_ALLBINARY: {
                VariantGenerator vg = new VariantGenerator(null);
                if (taskAsParameter.Contains(COMMAND_VALIDATION)) {
                    exp.addBinarySelection_Validation(vg.generateAllVariantsFast(GlobalState.varModel));
                    exp.addBinarySampling_Validation(COMMAND_SAMPLE_ALLBINARY);
                } else {
                    exp.addBinarySelection_Learning(vg.generateAllVariantsFast(GlobalState.varModel));
                    exp.addBinarySampling_Learning(COMMAND_SAMPLE_ALLBINARY);
                }
                break;
            }
        case COMMAND_ANALYZE_LEARNING: {
                GlobalState.logInfo.log("Models:");
                FeatureSubsetSelection learning = exp.learning;
                if (learning == null) {
                    GlobalState.logError.log("Error... learning was not performed!");
                    break;
                }
                foreach (LearningRound lr in learning.LearningHistory) {
                    double relativeError = 0;
                    if (GlobalState.evalutionSet.Configurations.Count > 0) {
                        exp.learning.computeError(lr.FeatureSet, GlobalState.evalutionSet.Configurations, out relativeError);
                    } else {
                        exp.learning.computeError(lr.FeatureSet, GlobalState.allMeasurements.Configurations, out relativeError);
                    }
                    GlobalState.logInfo.log(lr.ToString() + relativeError);
                }
                break;
            }
        case COMMAND_EXERIMENTALDESIGN:
            performOneCommand_ExpDesign(task);
            break;
        case COMMAND_SAMPLING_OPTIONORDER:
            parseOptionOrder(task);
            break;
        case COMMAND_VARIABILITYMODEL:
            GlobalState.varModel = VariabilityModel.loadFromXML(task);
            if (GlobalState.varModel == null)
                GlobalState.logError.log("No variability model found at " + task);
            break;
        case COMMAND_SET_NFP:
            GlobalState.currentNFP = GlobalState.getOrCreateProperty(task.Trim());
            break;
        case COMMAND_SAMPLE_OPTIONWISE:
            FeatureWise fw = new FeatureWise();
            if (taskAsParameter.Contains(COMMAND_VALIDATION)) {
                exp.addBinarySelection_Validation(fw.generateFeatureWiseConfigsCSP(GlobalState.varModel));
                exp.addBinarySampling_Validation("FW");
            } else {
                //exp.addBinarySelection_Learning(fw.generateFeatureWiseConfigsCSP(GlobalState.varModel));
                exp.addBinarySelection_Learning(fw.generateFeatureWiseConfigurations(GlobalState.varModel));
                exp.addBinarySampling_Learning("FW");
            }
            break;
        case COMMAND_LOG:
            string location = task.Trim();
            GlobalState.logInfo.close();
            GlobalState.logInfo = new InfoLogger(location);
            GlobalState.logError.close();
            GlobalState.logError = new ErrorLogger(location + "_error");
            break;
        case COMMAND_SET_MLSETTING:
            exp.mlSettings = ML_Settings.readSettings(task);
            break;
        case COMMAND_LOAD_MLSETTINGS:
            exp.mlSettings = ML_Settings.readSettingsFromFile(task);
            break;
        case COMMAND_SAMPLE_PAIRWISE:
            PairWise pw = new PairWise();
            if (taskAsParameter.Contains(COMMAND_VALIDATION)) {
                exp.addBinarySelection_Validation(pw.generatePairWiseVariants(GlobalState.varModel));
                exp.addBinarySampling_Validation("PW");
            } else {
                exp.addBinarySelection_Learning(pw.generatePairWiseVariants(GlobalState.varModel));
                exp.addBinarySampling_Learning("PW");
            }
            break;
        case COMMAND_PRINT_MLSETTINGS:
            GlobalState.logInfo.log(exp.mlSettings.ToString());
            break;
        case COMMAND_PRINT_CONFIGURATIONS: {
                List<Dictionary<NumericOption, double>> numericSampling = exp.NumericSelection_Learning;
                List<List<BinaryOption>> binarySampling = exp.BinarySelections_Learning;
                List<Configuration> configurations = new List<Configuration>();
                foreach (Dictionary<NumericOption, double> numeric in numericSampling) {
                    foreach (List<BinaryOption> binary in binarySampling) {
                        Configuration config = Configuration.getConfiguration(binary, numeric);
                        if (!configurations.Contains(config) && GlobalState.varModel.configurationIsValid(config)) {
                            configurations.Add(config);
                        }
                    }
                }
                string[] para = task.Split(new char[] { ' ' });
                // TODO very error prone..
                ConfigurationPrinter printer = new ConfigurationPrinter(para[0], para[1], para[2], GlobalState.optionOrder);
                printer.print(configurations);
                break;
            }
        case COMMAND_SAMPLE_BINARY_RANDOM: {
                string[] para = task.Split(new char[] { ' ' });
                int treshold = Convert.ToInt32(para[0]);
                int modulu = Convert.ToInt32(para[1]);
                VariantGenerator vg = new VariantGenerator(null);
                if (taskAsParameter.Contains(COMMAND_VALIDATION)) {
                    exp.addBinarySelection_Validation(vg.generateRandomVariants(GlobalState.varModel, treshold, modulu));
                    exp.addBinarySampling_Validation("random " + task);
                } else {
                    exp.addBinarySelection_Learning(vg.generateRandomVariants(GlobalState.varModel, treshold, modulu));
                    exp.addBinarySampling_Learning("random " + task);
                }
                break;
            }
        case COMMAND_START_LEARNING: {
                InfluenceModel infMod = new InfluenceModel(GlobalState.varModel, GlobalState.currentNFP);
                List<Configuration> configurations_Learning = new List<Configuration>();
                List<Configuration> configurations_Validation = new List<Configuration>();
                if (exp.TrueModel == null) {
                    //List<List<BinaryOption>> availableBinary
                    //configurations_Learning = GlobalState.getMeasuredConfigs(exp.BinarySelections_Learning, exp.NumericSelection_Learning);
                    configurations_Learning = GlobalState.getMeasuredConfigs(Configuration.getConfigurations(exp.BinarySelections_Learning, exp.NumericSelection_Learning));
                    configurations_Learning = configurations_Learning.Distinct().ToList();
                    configurations_Validation = GlobalState.getMeasuredConfigs(Configuration.getConfigurations(exp.BinarySelections_Validation, exp.NumericSelection_Validation));
                    configurations_Validation = configurations_Validation.Distinct().ToList();
                    //break; // todo: only to get the configurations that we haven't measured
                } else {
                    foreach (List<BinaryOption> binConfig in exp.BinarySelections_Learning) {
                        if (exp.NumericSelection_Learning.Count == 0) {
                            Configuration c = new Configuration(binConfig);
                            c.setMeasuredValue(GlobalState.currentNFP, exp.TrueModel.eval(c));
                            if (!configurations_Learning.Contains(c))
                                configurations_Learning.Add(c);
                            continue;
                        }
                        foreach (Dictionary<NumericOption, double> numConf in exp.NumericSelection_Learning) {
                            Configuration c = new Configuration(binConfig, numConf);
                            c.setMeasuredValue(GlobalState.currentNFP, exp.TrueModel.eval(c));
                            if (GlobalState.varModel.configurationIsValid(c))
                                // if (!configurations_Learning.Contains(c))
                                configurations_Learning.Add(c);
                        }
                    }
                }
                if (configurations_Learning.Count == 0) {
                    configurations_Learning = configurations_Validation;
                }
                if (configurations_Learning.Count == 0) {
                    GlobalState.logInfo.log("The learning set is empty! Cannot start learning!");
                    break;
                }
                if (configurations_Validation.Count == 0) {
                    configurations_Validation = configurations_Learning;
                }
                //break;
                GlobalState.logInfo.log("Learning: " + "NumberOfConfigurationsLearning:" + configurations_Learning.Count + " NumberOfConfigurationsValidation:" + configurations_Validation.Count + " UnionNumberOfConfigurations:" + (configurations_Learning.Union(configurations_Validation)).Count());
                // prepare the machine learning
                exp.learning.init(infMod, exp.mlSettings);
                exp.learning.setLearningSet(configurations_Learning);
                exp.learning.setValidationSet(configurations_Validation);
                exp.learning.learn();
            }
            break;
        case COMMAND_SAMPLE_NEGATIVE_OPTIONWISE:
            // TODO there are two different variants in generating NegFW configurations.
            NegFeatureWise neg = new NegFeatureWise();
            if (taskAsParameter.Contains(COMMAND_VALIDATION)) {
                exp.addBinarySelection_Validation(neg.generateNegativeFW(GlobalState.varModel));
                exp.addBinarySampling_Validation("newFW");
            } else {
                exp.addBinarySelection_Learning(neg.generateNegativeFW(GlobalState.varModel)); //neg.generateNegativeFWAllCombinations(GlobalState.varModel));
                exp.addBinarySampling_Learning("newFW");
            }
            break;
        default:
            return command;
    }
    return "";
}
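// Hedged sketch: the binary sampling strategies used above can also be invoked directly, outside the
// command interpreter. Assumes (as their use with addBinarySelection_Learning suggests) that the
// generators return a list of binary-option selections; method names are taken from the cases above.
public void exampleDirectSampling() {
    FeatureWise fw = new FeatureWise();
    List<List<BinaryOption>> optionWise = fw.generateFeatureWiseConfigurations(GlobalState.varModel);
    PairWise pw = new PairWise();
    List<List<BinaryOption>> pairWise = pw.generatePairWiseVariants(GlobalState.varModel);
    GlobalState.logInfo.log("OW: " + optionWise.Count + "  PW: " + pairWise.Count);
}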