/// <summary>
/// Run a train-test unit test.
/// </summary>
protected void Run_TrainTest(PredictorAndArgs predictor, TestDataset dataset,
    string[] extraSettings = null, string extraTag = "", bool expectFailure = false,
    bool summary = false, bool saveAsIni = false, int digitsOfPrecision = DigitsOfPrecision)
{
    RunContext ctx = new RunContext(this, Cmd.TrainTest, predictor, dataset, extraSettings, extraTag,
        expectFailure: expectFailure, summary: summary, saveAsIni: saveAsIni);
    Run(ctx, digitsOfPrecision);
}
/// <summary>
/// Run TrainTest and CV for a single predictor on a single dataset.
/// </summary>
protected void RunOneAllTests(PredictorAndArgs predictor, TestDataset dataset,
    string[] extraSettings = null, string extraTag = "", bool summary = false,
    int digitsOfPrecision = DigitsOfPrecision)
{
    Contracts.Assert(IsActive);
    Run_TrainTest(predictor, dataset, extraSettings, extraTag, summary: summary, digitsOfPrecision: digitsOfPrecision);
    Run_CV(predictor, dataset, extraSettings, extraTag, useTest: true, digitsOfPrecision: digitsOfPrecision);
}
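// Illustrative sketch (not part of the suite): a derived test class would typically drive these helpers
// from a test method. The names TestLearners.logisticRegression, TestDatasets.breastCancer, and the
// Done() call are assumed to be provided elsewhere in the test assembly.
//
//     [TestMethod]
//     public void LogisticRegressionBreastCancerTest()
//     {
//         RunOneAllTests(TestLearners.logisticRegression, TestDatasets.breastCancer);
//         Done();
//     }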
/// <summary>
/// Run a train unit test.
/// </summary>
protected RunContext Run_Train(PredictorAndArgs predictor, TestDataset dataset,
    string[] extraSettings = null, string extraTag = "")
{
    RunContext ctx = new RunContext(this, Cmd.Train, predictor, dataset, extraSettings, extraTag);
    Run(ctx);
    return ctx;
}
/// <summary>
/// Run INI test for a pair of predictor and dataset.
/// </summary>
/// <param name="successTestInformation">Receives information for runs whose output matched the baseline.</param>
/// <param name="failureTestInformation">Receives information for runs that exited with a non-zero code or failed the baseline check.</param>
/// <param name="predictor">The predictor to train.</param>
/// <param name="dataset">The dataset; its test file is used for training and evaluation.</param>
/// <param name="evaluationOutputDirPrefix">Prefix for the directory that receives the evaluation output.</param>
/// <param name="extraSettings">Additional training settings.</param>
/// <param name="extraTag">Extra tag used when naming output files.</param>
public void RunIniFileEvaluationTest(
    List<IniModelTestInformation> successTestInformation,
    List<IniModelTestInformation> failureTestInformation,
    PredictorAndArgs predictor,
    TestDataset dataset,
    string evaluationOutputDirPrefix,
    string[] extraSettings = null,
    string extraTag = "")
{
    string outName = ExpectedFilename("Train", predictor, dataset, extraTag);
    string[] extraTrainingSettings = JoinOptions(GetInstancesSettings(dataset), extraSettings);
    string trainDataset = dataset.testFilename;

    InternalLearnRunParameters runParameters = TrainForIniModel(
        predictor, trainDataset, outName, extraTrainingSettings, ModelType.ModelKind.Ini);
    CheckEqualityNormalized(runParameters.BaselineDir, runParameters.ModelFilename);

    string modelFilePath = GetOutputPath(runParameters.BaselineDir, runParameters.ModelFilename);
    string trainDatasetPath = GetDataPath(trainDataset);
    string evaluationOutputDir = GetOutputDir(evaluationOutputDirPrefix + @"\Dirs\" + outName);
    Assert.IsNull(EnsureEmptyDirectory(evaluationOutputDir));

    string cmd = string.Format(EvaluationCommandLineFormat, modelFilePath, evaluationOutputDir, trainDatasetPath);
    string dir = Path.GetFullPath(EvaluationExecutorDir);
    Log("Working directory for evaluation: {0}", dir);
    Log("Evaluation command line: {0}", cmd);

    ProcessDebugInformation processDebugInformation = RunCommandLine(cmd, dir);
    if (processDebugInformation.ExitCode == 0)
    {
        KeyValuePair<Exception, List<string>> baselineCheckDebugInformation = DirectoryBaselineCheck(evaluationOutputDir);
        IniModelTestInformation iniModelTestInformation = new IniModelTestInformation(modelFilePath, trainDatasetPath,
            evaluationOutputDir, cmd, runParameters, processDebugInformation, baselineCheckDebugInformation);
        if (baselineCheckDebugInformation.Key == null)
            successTestInformation.Add(iniModelTestInformation);
        else
            failureTestInformation.Add(iniModelTestInformation);
    }
    else
    {
        IniModelTestInformation iniModelTestInformation = new IniModelTestInformation(modelFilePath, trainDatasetPath,
            evaluationOutputDir, cmd, runParameters, processDebugInformation,
            new KeyValuePair<Exception, List<string>>(null, null));
        failureTestInformation.Add(iniModelTestInformation);
    }
}
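// Illustrative (hypothetical) caller pattern: a test accumulates results across predictors and then
// asserts that nothing landed in the failure list. The directory prefix "IniEval" is made up here.
//
//     var successes = new List<IniModelTestInformation>();
//     var failures = new List<IniModelTestInformation>();
//     RunIniFileEvaluationTest(successes, failures, predictor, dataset, "IniEval");
//     Assert.AreEqual(0, failures.Count, "INI evaluation produced baseline mismatches or non-zero exit codes.");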
/// <summary>
/// Run a test-only unit test using the model file at <paramref name="modelPath"/>.
/// </summary>
protected void Run_Test(PredictorAndArgs predictor, TestDataset dataset, string modelPath,
    string[] extraSettings = null, string extraTag = "")
{
    OutputPath path = new OutputPath(modelPath);
    RunContext testCtx = new RunContext(this, Cmd.Test, predictor, dataset, extraSettings, extraTag,
        modelOverride: path);
    Run(testCtx);
}
// REVIEW: Remove TrainSaveTest and supporting code.
/// <summary>
/// Run a unit test which does training, saves the model, and then tests
/// after loading the model.
/// </summary>
protected void Run_TrainSaveTest(PredictorAndArgs predictor, TestDataset dataset,
    string[] extraSettings = null, string extraTag = "")
{
    // Train and save the model.
    RunContext trainCtx = new RunContext(this, Cmd.Train, predictor, dataset, extraSettings, extraTag);
    Run(trainCtx);
    // Load the model and test.
    RunContext testCtx = new RunContext(this, Cmd.Test, predictor, dataset, extraSettings, extraTag,
        modelOverride: trainCtx.ModelPath());
    Run(testCtx);
}
/// <summary>
/// Run a cross-validation unit test over the training set, unless
/// <paramref name="useTest"/> is set, in which case the test set is used instead.
/// </summary>
protected void Run_CV(PredictorAndArgs predictor, TestDataset dataset,
    string[] extraSettings = null, string extraTag = "", bool useTest = false,
    int digitsOfPrecision = DigitsOfPrecision)
{
    if (useTest)
    {
        // REVIEW: It is very strange to use the *test* set in
        // cross validation. Should this just be deprecated outright?
        dataset = dataset.Clone();
        dataset.trainFilename = dataset.testFilename;
    }
    RunContext cvCtx = new RunContext(this, Cmd.CV, predictor, dataset, extraSettings, extraTag);
    Run(cvCtx, digitsOfPrecision);
}
/// <summary>
/// Create a string for specifying the loader and transform.
/// </summary>
public string GetLoaderTransformSettings(TestDataset dataset)
{
    List<string> settings = new List<string>();
    Contracts.Check(dataset.testSettings == null, "Separate test loader pipeline is not supported");
    if (!string.IsNullOrEmpty(dataset.loaderSettings))
        settings.Add(dataset.loaderSettings);
    if (!string.IsNullOrEmpty(dataset.labelFilename))
        settings.Add(string.Format("xf=lookup{{col=Label data={{{0}}}}}", GetDataPath(dataset.labelFilename)));
    return settings.Count > 0 ? string.Join(" ", settings) : null;
}
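// For example (values are illustrative), a dataset with
//     loaderSettings = "loader=Text{col=Label:0 col=Features:1-9}"
//     labelFilename  = "iris.labels.txt"
// yields: "loader=Text{col=Label:0 col=Features:1-9} xf=lookup{col=Label data={<full path to iris.labels.txt>}}"
// and a dataset with neither field set yields null.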
/// <summary>
/// Create a member-wise copy of this dataset descriptor.
/// </summary>
public TestDataset Clone()
{
    var ret = new TestDataset
    {
        name = name,
        trainFilename = trainFilename,
        testFilename = testFilename,
        validFilename = validFilename,
        labelFilename = labelFilename,
        settings = settings,
        testSettings = testSettings,
        extraSettings = extraSettings,
        loaderSettings = loaderSettings,
        mamlExtraSettings = mamlExtraSettings
    };
    return ret;
}
public RunContext(TestCommandBase test, Cmd cmd, PredictorAndArgs predictor, TestDataset dataset,
    string[] extraArgs = null, string extraTag = "", bool expectFailure = false,
    OutputPath modelOverride = null, bool summary = false, bool saveAsIni = false)
    : base(test, predictor.Trainer.Kind, GetNamePrefix(cmd.ToString(), predictor, dataset, extraTag), predictor.BaselineProgress)
{
    Command = cmd;
    Predictor = predictor;
    Dataset = dataset;
    ExtraArgs = extraArgs;
    ExtraTag = extraTag;
    ExpectedToFail = expectFailure;
    Summary = summary;
    ModelOverride = modelOverride;
    SaveAsIni = saveAsIni;
}
/// <summary>
/// Run Train for a single predictor on a single dataset.
/// </summary>
protected RunContext RunOneTrain(PredictorAndArgs predictor, TestDataset dataset,
    string[] extraSettings = null, string extraTag = "")
{
    Contracts.Assert(IsActive);
    return Run_Train(predictor, dataset, extraSettings, extraTag);
}
private static string GetNamePrefix(string testType, PredictorAndArgs predictor, TestDataset dataset, string extraTag = "")
{
    // REVIEW: Once we finish the TL->MAML conversion effort, please make the output/baseline
    // names take some form from which someone could actually tell which test generated the file.
    string datasetSuffix = dataset.name;
    if (!string.IsNullOrEmpty(extraTag))
    {
        if (char.IsLetterOrDigit(extraTag[0]))
            datasetSuffix += "." + extraTag;
        else
            datasetSuffix += extraTag;
    }
    string filePrefix = string.IsNullOrEmpty(predictor.Tag) ? predictor.Trainer.Kind : predictor.Tag;
    return filePrefix + "-" + testType + "-" + datasetSuffix;
}
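// For example (names are illustrative), with predictor.Tag empty, predictor.Trainer.Kind = "LogisticRegression",
// dataset.name = "breast-cancer", and testType = "TrainTest":
//     extraTag = ""     -> "LogisticRegression-TrainTest-breast-cancer"
//     extraTag = "bin"  -> "LogisticRegression-TrainTest-breast-cancer.bin"
//     extraTag = "-bin" -> "LogisticRegression-TrainTest-breast-cancer-bin"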