//multi-task learning: run up to Global.ttlIter training iterations, evaluating
//all tasks after each pass, with early stopping on convergence; optionally
//saves one model file per task at the end.
public static void train_multi_mtl(List<dataSet> XXList, toolbox tb)
{
    Global.reinitGlobal();
    // The iteration counter is conceptually integral (incremented by 1,
    // compared against 30), so use int rather than double; it converts
    // implicitly where a double is expected (test_multi_mtl, ttlIter bound).
    for (int iter = 0; iter < Global.ttlIter; iter++)
    {
        Global.glbIter++;

        // Time one full training pass over all tasks.
        Stopwatch timer = Stopwatch.StartNew();
        double error = tb.train_multi();
        timer.Stop();
        double time = timer.ElapsedMilliseconds / 1000.0;
        Global.swLog.WriteLine("Training used time (second): " + time.ToString());

        //evaluate: score every task on its test set and record per-task scores
        List<double> scoreList = tb.test_multi_mtl(XXList, iter, Global.swOutputList);
        for (int i = 0; i < Global.nTask; i++)
        {
            Global.scoreTaskList_multi[i].Add(scoreList[i]);
        }

        // Accumulate time/error curves, indexed by the global iteration counter
        // (glbIter was pre-incremented above, hence the -1).
        Global.timeList_multi[Global.glbIter - 1] += time;
        Global.errorList_multi[Global.glbIter - 1] += error;

        // Early stopping: after a 30-iteration burn-in, stop once the objective
        // difference drops below the convergence tolerance.
        if (iter > 30 && Global.diff > 0 && Global.diff < Global.convergeTol)
        {
            break;
        }
    }

    //save model: one file per task, named modelDir + taskIndex + fModel
    if (Global.save == 1)
    {
        for (int i = 0; i < Global.nTask; i++)
        {
            tb.ModelList[i].save(Global.modelDir + i.ToString() + Global.fModel);
        }
    }
}
// Entry point for all multi-task experiment modes, dispatched on Global.runMode:
//   "train": one or more of single-task / merged / multi-task training over a
//            grid of regularizer values (and C values for multi-task);
//   "test1": normal test with previously saved models;
//   "test2": test a new task by choosing the single most similar model;
//   "test3": test a new task by voting over all models (OMT-SBD).
// Any other runMode value is rejected with an exception.
static void multiTask()
{
    //train
    if (Global.runMode.Contains("train"))
    {
        //load data
        List<dataSet> XList = new List<dataSet>();   // per-task training sets
        List<dataSet> XXList = new List<dataSet>();  // per-task test sets
        dataSet X = new dataSet();                   // merged training set
        loadData_multi(XList, X, XXList);
        toolbox toolbox;

        //single-task training in multi-task framework: each task has its own independent train & test data
        if (Global.mt_singleTrain)
        {
            foreach (double r in Global.regList)//experiments for each different regularizer value
            {
                Global.swResRaw.WriteLine("\n%single-task! r: {0}", r);
                Console.WriteLine("\nsingle-task! r: {0}", r);
                // Train one independent model per task with this regularizer.
                for (int i = 0; i < Global.nTask; i++)
                {
                    Global.swLog.WriteLine("\nsingle-task! #task, r: " + (i + 1).ToString() + "," + r.ToString());
                    Console.WriteLine("\nsingle-task! #task, r: " + (i + 1).ToString() + "," + r.ToString());
                    Global.reg = r;
                    dataSet Xi = XList[i];
                    toolbox = new toolbox(Xi);
                    train_multi_single(XXList, toolbox, i);
                }
                resProcess.write_multi();
            }
            Global.swResRaw.WriteLine();
        }

        //merged training in multi-task framework: merge all training data to train a unified model
        if (Global.mt_mergeTrain)
        {
            foreach (double r in Global.regList)//experiments for each different regularizer value
            {
                Global.reg = r;
                Global.swLog.WriteLine("\nmerged-task! r: " + r.ToString());
                Console.WriteLine("\nmerged-task! r: " + r.ToString());
                Global.swResRaw.WriteLine("\n%merged-task! r: " + r.ToString());
                // A single toolbox over the merged data X trains one unified model.
                toolbox = new toolbox(X);
                train_multi_merge(XXList, toolbox);
                resProcess.write_multi();
            }
            Global.swResRaw.WriteLine();
        }

        //multi-task learning
        if (Global.mt_mtTrain)
        {
            foreach (double r in Global.regList)//experiments for each different regularizer value
            {
                Global.reg = r;
                foreach (double cFactor in Global.cFactors)//experiments for each different C value (see Eq. 18 & 19 of [Sun+ TKDE 2013] for the definition of C)
                {
                    Global.C = cFactor;
                    Global.swLog.WriteLine("\n%multi-task! reg, rate0, C, kernel: {0},{1},{2},{3}", Global.reg, Global.rate0, Global.C, Global.simiMode);
                    Global.swSimi.WriteLine("\n%multi-task! reg, rate0, C, kernel: {0},{1},{2},{3}", Global.reg, Global.rate0, Global.C, Global.simiMode);
                    Global.swResRaw.WriteLine("\n%multi-task! reg, rate0, C, kernel: {0},{1},{2},{3}", Global.reg, Global.rate0, Global.C, Global.simiMode);
                    Console.WriteLine("\nmulti-task! reg, rate0, C, kernel: {0},{1},{2},{3}", Global.reg, Global.rate0, Global.C, Global.simiMode);
                    // Multi-task toolbox sees both the merged data and the per-task sets.
                    toolbox = new toolbox(X, XList);
                    train_multi_mtl(XXList, toolbox);
                    resProcess.write_multi();
                }
            }
            Global.swResRaw.WriteLine();
        }
    }
    else if (Global.runMode.Contains("test1"))//normal test
    {
        //load data
        List<dataSet> XList = new List<dataSet>();
        List<dataSet> XXList = new List<dataSet>();
        dataSet X = new dataSet();
        loadData_multi(XList, X, XXList);

        //load model etc. (third argument false: load saved models instead of training)
        toolbox tb = new toolbox(X, XList, false);
        if (Global.mt_mergeTrain)//multi_merge
        {
            List<double> scoreList = tb.test_multi_merge(XXList, 0, Global.swOutputList);
            for (int i = 0; i < Global.nTask; i++)
            {
                Global.scoreTaskList_multi[i].Add(scoreList[i]);
            }
            resProcess.write_multi();
        }
        else//multi_single or multi_mtl: they have the same testing schema
        {
            List<double> scoreList = tb.test_multi_mtl(XXList, 0, Global.swOutputList);
            for (int i = 0; i < Global.nTask; i++)
            {
                Global.scoreTaskList_multi[i].Add(scoreList[i]);
            }
            resProcess.write_multi();
        }
    }
    else if (Global.runMode.Contains("test2"))//for multi_mtl: test a new task via choosing the most similar model
    {
        //load data
        List<dataSet> XList = new List<dataSet>();
        List<dataSet> XXList = new List<dataSet>();
        dataSet X = new dataSet();
        loadData_multi(XList, X, XXList);

        //get vectors: one feature vector per task, used to measure task similarity
        List<List<double>> vecList = new List<List<double>>();
        foreach (dataSet Xi in XList)
        {
            List<double> vec = getVecFromX(Xi);
            vecList.Add(vec);
        }

        //load model & test
        toolbox tb = new toolbox(X, XList, false);
        List<double> scoreList = tb.test2_multi_mtl(vecList, XXList, 0, Global.swOutputList);
        for (int i = 0; i < Global.nTask; i++)
        {
            Global.scoreTaskList_multi[i].Add(scoreList[i]);
        }
        resProcess.write_multi();
    }
    else if (Global.runMode.Contains("test3"))//for multi_mtl: test a new task via voted-test based on all models, i.e., the OMT-SBD method described in Section 4.4 of [Sun+ TKDE 2013]
    {
        //load data
        List<dataSet> XList = new List<dataSet>();
        List<dataSet> XXList = new List<dataSet>();
        dataSet X = new dataSet();
        loadData_multi(XList, X, XXList);

        //get vectors: one feature vector per task, used for similarity-weighted voting
        List<List<double>> vecList = new List<List<double>>();
        foreach (dataSet Xi in XList)
        {
            List<double> vec = getVecFromX(Xi);
            vecList.Add(vec);
        }

        //load model & test
        toolbox tb = new toolbox(X, XList, false);
        List<double> scoreList = tb.test3_multi_mtl(vecList, XXList, 0, Global.swOutputList);
        for (int i = 0; i < Global.nTask; i++)
        {
            Global.scoreTaskList_multi[i].Add(scoreList[i]);
        }
        resProcess.write_multi();
    }
    else
    {
        // Unrecognized runMode: fail loudly rather than silently doing nothing.
        throw new Exception("error");
    }
}