public PerformanceResult GetPerformanceResult(TradeSet set = TradeSet.All)
{
    List<Trade> tradesList = new List<Trade>();

    //get the trades from all test sets
    foreach (TestSet testSet in testSets)
    {
        //can split these into trades from the train and test sets if a TradeSet parameter is passed
        if (set == TradeSet.All)
        {
            tradesList.AddRange(testSet.Trades);
        }
        else if (set == TradeSet.Test)
        {
            //test trades are those opened after the end of the training period
            tradesList.AddRange(testSet.Trades.Where(x => x.OpenTime > EndTrainDate));
        }
        else if (set == TradeSet.Train)
        {
            //train trades are those closed on or before the end of the training period
            tradesList.AddRange(testSet.Trades.Where(x => x.CloseTime <= EndTrainDate));
        }
    }

    Trade[] trades = tradesList.ToArray();

    PerformanceResult pr = new PerformanceResult();
    pr.Description = Description + " " + set.ToString();
    pr.TradeCount = trades.Count();
    pr.TotalProfit = trades.Sum(x => x.Profit);
    pr.SpreadCost = trades.Sum(x => x.SpreadCost);
    pr.WinPercent = PerformanceResult.CalculateWinPercent(trades);
    pr.ProfitFactor = PerformanceResult.CalculateProfitFactor(trades);

    return (pr);
}
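The PerformanceResult helper methods used above are not shown in this section. As a rough guide only, a minimal sketch of what CalculateProfitFactor and CalculateWinPercent might look like is below; it assumes Trade exposes a double Profit property and that a profit factor of zero is an acceptable value when there are no winning trades. The real class may handle these edge cases differently.

//Hedged sketch only - the actual PerformanceResult implementation is not shown in this section
public static double CalculateProfitFactor(Trade[] trades)
{
    //profit factor = gross profit / gross loss
    double grossProfit = trades.Where(x => x.Profit > 0).Sum(x => x.Profit);
    double grossLoss = Math.Abs(trades.Where(x => x.Profit < 0).Sum(x => x.Profit));

    //no losing trades: return a large value if there is any profit, otherwise zero (assumption)
    if (grossLoss == 0)
    {
        return grossProfit > 0 ? double.MaxValue : 0;
    }
    return grossProfit / grossLoss;
}

public static double CalculateWinPercent(Trade[] trades)
{
    //percentage of trades that closed with a positive profit
    if (trades.Length == 0)
    {
        return 0;
    }
    return (double)trades.Count(x => x.Profit > 0) / trades.Length * 100;
}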
public static TestSummary ReduceCorrelated(TestSummary testSummary)
{
    //Work out the percentage of trades to use to train
    DateTime startDate = (DateTime)testSummary.StartDate;
    DateTime endDate = (DateTime)testSummary.EndDate;
    double trainDays = (endDate - startDate).TotalDays * ((double)testSummary.ReduceCorrelatedParams.TrainTestSplit / 100);
    DateTime endTrain = startDate.AddDays(trainDays);
    testSummary.EndTrainDate = endTrain;

    //Create a copy of the testSummary so we don't modify the original testSummary when we reduce the test sets
    TestSummary filteredTestSummary = new TestSummary(testSummary);

    //Remove any non-performing test sets
    List<TestSet> nonPerformers = new List<TestSet>();
    int totalProfitable = 0;
    foreach (TestSet t in testSummary.TestSets)
    {
        double p = PerformanceResult.CalculateProfitFactor(t.Trades.Where(x => x.CloseTime <= filteredTestSummary.EndTrainDate).ToArray());
        if (p > 1)
        {
            totalProfitable++;
        }
        if (p < testSummary.ReduceCorrelatedParams.MinMetric)
        {
            filteredTestSummary.Remove(t);
        }
    }

    //Remove all the test sets unless at least half of them are profitable
    double pProfitable = (double)totalProfitable / (double)testSummary.TestSets.Length;
    if (pProfitable < 0.5)
    {
        foreach (TestSet t in testSummary.TestSets)
        {
            filteredTestSummary.Remove(t);
        }
    }

    //Calculate the profit for every week so we can do a correlation based on weekly profits
    Dictionary<string, KeyValuePair<int, double>[]> WeeklyProfits = new Dictionary<string, KeyValuePair<int, double>[]>();
    foreach (TestSet ts in testSummary.TestSets)
    {
        //get all the trades in the train period and calculate the weekly profit
        var result = from s in ts.Trades.Where(x => x.CloseTime <= endTrain)
                     group s by new { week = (s.OpenTime.Year - startDate.Year) * 52 + (s.OpenTime.DayOfYear / 7) } into g
                     select new KeyValuePair<int, double>(g.Key.week, g.Sum(x => x.Profit));
        WeeklyProfits.Add(ts.Description, result.ToArray());
    }

    //Create a grid of r2 values by comparing each test set with each other test set
    Dictionary<Pair, double> r2Values = new Dictionary<Pair, double>();
    foreach (KeyValuePair<string, KeyValuePair<int, double>[]> wpRow in WeeklyProfits)
    {
        foreach (KeyValuePair<string, KeyValuePair<int, double>[]> wpColumn in WeeklyProfits)
        {
            //skip identical result sets
            if (wpColumn.Key == wpRow.Key)
            {
                continue;
            }

            //calculate the r2 value from these lists
            //Line up the weeks to get an x and y for the current pair
            Dictionary<int, Point> list = new Dictionary<int, Point>();
            foreach (KeyValuePair<int, double> res in wpRow.Value)
            {
                list.Add(res.Key, new Point(res.Value, 0, wpRow.Key, null));
            }
            foreach (KeyValuePair<int, double> res in wpColumn.Value)
            {
                if (!list.ContainsKey(res.Key))
                {
                    list.Add(res.Key, new Point(0, res.Value, null, wpColumn.Key));
                }
                else
                {
                    list[res.Key].Y = res.Value;
                    list[res.Key].YLabel = wpColumn.Key;
                }
            }
            double[] x = list.Select(v => v.Value.X).ToArray();
            double[] y = list.Select(v => v.Value.Y).ToArray();

            //calculate the r2 and store it in the dictionary with the test set description pair as the key
            r2Values.Add(new Pair(wpRow.Key, wpColumn.Key), Stat.R2(x, y));
        }
    }

    foreach (KeyValuePair<Pair, double> res in r2Values)
    {
        //if too correlated, remove the worst performer
        if (res.Value > testSummary.ReduceCorrelatedParams.R2Cutoff)
        {
            //get the train set of trades only
            Trade[] xTrades = filteredTestSummary.GetTradeSet(Convert.ToString(res.Key.P1), TestSummary.TradeSet.Train);
            Trade[] yTrades = filteredTestSummary.GetTradeSet(Convert.ToString(res.Key.P2), TestSummary.TradeSet.Train);

            //if both still exist in our filtered test sets, remove the worst performer - one may have already been removed by a previous pair's r2 comparison
            if (xTrades != null && yTrades != null)
            {
                double xMetric = PerformanceResult.CalculateProfitFactor(xTrades);
                double yMetric = PerformanceResult.CalculateProfitFactor(yTrades);
                if (xMetric > yMetric)
                {
                    filteredTestSummary.Remove(Convert.ToString(res.Key.P2));
                }
                else
                {
                    filteredTestSummary.Remove(Convert.ToString(res.Key.P1));
                }
            }
        }
    }

    return (filteredTestSummary);
}
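The Stat.R2 call used in the correlation grid is not defined in this section. For two weekly-profit series it is simply the square of the Pearson correlation coefficient; the following is a minimal sketch of an equivalent calculation, assuming both arrays have the same length and that a zero-variance series should return zero rather than throw. The real Stat class may differ.

//Hedged sketch only - the actual Stat.R2 implementation is not shown in this section
public static double R2(double[] x, double[] y)
{
    int n = x.Length;
    double meanX = x.Average();
    double meanY = y.Average();

    //Pearson correlation: covariance divided by the product of the standard deviations
    double covariance = 0, varX = 0, varY = 0;
    for (int i = 0; i < n; i++)
    {
        covariance += (x[i] - meanX) * (y[i] - meanY);
        varX += (x[i] - meanX) * (x[i] - meanX);
        varY += (y[i] - meanY) * (y[i] - meanY);
    }

    //a flat series has no defined correlation - treat it as uncorrelated (assumption)
    if (varX == 0 || varY == 0)
    {
        return 0;
    }
    double r = covariance / Math.Sqrt(varX * varY);

    //r2 is the square of the correlation coefficient
    return r * r;
}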
public static TestSummary ReduceByRank(TestSummary testSummary)
{
    //don't filter any further if there are fewer than 2 test sets
    if (testSummary.TestSets.Count() < 2)
    {
        return (testSummary);
    }

    //copy the original test summary so we don't modify it directly
    TestSummary filteredTestSummary = new TestSummary(testSummary);

    //cycle through this process of deleting the worst average ranking test set until the remaining test sets have no real rank differentiation
    //These test sets should already have been reduced to remove correlated test sets, so we should end up with a number of uncorrelated test sets that all perform about the same
    double rankDiff = 0;
    do
    {
        //set up the dates for starting the train and test periods
        DateTime startDate = (DateTime)testSummary.StartDate;
        DateTime endDate = (DateTime)testSummary.EndTrainDate;    //this is set in the ReduceCorrelated method
        DateTime testStartDate = startDate;
        DateTime testEndDate = testStartDate.AddDays(testSummary.ReduceByRankParams.PeriodDays);

        //keep a list of rankings of each test set compared to the other parameter sets for each cycle
        Dictionary<string, OptimisePerformanceRank> ranks = new Dictionary<string, OptimisePerformanceRank>();

        //cycle through the dates until the end of the train set
        while (testStartDate < endDate)
        {
            //compile a dictionary of the results for this cycle - the key is the parameter set string
            Dictionary<string, double> testResults = new Dictionary<string, double>();

            //Calculate a profit factor for each of the test sets between the current cycle dates
            foreach (TestSet ts in filteredTestSummary.TestSets)
            {
                Trade[] trades = ts.Trades.Where(x => x.OpenTime > testStartDate && x.CloseTime <= testEndDate).ToArray();
                testResults.Add(ts.Description, PerformanceResult.CalculateProfitFactor(trades));
            }

            //rank the test sets
            var ordered = testResults.OrderByDescending(x => x.Value);
            int rank = 1;
            foreach (KeyValuePair<string, double> result in ordered)
            {
                if (!ranks.ContainsKey(result.Key))
                {
                    ranks.Add(result.Key, new OptimisePerformanceRank());
                }
                ranks[result.Key].Add(rank);
                rank++;
            }

            //move the dates along
            testStartDate = testStartDate.AddDays(testSummary.ReduceByRankParams.PeriodDays);
            testEndDate = testEndDate.AddDays(testSummary.ReduceByRankParams.PeriodDays);
            if (testEndDate > endDate)
            {
                testEndDate = endDate;
            }
        }

        //order the parameter sets by their average rank across all the cycles
        var orderedRanks = ranks.OrderBy(x => x.Value.Average).ToDictionary(t => t.Key, t => t.Value);

        //calculate the maximum difference in average rank between the test sets - we exit the loop if this difference is small enough
        rankDiff = orderedRanks.Max(x => x.Value.Average) - orderedRanks.Min(x => x.Value.Average);

        //copy the current test summary so we don't modify it directly
        filteredTestSummary = new TestSummary(filteredTestSummary);

        //remove the lowest ranking test set
        filteredTestSummary.Remove(orderedRanks.LastOrDefault().Key);
    } while (testSummary.ReduceByRankParams.MaxRankDifference > 0 && rankDiff > testSummary.ReduceByRankParams.MaxRankDifference);

    return (filteredTestSummary);
}
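The OptimisePerformanceRank class referenced above only needs to accumulate the rank a test set achieved in each cycle and expose the average of those ranks. A minimal sketch of such a class is shown below; the field and property names are assumptions and the real class may carry extra bookkeeping.

//Hedged sketch only - the actual OptimisePerformanceRank class is not shown in this section
public class OptimisePerformanceRank
{
    private readonly List<int> ranks = new List<int>();

    //record the rank this test set achieved in one cycle
    public void Add(int rank)
    {
        ranks.Add(rank);
    }

    //average rank across all the cycles recorded so far
    public double Average
    {
        get { return ranks.Count == 0 ? 0 : ranks.Average(); }
    }
}

The two reduction steps would then typically be chained, reducing the correlated sets first so that EndTrainDate is set before the ranking pass, for example: TestSummary reduced = ReduceByRank(ReduceCorrelated(summary)); (usage shown as an assumption of how the calling code might look).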