/// <summary>
/// Builds the field definition for column <paramref name="i"/> of the sample data set
/// by running the sampling strategy over that column's header and values.
/// </summary>
/// <param name="i">Zero-based column index into the sample data.</param>
/// <returns>The inferred <see cref="FieldDef"/> for the column.</returns>
private FieldDef GetFieldDef(int i)
{
    var (header, data) = SampleData.SamplingData();
    var strategy = new SamplingStrategy(new NullLogger<SamplingStrategy>());

    // Project out the i-th value of every row to form the column stream.
    var column = data.Select(row => row[i]);

    return strategy.GetFieldDef(header[i], column).Field;
}
/// <summary>
/// Enables sampling for this classifier configuration.
/// </summary>
/// <param name="strategy">The sampling strategy to apply.</param>
/// <param name="minorityClassMaxOversampling">Upper bound on minority-class oversampling.</param>
/// <returns>This instance, to allow fluent chaining.</returns>
public ClassifierParams WithSampling(SamplingStrategy strategy, int minorityClassMaxOversampling)
{
    SamplingParams = new SamplingParams
    {
        NeedSampling = true,
        Strategy = strategy,
        MinorityClassMaxOversampling = minorityClassMaxOversampling,
    };

    return this;
}
/// <summary>
/// Uses the local Multi-Armed-Bandits to explore the action space and uses the global
/// Multi-Armed-Bandit to exploit the best performing actions.
/// </summary>
/// <param name="context">The current search context.</param>
/// <param name="state">The game state for the node.</param>
/// <param name="gMAB">The global Multi-Armed-Bandit, keyed state-hash -> (action-hash -> arm).</param>
/// <returns>An <see cref="A"/> that was selected from the global Multi-Armed-Bandit.</returns>
private A NaïveSampling(SearchContext<D, P, A, S, Sol> context, P state, IDictionary<long, Dictionary<int, LocalArm>> gMAB)
{
    var apply = context.Application;
    // Clone the state so the playout below cannot mutate the caller's state.
    var stateClone = context.Cloner.Clone(state);
    var stateHash = stateClone.HashMethod();
    // Lazily create the arm table for states we have not seen before.
    if (!gMAB.ContainsKey(stateHash)) { gMAB.Add(stateHash, new Dictionary<int, LocalArm>()); }

    // Use a policy p_0 to determine whether to explore or exploit
    // If explore was selected
    //      x_1...x_n is sampled by using a policy p_l to select a value for each X_i in X independently.
    //      As a side effect, the resulting value combination is added to the global MAB.
    // If exploit was selected
    //      x_1...x_n is sampled by using a policy p_g to select a value combination using MAB_g.

    // Can only exploit if there is anything to exploit in the first place
    if (gMAB[stateHash].IsNullOrEmpty() || ExplorationStrategy.Policy(context, 0))
    {
        // Explore: create an action according to policy p_1.
        var action = SamplingStrategy.Sample(stateClone);
        // NOTE(review): the arm table is keyed by GetHashCode(); assumes equal actions hash
        // equally (and distinct actions rarely collide) — confirm A's hash implementation.
        var actionHash = action.GetHashCode();

        // Evaluate the sampled action by playing it out to an end state.
        var tempNode = new TreeSearchNode<P, A> { Payload = action };
        var endState = PlayoutStrategy.Playout(context, apply.Apply(context, stateClone, action));
        var reward = EvaluationStrategy.Evaluate(context, tempNode, endState);

        // Record the observed reward on the (possibly new) arm in the global MAB.
        if (gMAB[stateHash].ContainsKey(actionHash))
        {
            gMAB[stateHash][actionHash].Visit(reward);
        }
        else
        {
            var newArm = new LocalArm(action);
            newArm.Visit(reward);
            gMAB[stateHash].Add(actionHash, newArm);
        }

        return (action);
    }

    // Exploit; epsilon-greedy by returning the action with the highest expected reward
    // with probability 1-e, otherwise returning random.
    return (_rng.NextDouble() <= 1 - PolicyGlobal
        ? gMAB[stateHash].Values.OrderByDescending(i => i.ExpectedReward).First().Action
        : gMAB[stateHash].RandomElementOrDefault().Value.Action);
}
/// <summary>
/// Generates the interesting subset of actions C* from C.
///
/// 1) Generate a weight function R^ from PartialActions (adopting the linear side information assumption).
/// 2) Schematically generate a probability distribution D_R^ over CombinedAction space C, biased "towards" R^.
/// 3) Sample a number of CombinedActions C* from D_R^.
/// </summary>
/// <param name="context">The current search context.</param>
/// <returns>List of <see cref="A"/>.</returns>
private List<A> Generate(SearchContext<D, P, A, S, A> context)
{
    // Build the side information using the allowed number of generation samples.
    SideInformation = SideInformationStrategy.Create(context, GenerationSamples);

    // Draw combined-actions by sampling against that side information.
    var drawnActions = new List<A>();
    for (var draw = 0; draw < EvaluationSamples; draw++)
    {
        drawnActions.Add(SamplingStrategy.Sample(context.Source, SideInformation));
    }

    return drawnActions;
}
/// <summary>
/// Creates a DOE study over the given component. Factors are deep-copied to avoid
/// aliasing problems when studies are edited, and the study's result table columns
/// are registered for the ID, each factor, and each response.
/// </summary>
/// <param name="name">Study name; also registered as the results table name.</param>
/// <param name="description">Human-readable study description.</param>
/// <param name="studiedComponent">The workflow component under study.</param>
/// <param name="factors">Input factors; copied, so callers may keep editing theirs.</param>
/// <param name="responses">Output data to record for each sample.</param>
/// <param name="samples">Number of samples to draw.</param>
/// <param name="strategy">Which DOE treatment to build (Random or LatinHypercube).</param>
/// <param name="createFolder">When true, the treatment's working folder is created immediately.</param>
/// <param name="parentName">Optional parent study name.</param>
/// <exception cref="System.ArgumentOutOfRangeException">
/// Thrown when <paramref name="strategy"/> has no corresponding treatment.
/// </exception>
public RandomDOEStudy(string name, string description, WorkflowComponent studiedComponent,
    List<Factor> factors, List<Data> responses, long samples, SamplingStrategy strategy,
    bool createFolder, string parentName = "")
    : base(name, description, studiedComponent, factors.Select(f => f.Data).ToList(), responses, parentName)
{
    base.StudiedComponent = studiedComponent;

    // Copy factors to avoid aliasing problems when editing studies
    Factors = factors.Select(f => f.Copy()).ToList();
    Responses = responses;
    Samples = samples;
    Strategy = strategy;

    SetIDColumn();
    foreach (Factor factor in Factors)
    {
        SetColumn(factor.Name, factor.Data);
    }
    foreach (Data response in Responses)
    {
        SetColumn(response.Name, response);
    }
    TableNames.Add(Name);

    switch (strategy)
    {
        case SamplingStrategy.Random:
            Treatment = new RandomDoE(name, description, studiedComponent, Factors, responses, samples);
            break;
        case SamplingStrategy.LatinHypercube:
            Treatment = new LatinHypercube(name, description, studiedComponent, Factors, responses, samples);
            break;
        default:
            // The original fell through silently, leaving Treatment null and causing a
            // NullReferenceException below when createFolder was set. Fail fast instead.
            throw new System.ArgumentOutOfRangeException(nameof(strategy), strategy, "Unsupported sampling strategy.");
    }

    if (createFolder)
    {
        Treatment.CreateFolder();
    }
}
/// <summary>
/// Convenience overload that builds the study without creating the treatment's
/// working folder (delegates with <c>createFolder: false</c>).
/// </summary>
public RandomDOEStudy(string name, string description, WorkflowComponent studiedComponent,
    List<Factor> factors, List<Data> responses, long samples, SamplingStrategy strategy,
    string parentName = "")
    : this(name, description, studiedComponent, factors, responses, samples, strategy, false, parentName)
{
}
/// <summary>
/// Down-samples a point series into buckets of at most <paramref name="maxRes"/> width,
/// emitting one aggregated point per non-empty bucket (aggregation per
/// <paramref name="samplingStrategy"/>). Each emitted point carries its bucket's start X.
/// </summary>
/// <param name="pnts">Input points; assumed ordered by ascending X — TODO confirm with callers.</param>
/// <param name="maxRes">Maximum sample interval (bucket width).</param>
/// <param name="samplingStrategy">How to aggregate the samples inside each bucket.</param>
/// <param name="desiredFirstSample">Optional explicit X for the first bucket's start.</param>
/// <returns>The down-sampled series, or the input unchanged when no sampling is needed.</returns>
public static List<DataPoint> GetDataPointsWithGivenMaxSampleInterval(List<DataPoint> pnts, TimeSpan maxRes, SamplingStrategy samplingStrategy, double? desiredFirstSample)
{
    // Nothing to do for raw mode, a zero-width interval, or an empty series.
    if (samplingStrategy == SamplingStrategy.Raw || maxRes.TotalDays == 0 || pnts.Count == 0)
    {
        return pnts;
    }

    List<DataPoint> dataPoints = new List<DataPoint>();

    // Bucket width as numeric (X values are in fractional days).
    double maxResNumeric = maxRes.TotalDays;

    double sampleBoundaryStart = pnts[0].X;
    if (desiredFirstSample != null)
    {
        sampleBoundaryStart = desiredFirstSample.Value;
    }
    double sampleBoundaryEnd = sampleBoundaryStart + maxResNumeric;

    List<double> sampleBucket = new List<double>();
    for (int pntIter = 0; pntIter < pnts.Count; pntIter++)
    {
        if (pnts[pntIter].X < sampleBoundaryEnd)
        {
            // Add points to the sample bucket till we encounter the sample boundary
            sampleBucket.Add(pnts[pntIter].Y);
        }
        else
        {
            // Flush the completed bucket.
            dataPoints.Add(new DataPoint(sampleBoundaryStart, GetBucketAggregate(sampleBucket, samplingStrategy)));

            // BUGFIX: advance the boundaries until the current point falls inside the
            // bucket. The original advanced by exactly one interval, so a gap in the data
            // spanning several intervals stamped later buckets with stale start X values.
            do
            {
                sampleBoundaryStart = sampleBoundaryEnd;
                sampleBoundaryEnd += maxResNumeric;
            } while (pnts[pntIter].X >= sampleBoundaryEnd);

            // Start the new bucket with the current sample.
            sampleBucket.Clear();
            sampleBucket.Add(pnts[pntIter].Y);
        }
    }

    if (sampleBucket.Count > 0)
    {
        // handle the last bucket
        dataPoints.Add(new DataPoint(sampleBoundaryStart, GetBucketAggregate(sampleBucket, samplingStrategy)));
    }

    return dataPoints;
}
/// <summary>
/// Aggregates a bucket of samples according to the given sampling strategy.
/// NaN entries are treated as missing values and skipped. An empty bucket, or a
/// bucket containing no valid (non-NaN) sample, yields NaN for every strategy.
/// </summary>
/// <param name="sampleBucket">The samples collected for one bucket.</param>
/// <param name="samplingStrategy">How to aggregate the bucket.</param>
/// <returns>The aggregated value, NaN when no valid sample exists, or 0 for unknown strategies.</returns>
public static double GetBucketAggregate(List<double> sampleBucket, SamplingStrategy samplingStrategy)
{
    if (sampleBucket.Count == 0)
    {
        return double.NaN;
    }

    // NOTE: the original wrapped this in catch (Exception) { } — removed, as nothing in
    // this pure List<double> traversal can throw, and silent swallowing hides real bugs.

    if (samplingStrategy == SamplingStrategy.Snap)
    {
        // Snap: the first valid sample wins.
        foreach (double sampleVal in sampleBucket)
        {
            if (!double.IsNaN(sampleVal))
            {
                return sampleVal;
            }
        }

        // BUGFIX: previously fell through and returned 0 for an all-NaN bucket,
        // inconsistent with the Average/Sum path. Report "no valid sample" as NaN.
        return double.NaN;
    }

    if (samplingStrategy == SamplingStrategy.Average || samplingStrategy == SamplingStrategy.Sum)
    {
        double sum = 0;
        double numValidSamples = 0;
        foreach (double sampleVal in sampleBucket)
        {
            if (!double.IsNaN(sampleVal))
            {
                sum += sampleVal;
                numValidSamples += 1;
            }
        }

        if (numValidSamples == 0)
        {
            // since we did not find a valid sample in the bucket, return NaN
            return double.NaN;
        }

        return samplingStrategy == SamplingStrategy.Average ? sum / numValidSamples : sum;
    }

    if (samplingStrategy == SamplingStrategy.Maximum || samplingStrategy == SamplingStrategy.Minimum)
    {
        bool wantMaximum = samplingStrategy == SamplingStrategy.Maximum;

        // BUGFIX: previously an all-NaN bucket returned -Infinity (Maximum) or
        // +Infinity (Minimum); report missing data as NaN like the other strategies.
        double best = double.NaN;
        foreach (double sampleVal in sampleBucket)
        {
            if (double.IsNaN(sampleVal))
            {
                continue;
            }
            if (double.IsNaN(best) || (wantMaximum ? sampleVal > best : sampleVal < best))
            {
                best = sampleVal;
            }
        }

        return best;
    }

    // Unknown strategy: preserve the original fall-through result.
    return 0;
}