/// <summary>
/// Handles learning for all of the categories, according to category alpha, category beta and
/// the value of the document being read. Each category's alpha/beta is nudged by a Gaussian
/// sample centered on the mean-learning functions, then floored at 0.001 to stay valid
/// as Beta-distribution parameters.
/// </summary>
/// <param name="consumed">The document whose per-category values drive the update.</param>
public void BatchLearn(Document consumed)
{
    // NOTE(review): sibling method BatchChooseByPolicy iterates Settings.Categories;
    // confirm Simulation.instance.categories is always equal to it.
    for (int category = 0; category < Simulation.instance.categories; ++category)
    {
        // Alpha/beta pair for this category.
        // NOTE(review): assumes ABPair is a reference type — if it is a struct,
        // these writes mutate a copy and are lost. Confirm.
        ABPair pair = state.categoriesAB[category];

        // The document's 0-1 value for this category.
        float value = consumed.values[category];

        // Perturb alpha by a Gaussian draw around the learning mean; clamp to a
        // small positive floor since Beta parameters must be > 0.
        float updatedAlpha = sampleGaussian(meanLearningAlpha(value), 0.2f) + pair.alpha;
        pair.alpha = updatedAlpha <= 0 ? 0.001f : updatedAlpha;

        // Same update rule for beta.
        float updatedBeta = sampleGaussian(meanLearningBeta(value), 0.2f) + pair.beta;
        pair.beta = updatedBeta <= 0 ? 0.001f : updatedBeta;
    }
}
/// <summary>
/// Reimplementation of ChooseByPolicy to work with multidimensional documents.
/// Samples one value per category from this agent's Beta(alpha, beta) distributions to
/// build a "target" vector, then returns the available document whose value vector is
/// closest to that target in Euclidean distance.
/// </summary>
/// <returns>The document in <c>state.documents</c> nearest to the sampled target vector.</returns>
public Document BatchChooseByPolicy()
{
    List<float> samples = new List<float>();
    var VecBuilder = Vector<float>.Build;

    // calculate target vector of all categories
    for (int i = 0; i < Settings.Categories; ++i)
    {
        // get alpha and beta values for each category
        ABPair catAB = state.categoriesAB[i];
        var betaDist = new Beta(catAB.alpha, catAB.beta);
        float sample = (float)betaDist.Sample();
        samples.Add(sample);
    }

    // vector representing the ideal document according to this agent's alpha / beta values
    var target = VecBuilder.DenseOfArray(samples.ToArray());

    Document chosen = state.documents[0];
    // BUGFIX: was hard-coded to 2f, but the maximum Euclidean distance in [0,1]^k is
    // sqrt(k); with 5+ categories every candidate could exceed 2 and the method would
    // silently fall back to documents[0] instead of the true nearest document.
    float minDist = float.MaxValue;

    for (int i = 0; i < state.documents.Count; ++i)
    {
        Document doc = state.documents[i];
        var docVec = VecBuilder.DenseOfArray(doc.values.ToArray());

        // Euclidean distance between the candidate document and the target vector.
        var distance = (float)Distance.Euclidean(target, docVec);
        if (distance < minDist)
        {
            // update the minimum distance
            minDist = distance;
            // update the reference to closest document found so far
            chosen = doc;
        }
    }

    return (chosen);
}