/// <summary>
/// PPT 20141202
/// For every source annotation, computes the probabilities of all candidate
/// target annotations and stores them, sorted by probability, in
/// CascadedConstant.Pr_T_S.
/// Note: this is result computation, not training, so it iterates over ALL
/// sentences rather than only the training sentences.
/// </summary>
static public void Pr_T_S()
{
    // Reset the probability caches.
    CascadedConstant.Pr_T_S = new Dictionary<SourceAnnotation, IDictionary<TargetAnnotation, double>>();
    CascadedConstant.Pr_T_t = new Dictionary<TargetLabeltruth, IDictionary<TargetAnnotation, double>>();
    foreach (Label label in Constant.TargetTaxonomy.LabelArray)
    {
        CascadedConstant.Pr_T_t.Add(new TargetLabeltruth(label, true), new Dictionary<TargetAnnotation, double>());
        CascadedConstant.Pr_T_t.Add(new TargetLabeltruth(label, false), new Dictionary<TargetAnnotation, double>());
    }

    // Compute Pr(T|S) once per distinct gold source annotation.
    foreach (Sentence sentence in Constant.SentenceList)
    {
        if (CascadedConstant.Pr_T_S.ContainsKey(sentence.GoldSourceAnnotation))
        {
            continue; // this source annotation was already handled via an earlier sentence
        }

        IDictionary<TargetAnnotation, double> probabilities = new Dictionary<TargetAnnotation, double>();
        // Enumerate every candidate target annotation: the power set of the target labels.
        foreach (Label[] candidateLabels in CascadedFunction.PowerSet(Constant.TargetTaxonomy.LabelArray))
        {
            TargetAnnotation candidate = new TargetAnnotation(candidateLabels);
            double probability = sentence.GoldSourceAnnotation.Pr_T_S(candidate);
            if (probability != 0) // zero-probability candidates are not stored
            {
                probabilities.Add(candidate, probability);
            }
        }

        // Sort by probability. (It has been established why "sadness true" always ranks highest.)
        IDictionary<TargetAnnotation, double> sorted = GeneralFunction.SortDictionary(probabilities);
        CascadedConstant.Pr_T_S.Add(sentence.GoldSourceAnnotation, sorted);
    }
}
/// <summary>
/// Loads Ekman-taxonomy crowd annotations for sentences [startIndex, endIndex]
/// from "&lt;corpus&gt;/EkmanData&lt;startIndex&gt;-&lt;endIndex&gt;.csv" and attaches each
/// worker's TargetAnnotation to the corresponding Sentence.
/// </summary>
/// <param name="corpus">Corpus whose directory holds the CSV file.</param>
/// <param name="startIndex">Index of the first sentence covered by the file.</param>
/// <param name="endIndex">Index of the last sentence covered by the file.</param>
/// <param name="sentences">Full sentence list; CSV rows map into [startIndex, endIndex].</param>
/// <param name="targetWorkerList">Accumulated workers; an already-seen worker is reused.</param>
static private void initializeEkmanAsTarget(Corpus corpus, int startIndex, int endIndex, IList <Sentence> sentences, ref IList <TargetWorker> targetWorkerList)
{
    string[] data = File.ReadAllLines(corpus + "/EkmanData" + startIndex + "-" + endIndex + ".csv");
    foreach (string row in data)
    {
        // labels[0] is the worker name; the remaining cells hold per-sentence label votes.
        string[] labels = row.Split(',');
        TargetWorker worker = new TargetWorker(labels[0]);
        if (!targetWorkerList.Contains(worker)) // do not add a duplicate worker again
        {
            targetWorkerList.Add(worker);
        }
        else
        {
            // Reuse the already-registered instance so its dictionaries keep accumulating.
            worker = targetWorkerList.First(x => x.Equals(worker));
        }
        IList <Label> trueLabels = new List <Label>();
        // Each sentence occupies (EkmanLabelArray.Length + 1) consecutive cells:
        // one cell per Ekman label plus one extra column.
        // NOTE(review): the extra column never matches a case below — presumably a
        // separator or sentence id; confirm against the CSV format.
        for (int i = 1; i <= (endIndex - startIndex + 1) * (Constant.EkmanLabelArray.Length + 1); ++i)
        {
            switch (labels[i])
            {
            case "Anger":
                trueLabels.Add(Label.Anger);
                break;
            case "Sadness":
                trueLabels.Add(Label.Sadness);
                break;
            case "Joy":
                trueLabels.Add(Label.Joy);
                break;
            case "Disgust":
                trueLabels.Add(Label.Disgust);
                break;
            case "Surprise":
                trueLabels.Add(Label.Surprise);
                break;
            case "Fear":
                trueLabels.Add(Label.Fear);
                break;
            }
            if (i % (Constant.EkmanLabelArray.Length + 1) == 0)
            {
                // End of one sentence's cells: fetch the Sentence from the sentence list,
                // build the annotation from the labels collected so far, and link it both ways.
                Sentence sentence = sentences[startIndex + (i - 1) / (Constant.EkmanLabelArray.Length + 1)];
                TargetAnnotation targetAnnotation = new TargetAnnotation(trueLabels.ToArray());
                trueLabels.Clear();
                worker.SentenceTargetAnnotationDic.Add(sentence, targetAnnotation);
                sentence.TargetWorkerTargetAnnotationDic.Add(worker, targetAnnotation);
            }
        }
    }
}
/// <summary>
/// Computes the similarity of two target annotations under the measure
/// selected by Constant.Similarity (SMC, Jaccard, or Dice).
/// </summary>
/// <param name="a">First target annotation.</param>
/// <param name="b">Second target annotation.</param>
/// <returns>Similarity in [0, 1]; 0 for an unrecognized measure.</returns>
static public double Similarity(TargetAnnotation a, TargetAnnotation b)
{
    double result = 0;
    switch (Constant.Similarity)
    {
    case Interoperability.Similarity.SMC:
        // Simple matching coefficient: fraction of labels whose truth values agree.
        foreach (Label label in Constant.TargetTaxonomy.LabelArray)
        {
            if (a.LabelAndTruthDic[label] == b.LabelAndTruthDic[label])
            {
                ++result;
            }
        }
        result /= Constant.TargetTaxonomy.LabelArray.Length;
        break;

    case Interoperability.Similarity.Jaccard:
        // |intersection| / |union| over the labels marked true.
        double numerator = 0;
        double denominator = 0;
        foreach (Label label in Constant.TargetTaxonomy.LabelArray)
        {
            if (a.LabelAndTruthDic[label] && b.LabelAndTruthDic[label])
            {
                ++numerator;
                ++denominator;
            }
            else if (a.LabelAndTruthDic[label] || b.LabelAndTruthDic[label])
            {
                ++denominator;
            }
        }
        // Bug fix: when both annotations are entirely false the union is empty and
        // the original 0/0 division produced NaN. Two identical (empty) label sets
        // are treated as fully similar instead, matching the SMC/Dice behavior.
        result = denominator == 0 ? 1 : numerator / denominator;
        break;

    case Interoperability.Similarity.Dice:
        // Dice coefficient with add-one smoothing (which also keeps the
        // empty-vs-empty case finite, unlike the unsmoothed Jaccard above).
        numerator = 0;
        denominator = 0;
        foreach (Label label in Constant.TargetTaxonomy.LabelArray)
        {
            if (a.LabelAndTruthDic[label] && b.LabelAndTruthDic[label])
            {
                numerator += 2;
                denominator += 2;
            }
            else if (a.LabelAndTruthDic[label] || b.LabelAndTruthDic[label])
            {
                ++denominator;
            }
        }
        result = (numerator + 1) / (denominator + 1);
        break;
    }
    return(result);
}
// Unity lifecycle hook: caches sibling/parent components and derives the
// robot's velocity limits from the engine forces and rigidbody damping.
void Start()
{
    Debug.Log(Directory.GetCurrentDirectory());

    // Components that live on the parent agent object.
    GameObject agent = transform.parent.gameObject;
    annotations = agent.GetComponent <TargetAnnotation>();
    initializer = agent.GetComponent <RandomInit>();
    positionDrawer = agent.GetComponent <RandomPosition>();
    positionDrawer.agent = transform.gameObject;

    // Components on this object and its named children.
    rbody = GetComponent <Rigidbody>();
    engine = transform.Find("Engine").GetComponent <Engine>();
    accelerometer = transform.Find("Accelerometer").GetComponent <Accelerometer>();
    depthSensor = transform.Find("DepthSensor").GetComponent <DepthSensor>();

    // Steady-state velocity per axis: max force divided by linear damping.
    // NOTE(review): assumes drag * mass is the effective damping term — confirm
    // against Unity's drag model.
    float linearDamping = rbody.drag * rbody.mass;
    maxVelocity = new Vector3(
        engine.maxForceLateral / linearDamping,
        engine.maxForceVertical / linearDamping,
        engine.maxForceLongitudinal / linearDamping);
    maxYawVelocity = engine.maxTorqueYaw / (rbody.inertiaTensor.y * rbody.angularDrag);
}
// Unity editor hook: re-wires the annotation and position-drawing components
// whenever an inspector value changes, pointing both at the object that matches
// the selected data-collection mode.
void OnValidate()
{
    GameObject parentAgent = transform.parent.gameObject;
    positionDrawer = parentAgent.GetComponent <RandomPosition>();
    initializer = parentAgent.GetComponent <RandomInit>();
    annotations = parentAgent.GetComponent <TargetAnnotation>();
    positionDrawer.agent = transform.gameObject;

    // Route both consumers to the target object for the current mode; any other
    // mode leaves the previous targets untouched.
    if (mode == RobotAcademy.DataCollection.gate)
    {
        positionDrawer.target = gateTargetObject;
        annotations.target = gateTargetObject;
    }
    else if (mode == RobotAcademy.DataCollection.path)
    {
        positionDrawer.target = pathTargetObject;
        annotations.target = pathTargetObject;
    }
}
/// <summary>
/// Computes the probability of a target annotation given that this target
/// label has the given truth value.
/// </summary>
/// <param name="targetAnnotation">Target Annotation</param>
/// <returns>
/// Pr(T | t). If the label's required truth does not match its truth inside
/// the annotation, returns 0.
/// </returns>
/// (Observe how often cached values are reused.)
public double Pr_T_t(TargetAnnotation targetAnnotation)
{
    // Truth mismatch: probability is 0 by definition.
    if (targetAnnotation.LabelAndTruthDic[this.Label] != this.Truth)
    {
        return(0);
    }

    // Cached result? TryGetValue replaces the original ContainsKey + indexer
    // double lookup. Caching matters because 2^|T| annotations share each t.
    double cached;
    if (CascadedConstant.Pr_T_t[this].TryGetValue(targetAnnotation, out cached))
    {
        return(cached);
    }

    double nT = 0; // numerator: worker annotations equal to targetAnnotation
    double nt = 0; // denominator: worker annotations agreeing with this label truth
    foreach (Sentence sentence in TrainConstant.SentenceList)
    {
        foreach (TargetAnnotation observed in sentence.TargetWorkerTargetAnnotationDic.Values)
        {
            if (observed.LabelAndTruthDic[this.Label] == this.Truth)
            {
                ++nt;
                if (targetAnnotation.Equals(observed))
                {
                    ++nT;
                }
            }
        }
    }

    // Bug fix: when no training annotation matches this truth (nt == 0) the
    // original 0/0 division produced NaN; an unobserved event gets probability 0.
    // (Also fixes the "pribability" typo in the original local name.)
    double probability = nt == 0 ? 0 : nT / nt;
    // Memoize for later calls.
    CascadedConstant.Pr_T_t[this].Add(targetAnnotation, probability);
    return(probability);
}
/// <summary>
/// Loads Nakamura-taxonomy crowd annotations for sentences [startIndex, endIndex]
/// from "&lt;corpus&gt;/NakaData&lt;startIndex&gt;-&lt;endIndex&gt;.csv" and attaches each
/// worker's TargetAnnotation to the corresponding Sentence.
/// </summary>
/// <param name="corpus">Corpus whose directory holds the CSV file.</param>
/// <param name="startIndex">Index of the first sentence covered by the file.</param>
/// <param name="endIndex">Index of the last sentence covered by the file.</param>
/// <param name="sentences">Full sentence list; CSV rows map into [startIndex, endIndex].</param>
/// <param name="targetWorkerList">Accumulated workers; an already-seen worker is reused.</param>
static private void initializeNakaAsTarget(Corpus corpus, int startIndex, int endIndex, IList <Sentence> sentences, ref IList <TargetWorker> targetWorkerList)
{
    // No separate data sample is needed: sentences outside the sample range are
    // never reached by the loop below.
    string[] data = File.ReadAllLines(corpus + "/NakaData" + startIndex + "-" + endIndex + ".csv");
    foreach (string row in data)
    {
        string[] labels = row.Split(',');

        // labels[0] is the worker (user) name. A single FirstOrDefault replaces the
        // original Contains + First pair, which scanned the list twice per row.
        TargetWorker worker = new TargetWorker(labels[0]);
        TargetWorker existing = targetWorkerList.FirstOrDefault(x => x.Equals(worker));
        if (existing == null)
        {
            targetWorkerList.Add(worker); // first time we see this worker
        }
        else
        {
            worker = existing; // reuse so the worker's dictionaries keep accumulating
        }

        IList <Label> trueLabels = new List <Label>();
        // Each sentence occupies (NakaLabelArray.Length + 1) consecutive cells; the
        // labels collected so far are flushed into one TargetAnnotation at each boundary.
        // NOTE(review): the extra column never matches a case below — presumably a
        // separator or sentence id; confirm against the CSV format.
        for (int i = 1; i <= (endIndex - startIndex + 1) * (Constant.NakaLabelArray.Length + 1); ++i)
        {
            switch (labels[i])
            {
            case "happiness":
                trueLabels.Add(Label.喜Joy);
                break;
            case "fondness":
                trueLabels.Add(Label.好Fondness);
                break;
            case "relief":
                trueLabels.Add(Label.安Relief);
                break;
            case "anger":
                trueLabels.Add(Label.怒Anger);
                break;
            case "sadness":
                trueLabels.Add(Label.哀Sadness);
                break;
            case "fear":
                trueLabels.Add(Label.怖Fear);
                break;
            case "shame":
                trueLabels.Add(Label.恥Shame);
                break;
            case "disgust":
                trueLabels.Add(Label.厭Disgust);
                break;
            case "excitement":
                trueLabels.Add(Label.昂Excitement);
                break;
            case "surprise":
                trueLabels.Add(Label.驚Surprise);
                break;
            }
            if (i % (Constant.NakaLabelArray.Length + 1) == 0)
            {
                // End of one sentence's cells: fetch the Sentence, build the
                // annotation, and link it both ways.
                Sentence sentence = sentences[startIndex + (i - 1) / (Constant.NakaLabelArray.Length + 1)];
                TargetAnnotation targetAnnotation = new TargetAnnotation(trueLabels.ToArray());
                trueLabels.Clear();
                worker.SentenceTargetAnnotationDic.Add(sentence, targetAnnotation);
                sentence.TargetWorkerTargetAnnotationDic.Add(worker, targetAnnotation);
            }
        }
    }
}
/// <summary>
/// Writes every sentence's estimated binary target annotation (i.e., the most
/// probable target annotation under the given method) as a CSV file.
/// </summary>
/// <param name="method">Estimation method that selects the per-sentence annotation.</param>
/// <param name="groupsize">Group capacity (used in the output file name).</param>
/// <param name="groupindex">Group number (used in the output file name).</param>
static public void OutputEstimatedBinaryTargetAnnotations(Method method, int groupsize, int groupindex)
{
    string path = "Output/" + Constant.Gold + "/" + TrainConstant.Corpus + "_" + Constant.SourceTaxonomy.Name + "/" + method;
    if (!Directory.Exists(path))
    {
        Directory.CreateDirectory(path);
    }

    // Map the method to its per-sentence estimator once, instead of triplicating
    // the identical output loop across switch branches. A method outside the
    // original case list produces a header but no rows, exactly as before.
    Func<Sentence, TargetAnnotation> estimator = null;
    switch (method)
    {
    case Method.MLE:
        estimator = s => s.MLETargetAnnotation();
        break;
    case Method.Cascaded:
        estimator = s => s.CascadedTargetAnnotation();
        break;
    case Method.Aggregation:
    case Method.OrdinaryCombination:
    case Method.WeightedCombination:
    case Method.ExpertiseCombination:
    case Method.TemporaryNogeneralNonormalize:
    case Method.TemporaryNogeneralNormalize:
    case Method.TemporaryGeneralNonormalize:
    case Method.TemporaryGeneralNormalize:
    case Method.OtherNogeneralNormalize:
    case Method.OtherGeneralNonormalize:
    case Method.OtherGeneralNormalize:
        estimator = s => s.SpaceTargetAnnotation();
        break;
    }

    // Bug fix: the StreamWriter was never disposed, so the file handle leaked
    // whenever an estimator threw. (The variable was also named "File", shadowing
    // System.IO.File.)
    using (StreamWriter writer = new StreamWriter(path + "/" + groupsize + "-" + groupindex + method + "_BinaryTargetAnnotations.csv", false, Encoding.Default))
    {
        // Header: sentence, one column per target label, then the hash code.
        writer.Write("Sentence,");
        foreach (Label label in Constant.TargetTaxonomy.LabelArray)
        {
            writer.Write(label + ",");
        }
        // NOTE(review): "GashCode" looks like a typo for "HashCode", but the header
        // is kept byte-identical because downstream consumers may match on it.
        writer.WriteLine("GashCode");

        if (estimator != null)
        {
            foreach (Sentence sentence in Constant.SentenceList)
            {
                writer.Write(sentence.ToString() + ",");
                TargetAnnotation targetAnnotation = estimator(sentence);
                foreach (Label label in Constant.TargetTaxonomy.LabelArray)
                {
                    writer.Write((targetAnnotation.LabelAndTruthDic[label] ? 1 : 0) + ",");
                }
                writer.WriteLine(targetAnnotation.GetHashCode());
            }
        }
    }
}