public void RoverPositionShouldBeTwoDigitFollowedByOneCharacterCommandSeparatedBySpace(string command)
{
    //Act
    Position position = _helper.ToRover(command);

    //Assert
    Assert.IsNotNull(position);
    Assert.AreEqual(command, _helper.ToString(position));
}
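// A minimal sketch of the round trip the test above assumes: _helper.ToRover parses a
// "two digits followed by one character" command such as "1 2 N" into a Position, and
// _helper.ToString serializes it back to the same string. Illustrative only; the class
// name ParseHelper, the Position members (X, Y, Direction) and the parsing details are
// assumptions, not the project's actual implementation.
public class Position
{
    public int X { get; set; }
    public int Y { get; set; }
    public char Direction { get; set; }   // N, E, S or W
}

public class ParseHelper
{
    public Position ToRover(string command)
    {
        // Expected shape: "<x> <y> <heading>", e.g. "1 2 N".
        var parts = command.Split(' ');
        return new Position
        {
            X = int.Parse(parts[0]),
            Y = int.Parse(parts[1]),
            Direction = parts[2][0]
        };
    }

    public string ToString(Position position)
    {
        return $"{position.X} {position.Y} {position.Direction}";
    }
}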
/*
 * /// <summary>
 * /// Constructs context information for the specified mention.
 * /// </summary>
 * /// <param name="mentionParse">
 * /// Mention parse structure for which context is to be constructed.
 * /// </param>
 * /// <param name="mentionIndex">
 * /// Mention position in the sentence.
 * /// </param>
 * /// <param name="mentionsInSentence">
 * /// Number of mentions in the sentence.
 * /// </param>
 * /// <param name="mentionsInDocument">
 * /// Number of mentions in the document.
 * /// </param>
 * /// <param name="sentenceIndex">
 * /// Sentence number for this mention.
 * /// </param>
 * /// <param name="nameType">
 * /// The named-entity type for this mention.
 * /// </param>
 * /// <param name="headFinder">
 * /// Object which provides head information.
 * /// </param>
 * public MentionContext(Parse mentionParse, int mentionIndex, int mentionsInSentence, int mentionsInDocument, int sentenceIndex, string nameType, HeadFinder headFinder) {
 *     nounLocation = mentionIndex;
 *     maxNounLocation = mentionsInDocument;
 *     sentenceNumber = sentenceIndex;
 *     parse = mentionParse;
 *     indexSpan = mentionParse.getSpan();
 *     prevToken = mentionParse.getPreviousToken();
 *     nextToken = mentionParse.getNextToken();
 *     head = headFinder.getLastHead(mentionParse);
 *     List headTokens = head.getTokens();
 *     tokens = (Parse[]) headTokens.toArray(new Parse[headTokens.size()]);
 *     basalNextToken = head.getNextToken();
 *     indexHeadSpan = head.getSpan();
 *     nonDescriptorStart = 0;
 *     initHeads(headFinder.getHeadIndex(head));
 *     this.neType = nameType;
 *     if (PartsOfSpeech.IsNoun(getHeadTokenTag()) && !PartsOfSpeech.IsProperNoun(getHeadTokenTag())) {
 *         //if (PartsOfSpeech.IsProperNoun(headTokenTag) && neType != null) {
 *         this.synsets = getSynsetSet(this);
 *     }
 *     else {
 *         this.synsets = Collections.EMPTY_SET;
 *     }
 *     gender = GenderEnum.UNKNOWN;
 *     this.genderProb = 0d;
 *     number = NumberEnum.UNKNOWN;
 *     this.numberProb = 0d;
 * }
 */

// Caches the mention's head token and first token, together with their syntactic types.
private void InitializeHeads(int headIndex)
{
    HeadTokenIndex = headIndex;
    mHeadToken = (IParse)Tokens[HeadTokenIndex];
    HeadTokenText = mHeadToken.ToString();
    HeadTokenTag = mHeadToken.SyntacticType;

    FirstToken = (IParse)Tokens[0];
    FirstTokenTag = FirstToken.SyntacticType;
    FirstTokenText = FirstToken.ToString();
}
private void CollectCoordinatedNounPhraseMentions(IParse nounPhrase, List<Mention> entities)
{
    //System.err.println("collectCoordNp: "+np);
    List<IParse> nounPhraseTokens = nounPhrase.Tokens;
    bool inCoordinatedNounPhrase = false;
    int lastNounPhraseTokenIndex = mHeadFinder.GetHeadIndex(nounPhrase);
    for (int tokenIndex = lastNounPhraseTokenIndex - 1; tokenIndex >= 0; tokenIndex--)
    {
        IParse token = nounPhraseTokens[tokenIndex];
        string tokenText = token.ToString();
        if (tokenText == "and" || tokenText == "or")
        {
            if (lastNounPhraseTokenIndex != tokenIndex)
            {
                if (tokenIndex - 1 >= 0 && (nounPhraseTokens[tokenIndex - 1]).SyntacticType.StartsWith("NN"))
                {
                    Util.Span nounPhraseSpan = new Util.Span((nounPhraseTokens[tokenIndex + 1]).Span.Start, (nounPhraseTokens[lastNounPhraseTokenIndex]).Span.End);
                    Mention nounPhraseSpanExtent = new Mention(nounPhraseSpan, nounPhraseSpan, token.EntityId, null, "CNP");
                    entities.Add(nounPhraseSpanExtent);
                    //System.err.println("adding extent for conjunction in: "+np+" preeceeded by "+((Parse) npTokens.get(ti-1)).getSyntacticType());
                    inCoordinatedNounPhrase = true;
                }
                else
                {
                    break;
                }
            }
            lastNounPhraseTokenIndex = tokenIndex - 1;
        }
        else if (inCoordinatedNounPhrase && tokenText.Equals(","))
        {
            if (lastNounPhraseTokenIndex != tokenIndex)
            {
                Util.Span nounPhraseSpan = new Util.Span((nounPhraseTokens[tokenIndex + 1]).Span.Start, (nounPhraseTokens[lastNounPhraseTokenIndex]).Span.End);
                Mention nounPhraseSpanExtent = new Mention(nounPhraseSpan, nounPhraseSpan, token.EntityId, null, "CNP");
                entities.Add(nounPhraseSpanExtent);
                //System.err.println("adding extent for comma in: "+np);
            }
            lastNounPhraseTokenIndex = tokenIndex - 1;
        }
        else if (inCoordinatedNounPhrase && tokenIndex == 0 && lastNounPhraseTokenIndex >= 0)
        {
            Util.Span nounPhraseSpan = new Util.Span((nounPhraseTokens[tokenIndex]).Span.Start, (nounPhraseTokens[lastNounPhraseTokenIndex]).Span.End);
            Mention nounPhraseSpanExtent = new Mention(nounPhraseSpan, nounPhraseSpan, token.EntityId, null, "CNP");
            entities.Add(nounPhraseSpanExtent);
            //System.err.println("adding extent for start coord in: "+np);
        }
    }
}
private void CollectCoordinatedNounPhraseMentions(IParse nounPhrase, List<Mention> entities)
{
    List<IParse> nounPhraseTokens = nounPhrase.Tokens;
    bool inCoordinatedNounPhrase = false;
    int lastNounPhraseTokenIndex = mHeadFinder.GetHeadIndex(nounPhrase);
    for (int tokenIndex = lastNounPhraseTokenIndex - 1; tokenIndex >= 0; tokenIndex--)
    {
        IParse token = nounPhraseTokens[tokenIndex];
        string tokenText = token.ToString();
        if (tokenText == "and" || tokenText == "or")
        {
            if (lastNounPhraseTokenIndex != tokenIndex)
            {
                if (tokenIndex - 1 >= 0 && PartsOfSpeech.IsNoun(nounPhraseTokens[tokenIndex - 1].SyntacticType))
                {
                    var nounPhraseSpan = new Util.Span((nounPhraseTokens[tokenIndex + 1]).Span.Start, (nounPhraseTokens[lastNounPhraseTokenIndex]).Span.End);
                    var nounPhraseSpanExtent = new Mention(nounPhraseSpan, nounPhraseSpan, token.EntityId, null, "CNP");
                    entities.Add(nounPhraseSpanExtent);
                    inCoordinatedNounPhrase = true;
                }
                else
                {
                    break;
                }
            }
            lastNounPhraseTokenIndex = tokenIndex - 1;
        }
        else if (inCoordinatedNounPhrase && tokenText == PartsOfSpeech.Comma)
        {
            if (lastNounPhraseTokenIndex != tokenIndex)
            {
                var nounPhraseSpan = new Util.Span((nounPhraseTokens[tokenIndex + 1]).Span.Start, (nounPhraseTokens[lastNounPhraseTokenIndex]).Span.End);
                var nounPhraseSpanExtent = new Mention(nounPhraseSpan, nounPhraseSpan, token.EntityId, null, "CNP");
                entities.Add(nounPhraseSpanExtent);
            }
            lastNounPhraseTokenIndex = tokenIndex - 1;
        }
        else if (inCoordinatedNounPhrase && tokenIndex == 0 && lastNounPhraseTokenIndex >= 0)
        {
            var nounPhraseSpan = new Util.Span((nounPhraseTokens[tokenIndex]).Span.Start, (nounPhraseTokens[lastNounPhraseTokenIndex]).Span.End);
            var nounPhraseSpanExtent = new Mention(nounPhraseSpan, nounPhraseSpan, token.EntityId, null, "CNP");
            entities.Add(nounPhraseSpanExtent);
        }
    }
}
private void CollectPossessivePronouns(IParse nounPhrase, List<Mention> entities)
{
    //TODO: Look at how training is done and examine whether this is needed or can be accommodated in a different way.
    /*
     * List snps = np.getSubNounPhrases();
     * if (snps.size() != 0) {
     *     //System.err.println("AbstractMentionFinder: Found existing snps");
     *     for (int si = 0, sl = snps.size(); si < sl; si++) {
     *         Parse snp = (Parse) snps.get(si);
     *         Extent ppExtent = new Extent(snp.getSpan(), snp.getSpan(), snp.getEntityId(), null, Linker.PRONOUN_MODIFIER);
     *         entities.add(ppExtent);
     *     }
     * }
     * else {
     */
    //System.err.println("AbstractEntityFinder.collectPossesivePronouns: "+np);
    List<IParse> nounPhraseTokens = nounPhrase.Tokens;
    IParse headToken = mHeadFinder.GetHeadToken(nounPhrase);
    for (int tokenIndex = nounPhraseTokens.Count - 2; tokenIndex >= 0; tokenIndex--)
    {
        IParse token = nounPhraseTokens[tokenIndex];
        if (token == headToken)
        {
            continue;
        }
        if (token.SyntacticType.StartsWith("PRP") && IsHandledPronoun(token.ToString()))
        {
            Mention possessivePronounExtent = new Mention(token.Span, token.Span, token.EntityId, null, OpenNLP.Tools.Coreference.Linker.PronounModifier);
            //System.err.println("AbstractEntityFinder.collectPossesivePronouns: adding possesive pronoun: "+tok+" "+tok.getEntityId());
            entities.Add(possessivePronounExtent);
            //System.err.println("AbstractMentionFinder: adding pos-pro: "+ppExtent);
            break;
        }
    }
    //}
}
public IEnumerable<string> SendCommands([FromBody] Command command)
{
    try
    {
        var plateau = _parse.ToPlateau(command.Plateau);
        var rovers = new List<string>();
        foreach (var rover in command.Rovers)
        {
            var position = _parse.ToRover(rover.Position);
            var movements = _parse.ToMovemenent(rover.Movement);
            var result = _squad.Deploy(plateau, position, movements);
            rovers.Add(_parse.ToString(result));
        }
        return rovers;
    }
    catch (Exception ex)
    {
        // Pass the exception as the first argument so ILogger records it with its stack
        // trace; LogError(message, ex) would treat it as a message format argument instead.
        _logger.LogError(ex, "Failed to deploy rover!");
        return null;
    }
}
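// Hedged sketch of the request DTOs implied by the property accesses in SendCommands
// above (command.Plateau, command.Rovers, rover.Position, rover.Movement). Only those
// property names come from the code itself; the member types, example values, and any
// validation in the real project may differ.
public class Command
{
    // Plateau definition, e.g. "5 5"; parsed by _parse.ToPlateau.
    public string Plateau { get; set; }

    public List<Rover> Rovers { get; set; }
}

public class Rover
{
    // Starting position, e.g. "1 2 N"; parsed by _parse.ToRover.
    public string Position { get; set; }

    // Movement instructions, e.g. "LMLMLMLMM"; parsed by _parse.ToMovemenent.
    public string Movement { get; set; }
}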
/// <summary>
/// Constructs context information for the specified mention.
/// </summary>
/// <param name="mentionParse">
/// Mention parse structure for which context is to be constructed.
/// </param>
/// <param name="mentionIndex">
/// mention position in sentence.
/// </param>
/// <param name="mentionsInSentence">
/// Number of mentions in the sentence.
/// </param>
/// <param name="mentionsInDocument">
/// Number of mentions in the document.
/// </param>
/// <param name="sentenceIndex">
/// Sentence number for this mention.
/// </param>
/// <param name="nameType">
/// The named-entity type for this mention.
/// </param>
/// <param name="headFinder">
/// Object which provides head information.
/// </param>
/*
public MentionContext(Parse mentionParse, int mentionIndex, int mentionsInSentence, int mentionsInDocument, int sentenceIndex, String nameType, HeadFinder headFinder) {
    nounLocation = mentionIndex;
    maxNounLocation = mentionsInDocument;
    sentenceNumber = sentenceIndex;
    parse = mentionParse;
    indexSpan = mentionParse.getSpan();
    prevToken = mentionParse.getPreviousToken();
    nextToken = mentionParse.getNextToken();
    head = headFinder.getLastHead(mentionParse);
    List headTokens = head.getTokens();
    tokens = (Parse[]) headTokens.toArray(new Parse[headTokens.size()]);
    basalNextToken = head.getNextToken();
    //System.err.println("MentionContext.init: "+ent+" "+ent.getEntityId()+" head="+head);
    indexHeadSpan = head.getSpan();
    nonDescriptorStart = 0;
    initHeads(headFinder.getHeadIndex(head));
    this.neType = nameType;
    if (getHeadTokenTag().startsWith("NN") && !getHeadTokenTag().startsWith("NNP")) {
        //if (headTokenTag.startsWith("NNP") && neType != null) {
        this.synsets = getSynsetSet(this);
    }
    else {
        this.synsets = Collections.EMPTY_SET;
    }
    gender = GenderEnum.UNKNOWN;
    this.genderProb = 0d;
    number = NumberEnum.UNKNOWN;
    this.numberProb = 0d;
}
*/
private void InitializeHeads(int headIndex)
{
    HeadTokenIndex = headIndex;
    mHeadToken = (IParse) Tokens[HeadTokenIndex];
    HeadTokenText = mHeadToken.ToString();
    HeadTokenTag = mHeadToken.SyntacticType;

    mFirstToken = (IParse) Tokens[0];
    mFirstTokenTag = mFirstToken.SyntacticType;
    mFirstTokenText = mFirstToken.ToString();
}
private void CollectPossessivePronouns(IParse nounPhrase, List<Mention> entities)
{
    //TODO: Look at how training is done and examine whether this is needed or can be accommodated in a different way.
    /*
     * List snps = np.getSubNounPhrases();
     * if (snps.size() != 0) {
     *     for (int si = 0, sl = snps.size(); si < sl; si++) {
     *         Parse snp = (Parse) snps.get(si);
     *         Extent ppExtent = new Extent(snp.getSpan(), snp.getSpan(), snp.getEntityId(), null, Linker.PRONOUN_MODIFIER);
     *         entities.add(ppExtent);
     *     }
     * }
     * else {
     */
    List<IParse> nounPhraseTokens = nounPhrase.Tokens;
    IParse headToken = mHeadFinder.GetHeadToken(nounPhrase);
    for (int tokenIndex = nounPhraseTokens.Count - 2; tokenIndex >= 0; tokenIndex--)
    {
        IParse token = nounPhraseTokens[tokenIndex];
        if (token == headToken)
        {
            continue;
        }
        if (PartsOfSpeech.IsPersOrPossPronoun(token.SyntacticType) && IsHandledPronoun(token.ToString()))
        {
            var possessivePronounExtent = new Mention(token.Span, token.Span, token.EntityId, null, Linker.PronounModifier);
            entities.Add(possessivePronounExtent);
            break;
        }
    }
    //}
}
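// Hedged sketch of the PartsOfSpeech helper that the refactored methods above rely on
// (IsNoun, IsProperNoun, IsPersOrPossPronoun, Comma). The project's real implementation
// may differ; this version simply mirrors the Penn Treebank prefix checks that the
// earlier versions performed inline with StartsWith("NN"), StartsWith("PRP") and ",".
public static class PartsOfSpeech
{
    public const string Comma = ",";

    // NN, NNS, NNP, NNPS
    public static bool IsNoun(string tag)
    {
        return tag != null && tag.StartsWith("NN");
    }

    // NNP, NNPS
    public static bool IsProperNoun(string tag)
    {
        return tag != null && tag.StartsWith("NNP");
    }

    // PRP (personal pronoun) and PRP$ (possessive pronoun)
    public static bool IsPersOrPossPronoun(string tag)
    {
        return tag != null && tag.StartsWith("PRP");
    }
}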