/// <summary> Tests a CacheEntry[] for indication of "insane" cache usage.
/// <p/>
/// NOTE: FieldCache CreationPlaceholder objects (represented here as
/// uninitialized <see cref="Lazy{T}"/> values) are ignored.
/// (:TODO: is this a bad idea? are we masking a real problem?)
/// <p/>
/// </summary>
/// <param name="cacheEntries">entries to analyze; may be null or empty</param>
/// <returns>zero or more Insanity instances; never null</returns>
public Insanity[] Check(params CacheEntry[] cacheEntries)
{
    if (null == cacheEntries || 0 == cacheEntries.Length)
    {
        return new Insanity[0];
    }

    // optionally pre-compute RAM estimates so Insanity output can report them
    if (null != ramCalc)
    {
        for (int i = 0; i < cacheEntries.Length; i++)
        {
            cacheEntries[i].EstimateSize(ramCalc);
        }
    }

    // the indirect mapping lets MapOfSet dedup identical valIds for us
    //
    // maps the (valId) identity hash code of cache values to
    // sets of CacheEntry instances
    MapOfSets<int, CacheEntry> valIdToItems = new MapOfSets<int, CacheEntry>(new Dictionary<int, HashSet<CacheEntry>>(17));
    // maps ReaderField keys to Sets of ValueIds
    MapOfSets<ReaderField, int> readerFieldToValIds = new MapOfSets<ReaderField, int>(new Dictionary<ReaderField, HashSet<int>>(17));

    // any keys that we know result in more then one valId
    HashSet<ReaderField> valMismatchKeys = new HashSet<ReaderField>();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.Length; i++)
    {
        CacheEntry item = cacheEntries[i];
        System.Object val = item.Value;

        // an uninitialized Lazy<> is this port's CreationPlaceholder - skip it
        if (val.GetType().IsGenericType && val.GetType().GetGenericTypeDefinition() == typeof(Lazy<>))
        {
            continue;
        }

        ReaderField rf = new ReaderField(item.ReaderKey, item.FieldName);

        // BUGFIX: use the *identity* hash code (as the comment above requires),
        // not GetHashCode(), which may be overridden and would conflate
        // equal-but-distinct cache values under a single valId.
        System.Int32 valId = System.Runtime.CompilerServices.RuntimeHelpers.GetHashCode(val);

        // indirect mapping, so the MapOfSet will dedup identical valIds for us
        valIdToItems.Put(valId, item);
        if (1 < readerFieldToValIds.Put(rf, valId))
        {
            valMismatchKeys.Add(rf);
        }
    }

    List<Insanity> insanity = new List<Insanity>(valMismatchKeys.Count * 3);
    insanity.AddRange(CheckValueMismatch(valIdToItems, readerFieldToValIds, valMismatchKeys));
    insanity.AddRange(CheckSubreaders(valIdToItems, readerFieldToValIds));
    return insanity.ToArray();
}
/// <summary>
/// Two ReaderField instances are equal when they share the same reader key
/// (compared by reference via ==) and the same field name.
/// </summary>
/// <param name="that">candidate object; null or non-ReaderField returns false</param>
public override bool Equals(System.Object that)
{
    // 'is' also rejects null
    if (!(that is ReaderField))
    {
        return false;
    }
    ReaderField other = (ReaderField)that;
    // CA1307: make the (already-ordinal) string comparison explicit
    return this.readerKey == other.readerKey && this.fieldName.Equals(other.fieldName, StringComparison.Ordinal);
}
/// <summary>
/// Equality for ReaderField: the reader key must be the very same instance
/// (reference identity) and the field name must match ordinally.
/// </summary>
/// <param name="that">candidate object; null or non-ReaderField yields false</param>
public override bool Equals(object that)
{
    // pattern match filters out null and foreign types in one step
    return that is ReaderField other
        && object.ReferenceEquals(this.readerKey, other.readerKey)
        && this.FieldName.Equals(other.FieldName, StringComparison.Ordinal);
}
/// <summary> Internal helper method used by check that iterates over
/// the keys of readerFieldToValIds and generates a Collection
/// of Insanity instances whenever two (or more) ReaderField instances are
/// found that have an ancestry relationship.
/// </summary>
/// <param name="valIdToItems">maps value identity-ids to the cache entries holding that value</param>
/// <param name="readerFieldToValIds">maps reader/field pairs to the value-ids cached for them</param>
/// <returns>one SUBREADER Insanity per parent reader that also has cached descendants</returns>
/// <seealso cref="InsanityType.SUBREADER">
/// </seealso>
private List<Insanity> CheckSubreaders(MapOfSets<int,CacheEntry> valIdToItems, MapOfSets<ReaderField,int> readerFieldToValIds)
{
    List<Insanity> insanity = new List<Insanity>(23);

    // badChildren doubles as the backing store for the badKids wrapper:
    // parent -> set of descendant ReaderFields that also have cache entries.
    // (Dictionary<K,K> is used as a set here - pre-HashSet<T> style.)
    Dictionary<ReaderField, Dictionary<ReaderField, ReaderField>> badChildren = new Dictionary<ReaderField, Dictionary<ReaderField, ReaderField>>(17);
    MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper

    IDictionary<int, Dictionary<CacheEntry, CacheEntry>> viToItemSets = valIdToItems.GetMap();
    IDictionary<ReaderField, Dictionary<int, int>> rfToValIdSets = readerFieldToValIds.GetMap();

    // ReaderFields already visited, either as rf or as someone's kid (Dictionary-as-set)
    Dictionary<ReaderField, ReaderField> seen = new Dictionary<ReaderField, ReaderField>(17);

    foreach (ReaderField rf in rfToValIdSets.Keys)
    {
        if (seen.ContainsKey(rf))
            continue;

        System.Collections.IList kids = GetAllDecendentReaderKeys(rf.readerKey);
        for (int i = 0; i < kids.Count; i++)
        {
            // same field name, but keyed on the descendant reader
            ReaderField kid = new ReaderField(kids[i], rf.fieldName);

            if (badChildren.ContainsKey(kid))
            {
                // we've already process this kid as RF and found other problems
                // track those problems as our own
                badKids.Put(rf, kid);
                badKids.PutAll(rf, badChildren[kid]);
                // kid's problems are now attributed to rf; drop the kid's own record
                badChildren.Remove(kid);
            }
            else if (rfToValIdSets.ContainsKey(kid))
            {
                // we have cache entries for the kid
                badKids.Put(rf, kid);
            }
            if (!seen.ContainsKey(kid))
            {
                seen.Add(kid, kid);
            }
        }
        if (!seen.ContainsKey(rf))
        {
            seen.Add(rf, rf);
        }
    }

    // every mapping in badKids represents an Insanity
    foreach (ReaderField parent in badChildren.Keys)
    {
        Dictionary<ReaderField,ReaderField> kids = badChildren[parent];

        List<CacheEntry> badEntries = new List<CacheEntry>(kids.Count * 2);

        // put parent entr(ies) in first
        {
            foreach (int val in rfToValIdSets[parent].Keys)
            {
                badEntries.AddRange(viToItemSets[val].Keys);
            }
        }

        // now the entries for the descendants
        foreach (ReaderField kid in kids.Keys)
        {
            foreach (int val in rfToValIdSets[kid].Keys)
            {
                badEntries.AddRange(viToItemSets[val].Keys);
            }
        }
        insanity.Add(new Insanity(InsanityType.SUBREADER, "Found caches for decendents of " + parent.ToString(), badEntries.ToArray()));
    }

    return insanity;
}
/// <summary> Tests a CacheEntry[] for indication of "insane" cache usage.
/// <p/>
/// NOTE: FieldCache CreationPlaceholder objects are ignored.
/// (:TODO: is this a bad idea? are we masking a real problem?)
/// <p/>
/// </summary>
/// <param name="cacheEntries">entries to analyze; may be null or empty</param>
/// <returns>zero or more Insanity instances; never null</returns>
public Insanity[] Check(CacheEntry[] cacheEntries)
{
    if (null == cacheEntries || 0 == cacheEntries.Length)
        return new Insanity[0];

    // optionally pre-compute RAM estimates so Insanity output can report them
    if (null != ramCalc)
    {
        for (int i = 0; i < cacheEntries.Length; i++)
        {
            cacheEntries[i].EstimateSize(ramCalc);
        }
    }

    // the indirect mapping lets MapOfSet dedup identical valIds for us
    //
    // maps the (valId) identity hash code of cache values to
    // sets of CacheEntry instances
    MapOfSets<int, CacheEntry> valIdToItems = new MapOfSets<int, CacheEntry>(new Dictionary<int, Dictionary<CacheEntry, CacheEntry>>(17));
    // maps ReaderField keys to Sets of ValueIds
    MapOfSets<ReaderField, int> readerFieldToValIds = new MapOfSets<ReaderField, int>(new Dictionary<ReaderField, Dictionary<int, int>>(17));

    // any keys that we know result in more then one valId
    // TODO: This will be a HashSet<T> when we start using .NET Framework 3.5
    Dictionary<ReaderField, ReaderField> valMismatchKeys = new Dictionary<ReaderField, ReaderField>();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.Length; i++)
    {
        CacheEntry item = cacheEntries[i];
        System.Object val = item.GetValue();
        if (val is Mono.Lucene.Net.Search.CreationPlaceholder)
            continue;

        ReaderField rf = new ReaderField(item.GetReaderKey(), item.GetFieldName());

        // BUGFIX: use the *identity* hash code (as the comment above requires),
        // not GetHashCode(), which may be overridden and would conflate
        // equal-but-distinct cache values under a single valId.
        System.Int32 valId = System.Runtime.CompilerServices.RuntimeHelpers.GetHashCode(val);

        // indirect mapping, so the MapOfSet will dedup identical valIds for us
        valIdToItems.Put(valId, item);
        if (1 < readerFieldToValIds.Put(rf, valId))
        {
            if (!valMismatchKeys.ContainsKey(rf))
            {
                valMismatchKeys.Add(rf, rf);
            }
        }
    }

    List<Insanity> insanity = new List<Insanity>(valMismatchKeys.Count * 3);
    insanity.AddRange(CheckValueMismatch(valIdToItems, readerFieldToValIds, valMismatchKeys));
    insanity.AddRange(CheckSubreaders(valIdToItems, readerFieldToValIds));
    return insanity.ToArray();
}
/// <summary> Internal helper method used by check that iterates over
/// the keys of readerFieldToValIds and generates a Collection
/// of Insanity instances whenever two (or more) ReaderField instances are
/// found that have an ancestry relationship.
/// </summary>
/// <param name="valIdToItems">maps value identity-ids to the cache entries holding that value</param>
/// <param name="readerFieldToValIds">maps reader/field pairs to the value-ids cached for them</param>
/// <returns>one SUBREADER Insanity per parent reader that also has cached descendants</returns>
/// <seealso cref="InsanityType.SUBREADER">
/// </seealso>
private List<Insanity> CheckSubreaders(MapOfSets<int, CacheEntry> valIdToItems, MapOfSets<ReaderField, int> readerFieldToValIds)
{
    List<Insanity> insanity = new List<Insanity>(23);

    // badChildren is the backing store for the badKids wrapper:
    // parent -> set of descendant ReaderFields that also have cache entries
    Dictionary<ReaderField, HashSet<ReaderField>> badChildren = new Dictionary<ReaderField, HashSet<ReaderField>>(17);
    MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper

    IDictionary<int, HashSet<CacheEntry>> viToItemSets = valIdToItems.Map;
    IDictionary<ReaderField, HashSet<int>> rfToValIdSets = readerFieldToValIds.Map;

    // ReaderFields already visited, either as rf or as someone's kid
    HashSet<ReaderField> seen = new HashSet<ReaderField>();

    foreach (ReaderField rf in rfToValIdSets.Keys)
    {
        if (seen.Contains(rf))
        {
            continue;
        }

        System.Collections.IList kids = GetAllDecendentReaderKeys(rf.readerKey);
        foreach (Object kidKey in kids)
        {
            // same field name, but keyed on the descendant reader
            ReaderField kid = new ReaderField(kidKey, rf.fieldName);

            if (badChildren.ContainsKey(kid))
            {
                // we've already process this kid as RF and found other problems
                // track those problems as our own
                badKids.Put(rf, kid);
                badKids.PutAll(rf, badChildren[kid]);
                // kid's problems are now attributed to rf; drop the kid's own record
                badChildren.Remove(kid);
            }
            else if (rfToValIdSets.ContainsKey(kid))
            {
                // we have cache entries for the kid
                badKids.Put(rf, kid);
            }
            seen.Add(kid);
        }
        seen.Add(rf);
    }

    // every mapping in badKids represents an Insanity
    foreach (ReaderField parent in badChildren.Keys)
    {
        HashSet<ReaderField> kids = badChildren[parent];

        List<CacheEntry> badEntries = new List<CacheEntry>(kids.Count * 2);

        // put parent entr(ies) in first
        {
            foreach (int val in rfToValIdSets[parent])
            {
                badEntries.AddRange(viToItemSets[val]);
            }
        }

        // now the entries for the descendants
        foreach (ReaderField kid in kids)
        {
            foreach (int val in rfToValIdSets[kid])
            {
                badEntries.AddRange(viToItemSets[val]);
            }
        }
        insanity.Add(new Insanity(InsanityType.SUBREADER, "Found caches for decendents of " + parent.ToString(), badEntries.ToArray()));
    }

    return(insanity);
}
/// <summary>
/// Internal helper method used by check that iterates over
/// the keys of <paramref name="readerFieldToValIds"/> and generates a <see cref="ICollection{T}"/>
/// of <see cref="Insanity"/> instances whenever two (or more) <see cref="ReaderField"/> instances are
/// found that have an ancestry relationships.
/// </summary>
/// <param name="valIdToItems">maps value identity-ids to the cache entries holding that value</param>
/// <param name="readerFieldToValIds">maps reader/field pairs to the value-ids cached for them</param>
/// <returns>one SUBREADER Insanity per parent reader that also has cached descendants</returns>
/// <seealso cref="InsanityType.SUBREADER"/>
private static ICollection<Insanity> CheckSubreaders(MapOfSets<int, FieldCache.CacheEntry> valIdToItems, MapOfSets<ReaderField, int> readerFieldToValIds) // LUCENENET: CA1822: Mark members as static
{
    List<Insanity> insanity = new List<Insanity>(23);

    // badChildren is the backing store for the badKids wrapper:
    // parent -> set of descendant ReaderFields that also have cache entries
    Dictionary<ReaderField, ISet<ReaderField>> badChildren = new Dictionary<ReaderField, ISet<ReaderField>>(17);
    MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper

    IDictionary<int, ISet<FieldCache.CacheEntry>> viToItemSets = valIdToItems.Map;
    IDictionary<ReaderField, ISet<int>> rfToValIdSets = readerFieldToValIds.Map;

    // ReaderFields already visited, either as rf or as someone's kid
    HashSet<ReaderField> seen = new HashSet<ReaderField>();

    foreach (ReaderField rf in rfToValIdSets.Keys)
    {
        if (seen.Contains(rf))
        {
            continue;
        }

        IList<object> kids = GetAllDescendantReaderKeys(rf.ReaderKey);
        foreach (object kidKey in kids)
        {
            // same field name, but keyed on the descendant reader
            ReaderField kid = new ReaderField(kidKey, rf.FieldName);

            // LUCENENET: Eliminated extra lookup by using TryGetValue instead of ContainsKey
            if (badChildren.TryGetValue(kid, out ISet<ReaderField> badKid))
            {
                // we've already process this kid as RF and found other problems
                // track those problems as our own
                badKids.Put(rf, kid);
                badKids.PutAll(rf, badKid);
                // kid's problems are now attributed to rf; drop the kid's own record
                badChildren.Remove(kid);
            }
            else if (rfToValIdSets.ContainsKey(kid))
            {
                // we have cache entries for the kid
                badKids.Put(rf, kid);
            }
            seen.Add(kid);
        }
        seen.Add(rf);
    }

    // every mapping in badKids represents an Insanity
    // LUCENENET: enumerate the KeyValuePairs directly instead of Keys + indexer,
    // eliminating a second dictionary lookup per parent
    foreach (KeyValuePair<ReaderField, ISet<ReaderField>> pair in badChildren)
    {
        ReaderField parent = pair.Key;
        ISet<ReaderField> kids = pair.Value;

        List<FieldCache.CacheEntry> badEntries = new List<FieldCache.CacheEntry>(kids.Count * 2);

        // put parent entr(ies) in first
        foreach (int value in rfToValIdSets[parent])
        {
            badEntries.AddRange(viToItemSets[value]);
        }

        // now the entries for the descendants
        foreach (ReaderField kid in kids)
        {
            foreach (int value in rfToValIdSets[kid])
            {
                badEntries.AddRange(viToItemSets[value]);
            }
        }

        FieldCache.CacheEntry[] badness = badEntries.ToArray();
        insanity.Add(new Insanity(InsanityType.SUBREADER, "Found caches for descendants of " + parent.ToString(), badness));
    }

    return insanity;
}
/// <summary>
/// Tests a CacheEntry[] for indication of "insane" cache usage.
/// <para>
/// <b>NOTE:</b> FieldCache CreationPlaceholder objects are ignored.
/// (:TODO: is this a bad idea? are we masking a real problem?)
/// </para>
/// </summary>
/// <param name="cacheEntries">entries to analyze; may be null or empty</param>
/// <returns>zero or more Insanity instances; never null</returns>
public Insanity[] Check(params FieldCache.CacheEntry[] cacheEntries)
{
    if (cacheEntries is null || cacheEntries.Length == 0)
    {
        return Arrays.Empty<Insanity>();
    }

    // optionally pre-compute RAM estimates so Insanity output can report them
    if (estimateRam)
    {
        foreach (FieldCache.CacheEntry entry in cacheEntries)
        {
            entry.EstimateSize();
        }
    }

    // the indirect mapping lets MapOfSet dedup identical valIds for us
    // maps the (valId) identityhashCode of cache values to
    // sets of CacheEntry instances
    MapOfSets<int, FieldCache.CacheEntry> valIdToItems =
        new MapOfSets<int, FieldCache.CacheEntry>(new Dictionary<int, ISet<FieldCache.CacheEntry>>(17));

    // maps ReaderField keys to Sets of ValueIds
    MapOfSets<ReaderField, int> readerFieldToValIds =
        new MapOfSets<ReaderField, int>(new Dictionary<ReaderField, ISet<int>>(17));

    // any keys that we know result in more then one valId
    ISet<ReaderField> mismatchedKeys = new JCG.HashSet<ReaderField>();

    // build the mappings from every cache entry
    foreach (FieldCache.CacheEntry entry in cacheEntries)
    {
        object cachedValue = entry.Value;

        // It's OK to have dup entries, where one is eg
        // float[] and the other is the Bits (from getDocWithField());
        // creation placeholders are likewise skipped
        if (cachedValue is IBits || cachedValue is FieldCache.ICreationPlaceholder)
        {
            continue;
        }

        ReaderField key = new ReaderField(entry.ReaderKey, entry.FieldName);
        int valueId = RuntimeHelpers.GetHashCode(cachedValue);

        // indirect mapping, so the MapOfSet will dedup identical valIds for us
        valIdToItems.Put(valueId, entry);
        if (readerFieldToValIds.Put(key, valueId) > 1)
        {
            mismatchedKeys.Add(key);
        }
    }

    List<Insanity> result = new List<Insanity>(mismatchedKeys.Count * 3);
    result.AddRange(CheckValueMismatch(valIdToItems, readerFieldToValIds, mismatchedKeys));
    result.AddRange(CheckSubreaders(valIdToItems, readerFieldToValIds));
    return result.ToArray();
}
/// <summary> Tests a CacheEntry[] for indication of "insane" cache usage.
/// <p/>
/// NOTE: FieldCache CreationPlaceholder objects are ignored.
/// (:TODO: is this a bad idea? are we masking a real problem?)
/// <p/>
/// </summary>
/// <param name="cacheEntries">entries to analyze; may be null or empty</param>
/// <returns>zero or more Insanity instances; never null</returns>
public Insanity[] Check(CacheEntry[] cacheEntries)
{
    if (null == cacheEntries || 0 == cacheEntries.Length)
    {
        return new Insanity[0];
    }

    // optionally pre-compute RAM estimates so Insanity output can report them
    if (null != ramCalc)
    {
        for (int i = 0; i < cacheEntries.Length; i++)
        {
            cacheEntries[i].EstimateSize(ramCalc);
        }
    }

    // the indirect mapping lets MapOfSet dedup identical valIds for us
    //
    // maps the (valId) identity hash code of cache values to
    // sets of CacheEntry instances
    MapOfSets<int, CacheEntry> valIdToItems = new MapOfSets<int, CacheEntry>(new Dictionary<int, Dictionary<CacheEntry, CacheEntry>>(17));
    // maps ReaderField keys to Sets of ValueIds
    MapOfSets<ReaderField, int> readerFieldToValIds = new MapOfSets<ReaderField, int>(new Dictionary<ReaderField, Dictionary<int, int>>(17));

    // any keys that we know result in more then one valId
    // TODO: This will be a HashSet<T> when we start using .NET Framework 3.5
    Dictionary<ReaderField, ReaderField> valMismatchKeys = new Dictionary<ReaderField, ReaderField>();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.Length; i++)
    {
        CacheEntry item = cacheEntries[i];
        System.Object val = item.GetValue();
        if (val is Mono.Lucene.Net.Search.CreationPlaceholder)
        {
            continue;
        }

        ReaderField rf = new ReaderField(item.GetReaderKey(), item.GetFieldName());

        // BUGFIX: use the *identity* hash code (as the comment above requires),
        // not GetHashCode(), which may be overridden and would conflate
        // equal-but-distinct cache values under a single valId.
        System.Int32 valId = System.Runtime.CompilerServices.RuntimeHelpers.GetHashCode(val);

        // indirect mapping, so the MapOfSet will dedup identical valIds for us
        valIdToItems.Put(valId, item);
        if (1 < readerFieldToValIds.Put(rf, valId))
        {
            if (!valMismatchKeys.ContainsKey(rf))
            {
                valMismatchKeys.Add(rf, rf);
            }
        }
    }

    List<Insanity> insanity = new List<Insanity>(valMismatchKeys.Count * 3);
    insanity.AddRange(CheckValueMismatch(valIdToItems, readerFieldToValIds, valMismatchKeys));
    insanity.AddRange(CheckSubreaders(valIdToItems, readerFieldToValIds));
    return insanity.ToArray();
}
/// <summary>
/// Tests a CacheEntry[] for indication of "insane" cache usage.
/// <p>
/// <B>NOTE:</b> FieldCache CreationPlaceholder objects are ignored.
/// (:TODO: is this a bad idea? are we masking a real problem?)
/// </p>
/// </summary>
/// <param name="cacheEntries">entries to analyze; may be null or empty</param>
/// <returns>zero or more Insanity instances; never null</returns>
public Insanity[] Check(params FieldCache.CacheEntry[] cacheEntries)
{
    if (null == cacheEntries || 0 == cacheEntries.Length)
    {
        return new Insanity[0];
    }

    // optionally pre-compute RAM estimates so Insanity output can report them
    if (EstimateRam)
    {
        for (int i = 0; i < cacheEntries.Length; i++)
        {
            cacheEntries[i].EstimateSize();
        }
    }

    // the indirect mapping lets MapOfSet dedup identical valIds for us
    // maps the (valId) identityhashCode of cache values to
    // sets of CacheEntry instances
    MapOfSets<int, FieldCache.CacheEntry> valIdToItems = new MapOfSets<int, FieldCache.CacheEntry>(new Dictionary<int, HashSet<FieldCache.CacheEntry>>(17));
    // maps ReaderField keys to Sets of ValueIds
    MapOfSets<ReaderField, int> readerFieldToValIds = new MapOfSets<ReaderField, int>(new Dictionary<ReaderField, HashSet<int>>(17));

    // any keys that we know result in more then one valId
    ISet<ReaderField> valMismatchKeys = new HashSet<ReaderField>();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.Length; i++)
    {
        FieldCache.CacheEntry item = cacheEntries[i];
        object val = item.Value;

        // It's OK to have dup entries, where one is eg
        // float[] and the other is the Bits (from
        // getDocWithField())
        if (val is Bits)
        {
            continue;
        }
        if (val is Lucene.Net.Search.FieldCache.CreationPlaceholder)
        {
            continue;
        }

        ReaderField rf = new ReaderField(item.ReaderKey, item.FieldName);

        // BUGFIX: use the *identity* hash code (as the comment above requires),
        // not GetHashCode(), which may be overridden and would conflate
        // equal-but-distinct cache values under a single valId.
        int valId = System.Runtime.CompilerServices.RuntimeHelpers.GetHashCode(val);

        // indirect mapping, so the MapOfSet will dedup identical valIds for us
        valIdToItems.Put(valId, item);
        if (1 < readerFieldToValIds.Put(rf, valId))
        {
            valMismatchKeys.Add(rf);
        }
    }

    List<Insanity> insanity = new List<Insanity>(valMismatchKeys.Count * 3);
    insanity.AddRange(CheckValueMismatch(valIdToItems, readerFieldToValIds, valMismatchKeys));
    insanity.AddRange(CheckSubreaders(valIdToItems, readerFieldToValIds));
    return insanity.ToArray();
}
/// <summary>
/// Internal helper method used by check that iterates over
/// the keys of readerFieldToValIds and generates a Collection
/// of Insanity instances whenever two (or more) ReaderField instances are
/// found that have an ancestry relationships.
/// </summary>
/// <param name="valIdToItems">maps value identity-ids to the cache entries holding that value</param>
/// <param name="readerFieldToValIds">maps reader/field pairs to the value-ids cached for them</param>
/// <returns>one SUBREADER Insanity per parent reader that also has cached descendants</returns>
/// <seealso cref= InsanityType#SUBREADER </seealso>
private ICollection<Insanity> CheckSubreaders(MapOfSets<int, FieldCache.CacheEntry> valIdToItems, MapOfSets<ReaderField, int> readerFieldToValIds)
{
    List<Insanity> insanity = new List<Insanity>(23);

    // badChildren is the backing store for the badKids wrapper:
    // parent -> set of descendant ReaderFields that also have cache entries
    Dictionary<ReaderField, HashSet<ReaderField>> badChildren = new Dictionary<ReaderField, HashSet<ReaderField>>(17);
    MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper

    IDictionary<int, HashSet<FieldCache.CacheEntry>> viToItemSets = valIdToItems.Map;
    IDictionary<ReaderField, HashSet<int>> rfToValIdSets = readerFieldToValIds.Map;

    // ReaderFields already visited, either as rf or as someone's kid
    HashSet<ReaderField> seen = new HashSet<ReaderField>();

    foreach (ReaderField rf in rfToValIdSets.Keys)
    {
        if (seen.Contains(rf))
        {
            continue;
        }

        IList<object> kids = GetAllDescendantReaderKeys(rf.ReaderKey);
        foreach (object kidKey in kids)
        {
            // same field name, but keyed on the descendant reader
            ReaderField kid = new ReaderField(kidKey, rf.FieldName);

            if (badChildren.ContainsKey(kid))
            {
                // we've already process this kid as RF and found other problems
                // track those problems as our own
                badKids.Put(rf, kid);
                badKids.PutAll(rf, badChildren[kid]);
                // kid's problems are now attributed to rf; drop the kid's own record
                badChildren.Remove(kid);
            }
            else if (rfToValIdSets.ContainsKey(kid))
            {
                // we have cache entries for the kid
                badKids.Put(rf, kid);
            }
            seen.Add(kid);
        }
        seen.Add(rf);
    }

    // every mapping in badKids represents an Insanity
    foreach (ReaderField parent in badChildren.Keys)
    {
        HashSet<ReaderField> kids = badChildren[parent];

        List<FieldCache.CacheEntry> badEntries = new List<FieldCache.CacheEntry>(kids.Count * 2);

        // put parent entr(ies) in first
        foreach (int value in rfToValIdSets[parent])
        {
            badEntries.AddRange(viToItemSets[value]);
        }

        // now the entries for the descendants
        foreach (ReaderField kid in kids)
        {
            foreach (int value in rfToValIdSets[kid])
            {
                badEntries.AddRange(viToItemSets[value]);
            }
        }

        // FIX: removed a dead "new FieldCache.CacheEntry[badEntries.Count]"
        // allocation that was immediately overwritten by ToArray()
        FieldCache.CacheEntry[] badness = badEntries.ToArray();
        insanity.Add(new Insanity(InsanityType.SUBREADER, "Found caches for descendants of " + parent.ToString(), badness));
    }

    return insanity;
}