/// <summary>
/// Figure out what the possible segment combinations are before showing the window.
/// </summary>
protected override void OnShow()
{
    var options = new List<Midpoint>();
    List<GroundedClause> current = new List<GroundedClause>();

    // Populate the current midpoint givens.
    foreach (GroundedClause gc in currentGivens)
    {
        Midpoint m = gc as Midpoint;
        if (m != null) current.Add(m);
    }

    // Each InMiddle that can be strengthened to a midpoint that is not already a given is an option.
    foreach (InMiddle im in parser.backendParser.implied.inMiddles)
    {
        Strengthened s = im.CanBeStrengthened();
        if (s != null)
        {
            Midpoint m = s.strengthened as Midpoint;
            if (!StructurallyContains(current, m)) options.Add(m);
        }
    }

    optionsBox.ItemsSource = null; // Make sure the box is graphically updated.
    optionsBox.ItemsSource = options;
}
public override string ToString()
{
    return MarketId.ToString() + "," +
           MarketOffDatetime.ToString("yyyy-MM-dd HH:mm:ss") + "," +
           SnapshotDatetime.ToString("yyyy-MM-dd HH:mm:ss") + "," +
           SecondsBeforeOff.ToString() + "," +
           SelectionId.ToString() + "," +
           SelectionName.ToString() + "," +
           (Back3 == 0 ? @"\N" : Back3.ToString()) + "," +
           (Back3Vol == 0 ? @"\N" : Back3Vol.ToString()) + "," +
           (Back2 == 0 ? @"\N" : Back2.ToString()) + "," +
           (Back2Vol == 0 ? @"\N" : Back2Vol.ToString()) + "," +
           (Back == 0 ? @"\N" : Back.ToString()) + "," +
           (BackVol == 0 ? @"\N" : BackVol.ToString()) + "," +
           (Lay == 0 ? @"\N" : Lay.ToString()) + "," +
           (LayVol == 0 ? @"\N" : LayVol.ToString()) + "," +
           (Lay2 == 0 ? @"\N" : Lay2.ToString()) + "," +
           (Lay2Vol == 0 ? @"\N" : Lay2Vol.ToString()) + "," +
           (Lay3 == 0 ? @"\N" : Lay3.ToString()) + "," +
           (Lay3Vol == 0 ? @"\N" : Lay3Vol.ToString()) + "," +
           (Trade ? "1" : "0") + "," +
           (LastTradedPrice == 0 ? @"\N" : LastTradedPrice.ToString()) + "," +
           CumulTradedVolSelection.ToString() + "," +
           CumulTradedVolMarket.ToString() + "," +
           (BecomesNonRunner ? "1" : "0") + "," +
           (ReductionFactorToApply > 0 ? ReductionFactorToApply.ToString() : @"\N") + "," +
           (Midpoint == 0 ? @"\N" : Midpoint.ToString()) + "," +
           (WeightedAverage == 0 ? @"\N" : WeightedAverage.ToString());
}
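Every zero-valued price/volume field above is emitted as the literal \N rather than 0. A minimal sketch of that convention, assuming (as the \N marker suggests) the rows are destined for MySQL's LOAD DATA INFILE, where \N denotes SQL NULL; the helper name is hypothetical:

using System;
using System.Globalization;

static class CsvNullDemo
{
    // Hypothetical helper mirroring the pattern above: 0 means "no data", so emit \N.
    static string Field(double value) =>
        value == 0 ? @"\N" : value.ToString(CultureInfo.InvariantCulture);

    static void Main()
    {
        Console.WriteLine(Field(2.48)); // 2.48
        Console.WriteLine(Field(0));    // \N  (loaded as NULL by LOAD DATA INFILE)
    }
}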
private static List<EdgeAggregator> InstantiateFromMidpoint(InMiddle im, Midpoint midpt, GroundedClause original)
{
    List<EdgeAggregator> newGrounded = new List<EdgeAggregator>();

    // Does this InMiddle apply to this midpoint?
    if (!im.point.StructurallyEquals(midpt.point)) return newGrounded;
    if (!im.segment.StructurallyEquals(midpt.segment)) return newGrounded;

    // For hypergraph
    List<GroundedClause> antecedent = Utilities.MakeList<GroundedClause>(original);

    // Backward: Midpoint(M, Segment(A, B)) -> InMiddle(A, M, B)
    newGrounded.Add(new EdgeAggregator(antecedent, im, annotation));

    // Forward: Midpoint(M, Segment(A, B)) -> Congruent(Segment(A,M), Segment(M,B))
    Segment left = new Segment(midpt.segment.Point1, midpt.point);
    Segment right = new Segment(midpt.point, midpt.segment.Point2);
    GeometricCongruentSegments ccss = new GeometricCongruentSegments(left, right);
    newGrounded.Add(new EdgeAggregator(antecedent, ccss, annotation));

    return newGrounded;
}
private Midpoint[] PopulateMidpoints(int depth)
{
    if (depth > 31) throw new ArgumentOutOfRangeException("depth", "Too large depth for midpoints.");
    if (Chunk.ChunkFooter.MapCount == 0) // empty chunk
        return null;

    ReaderWorkItem workItem = null;
    try
    {
        workItem = Chunk.GetReaderWorkItem();

        int midPointsCnt = 1 << depth;
        int segmentSize;
        Midpoint[] midpoints;
        var mapCount = Chunk.ChunkFooter.MapCount;
        if (mapCount < midPointsCnt)
        {
            segmentSize = 1; // we cache all items
            midpoints = new Midpoint[mapCount];
        }
        else
        {
            segmentSize = mapCount / midPointsCnt;
            midpoints = new Midpoint[1 + (mapCount + segmentSize - 1) / segmentSize];
        }

        for (int x = 0, i = 0, xN = mapCount - 1; x < xN; x += segmentSize, i += 1)
        {
            midpoints[i] = new Midpoint(x, ReadPosMap(workItem, x));
        }
        // add the very last item as the last midpoint (possibly it is done twice)
        midpoints[midpoints.Length - 1] = new Midpoint(mapCount - 1, ReadPosMap(workItem, mapCount - 1));
        return midpoints;
    }
    catch (FileBeingDeletedException)
    {
        return null;
    }
    catch (OutOfMemoryException)
    {
        return null;
    }
    finally
    {
        if (workItem != null) Chunk.ReturnReaderWorkItem(workItem);
    }
}
public ElasticPlasticMaterial(String stressState) : base(stressState)
{
    if (stressState.Equals("plstress"))
    {
        UTIL.errorMsg("Elastic-plastic material is not implemented for plane stress");
    }
    if (!FE.epIntegrationTANGENT)
    {
        midpoint = new Midpoint(this);
    }
}
public override void AlignActiveValues()
{
    AccelTypeActiveValue.Align();
    Acceleration.AlignActiveValues();
    Scale.AlignActiveValues();
    Cap.AlignActiveValues();
    Offset.AlignActiveValues();
    Weight.AlignActiveValues();
    Limit.AlignActiveValues();
    Exponent.AlignActiveValues();
    Midpoint.AlignActiveValues();
}
private Midpoint[] PopulateCache(int depth)
{
    if (Count == 0) throw new InvalidOperationException("Empty PTable.");

    var buffer = new byte[16];
    var handle = GCHandle.Alloc(buffer, GCHandleType.Pinned);
    FileStream stream = null;
    try
    {
        stream = GetFileStream();

        int midPointsCnt = 1 << depth;
        int segmentSize;
        Midpoint[] midpoints;
        if (Count < midPointsCnt)
        {
            segmentSize = 1; // we cache all items
            midpoints = new Midpoint[Count];
        }
        else
        {
            segmentSize = Count / midPointsCnt;
            midpoints = new Midpoint[1 + (Count + segmentSize - 1) / segmentSize];
        }

        for (int x = 0, i = 0, xN = Count - 1; x < xN; x += segmentSize, i += 1)
        {
            stream.Seek(PTableHeader.Size + (((long)x) << 4), SeekOrigin.Begin);
            var record = ReadNext(stream, buffer, handle);
            midpoints[i] = new Midpoint(record.Key, x);
        }

        // add the very last item as the last midpoint (possibly it is done twice)
        {
            stream.Seek(PTableHeader.Size + (((long)(Count - 1)) << 4), SeekOrigin.Begin);
            var record = ReadNext(stream, buffer, handle);
            midpoints[midpoints.Length - 1] = new Midpoint(record.Key, Count - 1);
        }
        return midpoints;
    }
    finally
    {
        if (stream != null) ReturnStream(stream);
        handle.Free();
    }
}
//
// Midpoint(M, Segment(A, B)) -> InMiddle(A, M, B)
// Midpoint(M, Segment(A, B)) -> Congruent(Segment(A,M), Segment(M,B)); this implies AM = MB
//
private static List<EdgeAggregator> InstantiateFromMidpoint(GroundedClause clause)
{
    List<EdgeAggregator> newGrounded = new List<EdgeAggregator>();

    if (clause is InMiddle && !(clause is Midpoint))
    {
        InMiddle inMid = clause as InMiddle;
        foreach (Midpoint midpt in candidateMidpoint)
        {
            newGrounded.AddRange(InstantiateFromMidpoint(inMid, midpt, midpt));
        }
        foreach (Strengthened streng in candidateStrengthened)
        {
            newGrounded.AddRange(InstantiateFromMidpoint(inMid, streng.strengthened as Midpoint, streng));
        }
        candidateInMiddle.Add(inMid);
    }
    else if (clause is Midpoint)
    {
        Midpoint midpt = clause as Midpoint;
        foreach (InMiddle im in candidateInMiddle)
        {
            newGrounded.AddRange(InstantiateFromMidpoint(im, midpt, midpt));
        }
        candidateMidpoint.Add(midpt);
    }
    else if (clause is Strengthened)
    {
        Strengthened streng = clause as Strengthened;
        if (!(streng.strengthened is Midpoint)) return newGrounded;

        foreach (InMiddle im in candidateInMiddle)
        {
            newGrounded.AddRange(InstantiateFromMidpoint(im, streng.strengthened as Midpoint, streng));
        }
        candidateStrengthened.Add(streng);
    }

    return newGrounded;
}
public override void Hide()
{
    AccelDropdown.Hide();
    AccelTypeActiveValue.Hide();
    Acceleration.Hide();
    Scale.Hide();
    Cap.Hide();
    Weight.Hide();
    Offset.Hide();
    Limit.Hide();
    Exponent.Hide();
    Midpoint.Hide();
}
public void SetActiveValues(int index, AccelArgs args)
{
    AccelerationType = AccelerationTypes.Where(t => t.Value.Index == index).FirstOrDefault().Value;
    AccelTypeActiveValue.SetValue(AccelerationType.Name);
    AccelDropdown.SelectedIndex = AccelerationType.Index;

    Weight.SetActiveValue(args.weight);
    Cap.SetActiveValues(args.gainCap, args.scaleCap, args.gainCap > 0 || args.scaleCap <= 0);
    Offset.SetActiveValue(args.offset, args.legacyOffset);
    Acceleration.SetActiveValue(args.acceleration);
    Scale.SetActiveValue(args.scale);
    Limit.SetActiveValue(args.limit);
    Exponent.SetActiveValue(args.exponent);
    Midpoint.SetActiveValue(args.midpoint);
}
internal Midpoint[] CacheMidpoints(int depth)
{
    if (depth < 0 || depth > 30) throw new ArgumentOutOfRangeException("depth");

    var count = Count;
    if (count == 0 || depth == 0) return null;

    //TODO GFY can make slightly faster with a sequential worker.
    var workItem = GetWorkItem();
    try
    {
        int midpointsCount;
        Midpoint[] midpoints;
        try
        {
            midpointsCount = Math.Max(2, Math.Min(1 << depth, count));
            midpoints = new Midpoint[midpointsCount];
        }
        catch (OutOfMemoryException exc)
        {
            throw new PossibleToHandleOutOfMemoryException("Failed to allocate memory for Midpoint cache.", exc);
        }

        workItem.Stream.Position = PTableHeader.Size;
        for (int k = 0; k < midpointsCount; ++k)
        {
            var nextIndex = (long)k * (count - 1) / (midpointsCount - 1);
            ReadUntil(PTableHeader.Size + IndexEntrySize * nextIndex, workItem.Stream);
            midpoints[k] = new Midpoint(ReadNextNoSeek(workItem).Key, (int)nextIndex);
        }
        return midpoints;
    }
    finally
    {
        ReturnWorkItem(workItem);
    }
}
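The expression (long)k * (count - 1) / (midpointsCount - 1) spreads the midpoints evenly and pins the first and last ones to the first and last table entries. A small self-contained check of that placement (the counts are made up):

using System;

class MidpointPlacementDemo
{
    static void Main()
    {
        long count = 10;  // entries in a hypothetical PTable
        int depth = 2;    // 1 << 2 = 4 midpoints requested
        int midpointsCount = Math.Max(2, (int)Math.Min(1L << depth, count));

        for (int k = 0; k < midpointsCount; ++k)
        {
            long nextIndex = (long)k * (count - 1) / (midpointsCount - 1);
            Console.WriteLine($"midpoint {k} -> entry {nextIndex}");
        }
        // Prints entries 0, 3, 6, 9: the endpoints are always covered.
    }
}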
private Midpoint[] PopulateMidpoints(int depth)
{
    if (!_isReadonly || _chunkFooter.MapSize == 0) return null;

    var workItem = GetReaderWorkItem();
    try
    {
        int midPointsCnt = 1 << depth;
        int segmentSize;
        Midpoint[] midpoints;
        var mapCount = _chunkFooter.MapCount;
        if (mapCount < midPointsCnt)
        {
            segmentSize = 1; // we cache all items
            midpoints = new Midpoint[mapCount];
        }
        else
        {
            segmentSize = mapCount / midPointsCnt;
            midpoints = new Midpoint[1 + (mapCount + segmentSize - 1) / segmentSize];
        }

        for (int x = 0, i = 0, xN = mapCount - 1; x < xN; x += segmentSize, i += 1)
        {
            midpoints[i] = new Midpoint(x, ReadPosMap(workItem, x));
        }
        // add the very last item as the last midpoint (possibly it is done twice)
        midpoints[midpoints.Length - 1] = new Midpoint(mapCount - 1, ReadPosMap(workItem, mapCount - 1));
        return midpoints;
    }
    finally
    {
        ReturnReaderWorkItem(workItem);
    }
}
private Midpoint[] PopulateMidpoints(int depth)
{
    if (depth > 31) throw new ArgumentOutOfRangeException("depth", "Too large depth for midpoints.");

    ReaderWorkItem workItem = null;
    try
    {
        workItem = Chunk.GetReaderWorkItem();

        int midPointsCnt = 1 << depth;
        int segmentSize;
        Midpoint[] midpoints;
        var mapCount = Chunk.ChunkFooter.MapCount;
        if (mapCount < midPointsCnt)
        {
            segmentSize = 1; // we cache all items
            midpoints = new Midpoint[mapCount];
        }
        else
        {
            segmentSize = mapCount / midPointsCnt;
            midpoints = new Midpoint[1 + (mapCount + segmentSize - 1) / segmentSize];
        }

        for (int x = 0, i = 0, xN = mapCount - 1; x < xN; x += segmentSize, i += 1)
        {
            midpoints[i] = new Midpoint(x, ReadPosMap(workItem, x));
        }
        // add the very last item as the last midpoint (possibly it is done twice)
        midpoints[midpoints.Length - 1] = new Midpoint(mapCount - 1, ReadPosMap(workItem, mapCount - 1));
        return midpoints;
    }
    catch (FileBeingDeletedException)
    {
        return null;
    }
    catch (OutOfMemoryException)
    {
        return null;
    }
    finally
    {
        if (workItem != null) Chunk.ReturnReaderWorkItem(workItem);
    }
}
private static void AppendMidpointRecordTo(Stream stream, byte[] buffer, byte version, Midpoint midpointEntry,
                                           int midpointEntrySize)
{
    if (version >= PTableVersions.IndexV4)
    {
        ulong eventStream = midpointEntry.Key.Stream;
        long eventVersion = midpointEntry.Key.Version;
        long itemIndex = midpointEntry.ItemIndex;

        for (int i = 0; i < 8; i++)
        {
            buffer[i] = (byte)(eventVersion & 0xFF);
            eventVersion >>= 8;
        }
        for (int i = 0; i < 8; i++)
        {
            buffer[i + 8] = (byte)(eventStream & 0xFF);
            eventStream >>= 8;
        }
        for (int i = 0; i < 8; i++)
        {
            buffer[i + 16] = (byte)(itemIndex & 0xFF);
            itemIndex >>= 8;
        }
        stream.Write(buffer, 0, midpointEntrySize);
    }
}
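The three shift loops serialize Key.Version, Key.Stream, and ItemIndex as consecutive little-endian 8-byte fields (24 bytes in all), and do so regardless of host endianness, which BitConverter does not guarantee. A sketch comparing the two on arbitrary values:

using System;

class MidpointRecordLayoutDemo
{
    static void Main()
    {
        long version = 0x0102030405060708;
        ulong stream = 0x1112131415161718;
        long index = 42;

        // Manual little-endian write, as in AppendMidpointRecordTo.
        var manual = new byte[24];
        long v = version; ulong s = stream; long ix = index;
        for (int i = 0; i < 8; i++) { manual[i] = (byte)(v & 0xFF); v >>= 8; }
        for (int i = 0; i < 8; i++) { manual[i + 8] = (byte)(s & 0xFF); s >>= 8; }
        for (int i = 0; i < 8; i++) { manual[i + 16] = (byte)(ix & 0xFF); ix >>= 8; }

        // BitConverter layout, which matches only on little-endian hosts.
        var expected = new byte[24];
        BitConverter.GetBytes(version).CopyTo(expected, 0);
        BitConverter.GetBytes(stream).CopyTo(expected, 8);
        BitConverter.GetBytes(index).CopyTo(expected, 16);

        Console.WriteLine(BitConverter.IsLittleEndian
            ? (manual.AsSpan().SequenceEqual(expected) ? "layouts match" : "mismatch")
            : "big-endian host: BitConverter differs; the loops stay little-endian");
    }
}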
private static Range LocatePosRange(Midpoint[] midpoints, long pos)
{
    int lowerMidpoint = LowerMidpointBound(midpoints, pos);
    int upperMidpoint = UpperMidpointBound(midpoints, pos);
    return new Range(midpoints[lowerMidpoint].ItemIndex, midpoints[upperMidpoint].ItemIndex);
}
/// <summary>
/// Returns the index of the upper midpoint for the given logical position.
/// Assumes it always exists.
/// </summary>
private static int UpperMidpointBound(Midpoint[] midpoints, int pos)
{
    int l = 0;
    int r = midpoints.Length - 1;
    while (l < r)
    {
        int m = l + (r - l) / 2;
        if (midpoints[m].LogPos >= pos)
            r = m;
        else
            l = m + 1;
    }
    return l;
}
private static Tuple<int, int> LocatePosRange(Midpoint[] midpoints, int pos)
{
    int lowerMidpoint = LowerMidpointBound(midpoints, pos);
    int upperMidpoint = UpperMidpointBound(midpoints, pos);
    return Tuple.Create(midpoints[lowerMidpoint].ItemIndex, midpoints[upperMidpoint].ItemIndex);
}
private long LowerMidpointBound(Midpoint[] midpoints, IndexEntryKey key)
{
    long l = 0;
    long r = midpoints.Length - 1;
    while (l < r)
    {
        long m = l + (r - l + 1) / 2;
        if (midpoints[m].Key.GreaterThan(key))
            l = m;
        else
            r = m - 1;
    }
    return l;
}
private long UpperMidpointBound(Midpoint[] midpoints, IndexEntryKey key)
{
    long l = 0;
    long r = midpoints.Length - 1;
    while (l < r)
    {
        long m = l + (r - l) / 2;
        if (midpoints[m].Key.SmallerThan(key))
            r = m;
        else
            l = m + 1;
    }
    return r;
}
private int LowerMidpointBound(Midpoint[] midpoints, ulong stream)
{
    int l = 0;
    int r = midpoints.Length - 1;
    while (l < r)
    {
        int m = l + (r - l + 1) / 2;
        if (midpoints[m].Key > stream)
            l = m;
        else
            r = m - 1;
    }
    return l;
}
private void Awake()
{
    instance = this;
}
internal Midpoint[] CacheMidpointsAndVerifyHash(int depth)
{
    var buffer = new byte[4096];
    if (depth < 0 || depth > 30) throw new ArgumentOutOfRangeException("depth");

    var count = Count;
    if (count == 0 || depth == 0) return null;

#if __MonoCS__
    var workItem = GetWorkItem();
    var stream = workItem.Stream;
    try
    {
#else
    using (var stream = UnbufferedFileStream.Create(_filename, FileMode.Open, FileAccess.Read, FileShare.Read,
                                                    false, 4096, 4096, false, 4096))
    {
#endif
        try
        {
            int midpointsCount;
            Midpoint[] midpoints;
            using (MD5 md5 = MD5.Create())
            {
                try
                {
                    midpointsCount = (int)Math.Max(2L, Math.Min((long)1 << depth, count));
                    midpoints = new Midpoint[midpointsCount];
                }
                catch (OutOfMemoryException exc)
                {
                    throw new PossibleToHandleOutOfMemoryException("Failed to allocate memory for Midpoint cache.", exc);
                }

                stream.Seek(0, SeekOrigin.Begin);
                stream.Read(buffer, 0, PTableHeader.Size);
                md5.TransformBlock(buffer, 0, PTableHeader.Size, null, 0);

                long previousNextIndex = long.MinValue;
                var previousKey = new IndexEntryKey(long.MaxValue, int.MaxValue);
                for (long k = 0; k < midpointsCount; ++k)
                {
                    var nextIndex = (long)k * (count - 1) / (midpointsCount - 1);
                    if (previousNextIndex != nextIndex)
                    {
                        ReadUntilWithMd5(PTableHeader.Size + _indexEntrySize * nextIndex, stream, md5);
                        stream.Read(buffer, 0, _indexKeySize);
                        md5.TransformBlock(buffer, 0, _indexKeySize, null, 0);
                        IndexEntryKey key;
                        if (_version == PTableVersions.Index32Bit)
                        {
                            key = new IndexEntryKey(BitConverter.ToUInt32(buffer, 4), BitConverter.ToInt32(buffer, 0));
                        }
                        else
                        {
                            key = new IndexEntryKey(BitConverter.ToUInt64(buffer, 4), BitConverter.ToInt32(buffer, 0));
                        }
                        midpoints[k] = new Midpoint(key, nextIndex);
                        previousNextIndex = nextIndex;
                        previousKey = key;
                    }
                    else
                    {
                        midpoints[k] = new Midpoint(previousKey, previousNextIndex);
                    }
                }

                ReadUntilWithMd5(stream.Length - MD5Size, stream, md5);
                // verify hash (should be at stream.length - MD5Size)
                md5.TransformFinalBlock(Empty.ByteArray, 0, 0);
                var fileHash = new byte[MD5Size];
                stream.Read(fileHash, 0, MD5Size);
                ValidateHash(md5.Hash, fileHash);
                return midpoints;
            }
        }
        catch
        {
            Dispose();
            throw;
        }
    }
#if __MonoCS__
    finally
    {
        ReturnWorkItem(workItem);
    }
#endif
}
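The hash check above streams the whole file through MD5.TransformBlock while it is being read, then finishes with TransformFinalBlock and compares against the digest stored in the final 16 bytes. A stripped-down sketch of the same pattern, assuming a hypothetical file laid out as payload followed by a 16-byte MD5 trailer:

using System;
using System.IO;
using System.Security.Cryptography;

class StreamingMd5Demo
{
    const int Md5Size = 16;

    static void Main()
    {
        using var md5 = MD5.Create();
        using var stream = File.OpenRead("table.ptable"); // hypothetical: payload + 16-byte MD5 trailer
        var buffer = new byte[4096];
        long payload = stream.Length - Md5Size;
        long read = 0;
        while (read < payload)
        {
            int n = stream.Read(buffer, 0, (int)Math.Min(buffer.Length, payload - read));
            md5.TransformBlock(buffer, 0, n, null, 0); // hash while reading
            read += n;
        }
        md5.TransformFinalBlock(Array.Empty<byte>(), 0, 0);

        var fileHash = new byte[Md5Size];
        stream.Read(fileHash, 0, Md5Size);
        Console.WriteLine(md5.Hash.AsSpan().SequenceEqual(fileHash) ? "hash ok" : "corrupt");
    }
}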
private int TranslateExactWithMidpoints(ReaderWorkItem workItem, Midpoint[] midpoints, int pos)
{
    if (pos < midpoints[0].LogPos || pos > midpoints[midpoints.Length - 1].LogPos)
        return -1;

    var recordRange = LocatePosRange(midpoints, pos);
    return TranslateExactWithoutMidpoints(workItem, pos, recordRange.Item1, recordRange.Item2);
}
internal Midpoint[] CacheMidpointsAndVerifyHash(int depth, bool skipIndexVerify)
{
    var buffer = new byte[4096];
    if (depth < 0 || depth > 30) throw new ArgumentOutOfRangeException("depth");

    var count = Count;
    if (count == 0 || depth == 0) return null;

    if (skipIndexVerify)
    {
        Log.Debug("Disabling Verification of PTable");
    }

    Stream stream = null;
    WorkItem workItem = null;
    if (Runtime.IsUnixOrMac)
    {
        workItem = GetWorkItem();
        stream = workItem.Stream;
    }
    else
    {
        stream = UnbufferedFileStream.Create(_filename, FileMode.Open, FileAccess.Read, FileShare.Read,
                                             false, 4096, 4096, false, 4096);
    }

    try
    {
        int midpointsCount;
        Midpoint[] midpoints;
        using (MD5 md5 = MD5.Create())
        {
            try
            {
                midpointsCount = (int)Math.Max(2L, Math.Min((long)1 << depth, count));
                midpoints = new Midpoint[midpointsCount];
            }
            catch (OutOfMemoryException exc)
            {
                throw new PossibleToHandleOutOfMemoryException("Failed to allocate memory for Midpoint cache.", exc);
            }

            if (skipIndexVerify && (_version >= PTableVersions.IndexV4))
            {
                if (_midpointsCached == midpointsCount)
                {
                    // index verification is disabled and cached midpoints with the same depth requested are available
                    // so, we can load them directly from the PTable file
                    Log.Debug("Loading {midpointsCached} cached midpoints from PTable", _midpointsCached);
                    long startOffset = stream.Length - MD5Size - PTableFooter.GetSize(_version) - _midpointsCacheSize;
                    stream.Seek(startOffset, SeekOrigin.Begin);
                    for (uint k = 0; k < _midpointsCached; k++)
                    {
                        stream.Read(buffer, 0, _indexEntrySize);
                        IndexEntryKey key;
                        long index;
                        if (_version == PTableVersions.IndexV4)
                        {
                            key = new IndexEntryKey(BitConverter.ToUInt64(buffer, 8), BitConverter.ToInt64(buffer, 0));
                            index = BitConverter.ToInt64(buffer, 8 + 8);
                        }
                        else
                        {
                            throw new InvalidOperationException("Unknown PTable version: " + _version);
                        }

                        midpoints[k] = new Midpoint(key, index);
                        if (k > 0)
                        {
                            if (midpoints[k].Key.GreaterThan(midpoints[k - 1].Key))
                            {
                                throw new CorruptIndexException(String.Format(
                                    "Index entry key for midpoint {0} (stream: {1}, version: {2}) < index entry key for midpoint {3} (stream: {4}, version: {5})",
                                    k - 1, midpoints[k - 1].Key.Stream, midpoints[k - 1].Key.Version,
                                    k, midpoints[k].Key.Stream, midpoints[k].Key.Version));
                            }
                            else if (midpoints[k - 1].ItemIndex > midpoints[k].ItemIndex)
                            {
                                throw new CorruptIndexException(String.Format(
                                    "Item index for midpoint {0} ({1}) > Item index for midpoint {2} ({3})",
                                    k - 1, midpoints[k - 1].ItemIndex, k, midpoints[k].ItemIndex));
                            }
                        }
                    }
                    return midpoints;
                }
                else
                {
                    Log.Debug(
                        "Skipping loading of cached midpoints from PTable due to count mismatch, cached midpoints: {midpointsCached} / required midpoints: {midpointsCount}",
                        _midpointsCached, midpointsCount);
                }
            }

            if (!skipIndexVerify)
            {
                stream.Seek(0, SeekOrigin.Begin);
                stream.Read(buffer, 0, PTableHeader.Size);
                md5.TransformBlock(buffer, 0, PTableHeader.Size, null, 0);
            }

            long previousNextIndex = long.MinValue;
            var previousKey = new IndexEntryKey(long.MaxValue, long.MaxValue);
            for (long k = 0; k < midpointsCount; ++k)
            {
                long nextIndex = GetMidpointIndex(k, count, midpointsCount);
                if (previousNextIndex != nextIndex)
                {
                    if (!skipIndexVerify)
                    {
                        ReadUntilWithMd5(PTableHeader.Size + _indexEntrySize * nextIndex, stream, md5);
                        stream.Read(buffer, 0, _indexKeySize);
                        md5.TransformBlock(buffer, 0, _indexKeySize, null, 0);
                    }
                    else
                    {
                        stream.Seek(PTableHeader.Size + _indexEntrySize * nextIndex, SeekOrigin.Begin);
                        stream.Read(buffer, 0, _indexKeySize);
                    }

                    IndexEntryKey key;
                    if (_version == PTableVersions.IndexV1)
                    {
                        key = new IndexEntryKey(BitConverter.ToUInt32(buffer, 4), BitConverter.ToInt32(buffer, 0));
                    }
                    else if (_version == PTableVersions.IndexV2)
                    {
                        key = new IndexEntryKey(BitConverter.ToUInt64(buffer, 4), BitConverter.ToInt32(buffer, 0));
                    }
                    else
                    {
                        key = new IndexEntryKey(BitConverter.ToUInt64(buffer, 8), BitConverter.ToInt64(buffer, 0));
                    }

                    midpoints[k] = new Midpoint(key, nextIndex);
                    previousNextIndex = nextIndex;
                    previousKey = key;
                }
                else
                {
                    midpoints[k] = new Midpoint(previousKey, previousNextIndex);
                }

                if (k > 0)
                {
                    if (midpoints[k].Key.GreaterThan(midpoints[k - 1].Key))
                    {
                        throw new CorruptIndexException(String.Format(
                            "Index entry key for midpoint {0} (stream: {1}, version: {2}) < index entry key for midpoint {3} (stream: {4}, version: {5})",
                            k - 1, midpoints[k - 1].Key.Stream, midpoints[k - 1].Key.Version,
                            k, midpoints[k].Key.Stream, midpoints[k].Key.Version));
                    }
                    else if (midpoints[k - 1].ItemIndex > midpoints[k].ItemIndex)
                    {
                        throw new CorruptIndexException(String.Format(
                            "Item index for midpoint {0} ({1}) > Item index for midpoint {2} ({3})",
                            k - 1, midpoints[k - 1].ItemIndex, k, midpoints[k].ItemIndex));
                    }
                }
            }

            if (!skipIndexVerify)
            {
                ReadUntilWithMd5(stream.Length - MD5Size, stream, md5);
                // verify hash (should be at stream.length - MD5Size)
                md5.TransformFinalBlock(Empty.ByteArray, 0, 0);
                var fileHash = new byte[MD5Size];
                stream.Read(fileHash, 0, MD5Size);
                ValidateHash(md5.Hash, fileHash);
            }

            return midpoints;
        }
    }
    catch
    {
        Dispose();
        throw;
    }
    finally
    {
        if (Runtime.IsUnixOrMac)
        {
            if (workItem != null) ReturnWorkItem(workItem);
        }
        else
        {
            if (stream != null) stream.Dispose();
        }
    }
}
private Tuple<int, int> TranslateClosestForwardWithMidpoints(ReaderWorkItem workItem, Midpoint[] midpoints, int pos)
{
    // to allow backward reading of the last record, forward read will decline anyway
    if (pos > midpoints[midpoints.Length - 1].LogPos)
        return Tuple.Create(Chunk.ActualDataSize, midpoints.Length);

    var recordRange = LocatePosRange(midpoints, pos);
    return TranslateClosestForwardWithoutMidpoints(workItem, pos, recordRange.Item1, recordRange.Item2);
}
/// <summary>
/// Returns the index of the lower midpoint for the given logical position.
/// Assumes it always exists.
/// </summary>
private static int LowerMidpointBound(Midpoint[] midpoints, int pos)
{
    int l = 0;
    int r = midpoints.Length - 1;
    while (l < r)
    {
        int m = l + (r - l + 1) / 2;
        if (midpoints[m].LogPos <= pos)
            l = m;
        else
            r = m - 1;
    }
    return l;
}
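Together with UpperMidpointBound above, this pair brackets a position: the lower bound is the last midpoint with LogPos <= pos, the upper bound the first with LogPos >= pos, and LocatePosRange turns them into an item-index range to scan. A self-contained demonstration with a simplified Midpoint (the sample positions are invented):

using System;

struct Mp
{
    public int LogPos;    // logical position of the entry
    public int ItemIndex; // index of the entry in the table
    public Mp(int p, int i) { LogPos = p; ItemIndex = i; }
}

class MidpointBoundsDemo
{
    // Largest midpoint with LogPos <= pos (upper-biased probe keeps l moving right).
    static int Lower(Mp[] m, int pos)
    {
        int l = 0, r = m.Length - 1;
        while (l < r)
        {
            int mid = l + (r - l + 1) / 2;
            if (m[mid].LogPos <= pos) l = mid; else r = mid - 1;
        }
        return l;
    }

    // Smallest midpoint with LogPos >= pos.
    static int Upper(Mp[] m, int pos)
    {
        int l = 0, r = m.Length - 1;
        while (l < r)
        {
            int mid = l + (r - l) / 2;
            if (m[mid].LogPos >= pos) r = mid; else l = mid + 1;
        }
        return l;
    }

    static void Main()
    {
        var mps = new[] { new Mp(0, 0), new Mp(100, 10), new Mp(250, 20), new Mp(400, 30) };
        int pos = 180;
        int lo = Lower(mps, pos), hi = Upper(mps, pos);
        // The entry for position 180 must lie between item indices 10 and 20.
        Console.WriteLine($"search range: [{mps[lo].ItemIndex}, {mps[hi].ItemIndex}]"); // [10, 20]
    }
}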
private Midpoint[] PopulateCache(int depth)
{
    if (depth > 31) throw new ArgumentOutOfRangeException("depth");
    if (Count == 0) throw new InvalidOperationException("Empty PTable.");

    var workItem = GetWorkItem();
    try
    {
        int segmentSize;
        Midpoint[] midpoints;
        try
        {
            int midPointsCnt = 1 << depth;
            if (Count < midPointsCnt)
            {
                segmentSize = 1; // we cache all items
                midpoints = new Midpoint[Count];
            }
            else
            {
                segmentSize = Count / midPointsCnt;
                midpoints = new Midpoint[1 + (Count + segmentSize - 1) / segmentSize];
            }
        }
        catch (OutOfMemoryException exc)
        {
            throw new PossibleToHandleOutOfMemoryException("Failed to allocate memory for Midpoint cache.", exc);
        }

        for (int x = 0, i = 0, xN = Count - 1; x <= xN; x += segmentSize, i += 1)
        {
            var record = ReadEntry(x, workItem);
            midpoints[i] = new Midpoint(record.Key, x);
        }

        // add the very last item as the last midpoint (possibly it is done twice)
        {
            var record = ReadEntry(Count - 1, workItem);
            midpoints[midpoints.Length - 1] = new Midpoint(record.Key, Count - 1);
        }
        return midpoints;
    }
    finally
    {
        ReturnWorkItem(workItem);
    }
}
private int TranslateClosestForwardWithMidpoints(ReaderWorkItem workItem, Midpoint[] midpoints, long pos)
{
    // to allow backward reading of the last record, forward read will decline anyway
    if (pos > midpoints[midpoints.Length - 1].LogPos)
        return Chunk.PhysicalDataSize;

    var recordRange = LocatePosRange(midpoints, pos);
    return TranslateClosestForwardWithoutMidpoints(workItem, pos, recordRange.Lower, recordRange.Upper);
}
private int UpperMidpointBound(Midpoint[] midpoints, ulong stream)
{
    int l = 0;
    int r = midpoints.Length - 1;
    while (l < r)
    {
        int m = l + (r - l) / 2;
        if (midpoints[m].Key < stream)
            r = m;
        else
            l = m + 1;
    }
    return r;
}
public static List<EdgeAggregator> InstantiateMidpointTheorem(GroundedClause original, Midpoint midpt)
{
    List<EdgeAggregator> newGrounded = new List<EdgeAggregator>();

    // Construct 2AM
    Multiplication product1 = new Multiplication(new NumericValue(2), new Segment(midpt.point, midpt.segment.Point1));
    // Construct 2BM
    Multiplication product2 = new Multiplication(new NumericValue(2), new Segment(midpt.point, midpt.segment.Point2));

    // 2X = AB
    GeometricSegmentEquation newEq1 = new GeometricSegmentEquation(product1, midpt.segment);
    GeometricSegmentEquation newEq2 = new GeometricSegmentEquation(product2, midpt.segment);

    // For hypergraph
    List<GroundedClause> antecedent = Utilities.MakeList<GroundedClause>(original);

    newGrounded.Add(new EdgeAggregator(antecedent, newEq1, annotation));
    newGrounded.Add(new EdgeAggregator(antecedent, newEq2, annotation));

    return newGrounded;
}
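The two equations encode the midpoint theorem: if M is the midpoint of AB, then AM = MB and AM + MB = AB, hence 2·AM = AB and 2·MB = AB. A quick numeric sanity check of that identity on a number line (values are arbitrary):

using System;

class MidpointTheoremCheck
{
    static void Main()
    {
        double a = 1.0, b = 7.0;   // endpoints on a number line
        double m = (a + b) / 2.0;  // midpoint
        double am = Math.Abs(m - a), mb = Math.Abs(b - m), ab = Math.Abs(b - a);
        Console.WriteLine(2 * am == ab && 2 * mb == ab); // True
    }
}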