/// <summary>
/// Decompresses this document into a fresh context-owned buffer sized to hold
/// the uncompressed payload plus its trailing escape-position table.
/// </summary>
/// <param name="allocatedData">Receives the allocation backing the returned pointer.</param>
/// <returns>Pointer to the start of the decompressed data.</returns>
public byte* DecompressToTempBuffer(out AllocatedMemoryData allocatedData)
{
    var escapePositionsSize = GetSizeOfEscapePositions();
    allocatedData = _context.GetMemory(UncompressedSize + escapePositionsSize);
    return DecompressToBuffer(allocatedData.Address, escapePositionsSize);
}
/// <summary>
/// Disposes the current write buffer, acquires a fresh stream of the last-used
/// size, and resets the write position so this writer can be reused.
/// </summary>
public void ResetAndRenew()
{
    _unmanagedWriteBuffer.Dispose();
    _unmanagedWriteBuffer = (TWriter)(object)_context.GetStream(_lastSize);
    _position = 0;

    // Only allocate the 32-byte scratch buffer when we don't already have one.
    // The previous version re-allocated unconditionally, abandoning the prior
    // allocation on every reset; the sibling ResetAndRenew overload in this
    // file already guards with a null check — keep the two consistent.
    if (_innerBuffer == null)
    {
        _innerBuffer = _context.GetMemory(32);
    }
}
/// <summary>
/// Resets this writer for reuse: clears the document counter, swaps the
/// unmanaged write buffer for a fresh stream of the last-used size, and
/// rewinds the write position.
/// </summary>
public void ResetAndRenew()
{
    _documentNumber = -1;
    _unmanagedWriteBuffer.Dispose();
    _unmanagedWriteBuffer = (TWriter)(object)_context.GetStream(_lastSize);
    _position = 0;

    // Lazily (re)create the 32-byte scratch buffer only when it is missing.
    _innerBuffer = _innerBuffer ?? _context.GetMemory(32);
}
/// <summary>
/// Ensures the current segment can hold <paramref name="required"/> more bytes,
/// either by growing the current allocation in place (when permitted) or by
/// chaining a new, larger segment in front of it.
/// </summary>
/// <param name="required">Minimum number of additional bytes needed.</param>
/// <param name="allowGrowth">Whether in-place growth of the current allocation may be attempted.</param>
private void AllocateNextSegment(int required, bool allowGrowth)
{
    //TODO: protect from documents larger than 1GB

    // Double the segment size each time until segments reach 1 MB; past that,
    // stick to 1 MB chunks so a 17 MB document does not waste 15 MB and force
    // very large allocations.
    const int oneMb = 1024 * 1024;
    var newSegmentSize = Math.Max(Bits.NextPowerOf2(required), _current.Allocation.SizeInBytes * 2);
    if (newSegmentSize > oneMb && required <= oneMb)
    {
        newSegmentSize = oneMb;
    }

    // If the context can grow the current allocation in place, we are done.
    if (allowGrowth && _context.GrowAllocation(_current.Allocation, newSegmentSize))
        return;

    var memory = _context.GetMemory(newSegmentSize);
    _current = new Segment
    {
        Address = (byte*)memory.Address,
        Allocation = memory,
        Used = 0,
        Previous = _current,
        PreviousAllocated = _current
    };
}
/// <summary>
/// Produces a copy of this object owned by <paramref name="context"/>.
/// Nested objects are re-read through the context; root objects are copied
/// byte-for-byte into context-owned memory, along with any pending
/// modifications.
/// </summary>
/// <param name="context">Context that will own the clone's memory.</param>
/// <returns>The cloned object.</returns>
public BlittableJsonReaderObject Clone(JsonOperationContext context)
{
    // A nested object cannot be blitted on its own — re-read it instead.
    if (_parent != null)
        return context.ReadObject(this, "cloning nested obj");

    var memory = context.GetMemory(Size);
    CopyTo(memory.Address);
    var copy = new BlittableJsonReaderObject(memory.Address, Size, context)
    {
        _allocatedMemory = memory
    };

    // Carry over any queued modifications so the clone stays equivalent.
    if (Modifications != null)
    {
        copy.Modifications = new DynamicJsonValue(copy);
        foreach (var property in Modifications.Properties)
        {
            copy.Modifications.Properties.Enqueue(property);
        }
    }

    return copy;
}
/// <summary>
/// Decompresses this document into memory owned by <paramref name="externalContext"/>,
/// sized for the uncompressed payload plus its escape-position table.
/// </summary>
/// <param name="externalContext">Context that will own the returned allocation.</param>
/// <returns>The allocation holding the decompressed data.</returns>
public AllocatedMemoryData DecompressToAllocatedMemoryData(JsonOperationContext externalContext)
{
    var escapePositionsSize = GetSizeOfEscapePositions();
    var buffer = externalContext.GetMemory(UncompressedSize + escapePositionsSize);
    DecompressToBuffer(buffer.Address, escapePositionsSize);
    return buffer;
}
/// <summary>
/// Creates a writer that serializes blittable JSON to <paramref name="stream"/>,
/// batching output through a pinned managed buffer borrowed from the context.
/// </summary>
/// <param name="context">Context supplying buffers and scratch memory.</param>
/// <param name="stream">Destination stream for the JSON text.</param>
public BlittableJsonTextWriter(JsonOperationContext context, Stream stream)
{
    _context = context;
    _stream = stream;

    // Borrow a pinned managed buffer from the context for output batching;
    // _returnBuffer is kept so it can be handed back later.
    _returnBuffer = context.GetManagedBuffer(out _pinnedBuffer);
    _buffer = _pinnedBuffer.Pointer;
    _bufferLen = _pinnedBuffer.Length;

    // Small scratch area used when formatting date/time values.
    _dateTimeMemory = context.GetMemory(32);
}
/// <summary>
/// Base-class constructor: wires the destination stream and borrows a pinned
/// managed buffer from the context for output batching.
/// </summary>
/// <param name="context">Context supplying buffers and scratch memory.</param>
/// <param name="stream">Destination stream for the JSON text.</param>
public AbstractBlittableJsonTextWriter(JsonOperationContext context, Stream stream)
{
    _context = context;
    _stream = stream;

    // Borrow a pinned managed buffer for batching writes to the stream.
    // NOTE(review): unlike BlittableJsonTextWriter, no buffer-length field is
    // captured here — presumably this class reads _pinnedBuffer.Length
    // directly; confirm.
    _returnBuffer = context.GetManagedBuffer(out _pinnedBuffer);
    _buffer = _pinnedBuffer.Pointer;

    // Small auxiliary scratch area used by the parser.
    _parserAuxiliarMemory = context.GetMemory(32);
}
/// <summary>
/// Returns a pointer to a compression scratch buffer of at least
/// <paramref name="minSize"/> bytes, reusing the cached one when big enough.
/// </summary>
/// <param name="minSize">Minimum required buffer size in bytes.</param>
/// <returns>Pointer to the scratch buffer.</returns>
private unsafe byte* GetCompressionBuffer(int minSize)
{
    // (Re)allocate only when the cached buffer is absent or too small.
    // NOTE(review): the outgrown buffer is not explicitly returned here —
    // presumably the context reclaims it on reset; confirm.
    if (_compressionBuffer == null || _compressionBuffer.SizeInBytes < minSize)
    {
        _compressionBuffer = _context.GetMemory(minSize);
    }

    return _compressionBuffer.Address;
}
/// <summary>
/// Makes room for at least <paramref name="required"/> more bytes: tries to
/// grow the current head allocation in place, and otherwise allocates a new
/// segment and mutates the shared head so all copies of this buffer see it.
/// </summary>
/// <param name="required">Minimum number of additional bytes needed; must be positive.</param>
/// <param name="allowGrowth">Whether in-place growth of the head allocation may be attempted.</param>
private void AllocateNextSegment(int required, bool allowGrowth) {
    Debug.Assert(required > 0);

    // Hard cap: a single request may never exceed the arena's maximum size.
    if (required > ArenaMemoryAllocator.MaxArenaSize) { ThrowOnAllocationSizeExceeded(required, ArenaMemoryAllocator.MaxArenaSize); }

    // Grow by doubling segment size until we get to 1 MB, then just use 1 MB segments
    // otherwise a document with 17 MB will waste 15 MB and require very big allocations
    var requiredPowerOfTwo = Bits.PowerOf2(required);
    var segmentSize = Math.Max(requiredPowerOfTwo, _head.Allocation.SizeInBytes * 2);
    const int oneMb = 1024 * 1024;
    // Cap at 1 MB unless a single request genuinely needs more than that.
    if (segmentSize > oneMb && required <= oneMb) { segmentSize = oneMb; }

    // We can sometimes ask the context to grow the allocation size;
    // it may do so at its own discretion; if this happens, then we
    // are good to go.
    if (allowGrowth && _context.GrowAllocation(_head.Allocation, segmentSize)) { return; }

    // Can't change _head because there may be copies of the current
    // instance of UnmanagedWriteBuffer going around. Thus, we simply
    // mutate it to ensure all copies have the same allocations.
    var allocation = _context.GetMemory(segmentSize);
    // The context may hand back less than requested; fail loudly if so.
    if (allocation.SizeInBytes < required) { ThrowOnAllocationSizeMismatch(allocation.SizeInBytes, required); }

    // Copy the head
    Segment previousHead = _head.ShallowCopy();

    // Reset the head (this change happens in all instances at the
    // same time, albeit not atomically).
    _head.Previous = previousHead;
    _head.DeallocationPendingPrevious = previousHead;
    _head.Allocation = allocation;
    _head.Address = allocation.Address;
    _head.Used = 0;
    // Total size so far is unchanged — the new segment starts empty.
    _head.AccumulatedSizeInBytes = previousHead.AccumulatedSizeInBytes;
}
/// <summary>
/// Chains a new, larger segment in front of the current one with room for at
/// least <paramref name="required"/> more bytes.
/// </summary>
/// <param name="required">Minimum number of additional bytes needed.</param>
private void AllocateNextSegment(int required)
{
    // Grow by doubling segment size until we get to 1 MB, then just use 1 MB
    // segments; otherwise a document with 17 MB will waste 15 MB and require
    // very big allocations.
    var nextSegmentSize = Math.Max(Bits.NextPowerOf2(required), _current.Allocation.SizeInBytes * 2);

    // BUG FIX: this constant was 1024 * 1024 * 1024 (1 GB) despite being
    // named oneMb, so the 1 MB cap described above never engaged and segment
    // sizes kept doubling unbounded. The sibling AllocateNextSegment overload
    // uses the correct 1 MB value.
    const int oneMb = 1024 * 1024;
    if (nextSegmentSize > oneMb && required <= oneMb)
    {
        nextSegmentSize = oneMb;
    }

    var allocatedMemoryData = _context.GetMemory(nextSegmentSize);
    _current = new Segment
    {
        Address = (byte*)allocatedMemoryData.Address,
        Allocation = allocatedMemoryData,
        Used = 0,
        Previous = _current
    };
}
/// <summary>
/// Creates a writer bound to <paramref name="context"/>, pre-allocating its
/// small scratch buffer.
/// </summary>
/// <param name="context">Context supplying memory for this writer.</param>
public BlittableWriter(JsonOperationContext context)
{
    _context = context;

    // Small scratch buffer used while writing values.
    _innerBuffer = _context.GetMemory(32);
}
/// <summary>
/// Creates a writer bound to <paramref name="context"/> that emits through the
/// supplied unmanaged write buffer, pre-allocating its small scratch buffer.
/// </summary>
/// <param name="context">Context supplying memory for this writer.</param>
/// <param name="writer">Unmanaged write buffer to emit into.</param>
public BlittableWriter(JsonOperationContext context, TWriter writer)
{
    _context = context;
    _unmanagedWriteBuffer = writer;

    // Small scratch buffer used while writing values.
    _innerBuffer = _context.GetMemory(32);
}