/// <summary>
/// Stores the low 24 bits of <paramref name="value"/> at the given item index/offset,
/// little-endian: two low bytes first, then the high byte (mirrors <c>Get3ByteInt</c>).
/// </summary>
/// <param name="index"> item index in this array. </param>
/// <param name="offset"> byte offset within the item. </param>
/// <param name="value"> value whose low 24 bits are written. </param>
public override void Set3ByteInt(long index, int offset, int value)
{
    long address = address(index, offset);
    // Use the alignment-aware helper (as Set5ByteLong does with PutInt) so that
    // platforms which forbid unaligned stores fall back to byte-wise writes
    // instead of calling UnsafeUtil.putShort on a possibly unaligned address.
    PutShort(address, ( short )value);
    // Third byte: unsigned shift of the value past the low 16 bits.
    UnsafeUtil.putByte(address + Short.BYTES, ( sbyte )(( int )(( uint )value >> (sizeof(short) * 8))));
}
/// <summary>
/// Allocates the off-heap backing memory for <paramref name="length"/> items of
/// <paramref name="itemSize"/> bytes each, choosing between a plain allocation and an
/// over-allocated, manually aligned one depending on platform alignment requirements.
/// </summary>
/// <param name="length"> number of items in the array. </param>
/// <param name="itemSize"> size of each item, in bytes. </param>
/// <param name="base"> base offset forwarded to the superclass. </param>
/// <param name="allocationTracker"> tracker charged with the allocated bytes. </param>
protected internal OffHeapNumberArray(long length, int itemSize, long @base, MemoryAllocationTracker allocationTracker) : base(itemSize, @base)
{
    // Fail fast if sun.misc.Unsafe (or its equivalent) is unavailable.
    UnsafeUtil.assertHasUnsafe();
    this.LengthConflict = length;
    this.AllocationTracker = allocationTracker;
    long dataSize = length * itemSize;
    // Exactly one set bit <=> itemSize is a power of two.
    bool itemSizeIsPowerOfTwo = Integer.bitCount(itemSize) == 1;
    if (UnsafeUtil.allowUnalignedMemoryAccess || !itemSizeIsPowerOfTwo)
    {
        // we can end up here even if we require aligned memory access. Reason is that item size
        // isn't power of two anyway and so we have to fallback to safer means of accessing the memory,
        // i.e. byte for byte.
        _allocatedBytes = dataSize;
        this._allocatedAddress = this.Address = UnsafeUtil.allocateMemory(_allocatedBytes, allocationTracker);
    }
    else
    {
        // the item size is a power of two and we're required to access memory aligned
        // so we can allocate a bit more to ensure we can get an aligned memory address to start from.
        _allocatedBytes = dataSize + itemSize - 1;
        this._allocatedAddress = UnsafeUtil.allocateMemory(_allocatedBytes, allocationTracker);
        this.Address = UnsafeUtil.alignedMemory(_allocatedAddress, itemSize);
    }
}
/// <summary>
/// Try taking a concurrent write lock. Multiple write locks can be held at the same time. Write locks will
/// invalidate any optimistic read lock that overlaps with them, and write locks will make any attempt at grabbing
/// an exclusive lock fail. If an exclusive lock is currently held, then the attempt to take a write lock will fail.
/// <para>
/// Write locks must be paired with a corresponding <seealso cref="unlockWrite(long)"/>.
///
/// </para>
/// </summary>
/// <returns> {@code true} if the write lock was taken, {@code false} otherwise. </returns>
public static bool TryWriteLock(long address)
{
    long s; // last observed lock word
    long n; // proposed new lock word
    for ( ; ;)
    {
        s = GetState(address);
        // Exclusive bit set: writers must not enter.
        bool unwritablyLocked = (s & _exlMask) != 0;
        // Writer count field saturated: cannot admit another writer.
        bool writeCountOverflow = (s & _cntMask) == _cntMask;
        // bitwise-OR to reduce branching and allow more ILP
        if (unwritablyLocked | writeCountOverflow)
        {
            return(FailWriteLock(s, writeCountOverflow));
        }
        // Increment the writer count and set the modified bit in one go.
        // Note: '+' binds tighter than '|', so this is (s + _cntUnit) | _modMask.
        n = s + _cntUnit | _modMask;
        if (CompareAndSetState(address, s, n))
        {
            UnsafeUtil.storeFence();
            return(true);
        }
        // CAS lost a race with a concurrent state transition — retry from a fresh read.
    }
}
/// <summary>
/// Writes the low 32 bits of <paramref name="key"/> at <paramref name="keyAddress"/>.
/// The key is expected to fit in an int; larger keys indicate a caller bug.
/// </summary>
/// <param name="keyAddress"> off-heap address to write the key to. </param>
/// <param name="key"> key value; must fit in 32 bits. </param>
/// <param name="value"> associated value (unused by this key-writing step). </param>
protected internal override void InternalPut(long keyAddress, long key, VALUE value)
{
    // The original Java "assert ... : message" survived conversion as invalid C#
    // syntax; Debug.Assert is the closest equivalent (active in debug builds only).
    System.Diagnostics.Debug.Assert(( int )key == key, "Illegal key " + key + ", it's bigger than int");
    // We can "safely" cast to int here, assuming that this call trickles in via a PrimitiveIntCollection
    UnsafeUtil.putInt(keyAddress, ( int )key);
}
/// <summary>
/// Stores the low five bytes of <paramref name="value"/> at the given index/offset:
/// the four low bytes via the alignment-aware int writer, then the fifth byte.
/// </summary>
public override void Set5ByteLong(long index, int offset, long value)
{
    long p = address(index, offset);
    // Split the value into its low 32 bits and the fifth byte (unsigned shift).
    int lowFourBytes = ( int )value;
    sbyte fifthByte = ( sbyte )(( long )(( ulong )value >> 32));
    PutInt(p, lowFourBytes);
    UnsafeUtil.putByte(p + Integer.BYTES, fifthByte);
}
//JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
//ORIGINAL LINE: @Test void allocatedPointerMustBePageAligned()
/// <summary>
/// Allocating with page-size alignment must yield a pointer that is an exact
/// multiple of the platform page size.
/// </summary>
internal virtual void AllocatedPointerMustBePageAligned()
{
    MemoryAllocator allocator = CreateAllocator(_eightPages);
    long pointer = allocator.AllocateAligned(Org.Neo4j.Io.pagecache.PageCache_Fields.PAGE_SIZE, UnsafeUtil.pageSize());
    // A page-aligned address leaves no remainder when divided by the page size.
    assertThat(pointer % UnsafeUtil.pageSize(), @is(0L));
}
/// <summary>
/// Grab the exclusive lock if it is immediately available. Exclusive locks will invalidate any overlapping
/// optimistic read lock, and fail write and flush locks. If any write or flush locks are currently taken, or if
/// the exclusive lock is already taken, then the attempt to grab an exclusive lock will fail.
/// <para>
/// Successfully grabbed exclusive locks must always be paired with a corresponding <seealso cref="unlockExclusive(long)"/>.
///
/// </para>
/// </summary>
/// <returns> {@code true} if we successfully got the exclusive lock, {@code false} otherwise. </returns>
public static bool TryExclusiveLock(long address)
{
    long s = GetState(address);
    // Succeed only when no conflicting bits (_unlMask — presumably all lock bits)
    // are set AND the single CAS attempt wins; no retry loop here by design.
    bool res = ((s & _unlMask) == 0) && CompareAndSetState(address, s, s + _exlMask);
    // Fence is issued unconditionally; its ordering effect matters on success.
    UnsafeUtil.storeFence();
    return(res);
}
/// <summary>
/// Reads the 5-byte little-endian value stored at the given index/offset.
/// An all-ones 5-byte pattern is the sentinel and is returned as -1.
/// </summary>
public override long Get5ByteLong(long index, int offset)
{
    long p = address(index, offset);
    // Low four bytes, read alignment-aware and widened without sign extension.
    long low = GetInt(p) & 0xFFFFFFFFL;
    // Fifth (most significant) byte.
    long high = UnsafeUtil.getByte(p + Integer.BYTES) & 0xFF;
    long combined = (high << 32) | low;
    return combined == 0xFFFFFFFFFFL ? -1 : combined;
}
/// <summary>
/// Reads a long from off-heap memory, falling back to a byte-wise
/// little-endian read when unaligned access is not allowed.
/// </summary>
private long GetLong(long p)
{
    return UnsafeUtil.allowUnalignedMemoryAccess
        ? UnsafeUtil.getLong(p)
        : UnsafeUtil.getLongByteWiseLittleEndian(p);
}
/// <summary>
/// Copies an item, byte for byte, from <paramref name="value"/> into
/// off-heap memory at the given index.
/// </summary>
public override void Set(long index, sbyte[] value)
{
    long target = address(index, 0);
    // Byte-wise copy — itemSize bytes per item.
    for (int i = 0; i < itemSize; i++)
    {
        UnsafeUtil.putByte(target + i, value[i]);
    }
}
/// <summary>
/// Reads the 3-byte little-endian integer at the given index/offset.
/// An all-ones 24-bit pattern (0xFFFFFF) is the sentinel and is returned as -1.
/// </summary>
// NOTE(review): this reads the low 16 bits with UnsafeUtil.getShort directly,
// while Get5ByteLong routes through the alignment-aware GetInt helper. If this
// class has a matching alignment-aware GetShort helper, it should likely be
// used here for platforms that forbid unaligned access — verify.
public override int Get3ByteInt(long index, int offset)
{
    long address = address(index, offset);
    // Low two bytes, masked to avoid sign extension.
    int lowWord = UnsafeUtil.getShort(address) & 0xFFFF;
    // High (third) byte.
    int highByte = UnsafeUtil.getByte(address + Short.BYTES) & 0xFF;
    int result = lowWord | (highByte << (sizeof(short) * 8));
    return(result == 0xFFFFFF ? -1 : result);
}
/// <summary>
/// Reads an int from off-heap memory, falling back to a byte-wise
/// little-endian read when unaligned access is not allowed.
/// </summary>
private int GetInt(long p)
{
    return UnsafeUtil.allowUnalignedMemoryAccess
        ? UnsafeUtil.getInt(p)
        : UnsafeUtil.getIntByteWiseLittleEndian(p);
}
/// <summary>
/// Copies an item, byte for byte, out of off-heap memory at the given
/// index into <paramref name="into"/>.
/// </summary>
public override void Get(long index, sbyte[] into)
{
    long source = address(index, 0);
    // Byte-wise copy — itemSize bytes per item.
    for (int i = 0; i < itemSize; i++)
    {
        into[i] = UnsafeUtil.getByte(source + i);
    }
}
//JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
//ORIGINAL LINE: @Test void mustBeAbleToAllocateSlabsLargerThanGrabSize()
/// <summary>
/// An allocator with a 2 MiB budget must serve a 1 MiB block that is larger
/// than its internal grab size, with ordinary page allocations around it.
/// </summary>
internal virtual void MustBeAbleToAllocateSlabsLargerThanGrabSize()
{
    MemoryAllocator allocator = CreateAllocator("2 MiB");
    long firstPage = allocator.AllocateAligned(UnsafeUtil.pageSize(), 1);
    long largeBlock = allocator.AllocateAligned(1024 * 1024, 1); // 1 MiB
    long secondPage = allocator.AllocateAligned(UnsafeUtil.pageSize(), 1);
    // Address 0 would signal a failed allocation.
    assertThat(firstPage, @is(not(0L)));
    assertThat(largeBlock, @is(not(0L)));
    assertThat(secondPage, @is(not(0L)));
}
/// <summary>
/// Writes a short to off-heap memory, using a byte-wise little-endian
/// write when unaligned access is not allowed.
/// </summary>
private void PutShort(long p, short value)
{
    if (!UnsafeUtil.allowUnalignedMemoryAccess)
    {
        // Safe path for platforms that trap on unaligned stores.
        UnsafeUtil.putShortByteWiseLittleEndian(p, value);
    }
    else
    {
        UnsafeUtil.putShort(p, value);
    }
}
/// <summary>
/// Writes a long to off-heap memory, using a byte-wise little-endian
/// write when unaligned access is not allowed.
/// </summary>
private void PutLong(long p, long value)
{
    if (!UnsafeUtil.allowUnalignedMemoryAccess)
    {
        // Safe path for platforms that trap on unaligned stores.
        UnsafeUtil.putLongByteWiseLittleEndian(p, value);
    }
    else
    {
        UnsafeUtil.putLong(p, value);
    }
}
/// <summary>
/// Frees the off-heap memory backing this array. Idempotent: subsequent
/// calls after the first are no-ops.
/// </summary>
public override void Close()
{
    if (_closed)
    {
        return;
    }
    // Allocating 0 bytes actually returns address 0, so a zero-length
    // array has nothing to free.
    if (LengthConflict > 0)
    {
        UnsafeUtil.free(_allocatedAddress, _allocatedBytes, AllocationTracker);
    }
    _closed = true;
}
/// <summary>
/// Grab the flush lock if it is immediately available. Flush locks prevent overlapping exclusive locks,
/// but do not invalidate optimistic read locks, nor do they prevent overlapping write locks. Only one flush lock
/// can be held at a time. If any flush or exclusive lock is already held, the attempt to take the flush lock will
/// fail.
/// <para>
/// Successfully grabbed flush locks must always be paired with a corresponding
/// <seealso cref="unlockFlush(long, long, bool)"/>.
///
/// </para>
/// </summary>
/// <returns> If the lock is successfully grabbed, the method will return a stamp value that must be passed to the
/// <seealso cref="unlockFlush(long, long, bool)"/>, and which is used for detecting any overlapping write locks. If the
/// flush lock could not be taken, {@code 0} will be returned. </returns>
public static long TryFlushLock(long address)
{
    long s = GetState(address);
    // Proceed only when neither the flush nor exclusive bits (_faeMask) are set.
    if ((s & _faeMask) == 0)
    {
        // Set the flush bit; the resulting state doubles as the returned stamp.
        long n = s + _flsMask;
        // Single CAS attempt — no retry loop by design.
        bool res = CompareAndSetState(address, s, n);
        UnsafeUtil.storeFence();
        return(res ? n : 0);
    }
    return(0);
}
/// <summary>
/// Resets every item in the array to the default value, using a bulk memset
/// when all bytes of the default are identical.
/// </summary>
public override void Clear()
{
    if (IsByteUniform(_defaultValue))
    {
        // Every byte of the default is the same, so one memset covers the array.
        UnsafeUtil.setMemory(address, length << Shift, ( sbyte )_defaultValue);
    }
    else
    {
        // Otherwise write the default value item by item.
        long adr = address;
        for (long i = 0; i < length; i++)
        {
            UnsafeUtil.putLong(adr, _defaultValue);
            adr += itemSize;
        }
    }
}
/// <summary>
/// Clears the given lock bit from the state word via a CAS retry loop.
/// Throws if the bit is not in an unlockable state.
/// </summary>
private void DoUnlock(long targetLockBit)
{
    long observed;
    long cleared;
    do
    {
        observed = _state;
        if (!CanUnlock(observed, targetLockBit))
        {
            throw new System.InvalidOperationException("Can not unlock lock that is already locked");
        }
        // Drop only the requested bit; leave all other state intact.
        cleared = observed & ~targetLockBit;
    } while (!UnsafeUtil.compareAndSwapLong(this, _stateOffset, observed, cleared));
}
/// <summary>
/// Returns a process-wide victim page of at least <paramref name="pageSize"/> bytes,
/// growing (never shrinking) the cached allocation as needed.
/// </summary>
internal static long GetVictimPage(int pageSize, MemoryAllocationTracker allocationTracker)
{
    lock (typeof(VictimPageReference))
    {
        if (_victimPageSize < pageSize)
        {
            // Note that we NEVER free any old victim pages. This is important because we cannot tell
            // when we are done using them. Therefore, victim pages are allocated and stay allocated
            // until our process terminates.
            _victimPagePointer = UnsafeUtil.allocateMemory(pageSize, allocationTracker);
            _victimPageSize = pageSize;
        }
        return _victimPagePointer;
    }
}
//JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
//ORIGINAL LINE: @Test void allocatedPointerMustBeAlignedToArbitraryByte()
/// <summary>
/// For every initial offset 0..7 and every alignment 1..pageSize-1, an aligned
/// allocation must return an address divisible by the requested alignment.
/// </summary>
internal virtual void AllocatedPointerMustBeAlignedToArbitraryByte()
{
    int pageSize = UnsafeUtil.pageSize();
    for (int initialOffset = 0; initialOffset < 8; initialOffset++)
    {
        for (int i = 0; i < pageSize - 1; i++)
        {
            // Fresh allocator each time, skewed by a small initial allocation.
            MemoryAllocator mman = CreateAllocator(_onePage);
            mman.AllocateAligned(initialOffset, 1);
            long alignment = 1 + i;
            long address = mman.AllocateAligned(Org.Neo4j.Io.pagecache.PageCache_Fields.PAGE_SIZE, alignment);
            assertThat("With initial offset " + initialOffset + ", iteration " + i + ", aligning to " + alignment + " and got address " + address, address % alignment, @is(0L));
        }
    }
}
/// <summary>
/// Sets the given lock bit in the state word via a CAS retry loop,
/// sleeping while the bit cannot currently be taken.
/// </summary>
private void DoLock(long targetLockBit)
{
    long observed;
    long updated;
    do
    {
        observed = _state;
        // Spin (with sleeps) until the requested bit becomes lockable.
        while (!CanLock(observed, targetLockBit))
        {
            Sleep();
            observed = _state;
        }
        updated = observed | targetLockBit;
    } while (!UnsafeUtil.compareAndSwapLong(this, _stateOffset, observed, updated));
}
//JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
//ORIGINAL LINE: @Test void allocatedPointersMustBeAlignedPastMemoryLimit()
/// <summary>
/// Alignment guarantees must continue to hold even after the allocator has
/// been pushed past its configured memory limit.
/// </summary>
internal virtual void AllocatedPointersMustBeAlignedPastMemoryLimit()
{
    MemoryAllocator mman = CreateAllocator(_onePage);
    // Exhaust (and exceed) the one-page budget with many tiny 2-aligned allocations.
    for (int i = 0; i < 4100; i++)
    {
        assertThat(mman.AllocateAligned(1, 2) % 2, @is(0L));
    }
    int pageSize = UnsafeUtil.pageSize();
    // Now verify arbitrary alignments still hold past the limit.
    for (int i = 0; i < pageSize - 1; i++)
    {
        int alignment = pageSize - i;
        long address = mman.AllocateAligned(Org.Neo4j.Io.pagecache.PageCache_Fields.PAGE_SIZE, alignment);
        assertThat("iteration " + i + ", aligning to " + alignment, address % alignment, @is(0L));
    }
}
/// <summary>
/// Atomically releases one write lock and, if no flush or exclusive lock is held
/// afterwards, takes the flush lock in the same CAS transition.
/// </summary>
/// <param name="address"> address of the lock state word. </param>
/// <returns> the new state word as the flush-lock stamp if the flush lock was taken,
/// otherwise 0. </returns>
public static long UnlockWriteAndTryTakeFlushLock(long address)
{
    long s; // observed state
    long n; // proposed next state
    long r; // result stamp: new state if flush lock was taken, else 0
    do
    {
        r = 0;
        s = GetState(address);
        // Zero writer count means there is no write lock to release.
        if ((s & _cntMask) == 0)
        {
            ThrowUnmatchedUnlockWrite(s);
        }
        // NextSeq presumably advances the sequence portion of the word
        // (invalidating optimistic readers); then drop one writer.
        n = NextSeq(s) - _cntUnit;
        // If neither the flush nor exclusive bits (_faeMask) remain set,
        // take the flush lock in the same transition.
        if ((n & _faeMask) == 0)
        {
            n += _flsMask;
            r = n;
        }
    } while (!CompareAndSetState(address, s, n));
    UnsafeUtil.storeFence();
    return(r);
}
/// <summary>
/// Writes a single byte to off-heap memory. Single-byte stores never
/// need alignment handling.
/// </summary>
private void PutByte(long p, sbyte value)
{
    UnsafeUtil.putByte(p, value);
}
/// <summary>
/// Writes a single byte at the given item index and byte offset.
/// </summary>
public override void SetByte(long index, int offset, sbyte value)
{
    long target = Address(index, offset);
    UnsafeUtil.putByte(target, value);
}
/// <summary>
/// Reads a single byte from off-heap memory. Single-byte loads never
/// need alignment handling.
/// </summary>
private sbyte GetByte(long p)
{
    sbyte b = UnsafeUtil.getByte(p);
    return b;
}