/// <summary>
/// Checks whether a pending allocation (encoded as a negative address by
/// Allocate) can now complete; if so, flips the address back to positive.
/// Throws if the address is already non-negative.
/// </summary>
public void CheckForAllocateComplete(ref long address)
{
    if (address >= 0)
    {
        throw new Exception("Address already allocated!");
    }

    PageOffset p = default(PageOffset);
    p.Page = (int)((-address) >> LogPageSizeBits);
    p.Offset = (int)((-address) & PageSizeMask);

    // Check write cache
    int pageIndex = p.Page % BufferSize;
    if (TailPageIndex == pageIndex)
    {
        address = -address;
        return;
    }

    // Check if we can move the head offset
    long currentTailAddress = GetTailAddress();
    PageAlignedShiftHeadAddress(currentTailAddress);

    // Check if we can allocate pageIndex at all
    if ((PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageFlushStatus != FlushStatus.Flushed) ||
        (PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageCloseStatus != CloseStatus.Closed) ||
        (values[pageIndex] == null))
    {
        return;
    }

    // Correct the address and set the write cache
    address = -address;
    if (p.Offset == 0)
    {
        TailPageIndex = pageIndex;
    }
}
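// Both methods in this file manipulate a PageOffset struct that packs a
// 32-bit page number and a 32-bit intra-page offset into a single 64-bit
// word, so that the Interlocked.Add on PageAndOffset in Allocate advances
// the offset atomically. The struct is defined elsewhere; the following is
// only a minimal sketch of the layout the code implies, assuming an
// explicit-layout union on a little-endian target (Offset must occupy the
// low 32 bits for the atomic add of numSlots to land on it).
[System.Runtime.InteropServices.StructLayout(System.Runtime.InteropServices.LayoutKind.Explicit)]
internal struct PageOffsetSketch
{
    [System.Runtime.InteropServices.FieldOffset(0)]
    public int Offset;          // low 32 bits: slot offset within the page

    [System.Runtime.InteropServices.FieldOffset(4)]
    public int Page;            // high 32 bits: logical page number

    [System.Runtime.InteropServices.FieldOffset(0)]
    public long PageAndOffset;  // both fields viewed as one 64-bit word
}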
/// <summary>
/// Atomically reserves numSlots slots at the tail of the log and returns
/// the logical address of the reservation. If the target page is not yet
/// ready (flushed, closed, and allocated), the address is returned negated
/// and the caller must poll CheckForAllocateComplete.
/// </summary>
public long Allocate(int numSlots = 1)
{
    PageOffset localTailPageOffset = default(PageOffset);

    // Determine insertion index by atomically advancing the tail.
    // ReSharper disable once CSharpWarnings::CS0420
#pragma warning disable 420
    localTailPageOffset.PageAndOffset = Interlocked.Add(ref TailPageOffset.PageAndOffset, numSlots);
#pragma warning restore 420

    int page = localTailPageOffset.Page;
    int offset = localTailPageOffset.Offset - numSlots;

    #region HANDLE PAGE OVERFLOW
    /* The following modifications to TailPageOffset and to the allocation
     * itself are correct because only one thread can observe each of the
     * cases below: the tail is a counter, and other threads spin-wait
     * until it has been folded onto the next page. */
    if (localTailPageOffset.Offset >= PageSize)
    {
        if (offset >= PageSize)
        {
            // The tail offset was already past the page boundary before our
            // atomic add. Treat this as a failed attempt and retry.
            var spin = new SpinWait();
            do
            {
                // Give the thread that is handling the overflow time
                // to fold the tail onto the next page.
                while (TailPageOffset.Offset >= PageSize)
                {
                    spin.SpinOnce();
                }

                // ReSharper disable once CSharpWarnings::CS0420
#pragma warning disable 420
                localTailPageOffset.PageAndOffset = Interlocked.Add(ref TailPageOffset.PageAndOffset, numSlots);
#pragma warning restore 420

                page = localTailPageOffset.Page;
                offset = localTailPageOffset.Offset - numSlots;
            } while (offset >= PageSize);
        }

        if (localTailPageOffset.Offset == PageSize)
        {
            // The allocation ends exactly at the page boundary: fold over.
            localTailPageOffset.Page++;
            localTailPageOffset.Offset = 0;
            TailPageOffset = localTailPageOffset;
        }
        else if (localTailPageOffset.Offset > PageSize)
        {
            // Overflows are not allowed; allot the same space at the
            // start of the next page instead.
            localTailPageOffset.Page++;
            localTailPageOffset.Offset = numSlots;
            TailPageOffset = localTailPageOffset;
            page = localTailPageOffset.Page;
            offset = 0;
        }
    }
    #endregion

    long address = (((long)page) << LogPageSizeBits) | ((long)offset);

    // Check whether TailPageIndex is appropriate and allocated.
    int pageIndex = page % BufferSize;
    if (TailPageIndex == pageIndex)
    {
        return address;
    }

    // Negate the address if the target page is not yet flushed, not yet
    // closed, or not yet allocated; the caller must then poll
    // CheckForAllocateComplete until the page becomes available.
    if ((PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageFlushStatus != FlushStatus.Flushed) ||
        (PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageCloseStatus != CloseStatus.Closed) ||
        (values[pageIndex] == null))
    {
        address = -address;
    }

    // Update the read-only address so that we can get more space for the tail.
    if (offset == 0)
    {
        if (address >= 0)
        {
            TailPageIndex = pageIndex;
            Interlocked.MemoryBarrier();
        }

        long newPage = page + 1;
        int newPageIndex = (int)(newPage % BufferSize);

        long tailAddress = (address < 0 ? -address : address);
        PageAlignedShiftReadOnlyAddress(tailAddress);
        PageAlignedShiftHeadAddress(tailAddress);

        if (values[newPageIndex] == null)
        {
            AllocatePage(newPageIndex);
        }
    }

    return address;
}
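// Usage sketch (illustrative, not part of the original file): Allocate and
// CheckForAllocateComplete together form a two-phase protocol. Allocate
// returns a negated address when the target page is not yet ready, and the
// caller polls CheckForAllocateComplete until the sign flips positive. A
// hypothetical helper that wraps the two calls might look like this:
private long AllocateWithRetry(int numSlots)
{
    long address = Allocate(numSlots);

    // A negative address means the page was not yet flushed/closed/allocated;
    // spin until CheckForAllocateComplete turns it positive.
    var spinner = new SpinWait();
    while (address < 0)
    {
        CheckForAllocateComplete(ref address);
        if (address < 0)
        {
            spinner.SpinOnce();
        }
    }

    return address;
}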