/// <summary>
/// Flushes the given file and closes its swapper, retrying indefinitely until both
/// operations complete without throwing an <see cref="IOException"/>.
/// At most the first exception is printed, and only when exception printing is enabled.
/// </summary>
/// <param name="file"> the paged file to flush and close </param>
private void FlushAndCloseWithoutFail(MuninnPagedFile file)
{
	 bool reportedFailure = false;
	 while (true)
	 {
		  try
		  {
				file.FlushAndForceForClose();
				file.CloseSwapper();
				return; // Both the flush and the close succeeded; we are done.
		  }
		  catch (IOException e)
		  {
				if (_printExceptionsOnClose && !reportedFailure)
				{
					 reportedFailure = true;
					 try
					 {
						  Console.WriteLine(e.ToString());
						  Console.Write(e.StackTrace);
					 }
					 catch (Exception)
					 {
						  // Printing is best-effort; never let it break the retry loop.
					 }
				}
		  }
	 }
}
/// <summary>
/// Releases one reference to the given paged file. When the last reference is
/// released, the file's mapping is unlinked from the list of mappings, the
/// unmapping is reported to the page cache tracer, and the file is flushed
/// and closed.
/// </summary>
/// <param name="file"> the paged file to release a reference to </param>
internal virtual void Unmap(MuninnPagedFile file)
{
	 lock (this)
	 {
		  if (!file.DecrementRefCount())
		  {
				return; // Other references remain; nothing more to do yet.
		  }
		  // This was the last reference: unlink the mapping from the singly linked list.
		  FileMapping previous = null;
		  for (FileMapping mapping = _mappedFiles; mapping != null; mapping = mapping.Next)
		  {
				if (mapping.PagedFile != file)
				{
					 previous = mapping;
					 continue;
				}
				if (previous == null)
				{
					 _mappedFiles = mapping.Next;
				}
				else
				{
					 previous.Next = mapping.Next;
				}
				_pageCacheTracer.unmappedFile(mapping.File);
				FlushAndCloseWithoutFail(file);
				return;
		  }
	 }
}
/// <summary>
/// Creates a cursor factory bound to a single paged file.
/// </summary>
/// <param name="pagedFile"> the paged file that cursors produced by this factory operate on </param>
/// <param name="pageCursorTracerSupplier"> supplies thread-local (transaction-local) page cursor
/// tracers that collect per-thread page cache statistics </param>
/// <param name="pageCacheTracer"> the global page cache tracer </param>
/// <param name="versionContextSupplier"> supplies version contexts </param>
internal CursorFactory(MuninnPagedFile pagedFile, PageCursorTracerSupplier pageCursorTracerSupplier, PageCacheTracer pageCacheTracer, VersionContextSupplier versionContextSupplier)
{
	 _pagedFile = pagedFile;
	 _pageCursorTracerSupplier = pageCursorTracerSupplier;
	 _pageCacheTracer = pageCacheTracer;
	 _versionContextSupplier = versionContextSupplier;
	 // Cache the victim page here so cursors do not have to reach through the page cache for it.
	 _victimPage = pagedFile.PageCache.victimPage;
}
/// <summary>
/// Binds this cursor to the given paged file and file page, and decodes the pin flags
/// into the individual flag fields.
/// </summary>
/// <param name="pagedFile"> the paged file this cursor will operate on </param>
/// <param name="pageId"> the file page id this cursor will be pinned to </param>
/// <param name="pfFlags"> the PF_* pin flags the cursor was opened with </param>
internal void Initialise(MuninnPagedFile pagedFile, long pageId, int pfFlags)
{
	 this.Swapper = pagedFile.Swapper;
	 this.SwapperId = pagedFile.SwapperId;
	 this._filePageSize = pagedFile.FilePageSize;
	 this.PagedFile = pagedFile;
	 this.PageId = pageId;
	 this.PfFlags = pfFlags;
	 this.EagerFlush = IsFlagRaised(pfFlags, PF_EAGER_FLUSH);
	 this.NoFault = IsFlagRaised(pfFlags, PF_NO_FAULT);
	 // A no-fault cursor must never grow the file, so NO_FAULT implies NO_GROW.
	 // Short-circuit '||' replaces the original non-short-circuit '|' on booleans,
	 // which was a leftover from the Java source; the flag test is a pure bit check.
	 this.NoGrow = NoFault || IsFlagRaised(pfFlags, Org.Neo4j.Io.pagecache.PagedFile_Fields.PfNoGrow);
}
/// <summary>
/// Pin the desired file page to this cursor, page faulting it into memory if it isn't there already.
/// </summary>
/// <param name="filePageId"> The file page id we want to pin this cursor to. </param>
/// <param name="writeLock"> 'true' if we will be taking a write lock on the page as part of the pin. </param>
/// <exception cref="IOException"> if anything goes wrong with the pin, most likely during a page fault. </exception>
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: protected void pin(long filePageId, boolean writeLock) throws java.io.IOException
protected internal virtual void Pin(long filePageId, bool writeLock)
{
	 PinEvent = _tracer.beginPin(writeLock, filePageId, Swapper);
	 int chunkId = MuninnPagedFile.ComputeChunkId(filePageId);
	 // The chunkOffset is the addressing offset into the chunk array object for the relevant array slot. Using
	 // this, we can access the array slot with Unsafe.
	 long chunkOffset = MuninnPagedFile.ComputeChunkOffset(filePageId);
	 int[][] tt = PagedFile.translationTable;
	 if (tt.Length <= chunkId)
	 {
		  // The translation table is too small to hold this chunk id; grow it first.
		  tt = ExpandTranslationTableCapacity(chunkId);
	 }
	 int[] chunk = tt[chunkId];

	 // Now, if the reference in the chunk slot is a latch, we wait on it and look up again (in a loop, since the
	 // page might get evicted right after the page fault completes). If we find a page, we lock it and check its
	 // binding (since it might get evicted and faulted into something else in the time between our look up and
	 // our locking of the page). If the reference is null or it referred to a page that had wrong bindings, we CAS
	 // in a latch. If that CAS succeeds, we page fault, set the slot to the faulted in page and open the latch.
	 // If the CAS failed, we retry the look up and start over from the top.
	 for ( ;;)
	 {
		  int mappedPageId = UnsafeUtil.getIntVolatile(chunk, chunkOffset);
		  if (mappedPageId != UNMAPPED_TTE)
		  {
				// We got *a* page, but we might be racing with eviction. To cope with that, we have to take some
				// kind of lock on the page, and check that it is indeed bound to what we expect. If not, then it has
				// been evicted, and possibly even page faulted into something else. In this case, we discard the
				// item and try again, as the eviction thread would have set the chunk array slot to null.
				long pageRef = PagedFile.deref(mappedPageId);
				bool locked = TryLockPage(pageRef);
				// NOTE(review): non-short-circuit '&' means isBoundTo is evaluated even when the lock was not
				// acquired — presumably harmless because isBoundTo only reads; confirm against the Java original.
				if (locked & PagedFile.isBoundTo(pageRef, SwapperId, filePageId))
				{
					 PinCursorToPage(pageRef, filePageId, Swapper);
					 PinEvent.hit();
					 return;
				}
				if (locked)
				{
					 // We got the lock, but the page is bound to something else: release it and retry from the top.
					 UnlockPage(pageRef);
				}
		  }
		  else
		  {
				// The slot is unmapped: take the slow path (wait on a latch, or page fault the page in ourselves).
				if (UncommonPin(filePageId, chunkOffset, chunk))
				{
					 return;
				}
		  }
	 }
}
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: public synchronized java.util.Optional<org.neo4j.io.pagecache.PagedFile> getExistingMapping(java.io.File file) throws java.io.IOException
/// <summary>
/// Looks up an already-mapped paged file for the given file, incrementing its
/// reference count when found.
/// </summary>
/// <param name="file"> the file to look up; resolved to its canonical form before the lookup </param>
/// <returns> the existing mapping wrapped in an optional, or an empty optional when the file is not mapped </returns>
public override Optional <PagedFile> GetExistingMapping(File file)
{
	 lock (this)
	 {
		  AssertHealthy();
		  EnsureThreadsInitialised();

		  file = file.CanonicalFile;
		  MuninnPagedFile pagedFile = TryGetMappingOrNull(file);
		  if (pagedFile != null)
		  {
				pagedFile.IncrementRefCount();
				// The declared return type is Optional<PagedFile>; the bare 'return (pagedFile)' /
				// 'return (null)' in the ported code lost the Optional wrapping from the Java original.
				return Optional.of(pagedFile);
		  }
		  return Optional.empty();
	 }
}
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: public synchronized java.util.List<org.neo4j.io.pagecache.PagedFile> listExistingMappings() throws java.io.IOException
/// <summary>
/// Returns a snapshot of all currently mapped paged files.
/// Reference counts are NOT incremented, so callers must be able to deal with
/// paged files that are closed asynchronously after this method returns.
/// </summary>
public override IList <PagedFile> ListExistingMappings()
{
	 lock (this)
	 {
		  AssertNotClosed();
		  EnsureThreadsInitialised();

		  IList <PagedFile> mappings = new List <PagedFile>();
		  for (FileMapping mapping = _mappedFiles; mapping != null; mapping = mapping.Next)
		  {
				mappings.Add(mapping.PagedFile);
		  }
		  return mappings;
	 }
}
/// <summary>
/// Opens (or reuses) a cursor linked to this one, positioned at the given page id.
/// Any previously opened linked cursor is closed first.
/// </summary>
/// <param name="pageId"> the file page id the linked cursor should be positioned at </param>
/// <exception cref="InvalidOperationException"> if this cursor has already been closed </exception>
public override PageCursor OpenLinkedCursor(long pageId)
{
	 CloseLinkedCursorIfAny();
	 MuninnPagedFile pf = PagedFile;
	 if (pf == null)
	 {
		  // A null paged file means this cursor has been closed.
		  throw new System.InvalidOperationException("Cannot open linked cursor on closed page cursor");
	 }
	 if (LinkedCursor == null)
	 {
		  // No reusable linked cursor: acquire a fresh one from the paged file.
		  LinkedCursor = ( MuninnPageCursor )pf.Io(pageId, PfFlags);
		  LinkedCursor.isLinkedCursor = true;
	 }
	 else
	 {
		  // Reuse the existing linked cursor by re-binding it and rewinding it.
		  LinkedCursor.initialise(pf, pageId, PfFlags);
		  LinkedCursor.rewind();
	 }
	 return LinkedCursor;
}
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: private void flushFile(MuninnPagedFile muninnPagedFile, org.neo4j.io.pagecache.IOLimiter limiter) throws java.io.IOException
/// <summary>
/// Flushes all pages of the given file, tracing the flush and honouring the limiter.
/// A <c>ClosedChannelException</c> is swallowed when the file has no remaining
/// references, because an unmap (which itself implies a flush) must have raced with us.
/// </summary>
/// <param name="muninnPagedFile"> the file to flush </param>
/// <param name="limiter"> the IO limiter to pace the flush with </param>
private void FlushFile(MuninnPagedFile muninnPagedFile, IOLimiter limiter)
{
	 try
	 {
		  using (MajorFlushEvent fileFlush = _pageCacheTracer.beginFileFlush(muninnPagedFile.Swapper))
		  {
				FlushEventOpportunity flushOpportunity = fileFlush.FlushEventOpportunity();
				muninnPagedFile.FlushAndForceInternal(flushOpportunity, false, limiter);
		  }
	 }
	 catch (ClosedChannelException)
	 {
		  if (muninnPagedFile.RefCount > 0)
		  {
				// The file is not supposed to be closed, since we have a positive ref-count, yet we got a
				// ClosedChannelException anyway? It's an odd situation, so let's tell the outside world about
				// this failure. Rethrow with 'throw;' to preserve the original stack trace
				// (the previous 'throw e;' reset it).
				throw;
		  }
		  // Otherwise: The file was closed while we were trying to flush it. Since unmapping implies a flush
		  // anyway, we can safely assume that this is not a problem. The file was flushed, and it doesn't
		  // really matter how that happened. We'll ignore this exception.
	 }
}
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: public synchronized org.neo4j.io.pagecache.PagedFile map(java.io.File file, int filePageSize, java.nio.file.OpenOption... openOptions) throws java.io.IOException
/// <summary>
/// Maps the given file into the page cache, either by handing out an additional
/// reference to an existing mapping, or by creating a new mapping.
/// </summary>
/// <param name="file"> the file to map; resolved to its canonical form first </param>
/// <param name="filePageSize"> the page size to map the file with; must be at most the cache page size </param>
/// <param name="openOptions"> open options: CREATE, TRUNCATE_EXISTING, DELETE_ON_CLOSE,
/// ANY_PAGE_SIZE, NO_CHANNEL_STRIPING; other non-ignored options are rejected </param>
/// <exception cref="ArgumentException"> if the filePageSize is out of range, or conflicts with an existing mapping </exception>
/// <exception cref="NotSupportedException"> if an option is unsupported, or TRUNCATE_EXISTING targets an already-mapped file </exception>
public override PagedFile Map(File file, int filePageSize, params OpenOption[] openOptions)
{
	 lock (this)
	 {
		  AssertHealthy();
		  EnsureThreadsInitialised();
		  if (filePageSize > _cachePageSize)
		  {
				throw new System.ArgumentException("Cannot map files with a filePageSize (" + filePageSize + ") that is greater than the " + "cachePageSize (" + _cachePageSize + ")");
		  }
		  file = file.CanonicalFile;
		  // Decode the open options into individual flags.
		  bool createIfNotExists = false;
		  bool truncateExisting = false;
		  bool deleteOnClose = false;
		  bool anyPageSize = false;
		  bool noChannelStriping = false;
		  foreach (OpenOption option in openOptions)
		  {
				if (option.Equals(StandardOpenOption.CREATE))
				{
					 createIfNotExists = true;
				}
				else if (option.Equals(StandardOpenOption.TRUNCATE_EXISTING))
				{
					 truncateExisting = true;
				}
				else if (option.Equals(StandardOpenOption.DELETE_ON_CLOSE))
				{
					 deleteOnClose = true;
				}
				else if (option.Equals(PageCacheOpenOptions.ANY_PAGE_SIZE))
				{
					 anyPageSize = true;
				}
				else if (option.Equals(PageCacheOpenOptions.NO_CHANNEL_STRIPING))
				{
					 noChannelStriping = true;
				}
				else if (!_ignoredOpenOptions.Contains(option))
				{
					 throw new System.NotSupportedException("Unsupported OpenOption: " + option);
				}
		  }

		  FileMapping current = _mappedFiles;

		  // find an existing mapping
		  while (current != null)
		  {
				if (current.File.Equals(file))
				{
					 MuninnPagedFile pagedFile = current.PagedFile;
					 // An existing mapping may only be reused with the same page size,
					 // unless the caller opted into ANY_PAGE_SIZE.
					 if (pagedFile.PageSize() != filePageSize && !anyPageSize)
					 {
						  string msg = "Cannot map file " + file + " with " + "filePageSize " + filePageSize + " bytes, " + "because it has already been mapped with a " + "filePageSize of " + pagedFile.PageSize() + " bytes.";
						  throw new System.ArgumentException(msg);
					 }
					 if (truncateExisting)
					 {
						  throw new System.NotSupportedException("Cannot truncate a file that is already mapped");
					 }
					 pagedFile.IncrementRefCount();
					 pagedFile.MarkDeleteOnClose(deleteOnClose);
					 return (pagedFile);
				}
				current = current.Next;
		  }

		  // Note: the minimum page size is only validated for NEW mappings; an existing
		  // mapping has already passed this check when it was first created.
		  if (filePageSize < Long.BYTES)
		  {
				throw new System.ArgumentException("Cannot map files with a filePageSize (" + filePageSize + ") that is less than " + Long.BYTES + " bytes");
		  }

		  // there was no existing mapping
		  MuninnPagedFile pagedFile = new MuninnPagedFile(file, this, filePageSize, _swapperFactory, _pageCacheTracer, _pageCursorTracerSupplier, _versionContextSupplier, createIfNotExists, truncateExisting, noChannelStriping);
		  pagedFile.IncrementRefCount();
		  pagedFile.MarkDeleteOnClose(deleteOnClose);
		  // Prepend the new mapping to the linked list of mappings.
		  current = new FileMapping(file, pagedFile);
		  current.Next = _mappedFiles;
		  _mappedFiles = current;
		  _pageCacheTracer.mappedFile(file);
		  return (pagedFile);
	 }
}