/// <summary>
/// Read all database pages inside file with no cache using. PageBuffers dont need to be Released
/// </summary>
public IEnumerable<PageBuffer> ReadFull(FileOrigin origin)
{
    // do not use MemoryCache factory - reuse same buffer array (one page per time)
    // do not use BufferPool because header page can't be shared (byte[] is used inside page return)
    var buffer = new byte[PAGE_SIZE];

    var pool = origin == FileOrigin.Log ? _logPool : _dataPool;
    var stream = pool.Rent();

    try
    {
        // get length before starts (avoid grow during loop)
        var length = this.GetLength(origin);

        stream.Position = 0;

        while (stream.Position < length)
        {
            var position = stream.Position;

            // Stream.Read may return fewer bytes than requested; the original
            // single Read call could leave the stream misaligned and yield a
            // partially-filled page. Loop until a full page is read or EOF.
            var read = 0;
            while (read < PAGE_SIZE)
            {
                var bytes = stream.Read(buffer, read, PAGE_SIZE - read);
                if (bytes == 0) break; // unexpected end of stream
                read += bytes;
            }

            yield return new PageBuffer(buffer, 0, 0)
            {
                Position = position,
                Origin = origin,
                ShareCounter = 0
            };
        }
    }
    finally
    {
        // always hand the rented stream back, even if enumeration stops early
        pool.Return(stream);
    }
}
/// <summary>
/// Get page from clean cache (readable). If page not exits, create this new page and load data using factory fn
/// </summary>
/// <param name="position">Page position inside the file (byte offset)</param>
/// <param name="origin">Which physical file (data/log) the page belongs to</param>
/// <param name="factory">Callback that loads page content from the disk stream on cache miss</param>
public PageBuffer GetReadablePage(long position, FileOrigin origin, Action<long, BufferSlice> factory)
{
    // get dict key based on position/origin (data = positive, log = negative - see GetReadableKey)
    var key = this.GetReadableKey(position, origin);

    // try get from _readble dict or create new
    // NOTE(review): if _readable is a ConcurrentDictionary, GetOrAdd may invoke this
    // factory more than once under contention (only one result is kept) - confirm
    // GetFreePage tolerates a page being taken and then discarded
    var page = _readable.GetOrAdd(key, (k) =>
    {
        // get new page from _free pages (or extend)
        var newPage = this.GetFreePage();

        newPage.Position = position;
        newPage.Origin = origin;

        // load page content with disk stream
        factory(position, newPage);

        return (newPage);
    });

    // update LRU timestamp atomically (used to pick eviction victims)
    Interlocked.Exchange(ref page.Timestamp, DateTime.UtcNow.Ticks);

    // increment share counter - caller owns one reference and must Release it
    Interlocked.Increment(ref page.ShareCounter);

    return (page);
}
/// <summary>
/// Add a folder in the folder table
/// </summary>
/// <param name="_folderName">Display name of the new folder (escaped before being embedded in SQL)</param>
/// <param name="_idParent">Id of the parent folder - inserted verbatim into the statement</param>
/// <param name="_origin">Origin whose integer value is stored as the folder type</param>
/// <returns>The id of the folder</returns>
public string addFolder(string _folderName, string _idParent, FileOrigin _origin)
{
    // SECURITY NOTE(review): this builds SQL by string formatting. If the query()
    // API supports bound parameters, these values should be parameterized instead.
    // As minimal hardening, embedded double-quotes in the folder name are doubled
    // so the name cannot terminate its quoted literal (SQL injection).
    string folName = _folderName == null ? null : _folderName.Replace("\"", "\"\"");
    string folType = Convert.ToString((int)_origin);

    // TODO(review): _idParent is interpolated raw - validate it is numeric at the caller.
    // get the id of the folder inserted, and return it (last_insert_rowid)
    return Convert.ToString(query(String.Format(
        "INSERT INTO t_folder (folName,folType,idParent) VALUES (\"{0}\", {1}, {2});\nSELECT last_insert_rowid();",
        folName, folType, _idParent)).Rows[0].ItemArray[0]);
}
/// <summary>
/// Create a page buffer over one PAGE_SIZE slice of the given array,
/// initialized as an unused page: no position, no origin, not shared.
/// </summary>
public PageBuffer(byte[] buffer, int offset, int uniqueID)
    : base(buffer, offset, PAGE_SIZE)
{
    this.UniqueID = uniqueID;

    // reset all page state to "never assigned"
    this.ShareCounter = 0;
    this.Timestamp = 0;
    this.Origin = FileOrigin.None;
    this.Position = long.MaxValue;
}
/// <summary>
/// Build a FileOrigin for the given file path, carrying over this retriever's
/// all-users flag.
/// </summary>
protected override IOrigin CreateOriginFrom(string filePath)
{
    var result = new FileOrigin();

    result.IsForAllUsers = this.IsForAllUsers;
    result.Location = filePath;

    return result;
}
/// <summary>
/// Create a media file for the given url/origin. When on-demand initialization
/// is disabled, metadata is loaded eagerly in the constructor.
/// </summary>
public MediaFile(string url, FileOrigin fileOrigin, bool initializeMetadataOnDemand = true)
{
    this.Url = url;
    this.FileOrigin = fileOrigin;
    this.InitializeMetadataOnDemand = initializeMetadataOnDemand;

    // eager path: populate metadata right away
    if (!initializeMetadataOnDemand)
    {
        this.metadata = this.InitializeMetadata();
    }
}
/// <summary>
/// Get virtual file length: real file can be small but async thread can still writing on disk
/// </summary>
public long GetLength(FileOrigin origin)
{
    // the tracked length excludes one page, so add it back (see SetLength)
    var tracked = origin == FileOrigin.Log ? _logLength : _dataLength;

    return tracked + PAGE_SIZE;
}
/// <summary>
/// Read page from disk (dirty, wal or data)
/// Resolution order: (1) this transaction's own dirty pages in the log file,
/// (2) the wal-index for the snapshot read version, (3) the original data file.
/// </summary>
/// <param name="pageID">Page to resolve</param>
/// <param name="origin">Outputs which file the page was read from (always Log or Data here)</param>
/// <param name="position">Outputs the byte position the page was read at</param>
/// <param name="walVersion">Outputs the wal version used for the lookup</param>
private T ReadPage<T>(uint pageID, out FileOrigin origin, out long position, out int walVersion)
    where T : BasePage
{
    // if not inside local pages can be a dirty page saved in log file
    if (_transPages.DirtyPages.TryGetValue(pageID, out var walPosition))
    {
        // read page from log file
        var buffer = _reader.ReadPage(walPosition.Position, _mode == LockMode.Write, FileOrigin.Log);
        var dirty = BasePage.ReadPage<T>(buffer);

        origin = FileOrigin.Log;
        position = walPosition.Position;
        walVersion = _readVersion;

        // a dirty page found here must have been written by this same transaction
        ENSURE(dirty.TransactionID == _transactionID, "this page must came from same transaction");

        return (dirty);
    }

    // now, look inside wal-index
    var pos = _walIndex.GetPageIndex(pageID, _readVersion, out walVersion);

    if (pos != long.MaxValue)
    {
        // read page from log file
        var buffer = _reader.ReadPage(pos, _mode == LockMode.Write, FileOrigin.Log);
        var logPage = BasePage.ReadPage<T>(buffer);

        // clear some data inside this page (will be override when write on log file)
        logPage.TransactionID = 0;
        logPage.IsConfirmed = false;

        origin = FileOrigin.Log;
        position = pos;

        return (logPage);
    }
    else
    {
        // for last chance, look inside original disk data file
        var pagePosition = BasePage.GetPagePosition(pageID);

        // read page from data file
        var buffer = _reader.ReadPage(pagePosition, _mode == LockMode.Write, FileOrigin.Data);
        var diskpage = BasePage.ReadPage<T>(buffer);

        origin = FileOrigin.Data;
        position = pagePosition;

        // data-file pages must never carry log-only header fields
        ENSURE(diskpage.IsConfirmed == false || diskpage.TransactionID != 0, "page are not header-clear in data file");

        return (diskpage);
    }
}
/// <summary>
/// Read one page from disk through the cache, either as a private writable
/// copy or as a shared readable page.
/// </summary>
public PageBuffer ReadPage(long position, bool writable, FileOrigin origin)
{
    ENSURE(position % PAGE_SIZE == 0, "invalid page position");

    // pick the stream matching the requested file
    var stream = origin == FileOrigin.Data ? _dataStream.Value : _logStream.Value;

    if (writable)
    {
        return _cache.GetWritablePage(position, origin, (pos, buf) => this.ReadStream(stream, pos, buf));
    }

    return _cache.GetReadablePage(position, origin, (pos, buf) => this.ReadStream(stream, pos, buf));
}
/// <summary>
/// Entry point: parse command-line parameters, build the parse tree for the
/// input file, lower it to assembly, and optionally assemble it with nasm.
/// </summary>
private static void Main()
{
    var args = Environment.GetCommandLineArgs();
    var parameters = Parameters.parse(args);

    if (parameters == null)
    {
        printHelp(args);
        return;
    }

    if (parameters.outputFile == null)
    {
        parameters.outputFile = System.IO.Path.GetTempFileName();
    }

    // NOTE(review): args[0] is the executable path, so args[1] is the first
    // user argument - confirm Parameters.parse does not also consume it
    var inputFile = new FileOrigin(args[1]);

    try
    {
        var parseTree = CSTBuilder.Build(inputFile);
        Console.WriteLine(parseTree.ToString());

        var backendFunctions = new Frontend().FromParseTreeToBackend(parseTree);
        var backend = new Backend.Backend();

        // when assembling, emit to a temp file and let nasm write the real output
        string assemblyFile = parameters.outputFile;
        if (!parameters.onlyAssembly)
        {
            assemblyFile = System.IO.Path.GetTempFileName();
        }

        generateAssemblyFile(assemblyFile, backend.FromFunctionsToNasm(backendFunctions));

        if (!parameters.onlyAssembly)
        {
            // Process is IDisposable - release the handle when done
            using (var nasm = new Process())
            {
                nasm.StartInfo.FileName = "nasm"; // resolved via $PATH
                nasm.StartInfo.Arguments = assemblyFile + " -o " + parameters.outputFile;
                nasm.Start();
                nasm.WaitForExit();

                // surface assembler failures instead of silently producing no output
                if (nasm.ExitCode != 0)
                {
                    Console.WriteLine("nasm failed with exit code " + nasm.ExitCode);
                }
            }
        }
    }
    catch (CSTBuilder.LexerFailure ex)
    {
        Console.WriteLine("Syntax error:");
        var diagnostics = new SourceDiagnostic();
        diagnostics.PrintFragmentInLine(ex.Fragment);
    }
}
/// <summary>
/// Set new length for file in sync mode. Queue must be empty before set length
/// </summary>
public void SetLength(long length, FileOrigin origin)
{
    // tracked lengths store (file length - one page); see GetLength
    if (origin == FileOrigin.Log)
    {
        ENSURE(_queue.Value.Length == 0, "queue must be empty before set new length");
        Interlocked.Exchange(ref _logLength, length - PAGE_SIZE);
        _logPool.Writer.SetLength(length);
    }
    else
    {
        Interlocked.Exchange(ref _dataLength, length - PAGE_SIZE);
        _dataPool.Writer.SetLength(length);
    }
}
/// <summary>
/// Get unique position in dictionary according with origin. Use positive/negative values
/// </summary>
private long GetReadableKey(long position, FileOrigin origin)
{
    ENSURE(origin != FileOrigin.None, "file origin must be defined");

    // data pages keep their position as-is; log pages use the negated position,
    // with position 0 remapped to long.MinValue so the two key spaces never collide
    if (origin == FileOrigin.Data)
    {
        return position;
    }

    return position == 0 ? long.MinValue : -position;
}
/// <summary>
/// Request for a writable page - no other can read this page and this page has no reference
/// Writable pages can be MoveToReadable() or DiscardWritable() - but never Released()
/// </summary>
public PageBuffer GetWritablePage(long position, FileOrigin origin, Action<long, BufferSlice> factory)
{
    var cacheKey = this.GetReadableKey(position, origin);

    // writable pages always get a brand new buffer of their own
    var writable = this.NewPage(position, origin);

    if (_readable.TryGetValue(cacheKey, out var clean))
    {
        // a clean copy is already cached: duplicate its bytes and skip the disk read
        Buffer.BlockCopy(clean.Array, clean.Offset, writable.Array, writable.Offset, PAGE_SIZE);
    }
    else
    {
        // cache miss: load page content straight from the stream
        factory(position, writable);
    }

    return writable;
}
/// <summary>
/// Write pages DIRECT in disk with NO queue. This pages are not cached and are not shared - WORKS FOR DATA FILE ONLY
/// </summary>
public void Write(IEnumerable<PageBuffer> pages, FileOrigin origin)
{
    ENSURE(origin == FileOrigin.Data);

    var stream = origin == FileOrigin.Data ? _dataPool.Writer : _logPool.Writer;

    foreach (var page in pages)
    {
        ENSURE(page.ShareCounter == 0, "this page can't be shared to use sync operation - do not use cached pages");

        // track the furthest written position as the new data length
        if (page.Position > _dataLength)
        {
            _dataLength = page.Position;
        }

        stream.Position = page.Position;
        stream.Write(page.Array, page.Offset, PAGE_SIZE);
    }

    stream.FlushToDisk();
}
/// <summary>
/// Create the MediaFile subtype matching the given url's origin.
/// Returns null for a null url; throws NotSupportedException for unknown origins.
/// </summary>
public static MediaFile CreateNew(string url)
{
    if (url == null)
    {
        return null;
    }

    FileOrigin urlOrigin = ParseFileOrigin(url);
    switch (urlOrigin)
    {
        case FileOrigin.Vbox7:
            return new VboxFile(url);
        case FileOrigin.SoundCloud:
            return new SoundCloudFile(url);
        default:
            // fixed broken message: was "Url <url>Is not supported" (no space, wrong case)
            throw new NotSupportedException("Url " + url + " is not supported");
    }
}
/// <summary>
/// Get a valid page for this snapshot (must consider local-index and wal-index)
/// Resolution order: header singleton, this snapshot's local pages, then disk
/// (log/wal/data) via ReadPage - disk hits are cached in _localPages.
/// </summary>
/// <param name="pageID">Page to resolve; must be &lt;= header.LastPageID and non-zero</param>
/// <param name="origin">Outputs source file; FileOrigin.None when served from memory (header/local)</param>
/// <param name="position">Outputs byte position on disk; 0 when served from memory</param>
/// <param name="walVersion">Outputs wal version used; 0 when served from memory</param>
public T GetPage<T>(uint pageID, out FileOrigin origin, out long position, out int walVersion)
    where T : BasePage
{
    ENSURE(pageID <= _header.LastPageID, "request page must be less or equals lastest page in data file");
    ENSURE(pageID != 0, "header page must be used by _header instance");

    // check for header page (return header single instance)
    //TODO: remove this
    if (pageID == 0)
    {
        origin = FileOrigin.None;
        position = 0;
        walVersion = 0;

        return (T)(object)_header;
    }

    // look for this page inside local pages
    if (_localPages.TryGetValue(pageID, out var page))
    {
        origin = FileOrigin.None;
        position = 0;
        walVersion = 0;

        return (T)page;
    }

    // if page is not in local cache, get from disk (log/wal/data)
    page = this.ReadPage<T>(pageID, out origin, out position, out walVersion);

    // add into local pages so later requests in this snapshot hit memory
    _localPages[pageID] = page;

    // increment transaction size counter
    _transPages.TransactionSize++;

    return (T)page;
}
/// <summary>
/// Create new page using an empty buffer block. Mark this page as writable.
/// </summary>
private PageBuffer NewPage(long position, FileOrigin origin)
{
    var page = this.GetFreePage();

    page.Position = position;

    // mark as writable
    page.ShareCounter = BUFFER_WRITABLE;

    // Timestamp == 0 means the page was never used, so its buffer is already
    // zeroed; any reused page must be wiped before handing it out
    if (page.Timestamp > 0)
    {
        page.Clear();
    }

    ENSURE(page.All(0), "new page must be full zero empty before return");

    page.Origin = origin;
    page.Timestamp = DateTime.UtcNow.Ticks;

    return page;
}
/// <summary>
/// Dump every raw page of the data or log file as one BsonDocument per page,
/// decoding extra fields for header and collection pages.
/// </summary>
/// <param name="origin">Which file to dump (data or log)</param>
private IEnumerable<BsonDocument> SysDump(FileOrigin origin)
{
    // reverse map: collection pageID -> collection name, for the "collection" column
    var collections = _header.GetCollections().ToDictionary(x => x.Value, x => x.Key);

    foreach (var buffer in _disk.ReadFull(origin))
    {
        var page = new BasePage(buffer);
        var pageID = page.PageID;

        if (origin == FileOrigin.Data && buffer.Position > 0 && pageID == 0)
        {
            // this will fix print PageID in data file bubbles pages
            // (zeroed pages report PageID 0; derive it from the file offset instead)
            pageID = (uint)(buffer.Position / PAGE_SIZE);
        }

        // common header fields present on every page type
        var doc = new BsonDocument();
        doc["_position"] = (int)buffer.Position;
        doc["pageID"] = (int)pageID;
        doc["pageType"] = page.PageType.ToString();
        doc["nextPageID"] = dumpPageID(page.NextPageID);
        doc["prevPageID"] = dumpPageID(page.PrevPageID);
        doc["collection"] = collections.GetOrDefault(page.ColID, "-");
        doc["transactionID"] = (int)page.TransactionID;
        doc["isConfirmed"] = page.IsConfirmed;
        doc["itemsCount"] = (int)page.ItemsCount;
        doc["freeBytes"] = page.FreeBytes;
        doc["usedBytes"] = (int)page.UsedBytes;
        doc["fragmentedBytes"] = (int)page.FragmentedBytes;
        doc["nextFreePosition"] = (int)page.NextFreePosition;
        doc["highestIndex"] = (int)page.HighestIndex;

        if (page.PageType == PageType.Header)
        {
            // re-read the buffer as a header page to expose header-only fields
            var header = new HeaderPage(buffer);

            doc["freeEmptyPageID"] = dumpPageID(header.FreeEmptyPageID);
            doc["lastPageID"] = (int)header.LastPageID;
            doc["creationTime"] = header.CreationTime;
            doc["userVersion"] = header.UserVersion;
            doc["collections"] = new BsonDocument(header.GetCollections().ToDictionary(x => x.Key, x => new BsonValue((int)x.Value)));
        }
        else if (page.PageType == PageType.Collection)
        {
            // re-read the buffer as a collection page to expose its free lists and indexes
            var collection = new CollectionPage(buffer);

            doc["lastAnalyzed"] = collection.LastAnalyzed;
            doc["creationTime"] = collection.CreationTime;
            doc["freeDataPageID"] = new BsonArray(collection.FreeDataPageID.Select(x => dumpPageID(x)));
            doc["freeIndexPageID"] = new BsonArray(collection.FreeIndexPageID.Select(x => dumpPageID(x)));
            doc["indexes"] = new BsonArray(collection.GetCollectionIndexes().Select(x => new BsonDocument
            {
                ["name"] = x.Name,
                ["expression"] = x.Expression,
                ["unique"] = x.Unique,
                ["headPageID"] = dumpPageID(x.Head.PageID),
                ["tailPageID"] = dumpPageID(x.Tail.PageID),
                ["maxLevel"] = (int)x.MaxLevel,
                ["keyCount"] = (int)x.KeyCount,
                ["uniqueKeyCount"] = (int)x.UniqueKeyCount
            }));
        }

        yield return (doc);
    }

    // uint.MaxValue marks "no page" link - render it as BSON null
    BsonValue dumpPageID(uint pageID)
    {
        return (pageID == uint.MaxValue ? BsonValue.Null : new BsonValue((int)pageID));
    }
}
/// <summary>
/// Get Stream pool used inside disk service
/// </summary>
public StreamPool GetPool(FileOrigin origin)
{
    if (origin == FileOrigin.Data)
    {
        return _dataPool;
    }

    return _logPool;
}
/// <summary>
/// Get file name (or Stream name)
/// </summary>
public string GetName(FileOrigin origin)
{
    if (origin == FileOrigin.Data)
    {
        return _dataFactory.Name;
    }

    return _logFactory.Name;
}
/// <summary>
/// Retrieve database info via a fresh FileOrigin instance.
/// </summary>
private DbInfo GetFileInfo()
{
    var source = new FileOrigin();
    return source.GetDbInfo();
}