/// <summary> Reads the next chunk of entries from the stream, moving forwards. </summary>
/// <remarks> Returns the number of entries decompressed into entriesPtr; 0 when at end of stream. </remarks>
public unsafe override int ReadForward(Stream s, byte[] bulk, BlockDBEntry *entriesPtr) {
    long pos = s.Position;
    // Version 2 expects all chunks to be aligned to 4096 bytes
    if (pos < BlockSize) { s.Position = BlockSize; pos = BlockSize; }

    long remaining = s.Length - pos;
    if (remaining == 0) { return(0); }

    // Never read more than one compressed chunk at a time
    int bytes  = (int)Math.Min(remaining, BlockSize);
    int offset = bulk.Length - BlockSize;
    // NOTE: bulk and entriesPtr point to same thing
    //  But we read into the end of the bulk array, thus the entriesPtr pointing
    //  to start of array never ends up overlapping with the data being read
    BlockDBFile.ReadFully(s, bulk, offset, bytes);
    return(DecompressChunk(bulk, offset, entriesPtr));
}
/// <summary> Checks if the backing file exists on disc, and if not, creates it. </summary>
/// <remarks> If the dimensions stored on disc are smaller than those in memory,
/// the backing file is resized to match. </remarks>
/// <returns> The format of the backing file (defaults to V1 when newly created). </returns>
BlockDBFile ValidateBackingFile() {
    BlockDBFile format = BlockDBFile.V1;
    Vec3U16 fileDims;

    if (File.Exists(FilePath)) {
        using (Stream s = OpenRead()) {
            format = BlockDBFile.ReadHeader(s, out fileDims);
        }
        bool tooSmall = fileDims.X < Dims.X || fileDims.Y < Dims.Y || fileDims.Z < Dims.Z;
        if (tooSmall) BlockDBFile.ResizeBackingFile(this);
    } else {
        // No file yet - write a fresh header using the in-memory dimensions
        using (Stream s = OpenWrite()) {
            fileDims = Dims;
            BlockDBFile.WriteHeader(s, fileDims);
        }
    }
    return format;
}
/// <summary> Loads the map dimensions from the backing file's header, if it exists. </summary>
void ReadDimensions() {
    if (!File.Exists(FilePath)) return;

    using (Stream s = File.OpenRead(FilePath)) {
        BlockDBFile.ReadHeader(s, out Dims);
    }
}
/// <summary> Counts the total number of BlockDB entries, both in memory and on disc. </summary>
/// <returns> Total entry count, or -1 if the read lock could not be acquired within 5 seconds. </returns>
public long TotalEntries() {
    using (IDisposable rLock = Locker.AccquireRead(5 * 1000)) {
        if (rLock == null) return -1;

        long onDisc = BlockDBFile.CountEntries(MapName);
        return Cache.Count + onDisc;
    }
}
/// <summary> Reads the next batch of entries from the stream, moving forwards. </summary>
/// <returns> Number of entries read into bulk; 0 when at end of stream. </returns>
public unsafe override int ReadForward(Stream s, byte[] bulk, BlockDBEntry *entriesPtr) {
    long entriesLeft = (s.Length - s.Position) / EntrySize;
    int count = (int)Math.Min(entriesLeft, BulkEntries);

    if (count == 0) return 0;
    BlockDBFile.ReadFully(s, bulk, 0, count * EntrySize);
    return count;
}
/// <summary> Reads the 64-bit entry count stored at byte offset 16 in the stream. </summary>
public override long CountEntries(Stream s) {
    byte[] raw = new byte[8];
    s.Position = 16;
    BlockDBFile.ReadFully(s, raw, 0, raw.Length);

    // Combine the two halves into a 64-bit value, low word first
    ulong lo = (uint)ReadInt32(raw, 0);
    ulong hi = (uint)ReadInt32(raw, 4);
    return (long)(lo | (hi << 32));
}
/// <summary> Writes buffered entries to the underlying stream. </summary>
/// <param name="force"> When false, only flushes once at least 4096 entries are buffered. </param>
void WriteBuffer(bool force) {
    bool flushNow = buffer.Count > 0 && (force || buffer.Count >= 4096);
    if (!flushNow) return;

    BlockDBFile.WriteEntries(stream, buffer);
    buffer.Count = 0;
}
/// <summary> Reads the previous batch of entries from the stream, moving backwards. </summary>
/// <returns> Number of entries read into bulk; 0 or less when the header has been reached. </returns>
public unsafe override int ReadBackward(Stream s, byte[] bulk, BlockDBEntry *entriesPtr) {
    long position  = s.Position;
    long available = (position / EntrySize) - HeaderEntries;
    int count = (int)Math.Min(available, BulkEntries);
    if (count <= 0) return count;

    position -= count * EntrySize;
    s.Position = position;
    BlockDBFile.ReadFully(s, bulk, 0, count * EntrySize);
    // rewind so the next backwards read continues from before this batch
    s.Position = position;
    return count;
}
/// <summary> Returns the number of entries in the backing file on disc, or 0 if it does not exist. </summary>
public static long CountEntries(string map) {
    string path = FilePath(map);
    if (!File.Exists(path)) return 0;

    // Shared read access, as the file may be concurrently written to
    using (Stream src = new FileStream(path, FileMode.OpenOrCreate, FileAccess.Read, FileShare.ReadWrite)) {
        Vec3U16 dims;
        BlockDBFile format = ReadHeader(src, out dims);
        return format.CountEntries(src);
    }
}
/// <summary> Flushes the entries from the in-memory cache to disc. </summary>
/// <remarks> You must lock using Locker.AccquireWrite() **before** entering this method. </remarks>
public void FlushCache() {
    if (Cache.Head == null) return;

    BlockDBFile format = ValidateBackingFile();
    using (Stream s = OpenWrite()) {
        // Round the file length down to a whole 16-byte entry - e.g. if a power
        // cut left 21 bytes in the file, writing resumes from byte 16,
        // overwriting the partially written entry
        s.Position = s.Length & ~0x0F;
        format.WriteEntries(s, Cache);
        Cache.Clear();
    }
}
/// <summary> Reads the previous chunk of entries from the stream, moving backwards. </summary>
/// <remarks> Returns the number of entries decompressed into entriesPtr; 0 once the
/// position has reached the first (header) block. </remarks>
public unsafe override int ReadBackward(Stream s, byte[] bulk, BlockDBEntry *entriesPtr) {
    long pos = s.Position;
    if (pos > BlockSize) {
        // Never read more than one compressed chunk at a time;
        // data is read into the end of bulk so it cannot overlap entriesPtr
        // (which points to the start of bulk) during decompression
        int bytes  = (int)Math.Min(pos - BlockSize, BlockSize);
        int offset = bulk.Length - BlockSize;

        pos -= bytes;
        s.Position = pos;
        BlockDBFile.ReadFully(s, bulk, offset, bytes);
        s.Position = pos; // set correct position for next backward read
        return(DecompressChunk(bulk, offset, entriesPtr));
    }
    return(0);
}
/// <summary> Closes the dump stream, then replaces the map's BlockDB file with the dump file. </summary>
void SaveCbdbFile() {
    if (stream == null) return;
    stream.Close();
    stream = null;

    string src = BlockDBFile.DumpPath(mapName);
    string dst = BlockDBFile.FilePath(mapName);
    if (File.Exists(dst)) File.Delete(dst);
    File.Move(src, dst);
}
/// <summary> Appends the entries of the map's existing BlockDB file (minus its header)
/// onto the end of the dump stream. </summary>
void AppendCbdbFile() {
    string path = BlockDBFile.FilePath(mapName);
    if (!File.Exists(path) || stream == null) return;

    byte[] bulk = new byte[4096];
    using (Stream cbdb = File.OpenRead(path)) {
        // BUGFIX: a single Stream.Read call may legally return fewer bytes than
        // requested - if it did, the stream would be left mid-header and every
        // entry copied below would be misaligned. ReadFully loops until the
        // whole header has been consumed.
        BlockDBFile.ReadFully(cbdb, bulk, 0, BlockDBFile.EntrySize); // skip header
        int read;
        while ((read = cbdb.Read(bulk, 0, bulk.Length)) > 0) {
            stream.Write(bulk, 0, read);
        }
    }
}
/// <summary> Finds all block changes by the given player. </summary>
public void FindChangesBy(int id, Action <BlockDBEntry> output, out Vec3U16 dims) {
    dims = default(Vec3U16);
    using (IDisposable readLock = locker.AccquireReadLock()) {
        if (!File.Exists(FilePath)) return;

        using (Stream s = File.OpenRead(FilePath)) {
            BlockDBFile.ReadHeader(s, out dims);
            // Walk entries newest-first, emitting only this player's changes
            BlockDBFile.IterateBackwards(s, entry => {
                if (entry.PlayerID != id) return;
                output(entry);
            });
        }
    }
}
/// <summary> Flushes the cached entries to the backing file on disc. </summary>
public void WriteEntries() {
    using (IDisposable writeLock = locker.AccquireWriteLock()) {
        if (Cache.Count == 0) return;
        ValidateBackingFile();

        using (Stream s = File.OpenWrite(FilePath)) {
            // Round the file length down to a whole 16-byte entry - e.g. if a
            // power cut left 21 bytes in the file, writing resumes from byte 16,
            // overwriting the partially written entry
            s.Position = s.Length & ~0x0F;
            BlockDBFile.WriteEntries(s, Cache);

            lock (CacheLock) {
                Cache = new FastList <BlockDBEntry>();
            }
        }
    }
}
/// <summary> Outputs all block changes which affect the given coordinates. </summary>
/// <remarks> You must lock using Locker.AccquireRead() **before** entering this method. </remarks>
public void FindChangesAt(ushort x, ushort y, ushort z, Action <BlockDBEntry> output) {
    if (!File.Exists(FilePath)) {
        FindInMemoryAt(x, y, z, output);
        return;
    }

    Vec3U16 dims;
    using (Stream s = OpenRead()) {
        BlockDBFile format = BlockDBFile.ReadHeader(s, out dims);
        if (x >= dims.X || y >= dims.Y || z >= dims.Z) return;

        // Packed index: X varies fastest, then Z, then Y
        int index = x + dims.X * (z + dims.Z * y);
        format.FindChangesAt(s, index, output);
    }
    FindInMemoryAt(x, y, z, output);
}
/// <summary> Finds all block changes which affect the given coordinates. </summary>
public void FindChangesAt(ushort x, ushort y, ushort z, Action <BlockDBEntry> output) {
    using (IDisposable readLock = locker.AccquireReadLock()) {
        if (!File.Exists(FilePath)) return;

        using (Stream s = File.OpenRead(FilePath)) {
            Vec3U16 dims;
            BlockDBFile.ReadHeader(s, out dims);
            if (x >= dims.X || y >= dims.Y || z >= dims.Z) return;

            // Packed index: X varies fastest, then Z, then Y
            int index = x + dims.X * (z + dims.Z * y);
            BlockDBFile.FindChangesAt(s, index, output);
        }
    }
}
/// <summary> Outputs all block changes by the given players. </summary>
/// <remarks> You must lock using Locker.AccquireRead() **before** entering this method. </remarks>
/// <returns> whether an entry before start time was reached. </returns>
public bool FindChangesBy(int[] ids, DateTime start, DateTime end, out Vec3U16 dims, Action <BlockDBEntry> output) {
    int startDelta = ClampDelta(start.Subtract(Epoch));
    int endDelta   = ClampDelta(end.Subtract(Epoch));
    dims = Dims;

    // In-memory entries are newest, so search them first
    if (FindInMemoryBy(ids, startDelta, endDelta, output)) return true;
    if (!File.Exists(FilePath)) return false;

    using (Stream s = OpenRead()) {
        BlockDBFile format = BlockDBFile.ReadHeader(s, out dims);
        return format.FindChangesBy(s, ids, startDelta, endDelta, output);
    }
}
/// <summary> Checks if the backing file exists on disc, and if not, creates it. </summary>
/// <remarks> If the dimensions stored on disc are smaller than those in memory,
/// the backing file is resized to match. </remarks>
void ValidateBackingFile() {
    Vec3U16 fileDims;
    if (File.Exists(FilePath)) {
        using (Stream s = File.OpenRead(FilePath)) {
            BlockDBFile.ReadHeader(s, out fileDims);
        }
        bool tooSmall = fileDims.X < Dims.X || fileDims.Y < Dims.Y || fileDims.Z < Dims.Z;
        if (tooSmall) BlockDBFile.ResizeBackingFile(this);
    } else {
        // No file yet - write a fresh header using the in-memory dimensions
        using (Stream s = File.OpenWrite(FilePath)) {
            fileDims = Dims;
            BlockDBFile.WriteHeader(s, fileDims);
        }
    }
}
/// <summary> Converts one database row into a BlockDB entry and buffers it for writing
/// to the dump file. Lazily creates the dump file on the first row. </summary>
/// <remarks> Intended as a per-row callback; arg is passed through unchanged.
/// Once an error occurs, all subsequent rows are skipped. </remarks>
object DumpRow(IDataRecord record, object arg) {
    if (errorOccurred) { return(arg); }

    try {
        // Lazily create the dump file - this way, maps whose BlockDB
        // table has no rows at all never get a dump file created
        if (stream == null) {
            stream = File.Create(BlockDBFile.DumpPath(mapName));
            string lvlPath = LevelInfo.MapPath(mapName);
            dims = IMapImporter.Formats[0].ReadDimensions(lvlPath);
            BlockDBFile.WriteHeader(stream, dims);
        }

        // Only log maps which have a used BlockDB to avoid spam
        entriesWritten++;
        if (entriesWritten == 10) {
            string progress = " (" + DBUpgrader.Progress + ")";
            Logger.Log(LogType.SystemActivity, "Dumping BlockDB for " + mapName + progress);
        }

        // Fill in the shared 'entry' field from this row's columns, then buffer it
        UpdateBlock(record);
        UpdateCoords(record);
        UpdatePlayerID(record);
        UpdateTimestamp(record);

        buffer.Add(entry);
        WriteBuffer(false); // flushes only once enough entries are buffered
    } catch (Exception ex) {
        Logger.LogError(ex);
        errorOccurred = true;
    }
    return(arg);
}