/// <summary> Iterates from the very oldest to newest entry in the BlockDB,
/// only outputting entries whose packed coordinate index equals the given index. </summary>
public void FindChangesAt(Stream s, int index, Action <BlockDBEntry> output) {
    byte[] bulk = new byte[BulkEntries * EntrySize];
    fixed(byte *ptr = bulk) {
        while (true) {
            // Rewind to the start of the scratch buffer for each bulk read.
            BlockDBEntry *entryPtr = (BlockDBEntry *)ptr;
            int count = ReadForward(s, bulk, entryPtr);
            if (count == 0) { return; } // no more entries in the stream

            for (int i = 0; i < count; i++) {
                if (entryPtr->Index == index) { output(*entryPtr); }
                entryPtr++;
            }
        }
    }
}
/// <summary> Reads the next compressed chunk of entries in a forward streaming manner. </summary>
/// <returns> The number of entries decompressed into entriesPtr; 0 at end of stream. </returns>
public unsafe override int ReadForward(Stream s, byte[] bulk, BlockDBEntry *entriesPtr) {
    long pos = s.Position;
    // Version 2 expects all chunks to be aligned to 4096 bytes,
    // so skip forward over the (padded) header block on the first read.
    if (pos < BlockSize) { s.Position = BlockSize; pos = BlockSize; }

    long remaining = s.Length - pos;
    if (remaining == 0) { return(0); }

    // Read at most one aligned chunk per call.
    int bytes = (int)Math.Min(remaining, BlockSize);
    int offset = bulk.Length - BlockSize;
    // NOTE: bulk and entriesPtr point to same thing
    // But we read into the end of the bulk array, thus the entriesPtr pointing
    // to start of array never ends up overlapping with the data being read
    BlockDBFile.ReadFully(s, bulk, offset, bytes);
    return(DecompressChunk(bulk, offset, entriesPtr));
}
// Higher level helpers

/// <summary> Iterates from the very oldest to newest entry in the BlockDB,
/// only outputting entries that are at the given packed coordinates. </summary>
public static void FindChangesAt(Stream s, int index, Action <BlockDBEntry> output) {
    byte[] bulk = new byte[BulkEntries * EntrySize];
    fixed(byte *ptr = bulk) {
        BlockDBEntry* entries = (BlockDBEntry*)ptr;
        int remaining = (int)(s.Length / EntrySize) - HeaderEntries;

        while (remaining > 0) {
            // Pull in the next batch of raw entries.
            int batch = remaining < BulkEntries ? remaining : BulkEntries;
            ReadFully(s, bulk, batch * EntrySize);

            for (int i = 0; i < batch; i++) {
                if (entries[i].Index == index) { output(entries[i]); }
            }
            remaining -= batch;
        }
    }
}
/// <summary> Iterates from the very newest to oldest entry in the BlockDB. </summary>
/// <remarks> Chunks are read from the end of the file towards the header. Within
/// each chunk, entries must also be walked last-to-first so the overall output
/// order really is newest to oldest; the previous implementation walked each
/// chunk forwards, emitting entries oldest-first within every chunk. </remarks>
public static void IterateBackwards(Stream s, Action <BlockDBEntry> output) {
    byte[] bulk = new byte[BulkEntries * EntrySize];
    fixed(byte *ptr = bulk) {
        int entries = (int)(s.Length / EntrySize) - HeaderEntries;
        s.Position = s.Length;

        while (entries > 0) {
            int count = Math.Min(entries, BulkEntries);
            // Seek back over this chunk; ReadFully advances past it again, so the
            // next iteration's subtraction lands on the preceding chunk.
            s.Position -= count * EntrySize;
            ReadFully(s, bulk, count * EntrySize);

            // BUGFIX: walk the chunk from its last entry back to its first,
            // matching the documented newest-to-oldest iteration order.
            BlockDBEntry *entryPtr = (BlockDBEntry *)ptr + (count - 1);
            for (int i = 0; i < count; i++) {
                output(*entryPtr);
                entryPtr--;
            }
            entries -= count;
        }
    }
}
/// <summary> Reads a block of raw entries in a forward streaming manner. </summary>
/// <returns> The number of entries read; 0 once the end of the stream is reached. </returns>
public unsafe override int ReadForward(Stream s, byte[] bulk, BlockDBEntry *entriesPtr) {
    long entriesLeft = (s.Length - s.Position) / EntrySize;
    int toRead = entriesLeft < BulkEntries ? (int)entriesLeft : BulkEntries;

    if (toRead > 0) {
        BlockDBFile.ReadFully(s, bulk, 0, toRead * EntrySize);
    }
    return(toRead);
}
/// <summary> Returns every recorded entry located at the given coordinates. </summary>
/// <exception cref="InvalidOperationException"> BlockDB is disabled. </exception>
public BlockDBEntry[] Lookup(Vector3I coords) {
    if (!IsEnabled || !IsEnabledGlobally) {
        throw new InvalidOperationException("Trying to lookup on disabled BlockDB.");
    }
    List <BlockDBEntry> matches = new List <BlockDBEntry>();
    if (!World.LoadMap().InBounds(coords)) { return(matches.ToArray()); }

    if (isPreloaded) {
        // Scan the in-memory cache under the lock.
        lock ( SyncRoot ) {
            fixed(BlockDBEntry *cache = cacheStore) {
                for (int i = 0; i < CacheSize; i++) {
                    BlockDBEntry e = cache[i];
                    if (e.X == coords.X && e.Y == coords.Y && e.Z == coords.Z) {
                        matches.Add(e);
                    }
                }
            }
        }
    } else {
        // Not preloaded: flush pending writes, then scan the raw file bytes.
        Flush();
        byte[] raw = Load();
        int total = raw.Length / BlockDBEntry.Size;
        fixed(byte *rawPtr = raw) {
            BlockDBEntry *stored = (BlockDBEntry *)rawPtr;
            for (int i = 0; i < total; i++) {
                BlockDBEntry e = stored[i];
                if (e.X == coords.X && e.Y == coords.Y && e.Z == coords.Z) {
                    matches.Add(e);
                }
            }
        }
    }
    return(matches.ToArray());
}
/// <summary> Reads a block of raw entries in a backward streaming manner. </summary>
/// <returns> The number of entries read; 0 once only the header remains. </returns>
public unsafe override int ReadBackward(Stream s, byte[] bulk, BlockDBEntry *entriesPtr) {
    long position = s.Position;
    long entriesBefore = (position / EntrySize) - HeaderEntries;
    int count = entriesBefore < BulkEntries ? (int)entriesBefore : BulkEntries;
    if (count <= 0) { return(count); }

    position -= count * EntrySize;
    s.Position = position;
    BlockDBFile.ReadFully(s, bulk, 0, count * EntrySize);
    s.Position = position; // set correct position for next backward read
    return(count);
}
/// <summary> Reads a block of compressed entries in a backward streaming manner. </summary>
/// <returns> The number of entries decompressed; 0 once only the header block remains. </returns>
public unsafe override int ReadBackward(Stream s, byte[] bulk, BlockDBEntry *entriesPtr) {
    long position = s.Position;
    if (position <= BlockSize) { return(0); } // nothing left before this point but the header

    long available = position - BlockSize;
    int chunkBytes = available < BlockSize ? (int)available : BlockSize;
    int dataOffset = bulk.Length - BlockSize;

    position -= chunkBytes;
    s.Position = position;
    BlockDBFile.ReadFully(s, bulk, dataOffset, chunkBytes);
    s.Position = position; // set correct position for next backward read
    return(DecompressChunk(bulk, dataOffset, entriesPtr));
}
/// <summary> Counts entries that are newer than the given age. </summary>
/// <param name="age"> Maximum age of entry </param>
/// <returns> Number of entries newer than given age.
/// 0 if all entries are older than given age.
/// -1 if all entries are newer than given age. </returns>
int CountNewerEntries(TimeSpan age) {
    if (age < TimeSpan.Zero) {
        throw new ArgumentOutOfRangeException("age", "Age must be non-negative.");
    }
    // Entries with a Unix timestamp at or after this cutoff count as "newer".
    int minTimestamp = (int)DateTime.UtcNow.Subtract(age).ToUnixTime();

    if (isPreloaded) {
        fixed(BlockDBEntry *ptr = cacheStore) {
            // NOTE(review): the early return relies on the cache having a
            // specific timestamp ordering (not visible here) - confirm against
            // how cacheStore is appended to.
            for (int i = 0; i < CacheSize; i++) {
                if (ptr[i].Timestamp < minTimestamp) { return(CacheSize - i); }
            }
        }
        return(-1); // no entry older than the cutoff was found
    } else {
        byte[] bytes = Load();
        int entryCount = bytes.Length / BlockDBEntry.Size;
        fixed(byte *parr = bytes) {
            BlockDBEntry *entries = (BlockDBEntry *)parr;
            // Scan from the end of the file towards the start, stopping at the
            // first entry older than the cutoff.
            for (int i = entryCount - 1; i >= 0; i--) {
                if (entries[i].Timestamp < minTimestamp) { return(entryCount - i); }
            }
        }
        return(-1); // no entry older than the cutoff was found
    }
}
/// <summary> Rewrites the backing file, remapping every entry's packed index
/// from the dimensions stored in the old header to the database's current dimensions. </summary>
public static void ResizeBackingFile(BlockDB db) {
    Logger.Log(LogType.BackgroundActivity, "Resizing BlockDB for " + db.MapName);
    string filePath = FilePath(db.MapName);
    string tempPath = TempPath(db.MapName);

    using (Stream src = File.OpenRead(filePath), dst = File.Create(tempPath)) {
        Vec3U16 dims;
        ReadHeader(src, out dims);
        WriteHeader(dst, db.Dims);
        int width = db.Dims.X, length = db.Dims.Z;

        byte[] bulk = new byte[BulkEntries * EntrySize];
        fixed(byte *ptr = bulk) {
            BlockDBEntry *entryPtr = (BlockDBEntry *)ptr;
            int count;
            while ((count = V1.ReadForward(src, bulk, entryPtr)) != 0) {
                for (int i = 0; i < count; i++) {
                    // Unpack x/y/z using the old dims, repack using the new dims.
                    int index = entryPtr[i].Index;
                    int x = index % dims.X;
                    int y = (index / dims.X) / dims.Z;
                    int z = (index / dims.X) % dims.Z;
                    entryPtr[i].Index = (y * length + z) * width + x;
                }
                dst.Write(bulk, 0, count * EntrySize);
            }
        }
    }
    File.Delete(filePath);
    File.Move(tempPath, filePath);
}
/// <summary> Rewrites the backing file, remapping each entry's packed index
/// from the dimensions in the old header to the database's current dimensions. </summary>
public static void ResizeBackingFile(BlockDB db) {
    Server.s.Log("Resizing BlockDB for " + db.MapName, true);
    string filePath = FilePath(db.MapName);
    string tempPath = TempPath(db.MapName);

    using (Stream src = File.OpenRead(filePath), dst = File.Create(tempPath)) {
        Vec3U16 dims;
        ReadHeader(src, out dims);
        WriteHeader(dst, db.Dims);
        int width = db.Dims.X, length = db.Dims.Z;

        byte[] bulk = new byte[BulkEntries * EntrySize];
        fixed(byte *ptr = bulk) {
            BlockDBEntry *entryPtr = (BlockDBEntry *)ptr;
            int remaining = (int)(src.Length / EntrySize) - 1; // skip the header entry

            while (remaining > 0) {
                int batch = remaining < BulkEntries ? remaining : BulkEntries;
                ReadFully(src, bulk, batch * EntrySize);

                for (int i = 0; i < batch; i++) {
                    // Unpack x/y/z using the old dims, repack using the new dims.
                    int index = entryPtr[i].Index;
                    int x = index % dims.X;
                    int y = (index / dims.X) / dims.Z;
                    int z = (index / dims.X) % dims.Z;
                    entryPtr[i].Index = (y * length + z) * width + x;
                }
                dst.Write(bulk, 0, batch * EntrySize);
                remaining -= batch;
            }
        }
    }
    File.Delete(filePath);
    File.Move(tempPath, filePath);
}
/// <summary> Streams every entry the given format reader produces to output,
/// starting from the stream's current position until no entries remain. </summary>
static unsafe void Read(Stream s, BlockDBFile format, Action <BlockDBEntry> output) {
    byte[] bulk = new byte[BlockDBFile.BulkEntries * BlockDBFile.EntrySize];
    fixed(byte *ptr = bulk) {
        BlockDBEntry *entries = (BlockDBEntry *)ptr;
        int count;
        while ((count = format.ReadForward(s, bulk, entries)) != 0) {
            for (int i = 0; i < count; i++) {
                output(entries[i]);
            }
        }
    }
}
/// <summary> Iterates from the very newest to oldest entry in the BlockDB,
/// only outputting entries made by one of the given player ids whose time delta
/// lies within [start, end]. </summary>
/// <returns> whether an entry before start time was reached. </returns>
public bool FindChangesBy(Stream s, int[] ids, int start, int end, Action <BlockDBEntry> output) {
    byte[] bulk = new byte[BulkEntries * EntrySize];
    s.Position = s.Length; // begin scanning from the very end of the stream
    fixed(byte *ptr = bulk) {
        while (true) {
            BlockDBEntry *entryPtr = (BlockDBEntry *)ptr;
            int count = ReadBackward(s, bulk, entryPtr);
            if (count == 0) { break; } // no more chunks before this point

            // Walk this chunk from its last entry down to its first.
            entryPtr += (count - 1);
            for (int i = count - 1; i >= 0; i--) {
                // Once an entry falls before the start time, report that the
                // start boundary was reached and stop entirely.
                if (entryPtr->TimeDelta < start) { return(true); }

                if (entryPtr->TimeDelta <= end) {
                    // Output if the entry was made by any of the given players.
                    for (int j = 0; j < ids.Length; j++) {
                        if (entryPtr->PlayerID != ids[j]) { continue; }
                        output(*entryPtr); break;
                    }
                }
                entryPtr--;
            }
        }
    }
    return(false);
}
/// <summary> Reads all entries in the stream whose packed index matches the
/// given coordinates. </summary>
/// <returns> Every matching entry, in file order. </returns>
public List <BlockDBEntry> ReadEntries(int x, int y, int z, Stream stream) {
    byte[] bulk = new byte[bulkEntries * entrySize];
    // The first 4 entry-sized records are the header; the dimensions live there.
    ReadFully(stream, bulk, 4 * entrySize);
    List <BlockDBEntry> matches = new List <BlockDBEntry>();

    ushort width = ReadU16(bulk, 6);
    ushort height = ReadU16(bulk, 8);
    ushort length = ReadU16(bulk, 10);
    int index = x + width * (z + y * length); // packed coordinate to match

    fixed(byte *ptr = bulk) {
        // BUGFIX: exclude the 4 header records from the entry count, otherwise
        // the final ReadFully attempts to read past the end of the stream.
        int entries = (int)(stream.Length / entrySize) - 4;
        while (entries > 0) {
            int read = Math.Min(entries, bulkEntries);
            ReadFully(stream, bulk, read * entrySize);

            BlockDBEntry *entryPtr = (BlockDBEntry *)ptr;
            // BUGFIX: advance the pointer on every iteration - previously the
            // 'continue' on a non-match skipped the increment, so the scan kept
            // re-examining the same slot instead of walking the buffer.
            for (int i = 0; i < read; i++, entryPtr++) {
                if (entryPtr->Index == index) {
                    matches.Add(*entryPtr);
                }
            }
            entries -= read;
        }
    }
    return(matches);
}
/// <summary> Returns entries made by the given player inside the given area,
/// no older than the given time span; at most one entry per block position. </summary>
/// <exception cref="InvalidOperationException"> BlockDB is disabled. </exception>
/// <exception cref="ArgumentNullException"> info or area is null. </exception>
public BlockDBEntry[] Lookup([NotNull] PlayerInfo info, [NotNull] BoundingBox area, TimeSpan span) {
    if (!IsEnabled || !IsEnabledGlobally) {
        throw new InvalidOperationException("Trying to lookup on disabled BlockDB.");
    }
    if (info == null) { throw new ArgumentNullException("info"); }
    if (area == null) { throw new ArgumentNullException("area"); }

    long ticks = DateTime.UtcNow.Subtract(span).ToUnixTime();
    Dictionary <int, BlockDBEntry> results = new Dictionary <int, BlockDBEntry>();
    Map map = World.LoadMap();

    if (isPreloaded) {
        lock ( SyncRoot ) {
            fixed(BlockDBEntry *cache = cacheStore) {
                // Scan from the end of the cache; stop at the first entry older
                // than the cutoff (relies on the cache's append order).
                for (int i = CacheSize - 1; i >= 0; i--) {
                    if (cache[i].Timestamp < ticks) { break; }
                    if (cache[i].PlayerID != info.ID) { continue; }
                    if (!area.Contains(cache[i].X, cache[i].Y, cache[i].Z)) { continue; }
                    results[map.Index(cache[i].X, cache[i].Y, cache[i].Z)] = cache[i];
                }
            }
        }
    } else {
        Flush();
        byte[] raw = Load();
        int total = raw.Length / BlockDBEntry.Size;
        fixed(byte *rawPtr = raw) {
            BlockDBEntry *stored = (BlockDBEntry *)rawPtr;
            for (int i = total - 1; i >= 0; i--) {
                if (stored[i].Timestamp < ticks) { break; }
                if (stored[i].PlayerID != info.ID) { continue; }
                if (!area.Contains(stored[i].X, stored[i].Y, stored[i].Z)) { continue; }
                results[map.Index(stored[i].X, stored[i].Y, stored[i].Z)] = stored[i];
            }
        }
    }
    return(results.Values.ToArray());
}
/// <summary> Returns entries made by the given player, at most one entry per
/// block position, stopping once max matches have been seen. </summary>
/// <exception cref="InvalidOperationException"> BlockDB is disabled. </exception>
/// <exception cref="ArgumentNullException"> info is null. </exception>
public BlockDBEntry[] Lookup([NotNull] PlayerInfo info, int max) {
    if (!IsEnabled || !IsEnabledGlobally) {
        throw new InvalidOperationException("Trying to lookup on disabled BlockDB.");
    }
    if (info == null) { throw new ArgumentNullException("info"); }

    Dictionary <int, BlockDBEntry> results = new Dictionary <int, BlockDBEntry>();
    int found = 0;
    Map map = World.LoadMap();

    if (isPreloaded) {
        lock ( SyncRoot ) {
            fixed(BlockDBEntry *cache = cacheStore) {
                for (int i = CacheSize - 1; i >= 0; i--) {
                    if (cache[i].PlayerID != info.ID) { continue; }
                    results[map.Index(cache[i].X, cache[i].Y, cache[i].Z)] = cache[i];
                    found++;
                    if (found >= max) { break; }
                }
            }
        }
    } else {
        Flush();
        byte[] raw = Load();
        int total = raw.Length / BlockDBEntry.Size;
        fixed(byte *rawPtr = raw) {
            BlockDBEntry *stored = (BlockDBEntry *)rawPtr;
            for (int i = total - 1; i >= 0; i--) {
                if (stored[i].PlayerID != info.ID) { continue; }
                results[map.Index(stored[i].X, stored[i].Y, stored[i].Z)] = stored[i];
                found++;
                if (found >= max) { break; }
            }
        }
    }
    return(results.Values.ToArray());
}
/// <summary> Reads a block of BlockDB entries, in a backward streaming manner. </summary>
/// <param name="s"> Stream to read from; implementations move its position
/// backwards so the next call reads the preceding block. </param>
/// <param name="bulk"> Scratch buffer the raw bytes are read into. </param>
/// <param name="entryPtr"> Destination the decoded entries are written to
/// (typically a pinned pointer into bulk itself). </param>
/// <returns> The number of entries read. </returns>
public abstract int ReadBackward(Stream s, byte[] bulk, BlockDBEntry *entryPtr);
/// <summary> Decompresses one chunk of entries from bulk into ptr. </summary>
/// <param name="bulk"> Buffer holding the compressed chunk. </param>
/// <param name="idx"> Offset of the chunk within bulk. </param>
/// <param name="ptr"> Destination the decompressed entries are written to. </param>
/// <returns> The number of entries decompressed. </returns>
/// <remarks> Chunk layout: 1 flags byte, 2 byte little-endian entry count, one
/// shared base value for each field the flags mark as compressed, then per-entry
/// data. Flag bits: 0x01 = per-entry player id; 0x06 = time delta width
/// (0 = shared, 1 = base + byte, 2 = base + ushort, 3 = full int); 0x18 = index
/// delta width (same scheme, signed deltas); 0x20/0x40 = per-entry old/new
/// block; 0x80 = per-entry flags. </remarks>
unsafe static int DecompressChunk(byte[] bulk, int idx, BlockDBEntry *ptr) {
    byte comp = bulk[idx]; idx++;
    int count = bulk[idx] | (bulk[idx + 1] << 8); idx += 2;

    // Read the shared base value for every field that is not fully per-entry.
    int playerID = 0;
    if ((comp & 0x01) < 0x01) { playerID = ReadInt32(bulk, idx); idx += 4; }
    int time = 0;
    if ((comp & 0x06) < 0x06) { time = ReadInt32(bulk, idx); idx += 4; }
    int index = 0;
    if ((comp & 0x18) < 0x18) { index = ReadInt32(bulk, idx); idx += 4; }
    byte oldRaw = 0;
    if ((comp & 0x20) < 0x20) { oldRaw = bulk[idx]; idx++; }
    byte newRaw = 0;
    if ((comp & 0x40) < 0x40) { newRaw = bulk[idx]; idx++; }
    ushort flags = 0;
    if ((comp & 0x80) < 0x80) { flags = (ushort)(bulk[idx] | (bulk[idx + 1] << 8)); idx += 2; }

    // BUGFIX: ptr must advance after each entry. Previously it never moved, so
    // every decompressed entry overwrote the first output slot while callers
    // went on to iterate over 'count' entries of stale data.
    for (int i = 0; i < count; i++, ptr++) {
        switch (comp & 0x01) {
            case 0x00: ptr->PlayerID = playerID; break;
            default: ptr->PlayerID = ReadInt32(bulk, idx); idx += 4; break;
        }
        switch ((comp & 0x06) >> 1) {
            case 0: ptr->TimeDelta = time; break;
            case 1: ptr->TimeDelta = time + bulk[idx]; idx++; break;
            case 2: ptr->TimeDelta = time + (bulk[idx] | (bulk[idx + 1] << 8)); idx += 2; break;
            case 3: ptr->TimeDelta = ReadInt32(bulk, idx); idx += 4; break;
        }
        switch ((comp & 0x18) >> 3) {
            case 0: ptr->Index = index; break;
            case 1: ptr->Index = index + (sbyte)bulk[idx]; idx++; break;
            case 2: ptr->Index = index + (short)(bulk[idx] | (bulk[idx + 1] << 8)); idx += 2; break;
            case 3: ptr->Index = ReadInt32(bulk, idx); idx += 4; break;
        }
        switch (comp & 0x20) {
            case 0x00: ptr->OldRaw = oldRaw; break;
            default: ptr->OldRaw = bulk[idx]; idx++; break;
        }
        switch (comp & 0x40) {
            case 0x00: ptr->NewRaw = newRaw; break;
            default: ptr->NewRaw = bulk[idx]; idx++; break;
        }
        switch (comp & 0x80) {
            case 0x00: ptr->Flags = flags; break;
            default: ptr->Flags = (ushort)(bulk[idx] | (bulk[idx + 1] << 8)); idx += 2; break;
        }
    }
    return(count);
}