/// <summary>
/// Save a record to storage and update the index
/// </summary>
/// <param name="key">key to index the record under</param>
/// <param name="doc">object to serialize and store</param>
/// <returns>record number assigned in the storage file</returns>
public int SetObject(T key, object doc)
{
    int recno = -1;
    // save to storage
    recno = (int)_archive.WriteObject(key, doc);
    // save to index
    _index.Set(key, recno);
    return recno;
}
public int Set(T key, byte[] data)
{
    int recno = -1;
    // save to storage
    recno = _archive.WriteData(key, data, false);
    // save to index
    _index.Set(key, recno);
    return recno;
}
public bool Set(T key, byte[] data)
{
    int recno = -1;
    lock (_lock)
    {
        // save to storage
        recno = _archive.WriteData(key, data, false);
        // save to index
        _index.Set(key, recno);
        //_Count++;
    }
    return true;
}
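// A simplified, self-contained model of the pattern the Set/SetObject methods above
// implement: the storage layer appends the record and hands back a record number,
// and the index then maps the key to that record number (last write wins).
// The List/Dictionary stand-ins below are illustrative only; they are not the
// storage file or MGIndex classes used by this repo.
using System.Collections.Generic;

public class TinyKeyStoreModel<T>
{
    private readonly List<byte[]> _archive = new List<byte[]>();          // stand-in for the storage file
    private readonly Dictionary<T, int> _index = new Dictionary<T, int>(); // stand-in for the key index

    public int Set(T key, byte[] data)
    {
        // "save to storage" : append and use the position as the record number
        int recno = _archive.Count;
        _archive.Add(data);
        // "save to index" : map the key to the record number
        _index[key] = recno;
        return recno;
    }

    public bool Get(T key, out byte[] data)
    {
        data = null;
        int recno;
        if (_index.TryGetValue(key, out recno) == false)
            return false;
        data = _archive[recno];
        return true;
    }
}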
private void SaveNew(string key, byte[] keybytes, object obj)
{
    byte[] data;
    AllocationBlock ab = new AllocationBlock();
    ab.key = key;
    ab.keylen = (byte)keybytes.Length;

    data = fastBinaryJSON.BJSON.ToBJSON(obj);
    ab.isBinaryJSON = true;

    if (data.Length > (int)Global.CompressDocumentOverKiloBytes * _KILOBYTE)
    {
        ab.isCompressed = true;
        data = MiniLZO.Compress(data);
    }
    ab.datalength = data.Length;

    int firstblock = internalSave(keybytes, data, ab);

    // save keys
    _keys.Set(key, firstblock);
}
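// A hedged sketch of the serialize-then-compress decision SaveNew makes above, pulled
// out as a standalone helper for illustration. It reuses only calls that appear in
// SaveNew (fastBinaryJSON.BJSON.ToBJSON, MiniLZO.Compress, Global.CompressDocumentOverKiloBytes
// with _KILOBYTE assumed to be 1024); the helper name and the "compressed" out flag are
// not part of this class. A caller would still need to record the flag (as SaveNew does
// via ab.isCompressed) so reads know whether to decompress.
private static byte[] SerializeForStorage(object obj, out bool compressed)
{
    compressed = false;
    byte[] data = fastBinaryJSON.BJSON.ToBJSON(obj);

    // only pay the LZO compression cost for documents above the configured kilobyte threshold
    if (data.Length > (int)Global.CompressDocumentOverKiloBytes * 1024)
    {
        data = MiniLZO.Compress(data);
        compressed = true;
    }
    return data;
}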
private void RebuildDataFiles()
{
    MGIndex<string> keys = null;
    try
    {
        // remove old free list
        if (File.Exists(_Path + "data.bmp"))
            File.Delete(_Path + "data.bmp");

        _datastore = new StorageFileHF(_Path + "data.mghf", Global.HighFrequencyKVDiskBlockSize);
        _BlockSize = _datastore.GetBlockSize();

        if (File.Exists(_Path + "keys.idx"))
        {
            _log.Debug("removing old keys index");
            foreach (var f in Directory.GetFiles(_Path, "keys.*"))
                File.Delete(f);
        }

        keys = new MGIndex<string>(_Path, "keys.idx", 255, /*Global.PageItemCount,*/ false);

        WAHBitArray visited = new WAHBitArray();

        int c = _datastore.NumberofBlocks();
        for (int i = 0; i < c; i++) // go through blocks
        {
            if (visited.Get(i))
                continue;

            byte[] b = _datastore.ReadBlockBytes(i, _blockheader.Length + 255);

            int bnum = Helper.ToInt32(b, 0);
            if (bnum > 0) // check if a start block
            {
                visited.Set(i, true);
                _datastore.FreeBlock(i); // mark as free
                continue;
            }

            AllocationBlock ab = new AllocationBlock();
            // start block found
            int blocknumexpected = 0;

            int next = ParseBlockHeader(ab, b, blocknumexpected);

            int last = 0;
            bool freelast = false;
            AllocationBlock old = null;

            if (keys.Get(ab.key, out last))
            {
                old = this.FillAllocationBlock(last);
                freelast = true;
            }
            blocknumexpected++;

            bool failed = false;
            if (ab.deleteKey == false)
            {
                while (next > 0) // read the blocks
                {
                    ab.Blocks.Add(next);
                    b = _datastore.ReadBlockBytes(next, _blockheader.Length + ab.keylen);
                    next = ParseBlockHeader(ab, b, blocknumexpected);
                    if (next == -1) // non matching block
                    {
                        failed = true;
                        break;
                    }
                    blocknumexpected++;
                }
            }
            else
            {
                failed = true;
                keys.RemoveKey(ab.key);
            }

            // new data ok
            if (failed == false)
            {
                keys.Set(ab.key, ab.blocknumber); // valid block found

                if (freelast) // free the old blocks
                    _datastore.FreeBlocks(old.Blocks);
            }

            visited.Set(i, true);
        }

        // all ok, delete temp.$ file
        if (File.Exists(_Path + _dirtyFilename))
            File.Delete(_Path + _dirtyFilename);
    }
    catch (Exception ex)
    {
        _log.Error(ex);
    }
    finally
    {
        _log.Debug("Shutting down files and index");
        _datastore.Shutdown();
        // guard against the index failing to open above, so the original
        // exception is not masked by a NullReferenceException here
        if (keys != null)
        {
            keys.SaveIndex();
            keys.Shutdown();
        }
    }
}
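// A hedged sketch of how a rebuild like the one above would typically be triggered:
// RebuildDataFiles deletes the dirty marker file on clean completion, so finding that
// file at startup signals the data files may be inconsistent. The method name
// StartupCheck and the exact call site are assumptions, not code from this repo;
// only _Path, _dirtyFilename, _log and RebuildDataFiles come from the method above.
private void StartupCheck()
{
    if (File.Exists(_Path + _dirtyFilename))
    {
        _log.Debug("dirty file found -> rebuilding data files");
        RebuildDataFiles();
    }
}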