/// <summary>
/// Rewrite the entire BTree as a transaction to include the provided items. This method is Thread safe.
/// If the input is already sorted, use BulkInsertOptions overload to specify InputIsSorted = true.
/// </summary>
/// <param name="items">The key/value pairs to insert; wrapped in an ordering adapter first unless
/// <paramref name="bulkOptions"/>.InputIsSorted is true.</param>
/// <param name="bulkOptions">Controls sorting, duplicate handling, content replacement, and commit behavior.</param>
/// <returns>The number of items written into the rebuilt tree (0 when the input enumeration was empty).</returns>
/// <exception cref="InvalidDataException">Thrown when the locked root does not have exactly one child.</exception>
public int BulkInsert(IEnumerable<KeyValuePair<TKey, TValue>> items, BulkInsertOptions bulkOptions)
{
    NodePin oldRoot = null;
    if (bulkOptions.InputIsSorted == false)
    {
        // Caller's input is unsorted: wrap it in an adapter that sorts (spilling through the
        // key/value serializer) and applies the requested duplicate handling.
        KeyValueSerializer<TKey, TValue> kvserializer = new KeyValueSerializer<TKey, TValue>(_options.KeySerializer, _options.ValueSerializer);
        items = new OrderedKeyValuePairs<TKey, TValue>(_options.KeyComparer, items, kvserializer)
        {
            DuplicateHandling = bulkOptions.DuplicateHandling
        };
    }
    // Tracks every storage handle allocated for the new tree so the catch block can
    // destroy them all if anything fails before the root swap completes.
    List<IStorageHandle> handles = new List<IStorageHandle>();
    try
    {
        int counter = 0;
        using (RootLock root = LockRoot(LockType.Insert, "Merge", false))
        {
            // The root node is expected to have exactly one child (the real tree root).
            if (root.Pin.Ptr.Count != 1)
            {
                throw new InvalidDataException();
            }
            NodeHandle oldRootHandle = root.Pin.Ptr[0].ChildNode;
            oldRoot = _storage.Lock(root.Pin, oldRootHandle);
            if (oldRoot.Ptr.Count == 0 || bulkOptions.ReplaceContents)
            {
                // Currently empty, so just enforce duplicate keys...
                items = OrderedKeyValuePairs<TKey, TValue>.WithDuplicateHandling(items, new KeyValueComparer<TKey, TValue>(_options.KeyComparer), bulkOptions.DuplicateHandling);
            }
            else
            {
                // Merging with existing data and enforce duplicate keys...
                items = OrderedKeyValuePairs<TKey, TValue>.Merge(_options.KeyComparer, bulkOptions.DuplicateHandling, EnumerateNodeContents(oldRoot), items);
            }
            // Write a complete replacement tree to storage; handles collects its allocations.
            Node newtree = BulkWrite(handles, ref counter, items);
            if (newtree == null) // null when enumeration was empty
            {
                return (0);
            }
            // Atomically swap the root's child pointer from the old tree to the new one.
            using (NodeTransaction trans = _storage.BeginTransaction())
            {
                Node rootNode = trans.BeginUpdate(root.Pin);
                rootNode.ReplaceChild(0, oldRootHandle, new NodeHandle(newtree.StorageHandle));
                trans.Commit();
            }
            _count = counter;
        }
        //point of no return...
        // The new tree is committed as the root; from here on, cleanup targets the OLD tree,
        // so the new-tree handles must no longer be destroyed by the catch block.
        handles.Clear();
        DeleteTree(oldRoot);
        oldRoot = null;
        if (bulkOptions.CommitOnCompletion)
        {
            //Since transaction logs do not deal with bulk-insert, we need to commit our current state
            Commit();
        }
        return (counter);
    }
    catch
    {
        // Failure before the root swap: release the old-root pin and best-effort destroy
        // every storage handle allocated for the abandoned new tree.
        if (oldRoot != null)
        {
            oldRoot.Dispose();
        }
        foreach (IStorageHandle sh in handles)
        {
            try
            {
                _storage.Storage.Destroy(sh);
            }
            catch (ThreadAbortException)
            {
                // Thread abort must not be swallowed by the best-effort cleanup.
                throw;
            }
            catch
            {
                // Best-effort: a handle that fails to destroy is skipped, not fatal.
                continue;
            }
        }
        throw;
    }
}
/// <summary>
/// Recovers as much file content as possible into a newly created <see cref="BPlusTree{TKey, TValue}"/>. A
/// non-zero result indicates success: the original file has been replaced by a new database holding the
/// recovered data, and the original remains on disk renamed with a '.deleted' extension.
/// </summary>
/// <remarks>
/// If parsing fails part-way but one or more records were recovered, they remain in a sibling file named
/// with an added '.recovered' extension, which can be opened as an ordinary
/// <see cref="BPlusTree{TKey, TValue}"/>. When a key appears more than once during the scan, the first
/// occurrence wins.
/// </remarks>
/// <param name="options"> The options normally used to create the <see cref="BPlusTree{TKey, TValue}"/> instance </param>
/// <returns>Returns 0 on failure, or the number of records successfully retrieved from the original file </returns>
public static int RecoverFile(Options options)
{
    string sourceFile = options.FileName;

    // Validate configuration before touching the file system.
    if (String.IsNullOrEmpty(sourceFile))
    {
        throw new InvalidConfigurationValueException("FileName", "The FileName property was not specified.");
    }
    if (!File.Exists(sourceFile))
    {
        throw new InvalidConfigurationValueException("FileName", "The FileName specified does not exist.");
    }
    if (options.StorageType != StorageType.Disk)
    {
        throw new InvalidConfigurationValueException("StorageType", "The storage type is not set to 'Disk'.");
    }

    // Pick a '.recovered' path that does not yet exist (appending 0, 1, ... on collision).
    int suffix = 0;
    string recoveryPath = sourceFile + ".recovered";
    while (File.Exists(recoveryPath))
    {
        recoveryPath = sourceFile + ".recovered" + suffix++;
    }

    int recovered = 0;
    try
    {
        // Build a fresh tree at the recovery path and bulk-load everything the scan can salvage.
        BPlusTreeOptions<TKey, TValue> recoveryOptions = options.Clone();
        recoveryOptions.CreateFile = CreatePolicy.Always;
        recoveryOptions.FileName = recoveryPath;
        recoveryOptions.LockingFactory = new LockFactory<IgnoreLocking>();

        using (BPlusTree<TKey, TValue> recoveryTree = new BPlusTree<TKey, TValue>(recoveryOptions))
        {
            BulkInsertOptions insertOptions = new BulkInsertOptions();
            insertOptions.DuplicateHandling = DuplicateHandling.LastValueWins;
            recovered = recoveryTree.BulkInsert(RecoveryScan(options, FileShare.None), insertOptions);
        }
    }
    finally
    {
        // Nothing recovered (or the scan threw): discard the empty recovery file.
        if (recovered == 0 && File.Exists(recoveryPath))
        {
            File.Delete(recoveryPath);
        }
    }

    if (recovered > 0)
    {
        // Pick a '.deleted' path for the original (same collision scheme as above).
        suffix = 0;
        string deletedPath = sourceFile + ".deleted";
        while (File.Exists(deletedPath))
        {
            deletedPath = sourceFile + ".deleted" + suffix++;
        }

        // Swap: original -> '.deleted', then recovered -> original; undo the first move on failure.
        File.Move(sourceFile, deletedPath);
        try
        {
            File.Move(recoveryPath, sourceFile);
        }
        catch
        {
            File.Move(deletedPath, sourceFile);
            throw;
        }
    }

    return recovered;
}
/// <summary>
/// Constructs the tree from the supplied options: selects a node cache by CachePolicy, loads storage,
/// and (when a writable log file is configured) truncates, replays, or bulk-merges the existing log.
/// </summary>
/// <param name="ioptions">Configuration; cloned, so later mutation by the caller does not affect this instance.</param>
/// <exception cref="InvalidConfigurationValueException">Thrown for an unrecognized CachePolicy.</exception>
public BPlusTree(BPlusTreeOptions<TKey, TValue> ioptions)
{
    // True only for an existing, non-empty disk file that we are not about to re-create;
    // drives the ExistingLogAction.Default decision below. Evaluated BEFORE storage is
    // loaded, since loading may create the file.
    bool fileExists =
        ioptions.StorageType == StorageType.Disk &&
        ioptions.CreateFile != CreatePolicy.Always &&
        File.Exists(ioptions.FileName) &&
        new FileInfo(ioptions.FileName).Length > 0;

    _options = ioptions.Clone();
    _selfLock = _options.CallLevelLock;
    _keyComparer = _options.KeyComparer;
    _itemComparer = new ElementComparer(_keyComparer);

    // Choose the node cache implementation from the configured policy.
    switch (_options.CachePolicy)
    {
    case CachePolicy.All:
        _storage = new NodeCacheFull(_options);
        break;
    case CachePolicy.Recent:
        _storage = new NodeCacheNormal(_options);
        break;
    case CachePolicy.None:
        _storage = new NodeCacheNone(_options);
        break;
    default:
        throw new InvalidConfigurationValueException("CachePolicy");
    }

    try
    {
        _storage.Load();
    }
    catch
    {
        // Load failed: release the cache/storage before propagating so the half-built
        // instance does not leak resources.
        _storage.Dispose();
        throw;
    }

    if (_options.LogFile != null && !_options.ReadOnly)
    {
        // NOTE: these conditions rely on && binding tighter than || — e.g. the first test is
        // Truncate || (Default && !fileExists).
        if (_options.ExistingLogAction == ExistingLogAction.Truncate ||
            _options.ExistingLogAction == ExistingLogAction.Default && !fileExists)
        {
            // Fresh or re-created data file: any existing log content is stale.
            _options.LogFile.TruncateLog();
        }
        else if (_options.LogFile.Size > 0 && (
                     _options.ExistingLogAction == ExistingLogAction.Replay ||
                     _options.ExistingLogAction == ExistingLogAction.ReplayAndCommit ||
                     (_options.ExistingLogAction == ExistingLogAction.Default && fileExists)
                 ))
        {
            // Commit after replay for ReplayAndCommit, and also for the Default action on an
            // existing file.
            bool commit = (_options.ExistingLogAction == ExistingLogAction.ReplayAndCommit ||
                           (_options.ExistingLogAction == ExistingLogAction.Default && fileExists));
            bool merge = false;
            if (_options.StorageType == StorageType.Disk)
            {
                merge = new FileInfo(_options.FileName).Length < _options.LogFile.Size;
            }
            if (merge) // log data is larger than we are...
            {
                // Cheaper to rebuild the tree from a sorted merge of file + log than to
                // replay the log entry-by-entry.
                BulkInsertOptions opts = new BulkInsertOptions();
                opts.CommitOnCompletion = commit;
                opts.DuplicateHandling = DuplicateHandling.LastValueWins;
                opts.InputIsSorted = true;
                opts.ReplaceContents = true;
                BulkInsert(
                    _options.LogFile.MergeLog(_options.KeyComparer,
                                              File.Exists(ioptions.FileName)
                                                  ? EnumerateFile(ioptions)
                                                  : new KeyValuePair<TKey, TValue>[0]),
                    opts
                );
            }
            else
            {
                _options.LogFile.ReplayLog(this);
                if (commit) //Now commit the recovered changes
                {
                    Commit();
                }
            }
        }
    }

    // If the underlying store tracks its own record count, seed _count from it;
    // a negative value means the count is unknown.
    var nodeStoreWithCount = _storage.Storage as INodeStoreWithCount;
    if (nodeStoreWithCount != null)
    {
        _count = nodeStoreWithCount.Count;
        _hasCount = _count >= 0;
    }
}