/// <summary>
/// Writes the stored (prefixed) string value for one row's column, skipping the
/// write when the non-null value is unchanged. A null value is stored as the
/// bare one-byte prefix, which readers treat as the null marker.
/// </summary>
void Updateue(string old_val, string val, int id, WriteBatchWithConstraints batch, TableInfo table_info, string name)
{
    // Unchanged non-null value: nothing to write. (Two nulls deliberately fall
    // through and rewrite the null marker — same as the original behavior.)
    if (old_val != null && old_val == val)
    {
        return;
    }
    var key = MakeStringValueKey(new IndexKeyInfo()
    {
        ColumnNumber = table_info.ColumnNumbers[name],
        TableNumber = table_info.TableNumber,
        Id = id
    });
    if (val == null)
    {
        // Null is represented by the prefix byte alone.
        batch.Put(key, BinaryOrStringValuePrefix);
        return;
    }
    var valueBytes = Encoding.Unicode.GetBytes(val);
    var record = new byte[1 + valueBytes.Length];
    record[0] = BinaryOrStringValuePrefix[0];
    // Bulk copy instead of the original byte-by-byte loop.
    Buffer.BlockCopy(valueBytes, 0, record, 1, valueBytes.Length);
    batch.Put(key, record);
}
/// <summary>
/// Applies a column update for table T. Outside a transaction the update is
/// written immediately under the table write lock; inside a transaction it is
/// only queued on <paramref name="trans"/> and applied later by Commit().
/// </summary>
public void UpdateBatchIncrement<T>(UpdateInfo info, Dictionary<int, object> values, TableInfo table_info, LinqdbTransactionInternal trans)
{
    var writeLock = GetTableWriteLock(typeof(T).Name);
    if (trans != null)
    {
        // Transactional path: remember the update; Commit() performs it.
        var typeName = typeof(T).Name;
        info.TableInfo = table_info;
        if (!trans.data_to_update.ContainsKey(typeName))
        {
            trans.data_to_update[typeName] = new List<KeyValuePair<UpdateInfo, Dictionary<int, object>>>();
        }
        trans.data_to_update[typeName].Add(new KeyValuePair<UpdateInfo, Dictionary<int, object>>(info, values));
        return;
    }
    lock (writeLock)
    {
        using (var batch = new WriteBatchWithConstraints())
        {
            var stringCache = new Dictionary<string, KeyValuePair<byte[], HashSet<int>>>();
            var metaIndex = BuildMetaOnIndex(table_info);
            UpdateBatch(info, values, table_info, batch, stringCache, metaIndex);
            WriteStringCacheToBatch(batch, stringCache, table_info, null);
            // Persist per-column index snapshots produced by the update.
            foreach (var snap in InsertIndexChanges(table_info, metaIndex))
            {
                var snapshotKey = MakeSnapshotKey(table_info.TableNumber, table_info.ColumnNumbers[snap.Key]);
                batch.Put(snapshotKey, Encoding.UTF8.GetBytes(snap.Value));
            }
            leveld_db.Write(batch._writeBatch);
        }
    }
}
/// <summary>
/// Deletes all columns (including historic ones) of every existing row in
/// <paramref name="ids"/> from the given table, and decrements the row count
/// by the number of rows that were actually present.
/// </summary>
public void DeleteBatch(HashSet<int> ids, TableInfo table_info, WriteBatchWithConstraints batch, Dictionary<string, int> trans_count_cache, Dictionary<string, KeyValuePair<byte[], HashSet<int>>> string_cache, Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>> memory_index_meta)
{
    var presentIds = new HashSet<int>();
    var allColumns = GetAllColumnsWithHistoric(table_info.TableNumber);
    foreach (var id in ids)
    {
        // Probe the Id index first; skip ids that do not exist.
        var idKey = MakeIndexKey(new IndexKeyInfo()
        {
            TableNumber = table_info.TableNumber,
            ColumnNumber = table_info.ColumnNumbers["Id"],
            Val = BitConverter.GetBytes(id).MyReverseNoCopy(),
            Id = id
        });
        if (leveld_db.Get(idKey) == null)
        {
            continue;
        }
        presentIds.Add(id);
        foreach (var column in allColumns)
        {
            switch (column.Item2)
            {
                case LinqDbTypes.binary_:
                    DeleteBinaryColumn(batch, table_info.TableNumber, column.Item1, column.Item2, id);
                    break;
                case LinqDbTypes.string_:
                    DeleteStringColumn(batch, table_info.TableNumber, column.Item1, table_info, column.Item2, id, string_cache);
                    break;
                default:
                    // Scalar column: map the column number back to a name so the
                    // in-memory index metadata (if any) can record the deletion.
                    var columnName = table_info.ColumnNumbers.Where(f => f.Value == column.Item1).Select(f => f.Key).FirstOrDefault();
                    IndexDeletedData deletedMeta = null;
                    if (columnName != null && memory_index_meta.ContainsKey(columnName))
                    {
                        deletedMeta = memory_index_meta[columnName].Item2;
                    }
                    DeleteDataColumn(batch, table_info.TableNumber, column.Item1, column.Item2, id, deletedMeta);
                    break;
            }
        }
    }
    DecrementCount(table_info, presentIds, batch, trans_count_cache);
}
/// <summary>
/// Deletes a string column's stored value and its hash-index entry for one row.
/// For full-text columns (name ending in "Search"/"Searchs") the word-index
/// entries for the old text are removed as well.
/// </summary>
void DeleteStringColumn(WriteBatchWithConstraints batch, short TableNumber, short ColumnNumber, TableInfo table_info, LinqDbTypes ColumnType, int id, Dictionary<string, KeyValuePair<byte[], HashSet<int>>> cache)
{
    var key_info = new IndexKeyInfo() { ColumnNumber = ColumnNumber, TableNumber = TableNumber, ColumnType = ColumnType, Id = id };
    var value_key = MakeStringValueKey(key_info);
    var old_val = leveld_db.Get(value_key);
    // A one-byte record is the null marker (prefix only); anything longer
    // carries UTF-16 text after the prefix byte.
    string old_val_string = null;
    if (old_val != null && old_val.Length != 1)
    {
        // Decode directly from the offset instead of the original Skip(1).ToArray()
        // LINQ copy — same result, no intermediate array.
        old_val_string = Encoding.Unicode.GetString(old_val, 1, old_val.Length - 1);
    }
    var index_key = MakeIndexKey(new IndexKeyInfo()
    {
        TableNumber = TableNumber,
        ColumnNumber = ColumnNumber,
        ColumnType = ColumnType,
        Val = (old_val == null || old_val.Length == 1) ? NullConstant : CalculateMD5Hash(old_val_string.ToLower(CultureInfo.InvariantCulture)),
        Id = id
    });
    batch.Delete(index_key);
    batch.Delete(value_key);
    // NOTE(review): assumes a column with this number always exists in
    // table_info.ColumnNumbers — FirstOrDefault() on the anonymous projection
    // would throw NullReferenceException otherwise. TODO confirm with callers.
    var column_name = table_info.ColumnNumbers.Select(f => new { f.Key, f.Value }).Where(f => f.Value == ColumnNumber).FirstOrDefault().Key;
    // Ordinal case-insensitive suffix check instead of the original
    // culture-sensitive ToLower().EndsWith(...): consistent with the
    // invariant-culture normalization used elsewhere in this file, and avoids
    // allocating two lowered copies of the name.
    if (column_name.EndsWith("search", StringComparison.OrdinalIgnoreCase))
    {
        UpdateIndex(old_val_string, null, id, batch, ColumnNumber, TableNumber, cache, false);
    }
    if (column_name.EndsWith("searchs", StringComparison.OrdinalIgnoreCase))
    {
        UpdateIndex(old_val_string, null, id, batch, ColumnNumber, TableNumber, cache, true);
    }
}
/// <summary>
/// Deletes a binary column's stored value and its presence-index entry for one row.
/// </summary>
void DeleteBinaryColumn(WriteBatchWithConstraints batch, short TableNumber, short ColumnNumber, LinqDbTypes ColumnType, int id)
{
    var valueKey = MakeBinaryValueKey(new IndexKeyInfo()
    {
        ColumnNumber = ColumnNumber,
        TableNumber = TableNumber,
        ColumnType = ColumnType,
        Id = id
    });
    var stored = leveld_db.Get(valueKey);
    // A missing or one-byte record represents a null value (prefix-only marker,
    // matching the string-column convention); the index only records null vs not-null.
    bool wasNull = stored == null || stored.Length == 1;
    var indexKey = MakeIndexKey(new IndexKeyInfo()
    {
        TableNumber = TableNumber,
        ColumnNumber = ColumnNumber,
        ColumnType = ColumnType,
        Val = wasNull ? NullConstant : NotNullFiller,
        Id = id
    });
    batch.Delete(indexKey);
    batch.Delete(valueKey);
}
/// <summary>
/// Updates the full-text word index for one row when its text changes from
/// old_val to new_val: words no longer present are recorded in cache slot 0
/// (removals), newly appearing words in slot 1 (additions). Comparison is
/// case-insensitive (invariant culture).
/// </summary>
void UpdateIndex(string old_val, string new_val, int id, WriteBatchWithConstraints batch, short ColumnNumber, short TableNumber, Dictionary<string, KeyValuePair<byte[], HashSet<int>>> cache, bool bySpacesOnly)
{
    int phase = id / PhaseStep;
    if (old_val != null)
    {
        old_val = old_val.ToLower(CultureInfo.InvariantCulture);
    }
    if (new_val != null)
    {
        new_val = new_val.ToLower(CultureInfo.InvariantCulture);
    }
    if (old_val == new_val)
    {
        // Case-insensitively identical text: index is already correct.
        return;
    }
    // Slot 0: words that disappeared from the text.
    foreach (var word in GetRemovedWords(old_val, new_val, bySpacesOnly))
    {
        var keyInfo = new IndexKeyInfo() { ColumnNumber = ColumnNumber, TableNumber = TableNumber, Id = id };
        var key = MakeStringIndexKey(keyInfo, word, phase);
        HashSet<int> idSet;
        if (!GetFromStringIndexCache(keyInfo, word, cache, phase, out idSet, 0))
        {
            idSet = new HashSet<int>();
        }
        idSet.Add(id);
        PutToStringIndexCache(keyInfo, word, key, idSet, cache, phase, 0);
    }
    // Slot 1: words that newly appeared in the text.
    foreach (var word in GetAddedWords(old_val, new_val, bySpacesOnly))
    {
        var keyInfo = new IndexKeyInfo() { ColumnNumber = ColumnNumber, TableNumber = TableNumber, Id = id };
        var key = MakeStringIndexKey(keyInfo, word, phase);
        HashSet<int> idSet;
        if (!GetFromStringIndexCache(keyInfo, word, cache, phase, out idSet, 1))
        {
            idSet = new HashSet<int>();
        }
        idSet.Add(id);
        PutToStringIndexCache(keyInfo, word, key, idSet, cache, phase, 1);
    }
}
/// <summary>
/// Commits this transaction. Duplicate-modification checks run first; then the
/// transaction is merged into a shared batch keyed by its sorted table-name set,
/// and exactly one thread flushes that batch to LevelDB under the table write
/// locks. Transactions whose ids overlap the pending batch fall back to the
/// non-batched path ("the old way").
/// </summary>
public void Commit()
{
    var ids = new Dictionary<string, HashSet<int>>();          // table name -> all ids touched
    var update_ids = new Dictionary<string, HashSet<int>>();   // "table|column" -> ids updated on that column
    var locks = new List<string>();
    locks.AddRange(data_to_save.Keys);
    locks.AddRange(data_to_update.Keys);
    locks.AddRange(data_to_delete.Keys);
    if (!locks.Any()) { return; }
    locks = locks.Distinct().OrderBy(f => f).ToList(); //ordering avoids deadlocks
    // The batch key identifies the set of tables this transaction touches.
    var key = locks.Aggregate((a, b) => a + "|" + b);
    //checks for this individual transaction correctness
    foreach (var save_list in data_to_save)
    {
        if (!ids.ContainsKey(save_list.Key)) { ids[save_list.Key] = new HashSet<int>(); }
        foreach (var item in save_list.Value.Value)
        {
            // Id is read reflectively from the entity being saved.
            var id = Convert.ToInt32(item.GetType().GetProperty("Id").GetValue(item));
            if (ids[save_list.Key].Contains(id))
            {
                throw new LinqDbException("Linqdb: same entity cannot be modified twice in a transaction. " + save_list.Key + ", id " + id);
            }
            else
            {
                ids[save_list.Key].Add(id);
            }
        }
    }
    foreach (var update_list in data_to_update)
    {
        if (!ids.ContainsKey(update_list.Key)) { ids[update_list.Key] = new HashSet<int>(); }
        foreach (var update_field in update_list.Value)
        {
            if (!update_ids.ContainsKey(update_field.Key.TableInfo.Name + "|" + update_field.Key.ColumnName))
            {
                update_ids[update_field.Key.TableInfo.Name + "|" + update_field.Key.ColumnName] = new HashSet<int>();
            }
            foreach (var id in update_field.Value.Keys)
            {
                // An id may not be both saved and updated, nor may the same
                // column of the same id be updated twice.
                if (ids[update_field.Key.TableInfo.Name].Contains(id))
                {
                    throw new LinqDbException("Linqdb: same entity cannot be modified twice in a transaction. " + update_field.Key.TableInfo.Name + ", id " + id);
                }
                if (update_ids[update_field.Key.TableInfo.Name + "|" + update_field.Key.ColumnName].Contains(id))
                {
                    throw new LinqDbException("Linqdb: same entity's field cannot be updated twice in a transaction. " + update_field.Key.TableInfo.Name + ", field " + update_field.Key.ColumnName + ", id " + id);
                }
                else
                {
                    update_ids[update_field.Key.TableInfo.Name + "|" + update_field.Key.ColumnName].Add(id);
                }
            }
        }
    }
    // Fold the per-column update ids into the per-table id sets.
    foreach (var updated in update_ids)
    {
        var name = updated.Key.Split("|".ToCharArray(), StringSplitOptions.RemoveEmptyEntries)[0].Trim();
        if (!ids.ContainsKey(name))
        {
            ids[name] = updated.Value;
        }
        else
        {
            ids[name].UnionWith(updated.Value);
        }
    }
    foreach (var delete_list in data_to_delete)
    {
        if (!ids.ContainsKey(delete_list.Key)) { ids[delete_list.Key] = new HashSet<int>(); }
        foreach (var id in delete_list.Value.Value)
        {
            if (ids[delete_list.Key].Contains(id))
            {
                throw new LinqDbException("Linqdb: same entity cannot be modified twice in a transaction. " + delete_list.Key + ", id " + id);
            }
            else
            {
                ids[delete_list.Key].Add(id);
            }
        }
    }
    bool no = false; //should we include this transaction in a batch or not (no if we have intersecting ids)
    bool done = false;      // set by whichever thread flushes the batch containing us
    string error = null;    // error reported by the flushing thread, if any
    var ilock = ModifyBatchTransaction.GetTableTransBatchLock(key);
    lock (ilock)
    {
        if (!ModifyBatchTransaction._trans_batch.ContainsKey(key))
        {
            // First transaction for this table set: start a new shared batch.
            ModifyBatchTransaction._trans_batch[key] = new TransBatchData() { Callbacks = new List<Action<string>>(), Ids = ids };
            ModifyBatchTransaction._trans_batch[key].data_to_delete = data_to_delete;
            ModifyBatchTransaction._trans_batch[key].data_to_save = data_to_save;
            ModifyBatchTransaction._trans_batch[key].data_to_update = data_to_update;
        }
        else
        {
            // A batch for the same table set is pending: only merge if our ids
            // don't intersect it (both batches share the same table-name keys,
            // since the key is derived from the same table set).
            foreach (var tids in ModifyBatchTransaction._trans_batch[key].Ids)
            {
                if (tids.Value.Intersect(ids[tids.Key]).Any()) { no = true; break; }
            }
            if (!no)
            {
                foreach (var tids in ModifyBatchTransaction._trans_batch[key].Ids)
                {
                    tids.Value.UnionWith(ids[tids.Key]);
                }
                //merge
                foreach (var del in ModifyBatchTransaction._trans_batch[key].data_to_delete)
                {
                    if (data_to_delete.ContainsKey(del.Key)) { del.Value.Value.UnionWith(data_to_delete[del.Key].Value); }
                }
                foreach (var del in data_to_delete)
                {
                    if (!ModifyBatchTransaction._trans_batch[key].data_to_delete.ContainsKey(del.Key))
                    {
                        ModifyBatchTransaction._trans_batch[key].data_to_delete[del.Key] = del.Value;
                    }
                }
                foreach (var sav in ModifyBatchTransaction._trans_batch[key].data_to_save)
                {
                    if (data_to_save.ContainsKey(sav.Key)) { sav.Value.Value.AddRange(data_to_save[sav.Key].Value); }
                }
                foreach (var sav in data_to_save)
                {
                    if (!ModifyBatchTransaction._trans_batch[key].data_to_save.ContainsKey(sav.Key))
                    {
                        ModifyBatchTransaction._trans_batch[key].data_to_save[sav.Key] = sav.Value;
                    }
                }
                // Updates merge per column: same column -> merge the id/value map,
                // new column -> append.
                foreach (var upd in ModifyBatchTransaction._trans_batch[key].data_to_update)
                {
                    if (data_to_update.ContainsKey(upd.Key))
                    {
                        foreach (var colmn in data_to_update[upd.Key])
                        {
                            if (upd.Value.Any(f => f.Key.ColumnName == colmn.Key.ColumnName))
                            {
                                var dic = upd.Value.First(f => f.Key.ColumnName == colmn.Key.ColumnName).Value;
                                foreach (var cv in colmn.Value) { dic[cv.Key] = cv.Value; }
                            }
                            else
                            {
                                upd.Value.Add(colmn);
                            }
                        }
                    }
                }
                foreach (var upd in data_to_update)
                {
                    if (!ModifyBatchTransaction._trans_batch[key].data_to_update.ContainsKey(upd.Key))
                    {
                        ModifyBatchTransaction._trans_batch[key].data_to_update[upd.Key] = upd.Value;
                    }
                }
            }
        }
        if (!no)
        {
            // Register for completion/error notification from the flushing thread.
            ModifyBatchTransaction._trans_batch[key].Callbacks.Add(f => { done = true; error = f; });
        }
    }
    if (!no)
    {
        Dictionary<string, int> trans_count_cache = new Dictionary<string, int>();
        bool lockAcquired = false;
        int maxWaitMs = 60000;
        var _write_locks = new List<object>();
        TransBatchData _trans_data = null;
        try
        {
            DateTime start = DateTime.Now;
            // Spin until either another thread flushes the batch (done) or we
            // acquire the first table lock and become the flusher ourselves.
            while (!done)
            {
                var flock = locks.First();
                var _write_lock = ldb.GetTableWriteLock(flock);
                lockAcquired = Monitor.TryEnter(_write_lock, 0);
                if (lockAcquired)
                {
                    if (done)
                    {
                        // Flushed while we were acquiring: release and back off.
                        Monitor.Exit(_write_lock);
                        lockAcquired = false;
                        break;
                    }
                    else
                    {
                        // We flush: take the remaining table locks (blocking) in
                        // the sorted order established above.
                        _write_locks.Add(_write_lock);
                        foreach (var l in locks.Skip(1).ToList())
                        {
                            var write_lock = ldb.GetTableWriteLock(l);
                            Monitor.Enter(write_lock);
                            _write_locks.Add(write_lock);
                        }
                        break;
                    }
                }
                Thread.Sleep(250);
                //if ((DateTime.Now - start).TotalMilliseconds > maxWaitMs)
                //{
                //    throw new LinqDbException("Linqdb: Commit waited too long to acquire transaction write lock. Is the load too high?");
                //}
            }
            if (done)
            {
                if (!string.IsNullOrEmpty(error)) { throw new LinqDbException(error); }
                else { return; }
            }
            //not done, but have write lock for the table
            lock (ilock)
            {
                // Take ownership of the accumulated batch and remove it so new
                // transactions start a fresh one.
                _trans_data = ModifyBatchTransaction._trans_batch[key];
                var oval = new TransBatchData();
                ModifyBatchTransaction._trans_batch.TryRemove(key, out oval);
            }
            using (WriteBatchWithConstraints batch = new WriteBatchWithConstraints())
            {
                //string cache
                var sc = new Dictionary<string, KeyValuePair<TableInfo, Dictionary<string, KeyValuePair<byte[], HashSet<int>>>>>();
                var index_data = new List<Tuple<TableInfo, Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>>>>();
                // Saves, then updates, then deletes, all into one write batch.
                foreach (var save_list in _trans_data.data_to_save)
                {
                    if (!sc.ContainsKey(save_list.Value.Key.Name))
                    {
                        sc[save_list.Value.Key.Name] = new KeyValuePair<TableInfo, Dictionary<string, KeyValuePair<byte[], HashSet<int>>>>(save_list.Value.Key, new Dictionary<string, KeyValuePair<byte[], HashSet<int>>>());
                    }
                    Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>> meta_index = ldb.BuildMetaOnIndex(save_list.Value.Key);
                    ldb.SaveItems(save_list.Value.Key, save_list.Key, save_list.Value.Value, batch, trans_count_cache, sc[save_list.Value.Key.Name].Value, meta_index, true);
                    index_data.Add(new Tuple<TableInfo, Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>>>(save_list.Value.Key, meta_index));
                }
                foreach (var update_list in _trans_data.data_to_update)
                {
                    foreach (var update_field in update_list.Value)
                    {
                        if (!sc.ContainsKey(update_field.Key.TableInfo.Name))
                        {
                            sc[update_field.Key.TableInfo.Name] = new KeyValuePair<TableInfo, Dictionary<string, KeyValuePair<byte[], HashSet<int>>>>(update_field.Key.TableInfo, new Dictionary<string, KeyValuePair<byte[], HashSet<int>>>());
                        }
                        Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>> meta_index = ldb.BuildMetaOnIndex(update_field.Key.TableInfo);
                        ldb.UpdateBatch(update_field.Key, update_field.Value, update_field.Key.TableInfo, batch, sc[update_field.Key.TableInfo.Name].Value, meta_index);
                        var snapshots_dic = ldb.InsertIndexChanges(update_field.Key.TableInfo, meta_index);
                        index_data.Add(new Tuple<TableInfo, Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>>>(update_field.Key.TableInfo, meta_index));
                    }
                }
                foreach (var delete_list in _trans_data.data_to_delete)
                {
                    if (!sc.ContainsKey(delete_list.Value.Key.Name))
                    {
                        sc[delete_list.Value.Key.Name] = new KeyValuePair<TableInfo, Dictionary<string, KeyValuePair<byte[], HashSet<int>>>>(delete_list.Value.Key, new Dictionary<string, KeyValuePair<byte[], HashSet<int>>>());
                    }
                    Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>> meta_index = ldb.BuildMetaOnIndex(delete_list.Value.Key);
                    ldb.DeleteBatch(delete_list.Value.Value, delete_list.Value.Key, batch, trans_count_cache, sc[delete_list.Value.Key.Name].Value, meta_index);
                    index_data.Add(new Tuple<TableInfo, Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>>>(delete_list.Value.Key, meta_index));
                }
                if (trans_count_cache != null && trans_count_cache.Any()) { ldb.FlushTransCountCache(trans_count_cache, batch); }
                foreach (var scache in sc) { ldb.WriteStringCacheToBatch(batch, scache.Value.Value, scache.Value.Key, null); }
                // Persist index snapshots accumulated across all operations.
                foreach (var idata in index_data)
                {
                    var snapshots_dic = ldb.InsertIndexChanges(idata.Item1, idata.Item2);
                    foreach (var snap in snapshots_dic)
                    {
                        var skey = ldb.MakeSnapshotKey(idata.Item1.TableNumber, idata.Item1.ColumnNumbers[snap.Key]);
                        batch.Put(skey, Encoding.UTF8.GetBytes(snap.Value));
                    }
                }
                ldb.leveld_db.Write(batch._writeBatch);
            }
            // Signal success to every transaction merged into this batch.
            foreach (var cb in _trans_data.Callbacks) { cb(null); }
        }
        catch (Exception ex)
        {
            if (_trans_data != null)
            {
                // Propagate the failure to every merged transaction.
                foreach (var cb in _trans_data.Callbacks) { cb(ex.Message); }
            }
            throw;
        }
        finally
        {
            if (lockAcquired)
            {
                foreach (var l in _write_locks) { Monitor.Exit(l); }
            }
        }
    }
    else
    {
        //the old way
        // Ids overlapped the pending batch: commit this transaction alone under
        // all table locks, without the shared-batch machinery.
        Dictionary<string, int> trans_count_cache = new Dictionary<string, int>();
        var _write_locks = new List<object>();
        try
        {
            foreach (var l in locks)
            {
                var write_lock = ldb.GetTableWriteLock(l);
                Monitor.Enter(write_lock);
                _write_locks.Add(write_lock);
            }
            using (WriteBatchWithConstraints batch = new WriteBatchWithConstraints())
            {
                //string cache
                var sc = new Dictionary<string, KeyValuePair<TableInfo, Dictionary<string, KeyValuePair<byte[], HashSet<int>>>>>();
                var index_data = new List<Tuple<TableInfo, Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>>>>();
                foreach (var save_list in data_to_save)
                {
                    if (!sc.ContainsKey(save_list.Value.Key.Name))
                    {
                        sc[save_list.Value.Key.Name] = new KeyValuePair<TableInfo, Dictionary<string, KeyValuePair<byte[], HashSet<int>>>>(save_list.Value.Key, new Dictionary<string, KeyValuePair<byte[], HashSet<int>>>());
                    }
                    Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>> meta_index = ldb.BuildMetaOnIndex(save_list.Value.Key);
                    ldb.SaveItems(save_list.Value.Key, save_list.Key, save_list.Value.Value, batch, trans_count_cache, sc[save_list.Value.Key.Name].Value, meta_index, true);
                    index_data.Add(new Tuple<TableInfo, Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>>>(save_list.Value.Key, meta_index));
                }
                foreach (var update_list in data_to_update)
                {
                    foreach (var update_field in update_list.Value)
                    {
                        if (!sc.ContainsKey(update_field.Key.TableInfo.Name))
                        {
                            sc[update_field.Key.TableInfo.Name] = new KeyValuePair<TableInfo, Dictionary<string, KeyValuePair<byte[], HashSet<int>>>>(update_field.Key.TableInfo, new Dictionary<string, KeyValuePair<byte[], HashSet<int>>>());
                        }
                        Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>> meta_index = ldb.BuildMetaOnIndex(update_field.Key.TableInfo);
                        ldb.UpdateBatch(update_field.Key, update_field.Value, update_field.Key.TableInfo, batch, sc[update_field.Key.TableInfo.Name].Value, meta_index);
                        var snapshots_dic = ldb.InsertIndexChanges(update_field.Key.TableInfo, meta_index);
                        index_data.Add(new Tuple<TableInfo, Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>>>(update_field.Key.TableInfo, meta_index));
                    }
                }
                foreach (var delete_list in data_to_delete)
                {
                    if (!sc.ContainsKey(delete_list.Value.Key.Name))
                    {
                        sc[delete_list.Value.Key.Name] = new KeyValuePair<TableInfo, Dictionary<string, KeyValuePair<byte[], HashSet<int>>>>(delete_list.Value.Key, new Dictionary<string, KeyValuePair<byte[], HashSet<int>>>());
                    }
                    Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>> meta_index = ldb.BuildMetaOnIndex(delete_list.Value.Key);
                    ldb.DeleteBatch(delete_list.Value.Value, delete_list.Value.Key, batch, trans_count_cache, sc[delete_list.Value.Key.Name].Value, meta_index);
                    index_data.Add(new Tuple<TableInfo, Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>>>(delete_list.Value.Key, meta_index));
                }
                if (trans_count_cache != null && trans_count_cache.Any()) { ldb.FlushTransCountCache(trans_count_cache, batch); }
                foreach (var scache in sc) { ldb.WriteStringCacheToBatch(batch, scache.Value.Value, scache.Value.Key, null); }
                foreach (var idata in index_data)
                {
                    var snapshots_dic = ldb.InsertIndexChanges(idata.Item1, idata.Item2);
                    foreach (var snap in snapshots_dic)
                    {
                        var skey = ldb.MakeSnapshotKey(idata.Item1.TableNumber, idata.Item1.ColumnNumbers[snap.Key]);
                        batch.Put(skey, Encoding.UTF8.GetBytes(snap.Value));
                    }
                }
                ldb.leveld_db.Write(batch._writeBatch);
            }
        }
        finally
        {
            foreach (var l in _write_locks) { Monitor.Exit(l); }
        }
    }
}
/// <summary>
/// Deletes the given ids from table T. Outside a transaction the ids are merged
/// into a shared per-table delete batch; the first thread to win the table write
/// lock flushes the whole batch and notifies all participants via callbacks.
/// Inside a transaction the ids are only queued and applied by Commit().
/// </summary>
public void DeleteBatch<T>(HashSet<int> ids, LinqdbTransactionInternal trans)
{
    var _write_lock = GetTableWriteLock(typeof(T).Name);
    if (trans == null)
    {
        var table_info = GetTableInfo(typeof(T).Name);
        bool done = false;      // set by whichever thread flushes the batch containing our ids
        string error = null;    // error reported by that thread, if any
        var ilock = ModifyBatch.GetTableDeleteBatchLock(table_info.Name);
        lock (ilock)
        {
            // Merge our ids into the table's pending delete batch and register a
            // callback so the flushing thread can signal completion/failure.
            if (!ModifyBatch._delete_batch.ContainsKey(table_info.Name))
            {
                ModifyBatch._delete_batch[table_info.Name] = new DeleteData() { Callbacks = new List<Action<string>>(), ids = ids };
            }
            else
            {
                ModifyBatch._delete_batch[table_info.Name].ids.UnionWith(ids);
            }
            ModifyBatch._delete_batch[table_info.Name].Callbacks.Add(f => { done = true; error = f; });
        }
        bool lockAcquired = false;
        int maxWaitMs = 60000;
        DeleteData _delete_data = null;
        try
        {
            DateTime start = DateTime.Now;
            // Spin until either another thread flushes the batch (done) or we
            // acquire the table write lock and flush it ourselves.
            while (!done)
            {
                lockAcquired = Monitor.TryEnter(_write_lock, 0);
                if (lockAcquired)
                {
                    if (done)
                    {
                        // Flushed while we were acquiring: release and back off.
                        Monitor.Exit(_write_lock);
                        lockAcquired = false;
                        break;
                    }
                    else
                    {
                        break;
                    }
                }
                Thread.Sleep(250);
                //if ((DateTime.Now - start).TotalMilliseconds > maxWaitMs)
                //{
                //    throw new LinqDbException("Linqdb: Delete waited too long to acquire write lock. Is the load too high?");
                //}
            }
            if (done)
            {
                if (!string.IsNullOrEmpty(error)) { throw new LinqDbException(error); }
                else { return; }
            }
            //not done, but have write lock for the table
            lock (ilock)
            {
                // Take ownership of the accumulated batch and remove it so new
                // deletes start a fresh one.
                _delete_data = ModifyBatch._delete_batch[table_info.Name];
                var oval = new DeleteData();
                ModifyBatch._delete_batch.TryRemove(table_info.Name, out oval);
            }
            if (_delete_data.ids.Any())
            {
                using (WriteBatchWithConstraints batch = new WriteBatchWithConstraints())
                {
                    Dictionary<string, KeyValuePair<byte[], HashSet<int>>> string_cache = new Dictionary<string, KeyValuePair<byte[], HashSet<int>>>();
                    Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>> meta_index = BuildMetaOnIndex(table_info);
                    DeleteBatch(_delete_data.ids, table_info, batch, null, string_cache, meta_index);
                    WriteStringCacheToBatch(batch, string_cache, table_info, null);
                    var snapshots_dic = InsertIndexChanges(table_info, meta_index);
                    foreach (var snap in snapshots_dic)
                    {
                        var skey = MakeSnapshotKey(table_info.TableNumber, table_info.ColumnNumbers[snap.Key]);
                        batch.Put(skey, Encoding.UTF8.GetBytes(snap.Value));
                    }
                    leveld_db.Write(batch._writeBatch);
                }
            }
            // Signal success to every participant merged into this batch.
            foreach (var cb in _delete_data.Callbacks) { cb(null); }
        }
        catch (Exception ex)
        {
            if (_delete_data != null)
            {
                var additionalInfo = ex.Message;
                if (_delete_data.Callbacks.Count() > 1)
                {
                    additionalInfo += " This error could belong to another entity which happened to be in the same batch.";
                }
                foreach (var cb in _delete_data.Callbacks) { cb(additionalInfo); }
            }
            throw;
        }
        finally
        {
            if (lockAcquired) { Monitor.Exit(_write_lock); }
        }
    }
    else
    {
        // Transactional path: only queue the ids; Commit() performs the delete.
        var type_name = typeof(T).Name;
        var table_info = GetTableInfo(type_name);
        if (!trans.data_to_delete.ContainsKey(type_name))
        {
            trans.data_to_delete[type_name] = new KeyValuePair<TableInfo, HashSet<int>>(table_info, new HashSet<int>());
        }
        trans.data_to_delete[type_name].Value.UnionWith(ids);
    }
}
/// <summary>
/// Deletes a scalar (non-string, non-binary) column value and its index entry
/// for one row. Negative int/double values are indexed under the negated column
/// number with the magnitude as the key, so the stored value is transformed the
/// same way here to reconstruct the exact index key being removed.
/// </summary>
void DeleteDataColumn(WriteBatchWithConstraints batch, short TableNumber, short ColumnNumber, LinqDbTypes ColumnType, int id, IndexDeletedData index_deleted)
{
    var valueKey = MakeValueKey(new IndexKeyInfo() { ColumnNumber = ColumnNumber, TableNumber = TableNumber, ColumnType = ColumnType, Id = id });
    var stored = leveld_db.Get(valueKey);
    if (stored == null) //maybe new column added and delete invoked
    {
        return;
    }
    bool negative = false;
    if (!ValsEqual(stored, NullConstant))
    {
        // Decode once (bytes are stored reversed) and, for negative values,
        // rebuild the reversed magnitude bytes used by the index key.
        if (ColumnType == LinqDbTypes.double_)
        {
            double d = BitConverter.ToDouble(stored.MyReverseWithCopy(), 0);
            if (d < 0)
            {
                negative = true;
                stored = BitConverter.GetBytes(d * -1).MyReverseNoCopy();
            }
        }
        else if (ColumnType == LinqDbTypes.int_)
        {
            int v = BitConverter.ToInt32(stored.MyReverseWithCopy(), 0);
            if (v < 0)
            {
                negative = true;
                stored = BitConverter.GetBytes(v * -1).MyReverseNoCopy();
            }
        }
    }
    var indexKey = MakeIndexKey(new IndexKeyInfo()
    {
        TableNumber = TableNumber,
        ColumnNumber = negative ? (short)(-1 * ColumnNumber) : ColumnNumber,
        ColumnType = ColumnType,
        Val = stored,
        Id = id
    });
    batch.Delete(valueKey);
    batch.Delete(indexKey);
    //keep up the memory index, if there is one
    if (index_deleted != null)
    {
        index_deleted.Ids.Add(id);
    }
}
/// <summary>
/// Applies one column update to every existing row in <paramref name="values"/>
/// (id -> new value), dispatching to the string/binary/scalar save paths.
/// Ids that are not present in the table are silently skipped.
/// </summary>
public void UpdateBatch(UpdateInfo info, Dictionary<int, object> values, TableInfo table_info, WriteBatchWithConstraints batch, Dictionary<string, KeyValuePair<byte[], HashSet<int>>> string_cache, Dictionary<string, Tuple<IndexNewData, IndexDeletedData, IndexChangedData>> memory_index_meta)
{
    foreach (var pair in values)
    {
        int id = pair.Key;
        // Probe the Id index first so only existing rows are touched.
        var idKey = MakeIndexKey(new IndexKeyInfo()
        {
            TableNumber = info.TableNumber,
            ColumnNumber = table_info.ColumnNumbers["Id"],
            Val = BitConverter.GetBytes(id).MyReverseNoCopy(),
            Id = id
        });
        if (leveld_db.Get(idKey) == null)
        {
            continue;
        }
        if (info.ColumnType == LinqDbTypes.string_)
        {
            SaveStringData(batch, (string)pair.Value, info.ColumnName, info.TableInfo, id, string_cache, false);
        }
        else if (info.ColumnType == LinqDbTypes.binary_)
        {
            SaveBinaryColumn(batch, pair.Value, info.ColumnName, info.TableInfo, id, false);
        }
        else
        {
            // Scalar column: pass along in-memory index metadata when present.
            IndexNewData indexNew = null;
            IndexChangedData indexChanged = null;
            Tuple<IndexNewData, IndexDeletedData, IndexChangedData> meta;
            if (memory_index_meta.TryGetValue(info.ColumnName, out meta))
            {
                indexNew = meta.Item1;
                indexChanged = meta.Item3;
            }
            SaveDataColumn(batch, pair.Value, info.ColumnName, info.ColumnType, info.TableInfo, id, false, indexNew, indexChanged);
        }
    }
}