/// <summary>
/// Persists a new task document and registers it under the three secondary
/// indexes (by type, by index, and by index+type) so it can be found later.
/// </summary>
/// <param name="task">The task to store; serialized via <c>AsBytes()</c>.</param>
/// <param name="addedAt">Timestamp recorded alongside the task.</param>
public void AddTask(DatabaseTask task, DateTime addedAt)
{
    // Secondary index trees used to look tasks up later.
    var byType = tableStorage.Tasks.GetIndex(Tables.Tasks.Indices.ByType);
    var byIndex = tableStorage.Tasks.GetIndex(Tables.Tasks.Indices.ByIndex);
    var byIndexAndType = tableStorage.Tasks.GetIndex(Tables.Tasks.Indices.ByIndexAndType);

    var taskType = task.GetType().FullName;
    var indexId = task.Index;

    // Sequential UUIDs keep task ids ordered by creation time.
    var newId = generator.CreateSequentialUuid(UuidType.Tasks);
    var key = newId.ToString();

    var document = new RavenJObject
    {
        { "index", indexId },
        { "id", newId.ToByteArray() },
        { "time", addedAt },
        { "type", taskType },
        { "task", task.AsBytes() }
    };

    tableStorage.Tasks.Add(writeBatch.Value, key, document, 0);

    // Register the new task under each secondary index.
    byType.MultiAdd(writeBatch.Value, CreateKey(taskType), key);
    byIndex.MultiAdd(writeBatch.Value, CreateKey(indexId), key);
    byIndexAndType.MultiAdd(writeBatch.Value, CreateKey(indexId, taskType), key);
}
/// <summary>
/// Stores a new task as a typed structure and registers it under the
/// by-type, by-index, and by-index-and-type secondary indexes.
/// </summary>
/// <param name="task">The task to store; serialized via <c>AsBytes()</c>.</param>
/// <param name="addedAt">Timestamp recorded (as binary) alongside the task.</param>
public void AddTask(DatabaseTask task, DateTime addedAt)
{
    // Secondary index trees used to look tasks up later.
    var byType = tableStorage.Tasks.GetIndex(Tables.Tasks.Indices.ByType);
    var byIndex = tableStorage.Tasks.GetIndex(Tables.Tasks.Indices.ByIndex);
    var byIndexAndType = tableStorage.Tasks.GetIndex(Tables.Tasks.Indices.ByIndexAndType);

    var taskType = task.GetType().FullName;
    var indexId = task.Index;

    // Sequential UUIDs keep task ids ordered by creation time.
    var newId = generator.CreateSequentialUuid(UuidType.Tasks);
    var key = (Slice)newId.ToString();

    var structure = new Structure<TaskFields>(tableStorage.Tasks.Schema)
        .Set(TaskFields.IndexId, indexId)
        .Set(TaskFields.TaskId, newId.ToByteArray())
        .Set(TaskFields.AddedAt, addedAt.ToBinary())
        .Set(TaskFields.Type, taskType)
        .Set(TaskFields.SerializedTask, task.AsBytes());

    tableStorage.Tasks.AddStruct(writeBatch.Value, key, structure, 0);

    // The index key is reused for both by-index and by-index-and-type entries.
    var indexKey = CreateKey(indexId);
    byType.MultiAdd(writeBatch.Value, (Slice)CreateKey(taskType), key);
    byIndex.MultiAdd(writeBatch.Value, (Slice)indexKey, key);
    byIndexAndType.MultiAdd(writeBatch.Value, (Slice)AppendToKey(indexKey, taskType), key);
}
/// <summary>
/// Folds other pending tasks of the same type (for the same index) into
/// <paramref name="task"/>, removing each merged-in task from storage.
/// Merging stops once the accumulated key count reaches 5K, bounding the
/// size of a single merged task.
/// </summary>
/// <param name="task">The task to merge siblings into (mutated via <c>Merge</c>).</param>
/// <param name="taskId">Id of <paramref name="task"/> itself, skipped during iteration.</param>
/// <param name="updateMaxTaskId">Called with each merged task's id so the caller can track the highest id consumed.</param>
private void MergeSimilarTasks(DatabaseTask task, Etag taskId, Action <IComparable> updateMaxTaskId)
{
    var type = task.GetType().FullName;
    var tasksByIndexAndType = tableStorage.Tasks.GetIndex(Tables.Tasks.Indices.ByIndexAndType);

    using (var iterator = tasksByIndexAndType.MultiRead(Snapshot, (Slice)CreateKey(task.Index, type)))
    {
        if (!iterator.Seek(Slice.BeforeAllKeys))
        {
            return;
        }

        var totalKeysToProcess = task.NumberOfKeys;
        do
        {
            // Cap how large the merged task may grow.
            if (totalKeysToProcess >= 5 * 1024)
            {
                break;
            }

            var currentId = Etag.Parse(iterator.CurrentKey.ToString());

            // this is the same task that we are trying to merge
            // NOTE: 'continue' in this do/while still advances the cursor,
            // because the loop condition calls iterator.MoveNext().
            if (currentId == taskId)
            {
                continue;
            }

            ushort version;
            var value = LoadStruct(tableStorage.Tasks, iterator.CurrentKey, writeBatch.Value, out version);
            if (value == null)
            {
                continue;
            }

            DatabaseTask existingTask;
            try
            {
                existingTask = DatabaseTask.ToTask(value.ReadString(TaskFields.Type), value.ReadBytes(TaskFields.SerializedTask));
            }
            catch (Exception e)
            {
                Logger.ErrorException(string.Format("Could not create instance of a task: {0}", value), e);
                // Remove the unreadable task so it cannot block future merges.
                RemoveTask(iterator.CurrentKey, task.Index, type);
                continue;
            }

            updateMaxTaskId(currentId);

            totalKeysToProcess += existingTask.NumberOfKeys;
            task.Merge(existingTask);
            // The merged-in task is now redundant; delete it.
            RemoveTask(iterator.CurrentKey, task.Index, type);
        } while (iterator.MoveNext());
    }
}
/// <summary>
/// Folds other pending tasks of the same type (for the same index) into
/// <paramref name="task"/>, removing each merged-in task from storage.
/// At most ~1024 tasks are merged per call to bound the amount of work done.
/// </summary>
/// <param name="task">The task to merge siblings into (mutated via <c>Merge</c>).</param>
/// <param name="taskId">Raw id bytes of <paramref name="task"/> itself, skipped during iteration.</param>
private void MergeSimilarTasks(DatabaseTask task, byte[] taskId)
{
    var id = Etag.Parse(taskId);
    var type = task.GetType().FullName;
    var tasksByIndexAndType = tableStorage.Tasks.GetIndex(Tables.Tasks.Indices.ByIndexAndType);

    using (var iterator = tasksByIndexAndType.MultiRead(Snapshot, (Slice)CreateKey(task.Index, type)))
    {
        if (!iterator.Seek(Slice.BeforeAllKeys))
        {
            return;
        }

        int totalTaskCount = 0;
        do
        {
            var currentId = Etag.Parse(iterator.CurrentKey.ToString());
            // Skip the task we are merging into.
            // NOTE: 'continue' in this do/while still advances the cursor,
            // because the loop condition calls iterator.MoveNext().
            if (currentId == id)
            {
                continue;
            }

            ushort version;
            var value = LoadStruct(tableStorage.Tasks, iterator.CurrentKey, writeBatch.Value, out version);
            if (value == null)
            {
                continue;
            }

            DatabaseTask existingTask;
            try
            {
                existingTask = DatabaseTask.ToTask(value.ReadString(TaskFields.Type), value.ReadBytes(TaskFields.SerializedTask));
            }
            catch (Exception e)
            {
                Logger.ErrorException(
                    string.Format("Could not create instance of a task: {0}", value), e);
                // Remove the unreadable task so it cannot block future merges.
                RemoveTask(iterator.CurrentKey, task.Index, type);
                continue;
            }

            task.Merge(existingTask);
            // The merged-in task is now redundant; delete it.
            RemoveTask(iterator.CurrentKey, task.Index, type);

            // Bound the number of tasks merged in one call; the increment
            // happens only after a successful merge.
            if (totalTaskCount++ > 1024)
            {
                break;
            }
        } while (iterator.MoveNext());
    }
}
/// <summary>
/// Inserts a new task row into the Esent Tasks table and leaves the cursor
/// positioned on the freshly inserted row.
/// </summary>
/// <param name="task">The task to store; serialized via <c>AsBytes()</c>.</param>
/// <param name="addedAt">Timestamp recorded (as binary) alongside the task.</param>
public void AddTask(DatabaseTask task, DateTime addedAt)
{
    var bookmark = new byte[bookmarkMost];
    int bookmarkSize;

    using (var update = new Update(session, Tasks, JET_prep.Insert))
    {
        var columns = tableColumnsCache.TasksColumns;
        Api.SetColumn(session, Tasks, columns["task"], task.AsBytes());
        Api.SetColumn(session, Tasks, columns["for_index"], task.Index);
        Api.SetColumn(session, Tasks, columns["task_type"], task.GetType().FullName, Encoding.ASCII);
        Api.SetColumn(session, Tasks, columns["added_at"], addedAt.ToBinary());

        update.Save(bookmark, bookmark.Length, out bookmarkSize);
    }

    // Position the cursor on the row we just inserted.
    Api.JetGotoBookmark(session, Tasks, bookmark, bookmarkSize);
}
/// <summary>
/// Folds other pending tasks of the same type into <paramref name="task"/>.
/// When <c>task.SeparateTasksByIndex</c> is true the scan is restricted to the
/// task's own index; otherwise tasks of this type across all indexes are merged,
/// skipping indexes listed in <paramref name="indexesToSkip"/> and indexes not
/// present in <paramref name="allIndexes"/>. Merging stops once the accumulated
/// key count reaches 5K, bounding the size of a single merged task.
/// </summary>
/// <param name="task">The task to merge siblings into (mutated via <c>Merge</c>).</param>
/// <param name="alreadySeen">Task ids already processed; ids are added as they are consumed.</param>
/// <param name="indexesToSkip">Index ids to skip when not separating by index.</param>
/// <param name="allIndexes">The set of currently existing index ids.</param>
private void MergeSimilarTasks(DatabaseTask task, HashSet<IComparable> alreadySeen, List<int> indexesToSkip, int[] allIndexes)
{
    var expectedTaskType = task.GetType().FullName;

    if (task.SeparateTasksByIndex)
    {
        // Restrict the cursor to rows matching (index, task type).
        Api.JetSetCurrentIndex(session, Tasks, "by_index_and_task_type");
        Api.MakeKey(session, Tasks, task.Index, MakeKeyGrbit.NewKey);
        Api.MakeKey(session, Tasks, expectedTaskType, Encoding.ASCII, MakeKeyGrbit.None);
        if (Api.TrySeek(session, Tasks, SeekGrbit.SeekEQ) == false)
        {
            // there are no tasks matching the current one, just return
            return;
        }

        Api.MakeKey(session, Tasks, task.Index, MakeKeyGrbit.NewKey);
        Api.MakeKey(session, Tasks, expectedTaskType, Encoding.ASCII, MakeKeyGrbit.None);
        Api.JetSetIndexRange(session, Tasks, SetIndexRangeGrbit.RangeInclusive | SetIndexRangeGrbit.RangeUpperLimit);
    }
    else
    {
        // Restrict the cursor to rows matching the task type across all indexes.
        Api.JetSetCurrentIndex(session, Tasks, "by_task_type");
        Api.MakeKey(session, Tasks, expectedTaskType, Encoding.ASCII, MakeKeyGrbit.NewKey);
        if (Api.TrySeek(session, Tasks, SeekGrbit.SeekEQ) == false)
        {
            // there are no tasks matching the current one, just return
            return;
        }

        Api.MakeKey(session, Tasks, expectedTaskType, Encoding.ASCII, MakeKeyGrbit.NewKey);
        Api.JetSetIndexRange(session, Tasks, SetIndexRangeGrbit.RangeInclusive | SetIndexRangeGrbit.RangeUpperLimit);
    }

    var totalKeysToProcess = task.NumberOfKeys;
    do
    {
        // Cap how large the merged task may grow.
        if (totalKeysToProcess >= 5 * 1024)
        {
            break;
        }

        var taskType = Api.RetrieveColumnAsString(session, Tasks, tableColumnsCache.TasksColumns["task_type"], Encoding.ASCII);

        // esent index ranges are approximate, and we need to check them ourselves as well
        if (taskType != expectedTaskType)
        {
            //this shouldn't happen
            logger.Warn("Tasks type mismatch: expected task type: {0}, current task type: {1}", expectedTaskType, taskType);
            continue;
        }

        var currentId = Api.RetrieveColumnAsInt32(session, Tasks, tableColumnsCache.TasksColumns["id"]).Value;
        var index = Api.RetrieveColumnAsInt32(session, Tasks, tableColumnsCache.TasksColumns["for_index"]).Value;

        if (task.SeparateTasksByIndex == false && indexesToSkip.Contains(index))
        {
            //need to check this only when not separating tasks by index
            if (logger.IsDebugEnabled)
            {
                logger.Debug("Skipping task id: {0} for index id: {1}", currentId, index);
            }
            continue;
        }

        if (alreadySeen.Add(currentId) == false)
        {
            continue;
        }

        if (task.SeparateTasksByIndex == false && allIndexes.Contains(index) == false)
        {
            //need to check this only when not separating tasks by index
            if (logger.IsDebugEnabled)
            {
                // BUGFIX: the second placeholder was "{0}", so the index id
                // was never logged (the task id appeared twice instead).
                logger.Debug("Skipping task id: {0} for non existing index id: {1}", currentId, index);
            }
            continue;
        }

        var taskAsBytes = Api.RetrieveColumn(session, Tasks, tableColumnsCache.TasksColumns["task"]);
        DatabaseTask existingTask;
        try
        {
            existingTask = DatabaseTask.ToTask(taskType, taskAsBytes);
        }
        catch (Exception e)
        {
            logger.ErrorException(
                string.Format("Could not create instance of a task: {0}", taskAsBytes), e);
            // Mark the unreadable task as seen so it is not retried.
            alreadySeen.Add(currentId);
            continue;
        }

        totalKeysToProcess += existingTask.NumberOfKeys;
        task.Merge(existingTask);
        if (logger.IsDebugEnabled)
        {
            logger.Debug("Merged task id: {0} with task id: {1}", currentId, task.Id);
        }
    } while (Api.TryMoveNext(session, Tasks));
}
/// <summary>
/// Folds other pending tasks of the same type into <paramref name="task"/>,
/// deleting each merged-in row. When <c>task.SeparateTasksByIndex</c> is true
/// the scan is restricted to the task's index; otherwise the whole table is
/// walked and filtered by task type. At most 1024 tasks are merged per call.
/// </summary>
/// <param name="task">The task to merge siblings into (mutated via <c>Merge</c>).</param>
public void MergeSimilarTasks(DatabaseTask task)
{
    var expectedTaskType = task.GetType().FullName;

    Api.JetSetCurrentIndex(session, Tasks, "by_index_and_task_type");

    if (task.SeparateTasksByIndex)
    {
        // Restrict the cursor to rows matching (index, task type).
        // NOTE(review): keys are made with Encoding.Unicode here while the
        // sibling overload uses Encoding.ASCII — confirm which matches the
        // column definition for this storage version.
        Api.MakeKey(session, Tasks, task.Index, MakeKeyGrbit.NewKey);
        Api.MakeKey(session, Tasks, expectedTaskType, Encoding.Unicode, MakeKeyGrbit.None);
        // there are no tasks matching the current one, just return
        if (Api.TrySeek(session, Tasks, SeekGrbit.SeekEQ) == false)
        {
            return;
        }

        Api.MakeKey(session, Tasks, task.Index, MakeKeyGrbit.NewKey);
        Api.MakeKey(session, Tasks, expectedTaskType, Encoding.Unicode, MakeKeyGrbit.None);
        Api.JetSetIndexRange(session, Tasks, SetIndexRangeGrbit.RangeInclusive | SetIndexRangeGrbit.RangeUpperLimit);
    }
    else
    {
        // No per-index separation: walk the whole table and filter by type below.
        if (Api.TryMoveFirst(session, Tasks) == false)
        {
            return;
        }
    }

    int totalTaskCount = 0;
    do
    {
        // esent index ranges are approximate, and we need to check them ourselves as well
        if (Api.RetrieveColumnAsString(session, Tasks, tableColumnsCache.TasksColumns["task_type"]) != expectedTaskType)
        {
            continue;
        }

        try
        {
            var taskAsBytes = Api.RetrieveColumn(session, Tasks, tableColumnsCache.TasksColumns["task"]);
            var taskType = Api.RetrieveColumnAsString(session, Tasks, tableColumnsCache.TasksColumns["task_type"], Encoding.Unicode);

            DatabaseTask existingTask;
            try
            {
                existingTask = DatabaseTask.ToTask(taskType, taskAsBytes);
            }
            catch (Exception e)
            {
                logger.ErrorException(
                    string.Format("Could not create instance of a task: {0}", taskAsBytes), e);
                // Delete the unreadable row so it cannot block future merges.
                Api.JetDelete(session, Tasks);
                continue;
            }

            task.Merge(existingTask);
            // The merged-in row is now redundant; delete it.
            Api.JetDelete(session, Tasks);
        }
        catch (EsentErrorException e)
        {
            // Another session touched this row concurrently; skip it and move on.
            // NOTE: skipping also bypasses the totalTaskCount increment below.
            if (e.Error == JET_err.WriteConflict)
            {
                continue;
            }
            throw;
        }

        totalTaskCount++;
    } while (Api.TryMoveNext(session, Tasks) && totalTaskCount < 1024);
}
/// <summary>
/// Folds other pending tasks of the same type into <paramref name="task"/>.
/// When <c>task.SeparateTasksByIndex</c> is true the scan is restricted to the
/// task's own index (by-index-and-type tree); otherwise the by-type tree is
/// used, skipping indexes listed in <paramref name="indexesToSkip"/> and
/// indexes not present in <paramref name="allIndexes"/>. Merging stops once
/// the accumulated key count reaches 5K, bounding the merged task's size.
/// </summary>
/// <param name="task">The task to merge siblings into (mutated via <c>Merge</c>).</param>
/// <param name="alreadySeen">Task ids already processed; ids are added as they are consumed.</param>
/// <param name="indexesToSkip">Index ids to skip when not separating by index.</param>
/// <param name="allIndexes">The set of currently existing index ids.</param>
private void MergeSimilarTasks(DatabaseTask task, HashSet<IComparable> alreadySeen, List<int> indexesToSkip, int[] allIndexes)
{
    string tree;
    Slice slice;
    var type = task.GetType().FullName;

    if (task.SeparateTasksByIndex)
    {
        tree = Tables.Tasks.Indices.ByIndexAndType;
        slice = (Slice)CreateKey(task.Index, type);
    }
    else
    {
        tree = Tables.Tasks.Indices.ByType;
        slice = (Slice)CreateKey(type);
    }

    using (var iterator = tableStorage.Tasks.GetIndex(tree).MultiRead(Snapshot, slice))
    {
        if (!iterator.Seek(Slice.BeforeAllKeys))
        {
            return;
        }

        var totalKeysToProcess = task.NumberOfKeys;
        do
        {
            // Cap how large the merged task may grow.
            if (totalKeysToProcess >= 5 * 1024)
            {
                break;
            }

            ushort version;
            var value = LoadStruct(tableStorage.Tasks, iterator.CurrentKey, writeBatch.Value, out version);
            if (value == null)
            {
                continue;
            }

            var currentId = Etag.Parse(iterator.CurrentKey.ToString());
            var indexId = value.ReadInt(TaskFields.IndexId);

            if (task.SeparateTasksByIndex == false && indexesToSkip.Contains(indexId))
            {
                //need to check this only when not separating tasks by index
                if (Logger.IsDebugEnabled)
                {
                    Logger.Debug("Skipping task id: {0} for index id: {1}", currentId, indexId);
                }
                continue;
            }

            if (alreadySeen.Add(currentId) == false)
            {
                continue;
            }

            if (task.SeparateTasksByIndex == false && allIndexes.Contains(indexId) == false)
            {
                //need to check this only when not separating tasks by index
                if (Logger.IsDebugEnabled)
                {
                    // BUGFIX: the second placeholder was "{0}", so the index id
                    // was never logged (the task id appeared twice instead).
                    Logger.Debug("Skipping task id: {0} for non existing index id: {1}", currentId, indexId);
                }
                continue;
            }

            DatabaseTask existingTask;
            try
            {
                existingTask = DatabaseTask.ToTask(value.ReadString(TaskFields.Type), value.ReadBytes(TaskFields.SerializedTask));
            }
            catch (Exception e)
            {
                Logger.ErrorException(string.Format("Could not create instance of a task: {0}", value), e);
                // Mark the unreadable task as seen so it is not retried.
                alreadySeen.Add(currentId);
                continue;
            }

            totalKeysToProcess += existingTask.NumberOfKeys;
            task.Merge(existingTask);
            if (Logger.IsDebugEnabled)
            {
                Logger.Debug("Merged task id: {0} with task id: {1}", currentId, task.Id);
            }
        } while (iterator.MoveNext());
    }
}