/// <summary>
/// Check if the client database is outdated, i.e. if the last server sync happened before the server's last metadata cleanup.
/// </summary>
/// <param name="clientScopeInfo">Client scope info, containing the last server sync timestamp</param>
/// <param name="serverScopeInfo">Server scope info, containing the last metadata cleanup timestamp</param>
/// <param name="cancellationToken">Cancellation token</param>
/// <param name="progress">Progress args</param>
public virtual async Task<bool> IsOutDated(ScopeInfo clientScopeInfo, ServerScopeInfo serverScopeInfo, CancellationToken cancellationToken = default, IProgress<ProgressArgs> progress = null)
{
    if (!this.StartTime.HasValue)
        this.StartTime = DateTime.UtcNow;

    bool isOutdated = false;

    // Get context or create a new one
    var ctx = this.GetContext();

    // A new client has never synced, so its last server sync timestamp is always lower than the
    // server's last cleanup timestamp. A new scope cannot be outdated, so return false directly.
    if (clientScopeInfo.IsNewScope)
        return false;

    // Check whether the provider is outdated.
    // Negative values are possible and should still be compared.
    if (clientScopeInfo.LastServerSyncTimestamp != 0 || serverScopeInfo.LastCleanupTimestamp != 0)
    {
        isOutdated = clientScopeInfo.LastServerSyncTimestamp < serverScopeInfo.LastCleanupTimestamp;
        this.logger.LogInformation(SyncEventsId.IsOutdated, new { serverScopeInfo.LastCleanupTimestamp, clientScopeInfo.LastServerSyncTimestamp, IsOutDated = isOutdated });
    }

    // Give the caller a chance to continue the sync even if it's outdated
    if (isOutdated)
    {
        var outdatedArgs = new OutdatedArgs(ctx, clientScopeInfo, serverScopeInfo);

        // Interceptor
        await this.InterceptAsync(outdatedArgs, cancellationToken).ConfigureAwait(false);

        if (outdatedArgs.Action != OutdatedAction.Rollback)
        {
            ctx.SyncType = outdatedArgs.Action == OutdatedAction.Reinitialize ? SyncType.Reinitialize : SyncType.ReinitializeWithUpload;
            this.logger.LogDebug(SyncEventsId.IsOutdated, outdatedArgs);
        }

        if (outdatedArgs.Action == OutdatedAction.Rollback)
            throw new OutOfDateException(clientScopeInfo.LastServerSyncTimestamp, serverScopeInfo.LastCleanupTimestamp);
    }

    return isOutdated;
}
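// Usage sketch (assumption: a helper living on the same orchestrator class, so "this.IsOutDated"
// refers to the method above; the helper name "TryIsOutDatedAsync" is illustrative only, not part
// of the library). If an interceptor chooses OutdatedAction.Rollback, IsOutDated throws
// OutOfDateException; otherwise the SyncContext has already been switched to a Reinitialize /
// ReinitializeWithUpload sync and the returned flag simply reports the outdated state.
public async Task<bool> TryIsOutDatedAsync(ScopeInfo clientScopeInfo, ServerScopeInfo serverScopeInfo, CancellationToken cancellationToken = default)
{
    try
    {
        return await this.IsOutDated(clientScopeInfo, serverScopeInfo, cancellationToken).ConfigureAwait(false);
    }
    catch (OutOfDateException)
    {
        // Rollback was requested: the client is too old to sync incrementally and must be reinitialized explicitly.
        return true;
    }
}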
/// <summary>
/// Gets a batch of changes to synchronize when given batch size,
/// destination knowledge, and change data retriever parameters.
/// </summary>
/// <returns>A tuple containing the SyncContext, the BatchInfo and the statistics about the selected changes.</returns>
public virtual async Task<(SyncContext, BatchInfo, DatabaseChangesSelected)> GetChangeBatchAsync(
    SyncContext context, MessageGetChangesBatch message)
{
    try
    {
        if (message.ScopeInfo == null)
            throw new ArgumentNullException("scopeInfo", "Client scope info is null");

        // Check whether the provider is outdated
        var isOutdated = this.IsRemoteOutdated();

        // Give the caller a chance to continue the sync even if it's outdated
        if (isOutdated)
        {
            var outdatedArgs = new OutdatedArgs(context, null, null);

            // Interceptor
            await this.InterceptAsync(outdatedArgs).ConfigureAwait(false);

            if (outdatedArgs.Action != OutdatedSyncAction.Rollback)
                context.SyncType = outdatedArgs.Action == OutdatedSyncAction.Reinitialize ? SyncType.Reinitialize : SyncType.ReinitializeWithUpload;

            if (outdatedArgs.Action == OutdatedSyncAction.Rollback)
                throw new OutOfDateException("The provider is out of date! Try a Reinitialize sync.");
        }

        // Create the local batch directory if needed
        if (this.Options.BatchSize > 0 && !string.IsNullOrEmpty(this.Options.BatchDirectory) && !Directory.Exists(this.Options.BatchDirectory))
            Directory.CreateDirectory(this.Options.BatchDirectory);

        // Batch info containing the changes
        BatchInfo batchInfo;

        // Statistics about the selected changes
        DatabaseChangesSelected changesSelected;

        // If the client is uploading during a Reinitialize, don't get any changes from it;
        // otherwise get changes through the in-memory or batched methods.
        if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
            (batchInfo, changesSelected) = this.GetEmptyChanges(context, message.ScopeInfo, this.Options.BatchSize, this.Options.BatchDirectory);
        else if (this.Options.BatchSize == 0)
            (batchInfo, changesSelected) = await this.EnumerateChangesInternalAsync(context, message.ScopeInfo, message.Schema, this.Options.BatchDirectory, message.Policy, message.Filters).ConfigureAwait(false);
        else
            (batchInfo, changesSelected) = await this.EnumerateChangesInBatchesInternalAsync(context, message.ScopeInfo, this.Options.BatchSize, message.Schema, this.Options.BatchDirectory, message.Policy, message.Filters).ConfigureAwait(false);

        return (context, batchInfo, changesSelected);
    }
    catch (Exception ex)
    {
        throw new SyncException(ex, SyncStage.TableChangesSelecting);
    }
}
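// Minimal sketch of the selection-mode decision taken in GetChangeBatchAsync above
// (illustrative only; the "ChangesSelectionMode" enum and "GetSelectionMode" helper are
// assumptions and not part of the provider).
public enum ChangesSelectionMode { Empty, InMemory, Batched }

public static ChangesSelectionMode GetSelectionMode(SyncWay syncWay, SyncType syncType, int batchSize)
{
    // A client uploading during a Reinitialize sends nothing: its rows will be overwritten anyway.
    if (syncWay == SyncWay.Upload && syncType == SyncType.Reinitialize)
        return ChangesSelectionMode.Empty;

    // BatchSize == 0 keeps everything in memory; a positive value serializes batch files to the batch directory.
    return batchSize == 0 ? ChangesSelectionMode.InMemory : ChangesSelectionMode.Batched;
}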
/// <summary>
/// Gets a batch of changes to synchronize when given batch size,
/// destination knowledge, and change data retriever parameters.
/// </summary>
/// <returns>A tuple containing the SyncContext, the BatchInfo and the statistics about the selected changes.</returns>
public virtual async Task<(SyncContext, BatchInfo, DatabaseChangesSelected)> GetChangeBatchAsync(
    SyncContext context, MessageGetChangesBatch message,
    DbConnection connection, DbTransaction transaction,
    CancellationToken cancellationToken, IProgress<ProgressArgs> progress = null)
{
    // Batch info containing the changes
    BatchInfo batchInfo;

    // Statistics about the selected changes
    DatabaseChangesSelected changesSelected;

    // If the client is uploading during a Reinitialize, don't get any changes from it
    if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
    {
        (batchInfo, changesSelected) = this.GetEmptyChanges(message);
        return (context, batchInfo, changesSelected);
    }

    // Check whether the provider is outdated
    var isOutdated = this.IsRemoteOutdated();

    // Give the caller a chance to continue the sync even if it's outdated
    if (isOutdated)
    {
        var outdatedArgs = new OutdatedArgs(context, null, null);

        // Interceptor
        await this.InterceptAsync(outdatedArgs).ConfigureAwait(false);

        if (outdatedArgs.Action != OutdatedAction.Rollback)
            context.SyncType = outdatedArgs.Action == OutdatedAction.Reinitialize ? SyncType.Reinitialize : SyncType.ReinitializeWithUpload;

        if (outdatedArgs.Action == OutdatedAction.Rollback)
            throw new OutOfDateException();
    }

    // Create the local batch directory if needed
    if (message.BatchSize > 0 && !string.IsNullOrEmpty(message.BatchDirectory) && !Directory.Exists(message.BatchDirectory))
        Directory.CreateDirectory(message.BatchDirectory);

    // Number of batch files generated
    var batchIndex = 0;

    // Check if we are in batch mode
    var isBatch = message.BatchSize > 0;

    // Create stats object to store changes count
    var changes = new DatabaseChangesSelected();

    // Create the in-memory changes set
    var changesSet = new SyncSet(message.Schema.ScopeName);

    // Create a schema without readonly columns, attached to the in-memory changes set
    foreach (var table in message.Schema.Tables)
        DbSyncAdapter.CreateChangesTable(message.Schema.Tables[table.TableName, table.SchemaName], changesSet);

    // Create a batch info kept in memory (if !isBatch) or serialized on disk (if isBatch).
    // BatchInfo generates a schema clone with scope columns if needed.
    batchInfo = new BatchInfo(!isBatch, changesSet, message.BatchDirectory);

    // Clear tables, we will add only the ones we need in the batch info
    changesSet.Clear();

    foreach (var syncTable in message.Schema.Tables)
    {
        // In the upload stage, skip download-only tables
        if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
            continue;

        // In the download stage, skip upload-only tables
        if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
            continue;

        var tableBuilder = this.GetTableBuilder(syncTable);
        var syncAdapter = tableBuilder.CreateSyncAdapter(connection, transaction);

        // Raise the "selecting" event before reading changes
        context.SyncStage = SyncStage.TableChangesSelecting;
        var tableChangesSelectingArgs = new TableChangesSelectingArgs(context, syncTable.TableName, connection, transaction);

        // Launch interceptor if any
        await this.InterceptAsync(tableChangesSelectingArgs).ConfigureAwait(false);

        // Get command
        var selectIncrementalChangesCommand = this.GetSelectChangesCommand(context, syncAdapter, syncTable, message.IsNew);

        // Set parameters
        this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, selectIncrementalChangesCommand);

        // Statistics
        var tableChangesSelected = new TableChangesSelected(syncTable.TableName);

        // Get the reader
        using (var dataReader = selectIncrementalChangesCommand.ExecuteReader())
        {
            // Cumulative memory size of the rows read so far
            double rowsMemorySize = 0L;

            // Create a changes table with scope columns
            var changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

            while (dataReader.Read())
            {
                // Create a row from the dataReader
                var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                // Add the row to the changes set
                changesSetTable.Rows.Add(row);

                // Set the correct state to be applied
                if (row.RowState == DataRowState.Deleted)
                    tableChangesSelected.Deletes++;
                else if (row.RowState == DataRowState.Modified)
                    tableChangesSelected.Upserts++;

                // Calculate the row size if in batch mode
                if (isBatch)
                {
                    var fieldsSize = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                    var finalFieldSize = fieldsSize / 1024d;

                    if (finalFieldSize > message.BatchSize)
                        throw new RowOverSizedException(finalFieldSize.ToString());

                    // Calculate the new memory size
                    rowsMemorySize += finalFieldSize;

                    // Move to the next row if we haven't reached the batch size yet
                    if (rowsMemorySize <= message.BatchSize)
                        continue;

                    // Add changes to the batch info
                    batchInfo.AddChanges(changesSet, batchIndex, false);

                    // Increment batch index
                    batchIndex++;

                    // The data is serialized at this point, so we can flush the set
                    changesSet.Clear();

                    // Recreate an empty set and changes table
                    changesSet = new SyncSet(message.Schema.ScopeName);
                    changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                    // Reset the row memory size
                    rowsMemorySize = 0L;
                }
            }
        }

        selectIncrementalChangesCommand.Dispose();

        context.SyncStage = SyncStage.TableChangesSelected;

        if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
            changes.TableChangesSelected.Add(tableChangesSelected);

        // Event progress & interceptor
        var tableChangesSelectedArgs = new TableChangesSelectedArgs(context, tableChangesSelected, connection, transaction);
        this.ReportProgress(context, progress, tableChangesSelectedArgs);
        await this.InterceptAsync(tableChangesSelectedArgs).ConfigureAwait(false);
    }

    // We are in batch mode and at the last batch part.
    // Even if there are no rows left, return the changesSet, since it contains at least the schema.
    if (changesSet != null && changesSet.HasTables)
        batchInfo.AddChanges(changesSet, batchIndex, true);

    // Flag the last batch part as the last one
    batchInfo.EnsureLastBatch();

    return (context, batchInfo, changes);
}
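// Minimal, self-contained sketch of the size-based batch splitting used in the selection loop above
// (illustrative only, not part of the provider; sizes are expressed in KB, mirroring the
// fieldsSize / 1024d comparison against message.BatchSize, and "CountBatchParts" is a hypothetical name).
private static int CountBatchParts(System.Collections.Generic.IEnumerable<double> rowSizesKb, double batchSizeKb)
{
    var batchIndex = 0;          // number of batch parts already flushed
    var rowsMemorySizeKb = 0d;   // cumulative size of the rows in the current part

    foreach (var rowKb in rowSizesKb)
    {
        // A single row bigger than the whole batch cannot be serialized (RowOverSizedException in the real code)
        if (rowKb > batchSizeKb)
            throw new ArgumentOutOfRangeException(nameof(rowSizesKb), $"Row of {rowKb} KB exceeds the {batchSizeKb} KB batch size.");

        rowsMemorySizeKb += rowKb;

        // Keep accumulating until the threshold is crossed, then flush a part and start a new one
        if (rowsMemorySizeKb <= batchSizeKb)
            continue;

        batchIndex++;
        rowsMemorySizeKb = 0d;
    }

    // The last part is always written, even when it carries only the schema and no rows
    return batchIndex + 1;
}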