/// <summary>
/// Create a snapshot of all tables, serialized on disk, to be used when initializing new clients.
/// </summary>
public virtual async Task<SyncContext> CreateSnapshotAsync(SyncContext context, SyncSet schema,
    DbConnection connection, DbTransaction transaction, string batchDirectory, int batchSize,
    long remoteClientTimestamp, CancellationToken cancellationToken, IProgress<ProgressArgs> progress = null)
{
    // Create the local directory if it does not exist yet.
    if (!Directory.Exists(batchDirectory))
        Directory.CreateDirectory(batchDirectory);

    // Number of batch files generated.
    var batchIndex = 0;

    // Create the in-memory changes set.
    var changesSet = new SyncSet();

    // Create a schema set without read-only tables, attached to the in-memory changes.
    foreach (var table in schema.Tables)
        DbSyncAdapter.CreateChangesTable(schema.Tables[table.TableName, table.SchemaName], changesSet);

    // Build the snapshot directory name from the sync parameters:
    // "Name_Value" pairs, alphanumeric characters only, joined with underscores.
    var sb = new StringBuilder();
    var underscore = "";

    if (context.Parameters != null)
    {
        foreach (var p in context.Parameters.OrderBy(p => p.Name))
        {
            var cleanValue = new string(p.Value.ToString().Where(char.IsLetterOrDigit).ToArray());
            var cleanName = new string(p.Name.Where(char.IsLetterOrDigit).ToArray());

            sb.Append($"{underscore}{cleanName}_{cleanValue}");
            underscore = "_";
        }
    }

    var directoryName = sb.ToString();
    directoryName = string.IsNullOrEmpty(directoryName) ? "ALL" : directoryName;

    var directoryFullPath = Path.Combine(batchDirectory, directoryName);

    if (Directory.Exists(directoryFullPath))
        Directory.Delete(directoryFullPath, true);

    // BatchInfo generates a schema clone with scope columns if needed.
    var batchInfo = new BatchInfo(false, changesSet, batchDirectory, directoryName);

    // Clear the tables; only the ones we need will be added to the batch info.
    changesSet.Clear();

    foreach (var syncTable in schema.Tables)
    {
        var tableBuilder = this.GetTableBuilder(syncTable);
        var syncAdapter = tableBuilder.CreateSyncAdapter(connection, transaction);

        // Raise the before event.
        context.SyncStage = SyncStage.TableChangesSelecting;
        var tableChangesSelectingArgs = new TableChangesSelectingArgs(context, syncTable.TableName, connection, transaction);

        // Launch the interceptor, if any.
        await this.InterceptAsync(tableChangesSelectingArgs).ConfigureAwait(false);

        // Get the "select initialize changes" command.
        var selectIncrementalChangesCommand = this.GetSelectChangesCommand(context, syncAdapter, syncTable, true);

        // Set parameters.
        this.SetSelectChangesCommonParameters(context, syncTable, null, true, 0, selectIncrementalChangesCommand);

        // Get the reader.
        using (var dataReader = selectIncrementalChangesCommand.ExecuteReader())
        {
            // Total memory size.
            double rowsMemorySize = 0L;

            // Create a changes table with scope columns.
            var changesSetTable = DbSyncAdapter.CreateChangesTable(schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

            while (dataReader.Read())
            {
                // Create a row from the data reader.
                var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                // Add the row to the changes set.
                changesSetTable.Rows.Add(row);

                var fieldsSize = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                var finalFieldSize = fieldsSize / 1024d;

                // A single row larger than the batch size can never fit in any batch.
                if (finalFieldSize > batchSize)
                    throw new RowOverSizedException(finalFieldSize.ToString());

                // Calculate the new memory size.
                rowsMemorySize += finalFieldSize;

                // Next row if we have not reached the batch size yet.
                if (rowsMemorySize <= batchSize)
                    continue;

                // Add the changes to the batch info.
                batchInfo.AddChanges(changesSet, batchIndex, false);

                // Increment the batch index.
                batchIndex++;

                // The data is serialized at this point, so we can flush the set.
                changesSet.Clear();

                // Recreate an empty set and an empty changes table.
                changesSet = new SyncSet();
                changesSetTable = DbSyncAdapter.CreateChangesTable(schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                // Reset the row memory size.
                rowsMemorySize = 0L;
            }
        }

        selectIncrementalChangesCommand.Dispose();
    }

    if (changesSet != null && changesSet.HasTables)
        batchInfo.AddChanges(changesSet, batchIndex, true);

    // Mark the last index as the last batch.
    batchInfo.EnsureLastBatch();

    batchInfo.Timestamp = remoteClientTimestamp;

    // Serialize the batch info summary on disk.
    var jsonConverter = new JsonConverter<BatchInfo>();
    var summaryFileName = Path.Combine(directoryFullPath, "summary.json");

    using (var f = new FileStream(summaryFileName, FileMode.CreateNew, FileAccess.ReadWrite))
    {
        var bytes = jsonConverter.Serialize(batchInfo);
        f.Write(bytes, 0, bytes.Length);
    }

    return context;
}
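// Illustrative sketch (not part of the original source): the snapshot directory
// name above is derived from the sync parameters. Each parameter contributes a
// "Name_Value" pair with non-alphanumeric characters stripped, the pairs are
// joined with underscores in parameter-name order, and an unfiltered scope
// falls back to "ALL". The SnapshotParameter record is a hypothetical stand-in
// for the library's parameter type, used only to keep the sketch self-contained;
// System.Linq and System.Text are assumed to be imported, as they are above.
public record SnapshotParameter(string Name, object Value);

public static class SnapshotNaming
{
    // Mirrors the naming convention used by CreateSnapshotAsync above.
    public static string GetDirectoryName(IEnumerable<SnapshotParameter> parameters)
    {
        var sb = new StringBuilder();
        var underscore = "";

        foreach (var p in parameters.OrderBy(p => p.Name))
        {
            var cleanName = new string(p.Name.Where(char.IsLetterOrDigit).ToArray());
            var cleanValue = new string(p.Value.ToString().Where(char.IsLetterOrDigit).ToArray());

            sb.Append($"{underscore}{cleanName}_{cleanValue}");
            underscore = "_";
        }

        // An empty parameter list means the snapshot covers all data.
        return sb.Length == 0 ? "ALL" : sb.ToString();
    }
}

// Example: GetDirectoryName(new[] { new SnapshotParameter("CustomerID", 42) })
// returns "CustomerID_42", so each filtered scope gets its own snapshot folder.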
/// <summary>
/// Gets a batch of changes to synchronize when given batch size,
/// destination knowledge, and change data retriever parameters.
/// </summary>
/// <returns>A DbSyncContext object that will be used to retrieve the modified data.</returns>
internal virtual async Task<(SyncContext, BatchInfo, DatabaseChangesSelected)> InternalGetChangesAsync(
    SyncContext context, MessageGetChangesBatch message, DbConnection connection, DbTransaction transaction,
    CancellationToken cancellationToken, IProgress<ProgressArgs> progress)
{
    // Batch info containing the changes.
    BatchInfo batchInfo;

    // Statistics about the selected changes.
    DatabaseChangesSelected changesSelected;

    if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
    {
        (batchInfo, changesSelected) = await this.InternalGetEmptyChangesAsync(message).ConfigureAwait(false);
        return (context, batchInfo, changesSelected);
    }

    // Call the interceptor.
    await this.InterceptAsync(new DatabaseChangesSelectingArgs(context, message, connection, transaction), cancellationToken).ConfigureAwait(false);

    // Create the local directory.
    if (message.BatchSize > 0 && !string.IsNullOrEmpty(message.BatchDirectory) && !Directory.Exists(message.BatchDirectory))
        Directory.CreateDirectory(message.BatchDirectory);

    changesSelected = new DatabaseChangesSelected();

    // Number of batch files generated.
    var batchIndex = 0;

    // Check if we are in batch mode.
    var isBatch = message.BatchSize > 0;

    // Create a batch info, in memory (if !isBatch) or serialized on disk (if isBatch).
    // BatchInfo generates a schema clone with scope columns if needed.
    batchInfo = new BatchInfo(!isBatch, message.Schema, message.BatchDirectory);

    // Clean SyncSet; only the tables we need will be added to the batch info.
    var changesSet = new SyncSet();

    var cptSyncTable = 0;
    var currentProgress = context.ProgressPercentage;

    foreach (var syncTable in message.Schema.Tables)
    {
        // Table counter, used to report the progress percentage.
        cptSyncTable++;

        // Only the table schema is replicated; no data is applied.
        if (syncTable.SyncDirection == SyncDirection.None)
            continue;

        // In the upload stage, skip download-only tables.
        if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
            continue;

        // In the download stage, skip upload-only tables.
        if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
            continue;

        // Get the command.
        var selectIncrementalChangesCommand = await this.GetSelectChangesCommandAsync(context, syncTable, message.Setup, message.IsNew, connection, transaction);

        // Set parameters.
        this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, selectIncrementalChangesCommand);

        // Launch the interceptor, if any.
        var args = new TableChangesSelectingArgs(context, syncTable, selectIncrementalChangesCommand, connection, transaction);
        await this.InterceptAsync(args, cancellationToken).ConfigureAwait(false);

        if (!args.Cancel && args.Command != null)
        {
            // Statistics.
            var tableChangesSelected = new TableChangesSelected(syncTable.TableName, syncTable.SchemaName);

            // Create a changes table with scope columns.
            var changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

            // Get the reader.
            using var dataReader = await args.Command.ExecuteReaderAsync().ConfigureAwait(false);

            // Total memory size.
            double rowsMemorySize = 0L;

            while (dataReader.Read())
            {
                // Create a row from the data reader.
                var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                // Add the row to the changes set.
                changesSetTable.Rows.Add(row);

                // Set the correct state to be applied.
                if (row.RowState == DataRowState.Deleted)
                    tableChangesSelected.Deletes++;
                else if (row.RowState == DataRowState.Modified)
                    tableChangesSelected.Upserts++;

                // Calculate the row size if in batch mode.
                if (isBatch)
                {
                    var fieldsSize = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                    var finalFieldSize = fieldsSize / 1024d;

                    // A single row larger than the batch size can never fit in any batch.
                    if (finalFieldSize > message.BatchSize)
                        throw new RowOverSizedException(finalFieldSize.ToString());

                    // Calculate the new memory size.
                    rowsMemorySize += finalFieldSize;

                    // Next row if we have not reached the batch size yet.
                    if (rowsMemorySize <= message.BatchSize)
                        continue;

                    // Check the interceptor.
                    var batchTableChangesSelectedArgs = new TableChangesSelectedArgs(context, changesSetTable, tableChangesSelected, connection, transaction);
                    await this.InterceptAsync(batchTableChangesSelectedArgs, cancellationToken).ConfigureAwait(false);

                    // Add the changes to the batch info.
                    await batchInfo.AddChangesAsync(changesSet, batchIndex, false, message.SerializerFactory, this).ConfigureAwait(false);

                    // Increment the batch index.
                    batchIndex++;

                    // The data is serialized at this point, so we can flush the set.
                    changesSet.Clear();

                    // Recreate an empty set and an empty changes table.
                    changesSet = new SyncSet();
                    changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                    // Reset the row memory size.
                    rowsMemorySize = 0L;
                }
            }

            dataReader.Close();

            // Don't report tables without changes, to limit verbosity.
            if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
                changesSelected.TableChangesSelected.Add(tableChangesSelected);

            // Even if there are no rows, raise the interceptor.
            var tableChangesSelectedArgs = new TableChangesSelectedArgs(context, changesSetTable, tableChangesSelected, connection, transaction);
            await this.InterceptAsync(tableChangesSelectedArgs, cancellationToken).ConfigureAwait(false);

            context.ProgressPercentage = currentProgress + (cptSyncTable * 0.2d / message.Schema.Tables.Count);

            // Only report progress if we have something.
            if (tableChangesSelectedArgs.TableChangesSelected.TotalChanges > 0)
                this.ReportProgress(context, progress, tableChangesSelectedArgs);
        }
    }

    // We are in batch mode and at the last batch part info.
    // Even without rows, return the changesSet, since it contains at least the schema.
    if (changesSet != null && changesSet.HasTables && changesSet.HasRows)
        await batchInfo.AddChangesAsync(changesSet, batchIndex, true, message.SerializerFactory, this).ConfigureAwait(false);

    // Set the total rows count contained in the batch info.
    batchInfo.RowsCount = changesSelected.TotalChangesSelected;

    // Mark the last index as the last batch.
    batchInfo.EnsureLastBatch();

    // Raise database changes selected.
    if (changesSelected.TotalChangesSelected > 0 || changesSelected.TotalChangesSelectedDeletes > 0 || changesSelected.TotalChangesSelectedUpdates > 0)
    {
        var databaseChangesSelectedArgs = new DatabaseChangesSelectedArgs(context, message.LastTimestamp, batchInfo, changesSelected, connection);
        this.ReportProgress(context, progress, databaseChangesSelectedArgs);
        await this.InterceptAsync(databaseChangesSelectedArgs, cancellationToken).ConfigureAwait(false);
    }

    return (context, batchInfo, changesSelected);
}
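// Illustrative sketch (not part of the original source): the spill-to-disk loop
// above follows one recurring pattern: estimate each row's size in KB, reject
// any single row larger than the whole batch budget, and flush the in-memory
// set to a new batch file whenever the accumulated size crosses the threshold.
// Below is that pattern distilled into a standalone generic helper. The
// estimateRowKb and flushBatch delegates are hypothetical stand-ins for
// ContainerTable.GetRowSizeFromDataRow / 1024d and BatchInfo.AddChangesAsync.
public static class SizeBatcher
{
    // Splits rows into batches whose estimated size stays under batchSizeKb.
    public static void BatchBySize<TRow>(
        IEnumerable<TRow> rows,
        double batchSizeKb,
        Func<TRow, double> estimateRowKb,
        Action<List<TRow>, int, bool> flushBatch)
    {
        var batch = new List<TRow>();
        var batchIndex = 0;
        double accumulatedKb = 0;

        foreach (var row in rows)
        {
            var rowKb = estimateRowKb(row);

            // A row bigger than the budget can never fit in any batch.
            if (rowKb > batchSizeKb)
                throw new InvalidOperationException($"Row of {rowKb} KB exceeds the batch size of {batchSizeKb} KB.");

            batch.Add(row);
            accumulatedKb += rowKb;

            // Keep accumulating until the budget is exceeded.
            if (accumulatedKb <= batchSizeKb)
                continue;

            flushBatch(batch, batchIndex++, false); // not the last batch
            batch = new List<TRow>();
            accumulatedKb = 0;
        }

        // The last batch is always flushed, even if empty, mirroring the
        // method above, which ships the final set because it carries the schema.
        flushBatch(batch, batchIndex, true);
    }
}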
/// <summary>
/// Enumerate all internal changes, in batch mode.
/// </summary>
internal async Task<(BatchInfo, DatabaseChangesSelected)> EnumerateChangesInBatchesInternalAsync(
    SyncContext context, ScopeInfo scopeInfo, int downloadBatchSizeInKB, DmSet configTables,
    string batchDirectory, ConflictResolutionPolicy policy, ICollection<FilterClause> filters)
{
    DmTable dmTable = null;

    // Total memory size.
    double memorySizeFromDmRows = 0L;

    var batchIndex = 0;

    // This batch info won't be in memory; it will be batched on disk.
    var batchInfo = new BatchInfo(false, batchDirectory);

    // Directory where all files will be stored.
    batchInfo.GenerateNewDirectoryName();

    // Create a stats object to store the changes count.
    var changes = new DatabaseChangesSelected();

    using (var connection = this.CreateConnection())
    {
        try
        {
            // Open the connection.
            await connection.OpenAsync();

            using (var transaction = connection.BeginTransaction())
            {
                // Create the in-memory changes set.
                var changesSet = new DmSet(configTables.DmSetName);

                foreach (var tableDescription in configTables.Tables)
                {
                    // In the upload stage, skip download-only tables.
                    if (context.SyncWay == SyncWay.Upload && tableDescription.SyncDirection == SyncDirection.DownloadOnly)
                        continue;

                    // In the download stage, skip upload-only tables.
                    if (context.SyncWay == SyncWay.Download && tableDescription.SyncDirection == SyncDirection.UploadOnly)
                        continue;

                    var builder = this.GetDatabaseBuilder(tableDescription);
                    var syncAdapter = builder.CreateSyncAdapter(connection, transaction);

                    // Raise the before event.
                    context.SyncStage = SyncStage.TableChangesSelecting;
                    var tableChangesSelectingArgs = new TableChangesSelectingArgs(context, tableDescription.TableName, connection, transaction);

                    // Launch the interceptor, if any.
                    await this.InterceptAsync(tableChangesSelectingArgs);

                    // Get the command: filtered on the server side if filters apply
                    // to this table, unfiltered otherwise.
                    DbCommand selectIncrementalChangesCommand;
                    DbCommandType dbCommandType;

                    if (this.CanBeServerProvider && context.Parameters != null && context.Parameters.Count > 0 && filters != null && filters.Count > 0)
                    {
                        var tableFilters = filters
                            .Where(f => f.TableName.Equals(tableDescription.TableName, StringComparison.InvariantCultureIgnoreCase));

                        if (tableFilters != null && tableFilters.Count() > 0)
                        {
                            dbCommandType = DbCommandType.SelectChangesWitFilters;
                            selectIncrementalChangesCommand = syncAdapter.GetCommand(dbCommandType, tableFilters);

                            if (selectIncrementalChangesCommand == null)
                                throw new Exception("Missing command 'SelectIncrementalChangesCommand'");

                            syncAdapter.SetCommandParameters(dbCommandType, selectIncrementalChangesCommand, tableFilters);
                        }
                        else
                        {
                            dbCommandType = DbCommandType.SelectChanges;
                            selectIncrementalChangesCommand = syncAdapter.GetCommand(dbCommandType);

                            if (selectIncrementalChangesCommand == null)
                                throw new Exception("Missing command 'SelectIncrementalChangesCommand'");

                            syncAdapter.SetCommandParameters(dbCommandType, selectIncrementalChangesCommand);
                        }
                    }
                    else
                    {
                        dbCommandType = DbCommandType.SelectChanges;
                        selectIncrementalChangesCommand = syncAdapter.GetCommand(dbCommandType);

                        if (selectIncrementalChangesCommand == null)
                            throw new Exception("Missing command 'SelectIncrementalChangesCommand'");

                        syncAdapter.SetCommandParameters(dbCommandType, selectIncrementalChangesCommand);
                    }

                    dmTable = this.BuildChangesTable(tableDescription.TableName, configTables);

                    // Set common parameters.
                    SetSelectChangesCommonParameters(context, scopeInfo, selectIncrementalChangesCommand);

                    // Set filter parameters, if any (server side only).
                    if (this.CanBeServerProvider && context.Parameters != null && context.Parameters.Count > 0 && filters != null && filters.Count > 0)
                    {
                        var filterTable = filters.Where(f => f.TableName.Equals(tableDescription.TableName, StringComparison.InvariantCultureIgnoreCase)).ToList();

                        if (filterTable != null && filterTable.Count > 0)
                        {
                            foreach (var filter in filterTable)
                            {
                                var parameter = context.Parameters.FirstOrDefault(p =>
                                    p.ColumnName.Equals(filter.ColumnName, StringComparison.InvariantCultureIgnoreCase) &&
                                    p.TableName.Equals(filter.TableName, StringComparison.InvariantCultureIgnoreCase));

                                if (parameter != null)
                                    DbManager.SetParameterValue(selectIncrementalChangesCommand, parameter.ColumnName, parameter.Value);
                            }
                        }
                    }

                    this.AddTrackingColumns<int>(dmTable, "sync_row_is_tombstone");

                    // Statistics.
                    var tableChangesSelected = new TableChangesSelected { TableName = tableDescription.TableName };
                    changes.TableChangesSelected.Add(tableChangesSelected);

                    // Get the reader.
                    using (var dataReader = selectIncrementalChangesCommand.ExecuteReader())
                    {
                        while (dataReader.Read())
                        {
                            var dmRow = this.CreateRowFromReader(dataReader, dmTable);
                            var state = this.GetStateFromDmRow(dmRow, scopeInfo);

                            // If the row is not deleted, inserted, or modified, go to the next one.
                            if (state != DmRowState.Deleted && state != DmRowState.Modified && state != DmRowState.Added)
                                continue;

                            var fieldsSize = DmTableSurrogate.GetRowSizeFromDataRow(dmRow);
                            var dmRowSize = fieldsSize / 1024d;

                            if (dmRowSize > downloadBatchSizeInKB)
                            {
                                var exc = $"Row is too big ({dmRowSize} kb.) for the current Configuration.DownloadBatchSizeInKB ({downloadBatchSizeInKB} kb.) Aborting Sync...";
                                throw new Exception(exc);
                            }

                            // Calculate the new memory size.
                            memorySizeFromDmRows += dmRowSize;

                            // Add the row.
                            dmTable.Rows.Add(dmRow);
                            tableChangesSelected.TotalChanges++;

                            // Accept changes before modifying the row.
                            dmRow.AcceptChanges();

                            // Set the correct state to be applied.
                            if (state == DmRowState.Deleted)
                            {
                                dmRow.Delete();
                                tableChangesSelected.Deletes++;
                            }
                            else if (state == DmRowState.Added)
                            {
                                dmRow.SetAdded();
                                tableChangesSelected.Inserts++;
                            }
                            else if (state == DmRowState.Modified)
                            {
                                dmRow.SetModified();
                                tableChangesSelected.Updates++;
                            }

                            // We exceeded the memory size, so we can add the set to a batch.
                            if (memorySizeFromDmRows > downloadBatchSizeInKB)
                            {
                                // We don't need this column anymore, so remove it.
                                this.RemoveTrackingColumns(dmTable, "sync_row_is_tombstone");

                                changesSet.Tables.Add(dmTable);

                                // Generate the batch part info.
                                batchInfo.GenerateBatchInfo(batchIndex, changesSet);

                                // Increment the batch index.
                                batchIndex++;
                                changesSet.Clear();

                                // Recreate an empty DmSet, then a DmTable clone.
                                changesSet = new DmSet(configTables.DmSetName);
                                dmTable = dmTable.Clone();
                                this.AddTrackingColumns<int>(dmTable, "sync_row_is_tombstone");

                                // Reset the row memory size.
                                memorySizeFromDmRows = 0L;

                                // Sync progress and interceptor.
                                context.SyncStage = SyncStage.TableChangesSelected;
                                var loopTableChangesSelectedArgs = new TableChangesSelectedArgs(context, tableChangesSelected, connection, transaction);
                                this.ReportProgress(context, loopTableChangesSelectedArgs);
                                await this.InterceptAsync(loopTableChangesSelectedArgs);
                            }
                        }

                        // We don't need this column anymore, so remove it.
                        this.RemoveTrackingColumns(dmTable, "sync_row_is_tombstone");

                        changesSet.Tables.Add(dmTable);

                        // Reset the row memory size.
                        memorySizeFromDmRows = 0L;

                        // Event progress and interceptor.
                        context.SyncStage = SyncStage.TableChangesSelected;
                        var tableChangesSelectedArgs = new TableChangesSelectedArgs(context, tableChangesSelected, connection, transaction);
                        this.ReportProgress(context, tableChangesSelectedArgs);
                        await this.InterceptAsync(tableChangesSelectedArgs);
                    }
                }

                // We are in batch mode and at the last batch part info.
                if (changesSet != null && changesSet.HasTables && changesSet.HasChanges())
                {
                    var batchPartInfo = batchInfo.GenerateBatchInfo(batchIndex, changesSet);

                    if (batchPartInfo != null)
                        batchPartInfo.IsLastBatch = true;
                }

                transaction.Commit();
            }
        }
        finally
        {
            if (connection != null && connection.State == ConnectionState.Open)
                connection.Close();
        }
    }

    return (batchInfo, changes);
}
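// Illustrative sketch (not part of the original source): in the legacy DmSet
// path above, each row's computed state drives three things at once: whether
// the row is kept at all, which local flag is set on the row (Delete / SetAdded /
// SetModified), and which statistics counter is incremented. The reduced mapping
// below isolates that logic; RowState and TableStats are hypothetical stand-ins
// for DmRowState and TableChangesSelected.
public enum RowState { Unchanged, Added, Modified, Deleted }

public class TableStats
{
    public int Inserts, Updates, Deletes;
    public int TotalChanges => Inserts + Updates + Deletes;
}

public static class ChangeCounter
{
    // Returns false for rows that carry no change and should be skipped,
    // mirroring the "go to the next one" branch in the loop above.
    public static bool TryCount(RowState state, TableStats stats)
    {
        switch (state)
        {
            case RowState.Deleted:  stats.Deletes++; return true;
            case RowState.Added:    stats.Inserts++; return true;
            case RowState.Modified: stats.Updates++; return true;
            default:                return false;
        }
    }
}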
/// <summary>
/// Gets an estimated count of the changed rows.
/// </summary>
internal virtual async Task<(SyncContext, DatabaseChangesSelected)> InternalGetEstimatedChangesCountAsync(
    SyncContext context, MessageGetChangesBatch message, DbConnection connection, DbTransaction transaction,
    CancellationToken cancellationToken, IProgress<ProgressArgs> progress)
{
    // Call the interceptor.
    await this.InterceptAsync(new DatabaseChangesSelectingArgs(context, message, connection, transaction), cancellationToken).ConfigureAwait(false);

    // Create a stats object to store the changes count.
    var changes = new DatabaseChangesSelected();

    if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
        return (context, changes);

    foreach (var syncTable in message.Schema.Tables)
    {
        // Only the table schema is replicated; no data is applied.
        if (syncTable.SyncDirection == SyncDirection.None)
            continue;

        // In the upload stage, skip download-only tables.
        if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
            continue;

        // In the download stage, skip upload-only tables.
        if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
            continue;

        // Get the command.
        var command = await this.GetSelectChangesCommandAsync(context, syncTable, message.Setup, message.IsNew, connection, transaction);

        // Set parameters.
        this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, command);

        // Launch the interceptor, if any.
        var args = new TableChangesSelectingArgs(context, syncTable, command, connection, transaction);
        await this.InterceptAsync(args, cancellationToken).ConfigureAwait(false);

        if (args.Cancel || args.Command == null)
            continue;

        // Statistics.
        var tableChangesSelected = new TableChangesSelected(syncTable.TableName, syncTable.SchemaName);

        // Get the reader.
        using var dataReader = await args.Command.ExecuteReaderAsync().ConfigureAwait(false);

        while (dataReader.Read())
        {
            bool isTombstone = false;

            for (var i = 0; i < dataReader.FieldCount; i++)
            {
                if (dataReader.GetName(i) == "sync_row_is_tombstone")
                {
                    isTombstone = Convert.ToInt64(dataReader.GetValue(i)) > 0;
                    break;
                }
            }

            // Set the correct state to be applied.
            if (isTombstone)
                tableChangesSelected.Deletes++;
            else
                tableChangesSelected.Upserts++;
        }

        dataReader.Close();

        // Check the interceptor.
        var changesArgs = new TableChangesSelectedArgs(context, null, tableChangesSelected, connection, transaction);
        await this.InterceptAsync(changesArgs, cancellationToken).ConfigureAwait(false);

        if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
            changes.TableChangesSelected.Add(tableChangesSelected);
    }

    // Raise database changes selected.
    var databaseChangesSelectedArgs = new DatabaseChangesSelectedArgs(context, message.LastTimestamp, null, changes, connection);
    this.ReportProgress(context, progress, databaseChangesSelectedArgs);
    await this.InterceptAsync(databaseChangesSelectedArgs, cancellationToken).ConfigureAwait(false);

    return (context, changes);
}
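// Illustrative sketch (not part of the original source): the estimation pass
// above never materializes rows; it only probes each record for the
// sync_row_is_tombstone column to decide between a delete and an upsert.
// That probe generalizes to a small helper over any DbDataReader
// (System.Data.Common assumed imported; the column name is the one used above).
public static class TombstoneReader
{
    // Returns true when the record's sync_row_is_tombstone column holds a
    // non-zero value; false when the column is absent or zero.
    public static bool IsTombstone(DbDataReader reader)
    {
        for (var i = 0; i < reader.FieldCount; i++)
        {
            if (reader.GetName(i) == "sync_row_is_tombstone")
                return Convert.ToInt64(reader.GetValue(i)) > 0;
        }

        return false;
    }
}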
/// <summary>
/// Gets a batch of changes to synchronize when given batch size,
/// destination knowledge, and change data retriever parameters.
/// </summary>
/// <returns>A DbSyncContext object that will be used to retrieve the modified data.</returns>
public virtual async Task<(SyncContext, BatchInfo, DatabaseChangesSelected)> GetChangeBatchAsync(
    SyncContext context, MessageGetChangesBatch message, DbConnection connection, DbTransaction transaction,
    CancellationToken cancellationToken, IProgress<ProgressArgs> progress = null)
{
    // Batch info containing the changes.
    BatchInfo batchInfo;

    // Statistics about the selected changes.
    DatabaseChangesSelected changesSelected;

    if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
    {
        (batchInfo, changesSelected) = this.GetEmptyChanges(message);
        return (context, batchInfo, changesSelected);
    }

    // Check if the provider is outdated.
    var isOutdated = this.IsRemoteOutdated();

    // Get a chance to make the sync happen even if the client is outdated.
    if (isOutdated)
    {
        var outdatedArgs = new OutdatedArgs(context, null, null);

        // Interceptor.
        await this.InterceptAsync(outdatedArgs).ConfigureAwait(false);

        if (outdatedArgs.Action == OutdatedAction.Rollback)
            throw new OutOfDateException();

        context.SyncType = outdatedArgs.Action == OutdatedAction.Reinitialize ? SyncType.Reinitialize : SyncType.ReinitializeWithUpload;
    }

    // Create the local directory.
    if (message.BatchSize > 0 && !string.IsNullOrEmpty(message.BatchDirectory) && !Directory.Exists(message.BatchDirectory))
        Directory.CreateDirectory(message.BatchDirectory);

    // Number of batch files generated.
    var batchIndex = 0;

    // Check if we are in batch mode.
    var isBatch = message.BatchSize > 0;

    // Create a stats object to store the changes count.
    var changes = new DatabaseChangesSelected();

    // Create the in-memory changes set.
    var changesSet = new SyncSet(message.Schema.ScopeName);

    // Create a schema set without read-only columns, attached to the in-memory changes.
    foreach (var table in message.Schema.Tables)
        DbSyncAdapter.CreateChangesTable(message.Schema.Tables[table.TableName, table.SchemaName], changesSet);

    // Create a batch info, in memory (if !isBatch) or serialized on disk (if isBatch).
    // BatchInfo generates a schema clone with scope columns if needed.
    batchInfo = new BatchInfo(!isBatch, changesSet, message.BatchDirectory);

    // Clear the tables; only the ones we need will be added to the batch info.
    changesSet.Clear();

    foreach (var syncTable in message.Schema.Tables)
    {
        // In the upload stage, skip download-only tables.
        if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
            continue;

        // In the download stage, skip upload-only tables.
        if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
            continue;

        var tableBuilder = this.GetTableBuilder(syncTable);
        var syncAdapter = tableBuilder.CreateSyncAdapter(connection, transaction);

        // Raise the before event.
        context.SyncStage = SyncStage.TableChangesSelecting;
        var tableChangesSelectingArgs = new TableChangesSelectingArgs(context, syncTable.TableName, connection, transaction);

        // Launch the interceptor, if any.
        await this.InterceptAsync(tableChangesSelectingArgs).ConfigureAwait(false);

        // Get the command.
        var selectIncrementalChangesCommand = this.GetSelectChangesCommand(context, syncAdapter, syncTable, message.IsNew);

        // Set parameters.
        this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, selectIncrementalChangesCommand);

        // Statistics.
        var tableChangesSelected = new TableChangesSelected(syncTable.TableName);

        // Get the reader.
        using (var dataReader = selectIncrementalChangesCommand.ExecuteReader())
        {
            // Total memory size.
            double rowsMemorySize = 0L;

            // Create a changes table with scope columns.
            var changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

            while (dataReader.Read())
            {
                // Create a row from the data reader.
                var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                // Add the row to the changes set.
                changesSetTable.Rows.Add(row);

                // Set the correct state to be applied.
                if (row.RowState == DataRowState.Deleted)
                    tableChangesSelected.Deletes++;
                else if (row.RowState == DataRowState.Modified)
                    tableChangesSelected.Upserts++;

                // Calculate the row size if in batch mode.
                if (isBatch)
                {
                    var fieldsSize = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                    var finalFieldSize = fieldsSize / 1024d;

                    // A single row larger than the batch size can never fit in any batch.
                    if (finalFieldSize > message.BatchSize)
                        throw new RowOverSizedException(finalFieldSize.ToString());

                    // Calculate the new memory size.
                    rowsMemorySize += finalFieldSize;

                    // Next row if we have not reached the batch size yet.
                    if (rowsMemorySize <= message.BatchSize)
                        continue;

                    // Add the changes to the batch info.
                    batchInfo.AddChanges(changesSet, batchIndex, false);

                    // Increment the batch index.
                    batchIndex++;

                    // The data is serialized at this point, so we can flush the set.
                    changesSet.Clear();

                    // Recreate an empty set and an empty changes table.
                    changesSet = new SyncSet(message.Schema.ScopeName);
                    changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                    // Reset the row memory size.
                    rowsMemorySize = 0L;
                }
            }
        }

        selectIncrementalChangesCommand.Dispose();

        context.SyncStage = SyncStage.TableChangesSelected;

        if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
            changes.TableChangesSelected.Add(tableChangesSelected);

        // Event progress and interceptor.
        var tableChangesSelectedArgs = new TableChangesSelectedArgs(context, tableChangesSelected, connection, transaction);
        this.ReportProgress(context, progress, tableChangesSelectedArgs);
        await this.InterceptAsync(tableChangesSelectedArgs).ConfigureAwait(false);
    }

    // We are in batch mode and at the last batch part info.
    // Even without rows, return the changesSet, since it contains at least the schema.
    if (changesSet != null && changesSet.HasTables)
        batchInfo.AddChanges(changesSet, batchIndex, true);

    // Mark the last index as the last batch.
    batchInfo.EnsureLastBatch();

    return (context, batchInfo, changes);
}
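// Illustrative sketch (not part of the original source): the outdated-client
// branch at the top of GetChangeBatchAsync encodes a small policy: a rollback
// aborts the sync, while either reinitialize action rewrites the sync type
// before any changes are selected. The standalone mapping below isolates that
// decision; the enums are hypothetical stand-ins mirroring OutdatedAction and
// SyncType above.
public enum OutdatedClientAction { Rollback, Reinitialize, ReinitializeWithUpload }
public enum SyncKind { Normal, Reinitialize, ReinitializeWithUpload }

public static class OutdatedPolicy
{
    // Maps the interceptor's chosen action to the sync type that the rest of
    // the pipeline will honor; Rollback aborts the sync instead.
    public static SyncKind Resolve(OutdatedClientAction action) => action switch
    {
        OutdatedClientAction.Rollback => throw new InvalidOperationException("Client scope is out of date; sync rolled back."),
        OutdatedClientAction.Reinitialize => SyncKind.Reinitialize,
        OutdatedClientAction.ReinitializeWithUpload => SyncKind.ReinitializeWithUpload,
        _ => SyncKind.Normal
    };
}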