/// <summary>
/// Apply changes internal method for one Insert or Update or Delete for every dbSyncAdapter
/// </summary>
internal async Task ApplyChangesInternalAsync(
    SyncTable schemaTable,
    SyncContext context,
    MessageApplyChanges message,
    DbConnection connection,
    DbTransaction transaction,
    DataRowState applyType,
    DatabaseChangesApplied changesApplied,
    CancellationToken cancellationToken,
    IProgress<ProgressArgs> progress)
{
    this.Orchestrator.logger.LogDebug(SyncEventsId.ApplyChanges, message);

    // If we are in the upload stage, check that the table is not download only
    if (context.SyncWay == SyncWay.Upload && schemaTable.SyncDirection == SyncDirection.DownloadOnly)
        return;

    // If we are in the download stage, check that the table is not upload only
    if (context.SyncWay == SyncWay.Download && schemaTable.SyncDirection == SyncDirection.UploadOnly)
        return;

    var builder = this.GetTableBuilder(schemaTable, message.Setup);
    var syncAdapter = builder.CreateSyncAdapter(connection, transaction);

    syncAdapter.ApplyType = applyType;

    var hasChanges = await message.Changes.HasDataAsync(this.Orchestrator);

    // Each table in the messages contains scope columns. Don't forget it
    if (hasChanges)
    {
        // Getting the table to be applied.
        // We may have multiple batch files, so we can have multiple sync tables with the same name:
        // a sync table may be contained in several files.
        foreach (var syncTable in message.Changes.GetTable(schemaTable.TableName, schemaTable.SchemaName, this.Orchestrator))
        {
            if (syncTable == null || syncTable.Rows == null || syncTable.Rows.Count == 0)
                continue;

            // Creating a filtered view of my rows with the correct applyType
            var filteredRows = syncTable.Rows.Where(r => r.RowState == applyType);

            // No filtered rows, go to the next container table
            if (filteredRows.Count() == 0)
                continue;

            // Conflicts occurred when trying to apply rows
            var conflicts = new List<SyncConflict>();

            // Create an empty set that will contain the filtered rows to apply.
            // We need the schema for culture & case sensitive properties.
            var changesSet = syncTable.Schema.Clone(false);
            var schemaChangesTable = syncTable.Clone();
            changesSet.Tables.Add(schemaChangesTable);
            schemaChangesTable.Rows.AddRange(filteredRows.ToList());

            if (this.Orchestrator.logger.IsEnabled(LogLevel.Trace))
            {
                foreach (var row in schemaChangesTable.Rows)
                    this.Orchestrator.logger.LogTrace(SyncEventsId.ApplyChanges, row);
            }

            // Launch any interceptor if available
            await this.Orchestrator.InterceptAsync(new TableChangesApplyingArgs(context, schemaChangesTable, applyType, connection, transaction), cancellationToken).ConfigureAwait(false);

            int rowsApplied = 0;

            if (message.UseBulkOperations && this.SupportBulkOperations)
                rowsApplied = await syncAdapter.ApplyBulkChangesAsync(message.LocalScopeId, message.SenderScopeId, schemaChangesTable, message.LastTimestamp, conflicts);
            else
                rowsApplied = await syncAdapter.ApplyChangesAsync(message.LocalScopeId, message.SenderScopeId, schemaChangesTable, message.LastTimestamp, conflicts);

            // Resolving conflicts
            var (rowsAppliedCount, conflictsResolvedCount, syncErrorsCount) = await ResolveConflictsAsync(context, message.LocalScopeId, message.SenderScopeId, syncAdapter, conflicts, message, connection, transaction).ConfigureAwait(false);

            // Add conflict rows that are correctly resolved, as applied
            rowsApplied += rowsAppliedCount;

            // Handle sync progress for this sync adapter (so this table)
            var changedFailed = filteredRows.Count() - conflictsResolvedCount - rowsApplied;

            // We may have multiple batch files, so we can have multiple sync tables with the same name:
            // a syncTable may be contained in several files.
            // That's why we should reuse an applied changes instance if it already exists from a previous batch file.
            var existAppliedChanges = changesApplied.TableChangesApplied.FirstOrDefault(tca =>
            {
                var sc = SyncGlobalization.DataSourceStringComparison;

                var sn = tca.SchemaName == null ? string.Empty : tca.SchemaName;
                var otherSn = schemaTable.SchemaName == null ? string.Empty : schemaTable.SchemaName;

                return tca.TableName.Equals(schemaTable.TableName, sc)
                    && sn.Equals(otherSn, sc)
                    && tca.State == applyType;
            });

            if (existAppliedChanges == null)
            {
                existAppliedChanges = new TableChangesApplied
                {
                    TableName = schemaTable.TableName,
                    SchemaName = schemaTable.SchemaName,
                    Applied = rowsApplied,
                    ResolvedConflicts = conflictsResolvedCount,
                    Failed = changedFailed,
                    State = applyType
                };
                changesApplied.TableChangesApplied.Add(existAppliedChanges);
            }
            else
            {
                existAppliedChanges.Applied += rowsApplied;
                existAppliedChanges.ResolvedConflicts += conflictsResolvedCount;
                existAppliedChanges.Failed += changedFailed;
            }

            var tableChangesAppliedArgs = new TableChangesAppliedArgs(context, existAppliedChanges, connection, transaction);

            // We don't report progress if we have not applied any changes on the table, to limit the verbosity of Progress
            if (tableChangesAppliedArgs.TableChangesApplied.Applied > 0 || tableChangesAppliedArgs.TableChangesApplied.Failed > 0 || tableChangesAppliedArgs.TableChangesApplied.ResolvedConflicts > 0)
            {
                await this.Orchestrator.InterceptAsync(tableChangesAppliedArgs, cancellationToken).ConfigureAwait(false);
                this.Orchestrator.ReportProgress(context, progress, tableChangesAppliedArgs, connection, transaction);
                this.Orchestrator.logger.LogDebug(SyncEventsId.ApplyChanges, tableChangesAppliedArgs);
            }
        }
    }
}
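// Illustrative sketch (not part of the library source): a minimal, self-contained model of the
// per-table accounting performed above, using simplified stand-in types. It shows how counts coming
// from several batch files for the same (TableName, SchemaName, State) key are merged into a single
// entry, and how "Failed" is derived as filtered rows minus resolved conflicts minus applied rows.
using System;
using System.Collections.Generic;
using System.Linq;

enum RowStateSketch { Modified, Deleted }

class AppliedEntrySketch
{
    public string TableName = "";
    public string SchemaName = "";
    public RowStateSketch State;
    public int Applied;
    public int ResolvedConflicts;
    public int Failed;
}

static class TableAccountingSketch
{
    public static void Accumulate(List<AppliedEntrySketch> all, string table, string schema, RowStateSketch state,
        int filteredRowsCount, int rowsApplied, int conflictsResolved)
    {
        // Failures are whatever was neither applied nor resolved as a conflict
        var failed = filteredRowsCount - conflictsResolved - rowsApplied;

        // Reuse the entry created by a previous batch file, if any (string comparison simplified here)
        var entry = all.FirstOrDefault(e =>
            string.Equals(e.TableName, table, StringComparison.OrdinalIgnoreCase) &&
            string.Equals(e.SchemaName ?? "", schema ?? "", StringComparison.OrdinalIgnoreCase) &&
            e.State == state);

        if (entry == null)
        {
            all.Add(new AppliedEntrySketch { TableName = table, SchemaName = schema, State = state, Applied = rowsApplied, ResolvedConflicts = conflictsResolved, Failed = failed });
        }
        else
        {
            entry.Applied += rowsApplied;
            entry.ResolvedConflicts += conflictsResolved;
            entry.Failed += failed;
        }
    }

    public static void Main()
    {
        var all = new List<AppliedEntrySketch>();
        // Two batch files for the same table: 10 filtered rows (8 applied, 1 conflict resolved), then 5 rows (5 applied)
        Accumulate(all, "Customer", "dbo", RowStateSketch.Modified, 10, 8, 1);
        Accumulate(all, "Customer", "dbo", RowStateSketch.Modified, 5, 5, 0);
        var e = all.Single();
        Console.WriteLine($"{e.TableName}: applied={e.Applied}, resolved={e.ResolvedConflicts}, failed={e.Failed}"); // applied=13, resolved=1, failed=1
    }
}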
/// <summary>
/// Apply changes internal method for one Insert or Update or Delete for every dbSyncAdapter
/// </summary>
internal async Task<ChangeApplicationAction> ApplyChangesInternalAsync(
    DmTable table,
    SyncContext context,
    MessageApplyChanges message,
    DbConnection connection,
    DbTransaction transaction,
    DmRowState applyType,
    DatabaseChangesApplied changesApplied)
{
    var changeApplicationAction = ChangeApplicationAction.Continue;

    // If we are in the upload stage, check that the table is not download only
    if (context.SyncWay == SyncWay.Upload && table.SyncDirection == SyncDirection.DownloadOnly)
        return ChangeApplicationAction.Continue;

    // If we are in the download stage, check that the table is not upload only
    if (context.SyncWay == SyncWay.Download && table.SyncDirection == SyncDirection.UploadOnly)
        return ChangeApplicationAction.Continue;

    var builder = this.GetDatabaseBuilder(table);
    var syncAdapter = builder.CreateSyncAdapter(connection, transaction);

    syncAdapter.ApplyType = applyType;

    if (message.Changes.BatchPartsInfo != null && message.Changes.BatchPartsInfo.Count > 0)
    {
        // Getting the table to be applied.
        // We may have multiple batch files, so we can have multiple DmTables with the same name:
        // a dmTable may be contained in several files.
        foreach (var dmTablePart in message.Changes.GetTable(table.TableName))
        {
            if (dmTablePart == null || dmTablePart.Rows.Count == 0)
                continue;

            // Check and filter
            var dmChangesView = new DmView(dmTablePart, (r) => r.RowState == applyType);

            if (dmChangesView.Count == 0)
            {
                dmChangesView.Dispose();
                dmChangesView = null;
                continue;
            }

            // Conflicts occurred when trying to apply rows
            var conflicts = new List<SyncConflict>();

            context.SyncStage = SyncStage.TableChangesApplying;

            // Launch any interceptor if available
            await this.InterceptAsync(new TableChangesApplyingArgs(context, table, applyType, connection, transaction));

            int rowsApplied;

            // Applying the bulk changes command
            if (this.Options.UseBulkOperations && this.SupportBulkOperations)
                rowsApplied = syncAdapter.ApplyBulkChanges(dmChangesView, message.FromScope, conflicts);
            else
                rowsApplied = syncAdapter.ApplyChanges(dmChangesView, message.FromScope, conflicts);

            // If conflicts occurred.
            // Eventually, conflicts are resolved on the server side.
            if (conflicts != null && conflicts.Count > 0)
            {
                foreach (var conflict in conflicts)
                {
                    //var scopeBuilder = this.GetScopeBuilder();
                    //var scopeInfoBuilder = scopeBuilder.CreateScopeInfoBuilder(message.ScopeInfoTableName, connection, transaction);
                    //var localTimeStamp = scopeInfoBuilder.GetLocalTimestamp();
                    var fromScopeLocalTimeStamp = message.FromScope.Timestamp;

                    var conflictCount = 0;
                    DmRow resolvedRow = null;

                    (changeApplicationAction, conflictCount, resolvedRow) =
                        await this.HandleConflictAsync(syncAdapter, context, conflict, message.Policy, message.FromScope, fromScopeLocalTimeStamp, connection, transaction);

                    if (changeApplicationAction == ChangeApplicationAction.Continue)
                    {
                        // Row resolved
                        if (resolvedRow != null)
                        {
                            context.TotalSyncConflicts += conflictCount;
                            rowsApplied++;
                        }
                    }
                    else
                    {
                        context.TotalSyncErrors++;
                        // TODO : Should we break at the first error ?
                        return ChangeApplicationAction.Rollback;
                    }
                }
            }

            // Handle sync progress for this sync adapter (so this table)
            var changedFailed = dmChangesView.Count - rowsApplied;

            // Raise SyncProgress event
            var existAppliedChanges = changesApplied.TableChangesApplied.FirstOrDefault(
                sc => string.Equals(sc.Table.TableName, table.TableName) && sc.State == applyType);

            if (existAppliedChanges == null)
            {
                existAppliedChanges = new TableChangesApplied
                {
                    Table = new DmTableSurrogate(table),
                    Applied = rowsApplied,
                    Failed = changedFailed,
                    State = applyType
                };
                changesApplied.TableChangesApplied.Add(existAppliedChanges);
            }
            else
            {
                existAppliedChanges.Applied += rowsApplied;
                existAppliedChanges.Failed += changedFailed;
            }

            // Progress & Interceptor
            context.SyncStage = SyncStage.TableChangesApplied;
            var tableChangesAppliedArgs = new TableChangesAppliedArgs(context, existAppliedChanges, connection, transaction);
            this.ReportProgress(context, tableChangesAppliedArgs, connection, transaction);
            await this.InterceptAsync(tableChangesAppliedArgs);
        }
    }

    return ChangeApplicationAction.Continue;
}
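// Illustrative sketch (not part of the library source): the conflict loop used in the DmTable-era
// version above, reduced to plain delegates. Each conflict is handed to a resolver; a resolved row
// counts as applied, while a resolution failure aborts the whole table with a rollback signal.
using System;
using System.Collections.Generic;

enum ApplyActionSketch { Continue, Rollback }

static class ConflictLoopSketch
{
    public static (ApplyActionSketch Action, int RowsApplied, int ConflictsResolved) ResolveAll(
        IEnumerable<string> conflicts, Func<string, (bool Resolved, bool Error)> resolver)
    {
        int rowsApplied = 0, conflictsResolved = 0;

        foreach (var conflict in conflicts)
        {
            var (resolved, error) = resolver(conflict);

            if (error)
                return (ApplyActionSketch.Rollback, rowsApplied, conflictsResolved); // abort at the first unrecoverable error

            if (resolved)
            {
                conflictsResolved++;
                rowsApplied++; // a resolved conflict ends up applied
            }
        }

        return (ApplyActionSketch.Continue, rowsApplied, conflictsResolved);
    }

    public static void Main()
    {
        var conflicts = new[] { "row-1", "row-2", "row-3" };
        var (action, applied, resolved) = ResolveAll(conflicts, c => (Resolved: true, Error: false));
        Console.WriteLine($"{action}, applied={applied}, resolved={resolved}"); // Continue, applied=3, resolved=3
    }
}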
/// <summary>
/// Apply changes internal method for one type of query: Insert, Update or Delete for every batch from a table
/// </summary>
private async Task<SyncContext> InternalApplyTableChangesAsync(
    IScopeInfo scopeInfo, SyncContext context, SyncTable schemaTable, MessageApplyChanges message,
    DbConnection connection, DbTransaction transaction, DataRowState applyType, DatabaseChangesApplied changesApplied,
    CancellationToken cancellationToken, IProgress<ProgressArgs> progress)
{
    if (this.Provider == null)
        return context;

    context.SyncStage = SyncStage.ChangesApplying;

    var setupTable = scopeInfo.Setup.Tables[schemaTable.TableName, schemaTable.SchemaName];

    if (setupTable == null)
        return context;

    // Only the table schema is replicated, no data is applied
    if (setupTable.SyncDirection == SyncDirection.None)
        return context;

    // If we are in the upload stage, check that the table is not download only
    if (context.SyncWay == SyncWay.Upload && setupTable.SyncDirection == SyncDirection.DownloadOnly)
        return context;

    // If we are in the download stage, check that the table is not upload only
    if (context.SyncWay == SyncWay.Download && setupTable.SyncDirection == SyncDirection.UploadOnly)
        return context;

    var hasChanges = message.BatchInfo.HasData(schemaTable.TableName, schemaTable.SchemaName);

    // Each table in the messages contains scope columns. Don't forget it
    if (!hasChanges)
        return context;

    // What kind of command to execute
    var init = message.IsNew || context.SyncType != SyncType.Normal;
    DbCommandType dbCommandType = applyType == DataRowState.Deleted
        ? DbCommandType.DeleteRows
        : (init ? DbCommandType.InsertRows : DbCommandType.UpdateRows);

    // Temporary sync table with only writable columns
    var changesSet = schemaTable.Schema.Clone(false);
    var schemaChangesTable = DbSyncAdapter.CreateChangesTable(schemaTable, changesSet);

    // Get the executing adapter
    var syncAdapter = this.GetSyncAdapter(schemaChangesTable, scopeInfo);
    syncAdapter.ApplyType = applyType;

    // Get command
    var (command, isBatch) = await syncAdapter.GetCommandAsync(dbCommandType, connection, transaction);

    if (command == null)
        return context;

    var bpiTables = message.BatchInfo.GetBatchPartsInfo(schemaTable);

    // Launch interceptor if any
    var args = new TableChangesApplyingArgs(context, message.BatchInfo, bpiTables, schemaTable, applyType, command, connection, transaction);
    await this.InterceptAsync(args, progress, cancellationToken).ConfigureAwait(false);

    if (args.Cancel || args.Command == null)
        return context;

    command = args.Command;
    var cmdText = command.CommandText;

    TableChangesApplied tableChangesApplied = null;

    // Conflicts occurred when trying to apply rows
    var conflictRows = new List<SyncRow>();

    var localSerializer = new LocalJsonSerializer();

    // If someone has an interceptor on deserializing, we read the row and intercept
    var interceptorsReading = this.interceptors.GetInterceptors<DeserializingRowArgs>();
    if (interceptorsReading.Count > 0)
    {
        localSerializer.OnReadingRow(async (schemaTable, rowString) =>
        {
            var args = new DeserializingRowArgs(context, schemaTable, rowString);
            await this.InterceptAsync(args, progress, cancellationToken).ConfigureAwait(false);
            return args.Result;
        });
    }

    // I've got all files for my table; count applied rows per bpi
    foreach (var batchPartInfo in bpiTables)
    {
        // Applied rows for this particular BPI
        var appliedRowsTmp = 0;

        // Rows fetched (either of the good state or not) from the BPI
        var rowsFetched = 0;

        // Get the full path of my batchPartInfo
        var fullPath = message.BatchInfo.GetBatchPartInfoPath(batchPartInfo).FullPath;

        // Accumulating rows
        var batchRows = new List<SyncRow>();

        if (isBatch)
        {
            foreach (var syncRow in localSerializer.ReadRowsFromFile(fullPath, schemaChangesTable))
            {
                rowsFetched++;

                // Adding rows to the batch rows
                if (batchRows.Count < this.Provider.BulkBatchMaxLinesCount)
                {
                    if (syncRow.RowState == applyType)
                        batchRows.Add(syncRow);

                    if (rowsFetched < batchPartInfo.RowsCount && batchRows.Count < this.Provider.BulkBatchMaxLinesCount)
                        continue;
                }

                if (batchRows.Count <= 0)
                    continue;

                var failedRows = schemaChangesTable.Schema.Clone().Tables[schemaChangesTable.TableName, schemaChangesTable.SchemaName];

                command.CommandText = cmdText;
                var batchArgs = new RowsChangesApplyingArgs(context, message.BatchInfo, batchRows, schemaChangesTable, applyType, command, connection, transaction);
                await this.InterceptAsync(batchArgs, progress, cancellationToken).ConfigureAwait(false);

                if (batchArgs.Cancel || batchArgs.Command == null || batchArgs.SyncRows == null || batchArgs.SyncRows.Count <= 0)
                    continue;

                // Get the correct pointer to the command from the interceptor, in case the user changed the whole instance
                command = batchArgs.Command;

                await this.InterceptAsync(new DbCommandArgs(context, command, connection, transaction), progress, cancellationToken).ConfigureAwait(false);

                // Execute the batch, through the provider
                await syncAdapter.ExecuteBatchCommandAsync(command, message.SenderScopeId, batchArgs.SyncRows, schemaChangesTable, failedRows, message.LastTimestamp, connection, transaction).ConfigureAwait(false);

                foreach (var failedRow in failedRows.Rows)
                    conflictRows.Add(failedRow);

                // Rows minus failed rows
                appliedRowsTmp += batchRows.Count - failedRows.Rows.Count;

                batchRows.Clear();
            }
        }
        else
        {
            foreach (var syncRow in localSerializer.ReadRowsFromFile(fullPath, schemaChangesTable))
            {
                rowsFetched++;

                if (syncRow.RowState != applyType)
                    continue;

                command.CommandText = cmdText;
                var batchArgs = new RowsChangesApplyingArgs(context, message.BatchInfo, new List<SyncRow> { syncRow }, schemaChangesTable, applyType, command, connection, transaction);
                await this.InterceptAsync(batchArgs, progress, cancellationToken).ConfigureAwait(false);

                if (batchArgs.Cancel || batchArgs.Command == null || batchArgs.SyncRows == null || batchArgs.SyncRows.Count() <= 0)
                    continue;

                // Get the correct pointer to the command from the interceptor, in case the user changed the whole instance
                command = batchArgs.Command;

                // Set the parameter values from the row
                syncAdapter.SetColumnParametersValues(command, batchArgs.SyncRows.First());

                // Set the special parameters for update
                syncAdapter.AddScopeParametersValues(command, message.SenderScopeId, message.LastTimestamp, applyType == DataRowState.Deleted, false);

                await this.InterceptAsync(new DbCommandArgs(context, command, connection, transaction), progress, cancellationToken).ConfigureAwait(false);

                var rowAppliedCount = await command.ExecuteNonQueryAsync().ConfigureAwait(false);

                // Check if we have a return value instead
                var syncRowCountParam = DbSyncAdapter.GetParameter(command, "sync_row_count");

                if (syncRowCountParam != null)
                    rowAppliedCount = (int)syncRowCountParam.Value;

                if (rowAppliedCount > 0)
                    appliedRowsTmp++;
                else
                    conflictRows.Add(syncRow);
            }
        }

        // Conflict rows applied
        int rowsAppliedCount = 0;
        // Conflicts resolved count
        int conflictsResolvedCount = 0;

        // If conflicts occurred
        if (conflictRows.Count > 0)
        {
            foreach (var conflictRow in conflictRows)
            {
                int conflictResolvedCount;
                SyncRow resolvedRow;
                int rowAppliedCount;

                (context, conflictResolvedCount, resolvedRow, rowAppliedCount) =
                    await this.HandleConflictAsync(scopeInfo, context, message.LocalScopeId, message.SenderScopeId, syncAdapter, conflictRow, schemaChangesTable,
                        message.Policy, message.LastTimestamp, connection, transaction, cancellationToken, progress).ConfigureAwait(false);

                conflictsResolvedCount += conflictResolvedCount;
                rowsAppliedCount += rowAppliedCount;
            }

            // Add rows with resolved conflicts
            appliedRowsTmp += rowsAppliedCount;
        }

        // Any failure ?
        var changedFailed = rowsFetched - conflictsResolvedCount - appliedRowsTmp;

        // Only upsert DatabaseChangesApplied if we made an upsert/delete from the batch or resolved any conflict
        if (appliedRowsTmp > 0 || conflictsResolvedCount > 0)
        {
            // We may have multiple batch files, so we can have multiple sync tables with the same name:
            // a syncTable may be contained in several files.
            // That's why we should reuse an applied changes instance if it already exists from a previous batch file.
            tableChangesApplied = changesApplied.TableChangesApplied.FirstOrDefault(tca =>
            {
                var sc = SyncGlobalization.DataSourceStringComparison;

                var sn = tca.SchemaName == null ? string.Empty : tca.SchemaName;
                var otherSn = schemaTable.SchemaName == null ? string.Empty : schemaTable.SchemaName;

                return tca.TableName.Equals(schemaTable.TableName, sc)
                    && sn.Equals(otherSn, sc)
                    && tca.State == applyType;
            });

            if (tableChangesApplied == null)
            {
                tableChangesApplied = new TableChangesApplied
                {
                    TableName = schemaTable.TableName,
                    SchemaName = schemaTable.SchemaName,
                    Applied = appliedRowsTmp,
                    ResolvedConflicts = conflictsResolvedCount,
                    Failed = changedFailed,
                    State = applyType,
                    TotalRowsCount = message.BatchInfo.RowsCount,
                    TotalAppliedCount = changesApplied.TotalAppliedChanges + appliedRowsTmp
                };
                changesApplied.TableChangesApplied.Add(tableChangesApplied);
            }
            else
            {
                tableChangesApplied.Applied += appliedRowsTmp;
                tableChangesApplied.TotalAppliedCount = changesApplied.TotalAppliedChanges;
                tableChangesApplied.ResolvedConflicts += conflictsResolvedCount;
                tableChangesApplied.Failed += changedFailed;
            }

            // We've got 0.25% to fill here
            var progresspct = appliedRowsTmp * 0.25d / tableChangesApplied.TotalRowsCount;
            context.ProgressPercentage += progresspct;
        }
    }

    schemaChangesTable.Dispose();
    schemaChangesTable = null;
    changesSet.Dispose();
    changesSet = null;

    // Report the overall changes applied for the current table
    if (tableChangesApplied != null)
    {
        var tableChangesAppliedArgs = new TableChangesAppliedArgs(context, tableChangesApplied, connection, transaction);

        // We don't report progress if we have not applied any changes on the table, to limit the verbosity of Progress
        await this.InterceptAsync(tableChangesAppliedArgs, progress, cancellationToken).ConfigureAwait(false);
    }

    if (command != null)
        command.Dispose();

    return context;
}
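// Illustrative sketch (not part of the library source): the chunking pattern used above when
// isBatch is true, reduced to plain types. Rows of the requested state are buffered until a maximum
// batch size (standing in for BulkBatchMaxLinesCount) or the end of the file is reached, then the
// buffer is flushed in one call; the final partial buffer is flushed as well.
using System;
using System.Collections.Generic;

static class ChunkedApplySketch
{
    // flush stands in for ExecuteBatchCommandAsync; it returns the number of rows that failed
    public static int ApplyInChunks(IEnumerable<(int Id, bool MatchesApplyType)> rows, int totalRowsCount,
        int maxBatchSize, Func<List<int>, int> flush)
    {
        var buffer = new List<int>();
        int applied = 0, fetched = 0;

        foreach (var row in rows)
        {
            fetched++;

            if (row.MatchesApplyType)
                buffer.Add(row.Id);

            // Keep buffering while there are rows left in the file and the buffer is not full
            if (fetched < totalRowsCount && buffer.Count < maxBatchSize)
                continue;

            if (buffer.Count == 0)
                continue;

            var failed = flush(buffer);        // apply the whole buffer at once
            applied += buffer.Count - failed;  // rows minus failed rows
            buffer.Clear();
        }

        return applied;
    }

    public static void Main()
    {
        var rows = new List<(int, bool)>();
        for (var i = 1; i <= 10; i++)
            rows.Add((i, i % 2 == 0)); // only even ids match the apply type

        // Batches of at most 3 matching rows; pretend one row per flush fails
        var applied = ApplyInChunks(rows, rows.Count, 3, batch => 1);
        Console.WriteLine(applied); // 5 matching rows, 2 flushes, 2 failures => 3 applied
    }
}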
/// <summary>
/// Apply changes internal method for one type of query: Insert, Update or Delete for every batch from a table
/// </summary>
private async Task InternalApplyTableChangesAsync(
    SyncContext context, SyncTable schemaTable, MessageApplyChanges message,
    DbConnection connection, DbTransaction transaction, DataRowState applyType, DatabaseChangesApplied changesApplied,
    CancellationToken cancellationToken, IProgress<ProgressArgs> progress)
{
    // If we are in the upload stage, check that the table is not download only
    if (context.SyncWay == SyncWay.Upload && schemaTable.SyncDirection == SyncDirection.DownloadOnly)
        return;

    // If we are in the download stage, check that the table is not upload only
    if (context.SyncWay == SyncWay.Download && schemaTable.SyncDirection == SyncDirection.UploadOnly)
        return;

    var hasChanges = message.Changes.HasData(schemaTable.TableName, schemaTable.SchemaName);

    // Each table in the messages contains scope columns. Don't forget it
    if (hasChanges)
    {
        // Launch interceptor if any
        var args = new TableChangesApplyingArgs(context, schemaTable, applyType, connection, transaction);
        await this.InterceptAsync(args, cancellationToken).ConfigureAwait(false);

        if (args.Cancel)
            return;

        TableChangesApplied tableChangesApplied = null;

        var enumerableOfTables = message.Changes.GetTableAsync(schemaTable.TableName, schemaTable.SchemaName, this);
        var enumeratorOfTable = enumerableOfTables.GetAsyncEnumerator();

        // Getting the table to be applied.
        // We may have multiple batch files, so we can have multiple sync tables with the same name:
        // a sync table may be contained in several files.
        while (await enumeratorOfTable.MoveNextAsync())
        {
            var syncTable = enumeratorOfTable.Current;

            if (syncTable == null || syncTable.Rows == null || syncTable.Rows.Count == 0)
                continue;

            // Creating a filtered view of my rows with the correct applyType
            var filteredRows = syncTable.Rows.Where(r => r.RowState == applyType);

            // No filtered rows, go to the next container table
            if (filteredRows.Count() == 0)
                continue;

            // Create an empty set that will contain the filtered rows to apply.
            // We need the schema for culture & case sensitive properties.
            var changesSet = syncTable.Schema.Clone(false);
            var schemaChangesTable = syncTable.Clone();
            changesSet.Tables.Add(schemaChangesTable);
            schemaChangesTable.Rows.AddRange(filteredRows.ToList());

            // Should we use bulk operations ?
            var usBulk = message.UseBulkOperations && this.Provider.SupportBulkOperations;

            // Apply the changes batch
            var (rowsApplied, conflictsResolvedCount) = await this.InternalApplyChangesBatchAsync(context, usBulk, schemaChangesTable, message, applyType, connection, transaction, cancellationToken).ConfigureAwait(false);

            // Any failure ?
            var changedFailed = filteredRows.Count() - conflictsResolvedCount - rowsApplied;

            // We may have multiple batch files, so we can have multiple sync tables with the same name:
            // a syncTable may be contained in several files.
            // That's why we should reuse an applied changes instance if it already exists from a previous batch file.
            tableChangesApplied = changesApplied.TableChangesApplied.FirstOrDefault(tca =>
            {
                var sc = SyncGlobalization.DataSourceStringComparison;

                var sn = tca.SchemaName == null ? string.Empty : tca.SchemaName;
                var otherSn = schemaTable.SchemaName == null ? string.Empty : schemaTable.SchemaName;

                return tca.TableName.Equals(schemaTable.TableName, sc)
                    && sn.Equals(otherSn, sc)
                    && tca.State == applyType;
            });

            if (tableChangesApplied == null)
            {
                tableChangesApplied = new TableChangesApplied
                {
                    TableName = schemaTable.TableName,
                    SchemaName = schemaTable.SchemaName,
                    Applied = rowsApplied,
                    ResolvedConflicts = conflictsResolvedCount,
                    Failed = changedFailed,
                    State = applyType,
                    TotalRowsCount = message.Changes.RowsCount,
                    TotalAppliedCount = changesApplied.TotalAppliedChanges + rowsApplied
                };
                changesApplied.TableChangesApplied.Add(tableChangesApplied);
            }
            else
            {
                tableChangesApplied.Applied += rowsApplied;
                tableChangesApplied.TotalAppliedCount = changesApplied.TotalAppliedChanges;
                tableChangesApplied.ResolvedConflicts += conflictsResolvedCount;
                tableChangesApplied.Failed += changedFailed;
            }

            // We've got 0.25% to fill here
            var progresspct = rowsApplied * 0.25d / tableChangesApplied.TotalRowsCount;
            context.ProgressPercentage += progresspct;

            var tableChangesBatchAppliedArgs = new TableChangesBatchAppliedArgs(context, tableChangesApplied, connection, transaction);

            // Report the batch changes applied.
            // We don't report progress if we have not applied any changes on the table, to limit the verbosity of Progress
            if (tableChangesBatchAppliedArgs.TableChangesApplied.Applied > 0 || tableChangesBatchAppliedArgs.TableChangesApplied.Failed > 0 || tableChangesBatchAppliedArgs.TableChangesApplied.ResolvedConflicts > 0)
            {
                await this.InterceptAsync(tableChangesBatchAppliedArgs, cancellationToken).ConfigureAwait(false);
                this.ReportProgress(context, progress, tableChangesBatchAppliedArgs, connection, transaction);
            }
        }

        // Report the overall changes applied for the current table
        if (tableChangesApplied != null)
        {
            var tableChangesAppliedArgs = new TableChangesAppliedArgs(context, tableChangesApplied, connection, transaction);

            // We don't report progress if we have not applied any changes on the table, to limit the verbosity of Progress
            if (tableChangesAppliedArgs.TableChangesApplied.Applied > 0 || tableChangesAppliedArgs.TableChangesApplied.Failed > 0 || tableChangesAppliedArgs.TableChangesApplied.ResolvedConflicts > 0)
                await this.InterceptAsync(tableChangesAppliedArgs, cancellationToken).ConfigureAwait(false);
        }
    }
}
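// Illustrative sketch (not part of the library source): the progress arithmetic used above. The
// apply phase is given a fixed share of the overall progress (0.25 in these versions), and each
// batch advances the percentage in proportion to the rows it applied out of the table's total count.
using System;

static class ProgressSketch
{
    public static void Main()
    {
        const double applyPhaseShare = 0.25d; // share of overall progress reserved for applying changes
        double progressPercentage = 0.50d;    // e.g. earlier stages already account for 50%

        var totalRowsCount = 1000;            // total rows to apply for this table
        foreach (var rowsAppliedInBatch in new[] { 400, 400, 200 })
        {
            progressPercentage += rowsAppliedInBatch * applyPhaseShare / totalRowsCount;
            Console.WriteLine(progressPercentage);
        }
        // Roughly 0.60, 0.70, 0.75: once all 1000 rows are applied, the full 0.25 share has been added.
    }
}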
/// <summary>
/// Apply changes internal method for one Insert or Update or Delete for every dbSyncAdapter
/// </summary>
internal async Task<ChangeApplicationAction> ApplyChangesInternalAsync(
    SyncTable schemaTable,
    SyncContext context,
    MessageApplyChanges message,
    DbConnection connection,
    DbTransaction transaction,
    DataRowState applyType,
    DatabaseChangesApplied changesApplied,
    CancellationToken cancellationToken,
    IProgress<ProgressArgs> progress = null)
{
    // If we are in the upload stage, check that the table is not download only
    if (context.SyncWay == SyncWay.Upload && schemaTable.SyncDirection == SyncDirection.DownloadOnly)
        return ChangeApplicationAction.Continue;

    // If we are in the download stage, check that the table is not upload only
    if (context.SyncWay == SyncWay.Download && schemaTable.SyncDirection == SyncDirection.UploadOnly)
        return ChangeApplicationAction.Continue;

    var builder = this.GetTableBuilder(schemaTable);
    var syncAdapter = builder.CreateSyncAdapter(connection, transaction);

    syncAdapter.ApplyType = applyType;

    var hasChanges = await message.Changes.HasDataAsync();

    // Each table in the messages contains scope columns. Don't forget it
    if (hasChanges)
    {
        // Getting the table to be applied.
        // We may have multiple batch files, so we can have multiple sync tables with the same name:
        // a sync table may be contained in several files.
        foreach (var syncTable in message.Changes.GetTable(schemaTable.TableName, schemaTable.SchemaName))
        {
            if (syncTable == null || syncTable.Rows == null || syncTable.Rows.Count == 0)
                continue;

            // Creating a filtered view of my rows with the correct applyType
            var filteredRows = syncTable.Rows.Where(r => r.RowState == applyType);

            // No filtered rows, go to the next container table
            if (filteredRows.Count() == 0)
                continue;

            // Conflicts occurred when trying to apply rows
            var conflicts = new List<SyncConflict>();

            context.SyncStage = SyncStage.TableChangesApplying;

            // Launch any interceptor if available
            await this.InterceptAsync(new TableChangesApplyingArgs(context, filteredRows, schemaTable, applyType, connection, transaction)).ConfigureAwait(false);

            // Create an empty set that will contain the filtered rows to apply.
            // We need the schema for culture & case sensitive properties.
            var changesSet = syncTable.Schema.Clone(false);
            var schemaChangesTable = syncTable.Clone();
            changesSet.Tables.Add(schemaChangesTable);
            schemaChangesTable.Rows.AddRange(filteredRows.ToList());

            int rowsApplied = 0;

            if (message.UseBulkOperations && this.SupportBulkOperations)
                rowsApplied = syncAdapter.ApplyBulkChanges(message.LocalScopeId, message.SenderScopeId, schemaChangesTable, message.LastTimestamp, conflicts);
            else
                rowsApplied = syncAdapter.ApplyChanges(message.LocalScopeId, message.SenderScopeId, schemaChangesTable, message.LastTimestamp, conflicts);

            // Resolving conflicts
            (var changeApplicationAction, var conflictRowsApplied) =
                await ResolveConflictsAsync(context, message.LocalScopeId, message.SenderScopeId, syncAdapter, conflicts, message, connection, transaction).ConfigureAwait(false);

            if (changeApplicationAction == ChangeApplicationAction.Rollback)
                return ChangeApplicationAction.Rollback;

            // Add conflict rows that are correctly resolved, as applied
            rowsApplied += conflictRowsApplied;

            // Handle sync progress for this sync adapter (so this table)
            var changedFailed = filteredRows.Count() - rowsApplied;

            // Raise SyncProgress event
            var existAppliedChanges = changesApplied.TableChangesApplied.FirstOrDefault(
                sc => string.Equals(sc.Table.TableName, schemaTable.TableName, SyncGlobalization.DataSourceStringComparison) && sc.State == applyType);

            if (existAppliedChanges == null)
            {
                existAppliedChanges = new TableChangesApplied
                {
                    Table = schemaTable,
                    Applied = rowsApplied,
                    Failed = changedFailed,
                    State = applyType
                };
                changesApplied.TableChangesApplied.Add(existAppliedChanges);
            }
            else
            {
                existAppliedChanges.Applied += rowsApplied;
                existAppliedChanges.Failed += changedFailed;
            }

            // Progress & Interceptor
            context.SyncStage = SyncStage.TableChangesApplied;
            var tableChangesAppliedArgs = new TableChangesAppliedArgs(context, existAppliedChanges, connection, transaction);
            this.ReportProgress(context, progress, tableChangesAppliedArgs, connection, transaction);
            await this.InterceptAsync(tableChangesAppliedArgs).ConfigureAwait(false);
        }
    }

    return ChangeApplicationAction.Continue;
}
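// Illustrative sketch (not part of the library source): the direction guard shared by all of the
// methods above, expressed as a small predicate over simplified enums. A download-only table is
// skipped during upload, an upload-only table is skipped during download, and (in the later
// versions) a table with a SyncDirection of None never has data applied at all.
using System;

enum SyncWaySketch { Upload, Download }
enum SyncDirectionSketch { Bidirectional, DownloadOnly, UploadOnly, None }

static class DirectionGuardSketch
{
    public static bool ShouldApply(SyncWaySketch way, SyncDirectionSketch direction)
    {
        if (direction == SyncDirectionSketch.None)
            return false; // schema only, no data
        if (way == SyncWaySketch.Upload && direction == SyncDirectionSketch.DownloadOnly)
            return false;
        if (way == SyncWaySketch.Download && direction == SyncDirectionSketch.UploadOnly)
            return false;
        return true;
    }

    public static void Main()
    {
        Console.WriteLine(ShouldApply(SyncWaySketch.Upload, SyncDirectionSketch.DownloadOnly));   // False
        Console.WriteLine(ShouldApply(SyncWaySketch.Download, SyncDirectionSketch.DownloadOnly)); // True
        Console.WriteLine(ShouldApply(SyncWaySketch.Upload, SyncDirectionSketch.Bidirectional));  // True
    }
}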