Example #1
        /// <summary>
        /// Load the Batch part info in memory, in a SyncTable
        /// </summary>
        internal virtual Task<(SyncContext context, SyncTable syncTable)> InternalLoadTableFromBatchInfoAsync(
            SyncContext context, BatchInfo batchInfo, string tableName, string schemaName = default, DataRowState? dataRowState = default)
        {
            if (batchInfo == null || batchInfo.SanitizedSchema == null)
            {
                return Task.FromResult<(SyncContext, SyncTable)>((context, null));
            }

            // Get the sanitized table (without any readonly / non-updatable columns) from the batch info
            var schemaTable = batchInfo.SanitizedSchema.Tables[tableName, schemaName];
            var table       = schemaTable.Clone();

            var localSerializer = new LocalJsonSerializer();

            var interceptorsReading = this.interceptors.GetInterceptors <DeserializingRowArgs>();

            if (interceptorsReading.Count > 0)
            {
                localSerializer.OnReadingRow(async (schemaTable, rowString) =>
                {
                    var args = new DeserializingRowArgs(context, schemaTable, rowString);
                    await this.InterceptAsync(args).ConfigureAwait(false);
                    return args.Result;
                });
            }
            // Get all the BPIs containing this table
            foreach (var bpi in batchInfo.GetBatchPartsInfo(tableName, schemaName))
            {
                // Get full path of my batchpartinfo
                var fullPath = batchInfo.GetBatchPartInfoPath(bpi).FullPath;

                if (!File.Exists(fullPath))
                {
                    continue;
                }

                if (bpi.Tables == null || bpi.Tables.Count() < 1)
                {
                    return Task.FromResult<(SyncContext, SyncTable)>((context, null));
                }

                foreach (var syncRow in localSerializer.ReadRowsFromFile(fullPath, schemaTable))
                {
                    if (!dataRowState.HasValue || dataRowState == default || syncRow.RowState == dataRowState)
                    {
                        table.Rows.Add(syncRow);
                    }
                }
            }


            return Task.FromResult((context, table));
        }
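For reference, here is a minimal, self-contained sketch of the same streaming-and-filtering idea using only BCL types: rows are read lazily from a newline-delimited JSON file and only those matching an optional row-state filter are kept, mirroring the dataRowState check above. The RowState, JsonRow and BatchFileReader names are illustrative stand-ins, not the library's SyncRow/SyncTable/LocalJsonSerializer types.

using System.Collections.Generic;
using System.IO;
using System.Text.Json;

public enum RowState { Unchanged, Added, Modified, Deleted }

public sealed record JsonRow(RowState State, object[] Values);

public static class BatchFileReader
{
    // Lazily stream rows from a newline-delimited JSON file and keep only those
    // matching the optional state filter (null means "keep everything").
    public static IEnumerable<JsonRow> ReadRows(string fullPath, RowState? stateFilter = null)
    {
        if (!File.Exists(fullPath))
            yield break;

        foreach (var line in File.ReadLines(fullPath))
        {
            if (string.IsNullOrWhiteSpace(line))
                continue;

            var row = JsonSerializer.Deserialize<JsonRow>(line);

            if (row != null && (!stateFilter.HasValue || row.State == stateFilter.Value))
                yield return row;
        }
    }
}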
Example #2
        /// <summary>
        /// Load the Batch part info in memory, in a SyncTable
        /// </summary>
        internal Task<SyncTable> InternalLoadTableFromBatchPartInfoAsync(SyncContext context, BatchInfo batchInfo, BatchPartInfo batchPartInfo, DataRowState? dataRowState = default)
        {
            if (batchInfo == null || batchInfo.SanitizedSchema == null)
            {
                return Task.FromResult<SyncTable>(null);
            }

            var localSerializer = new LocalJsonSerializer();

            // Get full path of my batchpartinfo
            var fullPath = batchInfo.GetBatchPartInfoPath(batchPartInfo).FullPath;

            if (!File.Exists(fullPath))
            {
                return Task.FromResult<SyncTable>(null);
            }

            if (batchPartInfo.Tables == null || batchPartInfo.Tables.Count() < 1)
            {
                return Task.FromResult<SyncTable>(null);
            }

            var schemaTable = batchInfo.SanitizedSchema.Tables[batchPartInfo.Tables[0].TableName, batchPartInfo.Tables[0].SchemaName];

            var table = schemaTable.Clone();

            var interceptorsReading = this.interceptors.GetInterceptors <DeserializingRowArgs>();

            if (interceptorsReading.Count > 0)
            {
                localSerializer.OnReadingRow(async (schemaTable, rowString) =>
                {
                    var args = new DeserializingRowArgs(context, schemaTable, rowString);
                    await this.InterceptAsync(args).ConfigureAwait(false);
                    return args.Result;
                });
            }

            foreach (var syncRow in localSerializer.ReadRowsFromFile(fullPath, schemaTable))
            {
                if (!dataRowState.HasValue || dataRowState == default || syncRow.RowState == dataRowState)
                {
                    table.Rows.Add(syncRow);
                }
            }

            return Task.FromResult(table);
        }
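Both loaders also show the OnReadingRow callback pattern: an optional async hook that can rewrite, or veto, each serialized row before it is materialized. A stripped-down sketch of that pattern is below; HookedLineReader and its members are illustrative names, not the library's LocalJsonSerializer API.

using System;
using System.Collections.Generic;
using System.IO;
using System.Threading.Tasks;

public sealed class HookedLineReader
{
    private Func<string, Task<string>> onReadingLine;

    // Register an async callback that is invoked for every raw line read from disk.
    public void OnReadingLine(Func<string, Task<string>> hook) => this.onReadingLine = hook;

    public async IAsyncEnumerable<string> ReadLinesAsync(string fullPath)
    {
        foreach (var line in File.ReadLines(fullPath))
        {
            var result = line;

            if (this.onReadingLine != null)
            {
                // The hook may rewrite the line, or return null to skip it entirely.
                var transformed = await this.onReadingLine(line).ConfigureAwait(false);
                if (transformed == null)
                    continue;

                result = transformed;
            }

            yield return result;
        }
    }
}

A caller would register the hook once and then enumerate ReadLinesAsync with await foreach, which is essentially what the interceptorsReading branch above does with DeserializingRowArgs.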
Example #3
        internal async Task<(SyncContext context, ServerSyncChanges serverSyncChanges, DatabaseChangesApplied clientChangesApplied, ConflictResolutionPolicy serverResolutionPolicy)>
        InternalApplyThenGetChangesAsync(ClientScopeInfo clientScopeInfo, SyncContext context, BatchInfo clientBatchInfo, DbConnection connection = default, DbTransaction transaction = default, CancellationToken cancellationToken = default, IProgress<ProgressArgs> progress = null)
        {
            await using var runner = await this.GetConnectionAsync(context, SyncMode.Reading, SyncStage.ChangesApplying, connection, transaction, cancellationToken, progress).ConfigureAwait(false);

            SyncSet         schema;
            ServerScopeInfo serverScopeInfo;

            // Is this something that could happen?
            if (clientScopeInfo.Schema == null)
            {
                // Make a remote call to get Schema from remote provider
                (context, serverScopeInfo) = await this.InternalGetServerScopeInfoAsync(
                    context, null, runner.Connection, runner.Transaction, runner.CancellationToken, runner.Progress).ConfigureAwait(false);

                schema = serverScopeInfo.Schema;
            }
            else
            {
                schema = clientScopeInfo.Schema;
            }

            schema.EnsureSchema();

            // If we don't have any BatchPartsInfo, generate an empty one so we have at least something to send to the server
            // and get a response with new data from the server
            if (clientBatchInfo == null)
            {
                clientBatchInfo = new BatchInfo(schema);
            }

            // --------------------------------------------------------------
            // STEP 1 : Send everything to the server side
            // --------------------------------------------------------------

            HttpResponseMessage response = null;

            // If not in memory and BatchPartsInfo.Count == 0, nothing to send.
            // But we need to send something, so generate a little batch part
            if (clientBatchInfo.BatchPartsInfo.Count == 0)
            {
                var changesToSend = new HttpMessageSendChangesRequest(context, clientScopeInfo);

                var containerSet = new ContainerSet();
                changesToSend.Changes     = containerSet;
                changesToSend.IsLastBatch = true;
                changesToSend.BatchIndex  = 0;
                changesToSend.BatchCount  = clientBatchInfo.BatchPartsInfo == null ? 0 : clientBatchInfo.BatchPartsInfo.Count;
                var inMemoryRowsCount = changesToSend.Changes.RowsCount();

                context.ProgressPercentage += 0.125;

                await this.InterceptAsync(new HttpSendingClientChangesRequestArgs(changesToSend, inMemoryRowsCount, inMemoryRowsCount, this.GetServiceHost()), progress, cancellationToken).ConfigureAwait(false);

                // serialize message
                var serializer = this.SerializerFactory.GetSerializer <HttpMessageSendChangesRequest>();
                var binaryData = await serializer.SerializeAsync(changesToSend);

                response = await this.httpRequestHandler.ProcessRequestAsync
                               (this.HttpClient, context, this.ServiceUri, binaryData, HttpStep.SendChangesInProgress,
                               this.SerializerFactory, this.Converter, this.Options.BatchSize, this.SyncPolicy, cancellationToken, progress).ConfigureAwait(false);
            }
            else
            {
                int tmpRowsSendedCount = 0;

                // For each part, send it to the remote;
                // once finished, return the context
                var initialPctProgress1 = context.ProgressPercentage;
                var localSerializer     = new LocalJsonSerializer();

                var interceptorsReading = this.interceptors.GetInterceptors <DeserializingRowArgs>();
                if (interceptorsReading.Count > 0)
                {
                    localSerializer.OnReadingRow(async (schemaTable, rowString) =>
                    {
                        var args = new DeserializingRowArgs(context, schemaTable, rowString);
                        await this.InterceptAsync(args);
                        return args.Result;
                    });
                }
                foreach (var bpi in clientBatchInfo.BatchPartsInfo.OrderBy(bpi => bpi.Index))
                {
                    // Get the updatable schema for the only table contained in the batchpartinfo
                    var schemaTable = DbSyncAdapter.CreateChangesTable(schema.Tables[bpi.Tables[0].TableName, bpi.Tables[0].SchemaName]);

                    // Generate the ContainerSet containing the rows to send to the server
                    var containerSet   = new ContainerSet();
                    var containerTable = new ContainerTable(schemaTable);
                    var fullPath       = Path.Combine(clientBatchInfo.GetDirectoryFullPath(), bpi.FileName);
                    containerSet.Tables.Add(containerTable);

                    // read rows from file
                    foreach (var row in localSerializer.ReadRowsFromFile(fullPath, schemaTable))
                    {
                        containerTable.Rows.Add(row.ToArray());
                    }

                    // Call the converter if needed
                    if (this.Converter != null && containerTable.HasRows)
                    {
                        BeforeSerializeRows(containerTable, schemaTable, this.Converter);
                    }

                    // Create the send changes request
                    var changesToSend = new HttpMessageSendChangesRequest(context, clientScopeInfo)
                    {
                        Changes     = containerSet,
                        IsLastBatch = bpi.IsLastBatch,
                        BatchIndex  = bpi.Index,
                        BatchCount  = clientBatchInfo.BatchPartsInfo.Count
                    };

                    tmpRowsSendedCount += containerTable.Rows.Count;

                    context.ProgressPercentage = initialPctProgress1 + ((changesToSend.BatchIndex + 1) * 0.2d / changesToSend.BatchCount);
                    await this.InterceptAsync(new HttpSendingClientChangesRequestArgs(changesToSend, tmpRowsSendedCount, clientBatchInfo.RowsCount, this.GetServiceHost()), progress, cancellationToken).ConfigureAwait(false);

                    // serialize message
                    var serializer = this.SerializerFactory.GetSerializer <HttpMessageSendChangesRequest>();
                    var binaryData = await serializer.SerializeAsync(changesToSend);

                    response = await this.httpRequestHandler.ProcessRequestAsync
                                   (this.HttpClient, context, this.ServiceUri, binaryData, HttpStep.SendChangesInProgress,
                                   this.SerializerFactory, this.Converter, this.Options.BatchSize, this.SyncPolicy, cancellationToken, progress).ConfigureAwait(false);

                    // See #721 for issue and #721 for PR from slagtejn
                    if (!bpi.IsLastBatch)
                    {
                        response.Dispose();
                    }
                }
            }

            // --------------------------------------------------------------
            // STEP 2 : Receive everything from the server side
            // --------------------------------------------------------------

            // Now that we have sent all the data to the server:
            // We have a FIRST response from the server with new data
            // 1) It could be the only response
            // 2) It could be the first response, and we need to download all the remaining batches

            context.SyncStage = SyncStage.ChangesSelecting;
            var initialPctProgress = 0.55;

            context.ProgressPercentage = initialPctProgress;

            // Create the BatchInfo
            var serverBatchInfo = new BatchInfo(schema);

            HttpMessageSummaryResponse summaryResponseContent = null;

            // Deserialize response incoming from server
            using (var streamResponse = await response.Content.ReadAsStreamAsync().ConfigureAwait(false))
            {
                var responseSerializer = this.SerializerFactory.GetSerializer <HttpMessageSummaryResponse>();
                summaryResponseContent = await responseSerializer.DeserializeAsync(streamResponse);
            }

            serverBatchInfo.RowsCount = summaryResponseContent.BatchInfo.RowsCount;
            serverBatchInfo.Timestamp = summaryResponseContent.RemoteClientTimestamp;
            context = summaryResponseContent.SyncContext;

            if (summaryResponseContent.BatchInfo.BatchPartsInfo != null)
            {
                foreach (var bpi in summaryResponseContent.BatchInfo.BatchPartsInfo)
                {
                    serverBatchInfo.BatchPartsInfo.Add(bpi);
                }
            }


            // From here, we need to serialize everything on disk

            // Generate the batch directory
            var batchDirectoryRoot = this.Options.BatchDirectory;
            var batchDirectoryName = string.Concat(DateTime.UtcNow.ToString("yyyy_MM_dd_ss"), Path.GetRandomFileName().Replace(".", ""));

            serverBatchInfo.DirectoryRoot = batchDirectoryRoot;
            serverBatchInfo.DirectoryName = batchDirectoryName;

            if (!Directory.Exists(serverBatchInfo.GetDirectoryFullPath()))
            {
                Directory.CreateDirectory(serverBatchInfo.GetDirectoryFullPath());
            }

            // Raise the batches-downloading event for the download process that is about to occur
            await this.InterceptAsync(new HttpBatchesDownloadingArgs(context, serverBatchInfo, this.GetServiceHost()), progress, cancellationToken).ConfigureAwait(false);

            // function used to download one part
            var dl = new Func<BatchPartInfo, Task>(async (bpi) =>
            {
                if (cancellationToken.IsCancellationRequested)
                {
                    return;
                }

                var changesToSend3 = new HttpMessageGetMoreChangesRequest(context, bpi.Index);

                var serializer3 = this.SerializerFactory.GetSerializer <HttpMessageGetMoreChangesRequest>();
                var binaryData3 = await serializer3.SerializeAsync(changesToSend3).ConfigureAwait(false);
                var step3       = HttpStep.GetMoreChanges;

                await this.InterceptAsync(new HttpGettingServerChangesRequestArgs(bpi.Index, serverBatchInfo.BatchPartsInfo.Count, summaryResponseContent.SyncContext, this.GetServiceHost()), progress, cancellationToken).ConfigureAwait(false);

                // Raise get changes request
                context.ProgressPercentage = initialPctProgress + ((bpi.Index + 1) * 0.2d / serverBatchInfo.BatchPartsInfo.Count);

                var response = await this.httpRequestHandler.ProcessRequestAsync(
                    this.HttpClient, context, this.ServiceUri, binaryData3, step3,
                    this.SerializerFactory, this.Converter, 0, this.SyncPolicy, cancellationToken, progress).ConfigureAwait(false);

                if (this.SerializerFactory.Key != "json")
                {
                    var webSerializer        = this.SerializerFactory.GetSerializer <HttpMessageSendChangesResponse>();
                    using var responseStream = await response.Content.ReadAsStreamAsync().ConfigureAwait(false);
                    var getMoreChanges       = await webSerializer.DeserializeAsync(responseStream);

                    context = getMoreChanges.SyncContext;

                    if (getMoreChanges != null && getMoreChanges.Changes != null && getMoreChanges.Changes.HasRows)
                    {
                        var localSerializer = new LocalJsonSerializer();

                        var interceptorsWriting = this.interceptors.GetInterceptors <SerializingRowArgs>();
                        if (interceptorsWriting.Count > 0)
                        {
                            localSerializer.OnWritingRow(async (syncTable, rowArray) =>
                            {
                                var args = new SerializingRowArgs(context, syncTable, rowArray);
                                await this.InterceptAsync(args, progress, cancellationToken).ConfigureAwait(false);
                                return args.Result;
                            });
                        }


                        // Should have only one table
                        var table       = getMoreChanges.Changes.Tables[0];
                        var schemaTable = DbSyncAdapter.CreateChangesTable(schema.Tables[table.TableName, table.SchemaName]);

                        var fullPath = Path.Combine(serverBatchInfo.GetDirectoryFullPath(), bpi.FileName);

                        // open the file and write table header
                        await localSerializer.OpenFileAsync(fullPath, schemaTable).ConfigureAwait(false);

                        foreach (var row in table.Rows)
                        {
                            await localSerializer.WriteRowToFileAsync(new SyncRow(schemaTable, row), schemaTable).ConfigureAwait(false);
                        }

                        // Close file
                        await localSerializer.CloseFileAsync(fullPath, schemaTable).ConfigureAwait(false);
                    }
                }
                else
                {
                    // Serialize
                    await SerializeAsync(response, bpi.FileName, serverBatchInfo.GetDirectoryFullPath(), this).ConfigureAwait(false);
                }

                // Raise response from server containing a batch changes
                await this.InterceptAsync(new HttpGettingServerChangesResponseArgs(serverBatchInfo, bpi.Index, bpi.RowsCount, summaryResponseContent.SyncContext, this.GetServiceHost()), progress, cancellationToken).ConfigureAwait(false);
            });

            // Parallel download of all BPIs (which will trigger the directory deletion on the server side)
            await serverBatchInfo.BatchPartsInfo.ForEachAsync(bpi => dl(bpi), this.MaxDownladingDegreeOfParallelism).ConfigureAwait(false);

            // Send order of end of download
            var lastBpi = serverBatchInfo.BatchPartsInfo.FirstOrDefault(bpi => bpi.IsLastBatch);

            if (lastBpi != null)
            {
                var endOfDownloadChanges = new HttpMessageGetMoreChangesRequest(context, lastBpi.Index);

                var serializerEndOfDownloadChanges = this.SerializerFactory.GetSerializer <HttpMessageGetMoreChangesRequest>();
                var binaryData3 = await serializerEndOfDownloadChanges.SerializeAsync(endOfDownloadChanges).ConfigureAwait(false);

                var endResponse = await this.httpRequestHandler.ProcessRequestAsync(
                    this.HttpClient, context, this.ServiceUri, binaryData3, HttpStep.SendEndDownloadChanges,
                    this.SerializerFactory, this.Converter, 0, this.SyncPolicy, cancellationToken, progress).ConfigureAwait(false);

                // Deserialize response incoming from server
                // This is the last response
                // Should contain the step HttpStep.SendEndDownloadChanges
                using var streamResponse = await endResponse.Content.ReadAsStreamAsync().ConfigureAwait(false);

                var endResponseSerializer = this.SerializerFactory.GetSerializer <HttpMessageSendChangesResponse>();
                var endResponseContent    = await endResponseSerializer.DeserializeAsync(streamResponse);

                context = endResponseContent.SyncContext;
            }

            // generate the new scope item
            this.CompleteTime = DateTime.UtcNow;

            await this.InterceptAsync(new HttpBatchesDownloadedArgs(summaryResponseContent, summaryResponseContent.SyncContext, this.GetServiceHost()), progress, cancellationToken).ConfigureAwait(false);

            var serverSyncChanges = new ServerSyncChanges(
                summaryResponseContent.RemoteClientTimestamp,
                serverBatchInfo,
                summaryResponseContent.ServerChangesSelected
                );


            return (context, serverSyncChanges, summaryResponseContent.ClientChangesApplied, summaryResponseContent.ConflictResolutionPolicy);
        }
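The parallel download above goes through a ForEachAsync extension that caps how many batch parts are in flight at once (MaxDownladingDegreeOfParallelism). The library ships its own implementation; purely as an illustration of the bounded-parallelism idea, a SemaphoreSlim-based helper could look like this:

using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;

public static class ParallelExtensions
{
    // Run one async body per item, with at most maxDegree bodies in flight at a time.
    public static async Task ForEachAsync<T>(this IEnumerable<T> source, Func<T, Task> body, int maxDegree)
    {
        using var throttler = new SemaphoreSlim(maxDegree);

        var tasks = source.Select(async item =>
        {
            // Wait for a free slot before starting this item.
            await throttler.WaitAsync().ConfigureAwait(false);
            try
            {
                await body(item).ConfigureAwait(false);
            }
            finally
            {
                // Free the slot so the next pending item can start.
                throttler.Release();
            }
        }).ToList();

        await Task.WhenAll(tasks).ConfigureAwait(false);
    }
}

On .NET 6 and later, Parallel.ForEachAsync with ParallelOptions.MaxDegreeOfParallelism offers similar behavior out of the box.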
Example #4
        /// <summary>
        /// Apply changes internal method for one type of query: Insert, Update or Delete for every batch from a table
        /// </summary>
        private async Task<SyncContext> InternalApplyTableChangesAsync(
            IScopeInfo scopeInfo, SyncContext context, SyncTable schemaTable, MessageApplyChanges message,
            DbConnection connection, DbTransaction transaction, DataRowState applyType, DatabaseChangesApplied changesApplied,
            CancellationToken cancellationToken, IProgress<ProgressArgs> progress)
        {
            if (this.Provider == null)
            {
                return context;
            }

            context.SyncStage = SyncStage.ChangesApplying;

            var setupTable = scopeInfo.Setup.Tables[schemaTable.TableName, schemaTable.SchemaName];

            if (setupTable == null)
            {
                return context;
            }

            // Only the table schema is replicated, no data is applied
            if (setupTable.SyncDirection == SyncDirection.None)
            {
                return context;
            }

            // If we are in the upload stage, check that the table is not download-only
            if (context.SyncWay == SyncWay.Upload && setupTable.SyncDirection == SyncDirection.DownloadOnly)
            {
                return context;
            }

            // If we are in the download stage, check that the table is not upload-only
            if (context.SyncWay == SyncWay.Download && setupTable.SyncDirection == SyncDirection.UploadOnly)
            {
                return context;
            }

            var hasChanges = message.BatchInfo.HasData(schemaTable.TableName, schemaTable.SchemaName);

            // Each table in the messages contains scope columns. Don't forget it
            if (!hasChanges)
            {
                return context;
            }

            // what kind of command to execute
            var           init          = message.IsNew || context.SyncType != SyncType.Normal;
            DbCommandType dbCommandType = applyType == DataRowState.Deleted ? DbCommandType.DeleteRows : (init ? DbCommandType.InsertRows : DbCommandType.UpdateRows);

            // tmp sync table with only writable columns
            var changesSet         = schemaTable.Schema.Clone(false);
            var schemaChangesTable = DbSyncAdapter.CreateChangesTable(schemaTable, changesSet);

            // Get the executing sync adapter
            var syncAdapter = this.GetSyncAdapter(schemaChangesTable, scopeInfo);

            syncAdapter.ApplyType = applyType;

            // Get command
            var (command, isBatch) = await syncAdapter.GetCommandAsync(dbCommandType, connection, transaction);

            if (command == null)
            {
                return context;
            }

            var bpiTables = message.BatchInfo.GetBatchPartsInfo(schemaTable);

            // launch interceptor if any
            var args = new TableChangesApplyingArgs(context, message.BatchInfo, bpiTables, schemaTable, applyType, command, connection, transaction);

            await this.InterceptAsync(args, progress, cancellationToken).ConfigureAwait(false);

            if (args.Cancel || args.Command == null)
            {
                return context;
            }

            command = args.Command;
            var cmdText = command.CommandText;

            TableChangesApplied tableChangesApplied = null;

            // Conflicts that occurred when trying to apply rows
            var conflictRows = new List <SyncRow>();

            var localSerializer = new LocalJsonSerializer();

            // If someone has an interceptor on deserializing, we read the row and intercept
            var interceptorsReading = this.interceptors.GetInterceptors <DeserializingRowArgs>();

            if (interceptorsReading.Count > 0)
            {
                localSerializer.OnReadingRow(async (schemaTable, rowString) =>
                {
                    var args = new DeserializingRowArgs(context, schemaTable, rowString);
                    await this.InterceptAsync(args, progress, cancellationToken).ConfigureAwait(false);
                    return args.Result;
                });
            }

            // Loop over all the batch part files for this table
            foreach (var batchPartInfo in bpiTables)
            {
                // Applied row for this particular BPI
                var appliedRowsTmp = 0;
                // Rows fetched (whether in the expected state or not) from the BPI
                var rowsFetched = 0;

                // Get full path of my batchpartinfo
                var fullPath = message.BatchInfo.GetBatchPartInfoPath(batchPartInfo).FullPath;

                // accumulating rows
                var batchRows = new List <SyncRow>();

                if (isBatch)
                {
                    foreach (var syncRow in localSerializer.ReadRowsFromFile(fullPath, schemaChangesTable))
                    {
                        rowsFetched++;

                        // Adding rows to the batch rows
                        if (batchRows.Count < this.Provider.BulkBatchMaxLinesCount)
                        {
                            if (syncRow.RowState == applyType)
                            {
                                batchRows.Add(syncRow);
                            }

                            if (rowsFetched < batchPartInfo.RowsCount && batchRows.Count < this.Provider.BulkBatchMaxLinesCount)
                            {
                                continue;
                            }
                        }
                        if (batchRows.Count <= 0)
                        {
                            continue;
                        }

                        var failedRows = schemaChangesTable.Schema.Clone().Tables[schemaChangesTable.TableName, schemaChangesTable.SchemaName];

                        command.CommandText = cmdText;
                        var batchArgs = new RowsChangesApplyingArgs(context, message.BatchInfo, batchRows, schemaChangesTable, applyType, command, connection, transaction);
                        await this.InterceptAsync(batchArgs, progress, cancellationToken).ConfigureAwait(false);

                        if (batchArgs.Cancel || batchArgs.Command == null || batchArgs.SyncRows == null || batchArgs.SyncRows.Count <= 0)
                        {
                            continue;
                        }

                        // Get the correct pointer to the command from the interceptor, in case the user changed the whole instance
                        command = batchArgs.Command;

                        await this.InterceptAsync(new DbCommandArgs(context, command, connection, transaction), progress, cancellationToken).ConfigureAwait(false);

                        // execute the batch, through the provider
                        await syncAdapter.ExecuteBatchCommandAsync(command, message.SenderScopeId, batchArgs.SyncRows, schemaChangesTable, failedRows, message.LastTimestamp, connection, transaction).ConfigureAwait(false);

                        foreach (var failedRow in failedRows.Rows)
                        {
                            conflictRows.Add(failedRow);
                        }

                        //rows minus failed rows
                        appliedRowsTmp += batchRows.Count - failedRows.Rows.Count;
                        batchRows.Clear();
                    }
                }
                else
                {
                    foreach (var syncRow in localSerializer.ReadRowsFromFile(fullPath, schemaChangesTable))
                    {
                        rowsFetched++;

                        if (syncRow.RowState != applyType)
                        {
                            continue;
                        }

                        command.CommandText = cmdText;
                        var batchArgs = new RowsChangesApplyingArgs(context, message.BatchInfo, new List<SyncRow> { syncRow }, schemaChangesTable, applyType, command, connection, transaction);
                        await this.InterceptAsync(batchArgs, progress, cancellationToken).ConfigureAwait(false);

                        if (batchArgs.Cancel || batchArgs.Command == null || batchArgs.SyncRows == null || batchArgs.SyncRows.Count() <= 0)
                        {
                            continue;
                        }

                        // Get the correct pointer to the command from the interceptor, in case the user changed the whole instance
                        command = batchArgs.Command;

                        // Set the parameters value from row
                        syncAdapter.SetColumnParametersValues(command, batchArgs.SyncRows.First());

                        // Set the special parameters for update
                        syncAdapter.AddScopeParametersValues(command, message.SenderScopeId, message.LastTimestamp, applyType == DataRowState.Deleted, false);

                        await this.InterceptAsync(new DbCommandArgs(context, command, connection, transaction), progress, cancellationToken).ConfigureAwait(false);

                        var rowAppliedCount = await command.ExecuteNonQueryAsync().ConfigureAwait(false);

                        // Check if we have a return value instead
                        var syncRowCountParam = DbSyncAdapter.GetParameter(command, "sync_row_count");

                        if (syncRowCountParam != null)
                        {
                            rowAppliedCount = (int)syncRowCountParam.Value;
                        }

                        if (rowAppliedCount > 0)
                        {
                            appliedRowsTmp++;
                        }
                        else
                        {
                            conflictRows.Add(syncRow);
                        }
                    }
                }


                // conflict rows applied
                int rowsAppliedCount = 0;
                // conflict resolved count
                int conflictsResolvedCount = 0;

                // If conflicts occurred
                if (conflictRows.Count > 0)
                {
                    foreach (var conflictRow in conflictRows)
                    {
                        int     conflictResolvedCount;
                        SyncRow resolvedRow;
                        int     rowAppliedCount;
                        (context, conflictResolvedCount, resolvedRow, rowAppliedCount) =
                            await this.HandleConflictAsync(scopeInfo, context, message.LocalScopeId, message.SenderScopeId, syncAdapter, conflictRow, schemaChangesTable,
                                                           message.Policy, message.LastTimestamp, connection, transaction, cancellationToken, progress).ConfigureAwait(false);

                        conflictsResolvedCount += conflictResolvedCount;
                        rowsAppliedCount       += rowAppliedCount;
                    }

                    // add rows with resolved conflicts
                    appliedRowsTmp += rowsAppliedCount;
                }

                // Any failure ?
                var changedFailed = rowsFetched - conflictsResolvedCount - appliedRowsTmp;

                // Only upsert DatabaseChangesApplied if we applied an upsert/delete from the batch or resolved any conflict
                if (appliedRowsTmp > 0 || conflictsResolvedCount > 0)
                {
                    // We may have multiple batch files, so we can have multiple sync tables with the same name
                    // (a syncTable may be contained in several files);
                    // that's why we reuse an applied-changes instance if one already exists from a previous batch file
                    tableChangesApplied = changesApplied.TableChangesApplied.FirstOrDefault(tca =>
                    {
                        var sc = SyncGlobalization.DataSourceStringComparison;

                        var sn      = tca.SchemaName == null ? string.Empty : tca.SchemaName;
                        var otherSn = schemaTable.SchemaName == null ? string.Empty : schemaTable.SchemaName;

                        return tca.TableName.Equals(schemaTable.TableName, sc) &&
                               sn.Equals(otherSn, sc) &&
                               tca.State == applyType;
                    });

                    if (tableChangesApplied == null)
                    {
                        tableChangesApplied = new TableChangesApplied
                        {
                            TableName         = schemaTable.TableName,
                            SchemaName        = schemaTable.SchemaName,
                            Applied           = appliedRowsTmp,
                            ResolvedConflicts = conflictsResolvedCount,
                            Failed            = changedFailed,
                            State             = applyType,
                            TotalRowsCount    = message.BatchInfo.RowsCount,
                            TotalAppliedCount = changesApplied.TotalAppliedChanges + appliedRowsTmp
                        };
                        changesApplied.TableChangesApplied.Add(tableChangesApplied);
                    }
                    else
                    {
                        tableChangesApplied.Applied           += appliedRowsTmp;
                        tableChangesApplied.TotalAppliedCount  = changesApplied.TotalAppliedChanges;
                        tableChangesApplied.ResolvedConflicts += conflictsResolvedCount;
                        tableChangesApplied.Failed            += changedFailed;
                    }

                    // we've got a 0.25 share of the overall progress to fill here
                    var progresspct = appliedRowsTmp * 0.25d / tableChangesApplied.TotalRowsCount;
                    context.ProgressPercentage += progresspct;
                }
            }

            schemaChangesTable.Dispose();
            schemaChangesTable = null;
            changesSet.Dispose();
            changesSet = null;

            // Report the overall changes applied for the current table
            if (tableChangesApplied != null)
            {
                var tableChangesAppliedArgs = new TableChangesAppliedArgs(context, tableChangesApplied, connection, transaction);
                // We don't report progress if we haven't applied any changes to the table, to limit the verbosity of Progress
                await this.InterceptAsync(tableChangesAppliedArgs, progress, cancellationToken).ConfigureAwait(false);
            }


            if (command != null)
            {
                command.Dispose();
            }

            return context;
        }
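In the isBatch branch above, rows are accumulated until BulkBatchMaxLinesCount is reached, flushed through a single batch command, and the loop then continues with the next chunk. As a standalone illustration of that accumulate-then-flush pattern (not the library's code; on .NET 6+ Enumerable.Chunk provides something similar), consider:

using System;
using System.Collections.Generic;

public static class BatchingExtensions
{
    // Yield fixed-size chunks from a stream of rows: accumulate until the chunk
    // is full, hand it to the caller (who would execute one batch command per
    // chunk), then start a new chunk; the final partial chunk is flushed at the end.
    public static IEnumerable<List<T>> InChunks<T>(this IEnumerable<T> rows, int maxChunkSize)
    {
        if (maxChunkSize <= 0)
            throw new ArgumentOutOfRangeException(nameof(maxChunkSize));

        var chunk = new List<T>(maxChunkSize);

        foreach (var row in rows)
        {
            chunk.Add(row);

            if (chunk.Count >= maxChunkSize)
            {
                yield return chunk;
                chunk = new List<T>(maxChunkSize);
            }
        }

        if (chunk.Count > 0)
            yield return chunk;
    }
}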