        /// <summary>
        /// Gets the snapshot batch info for the given sync parameters,
        /// if a snapshot has been generated on disk.
        /// </summary>
        /// <returns>A (SyncContext, BatchInfo) tuple; BatchInfo is null when no snapshot exists.</returns>
        public virtual (SyncContext, BatchInfo) GetSnapshot(
            SyncContext context, SyncSet schema, string batchDirectory,
            CancellationToken cancellationToken, IProgress <ProgressArgs> progress = null)
        {
            var sb         = new StringBuilder();
            var underscore = "";

            if (context.Parameters != null)
            {
                foreach (var p in context.Parameters.OrderBy(p => p.Name))
                {
                    var cleanValue = new string(p.Value.ToString().Where(char.IsLetterOrDigit).ToArray());
                    var cleanName  = new string(p.Name.Where(char.IsLetterOrDigit).ToArray());

                    sb.Append($"{underscore}{cleanName}_{cleanValue}");
                    underscore = "_";
                }
            }

            var directoryName = sb.ToString();

            directoryName = string.IsNullOrEmpty(directoryName) ? "ALL" : directoryName;

            var directoryFullPath = Path.Combine(batchDirectory, directoryName);

            // if no snapshot present, just return null value.
            if (!Directory.Exists(directoryFullPath))
            {
                return(context, null);
            }

            // Deserialize the batch info summary from disk.
            var jsonConverter = new JsonConverter <BatchInfo>();

            var summaryFileName = Path.Combine(directoryFullPath, "summary.json");

            BatchInfo batchInfo = null;

            // Create the schema changeset
            var changesSet = new SyncSet(schema.ScopeName);

            // Create a Schema set without readonly columns, attached to memory changes
            foreach (var table in schema.Tables)
            {
                DbSyncAdapter.CreateChangesTable(schema.Tables[table.TableName, table.SchemaName], changesSet);
            }


            using (var fs = new FileStream(summaryFileName, FileMode.Open, FileAccess.Read))
            {
                batchInfo = jsonConverter.Deserialize(fs);
            }

            batchInfo.SetSchema(changesSet);

            return(context, batchInfo);
        }
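        // A minimal sketch (not part of the original sample) of the parameter-to-directory
        // naming used above. The parameter values are illustrative assumptions: a parameter
        // CustomerID = "A-42" is cleaned to "CustomerID_A42", so the snapshot folder becomes
        // "<batchDirectory>/CustomerID_A42"; with no parameters the name falls back to "ALL".
        private static string BuildSnapshotDirectoryName(IEnumerable<(string Name, object Value)> parameters)
        {
            var parts = parameters
                .OrderBy(p => p.Name)
                .Select(p => new string(p.Name.Where(char.IsLetterOrDigit).ToArray())
                             + "_"
                             + new string(p.Value.ToString().Where(char.IsLetterOrDigit).ToArray()));

            var directoryName = string.Join("_", parts);

            return string.IsNullOrEmpty(directoryName) ? "ALL" : directoryName;
        }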
        /// <summary>
        /// Try to get the conflicting source row from the local repository
        /// </summary>
        private async Task <SyncRow> InternalGetConflictRowAsync(SyncContext context, DbSyncAdapter syncAdapter, Guid localScopeId, SyncRow primaryKeyRow, SyncTable schema, DbConnection connection, DbTransaction transaction)
        {
            // Get the row in the local repository
            var command = await syncAdapter.GetCommandAsync(DbCommandType.SelectRow, connection, transaction);

            // set the primary keys columns as parameters
            syncAdapter.SetColumnParametersValues(command, primaryKeyRow);

            // Create a select table based on the schema in parameter + scope columns
            var changesSet  = schema.Schema.Clone(false);
            var selectTable = DbSyncAdapter.CreateChangesTable(schema, changesSet);

            using var dataReader = await command.ExecuteReaderAsync().ConfigureAwait(false);

            if (!dataReader.Read())
            {
                dataReader.Close();
                return(null);
            }

            // Create a new empty row
            var syncRow = selectTable.NewRow();

            for (var i = 0; i < dataReader.FieldCount; i++)
            {
                var columnName = dataReader.GetName(i);

                // if we have the tombstone value, do not add it to the table
                if (columnName == "sync_row_is_tombstone")
                {
                    var isTombstone = Convert.ToInt64(dataReader.GetValue(i)) > 0;
                    syncRow.RowState = isTombstone ? DataRowState.Deleted : DataRowState.Modified;
                    continue;
                }
                if (columnName == "update_scope_id")
                {
                    // var readerScopeId = dataReader.GetValue(i);
                    continue;
                }

                var columnValueObject = dataReader.GetValue(i);
                var columnValue       = columnValueObject == DBNull.Value ? null : columnValueObject;
                syncRow[columnName] = columnValue;
            }


            // if syncRow is not a tombstone, its state is still Unchanged, so mark it as Modified
            if (syncRow != null && syncRow.RowState == DataRowState.Unchanged)
            {
                syncRow.RowState = DataRowState.Modified;
            }

            dataReader.Close();

            return(syncRow);
        }
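        // A small sketch (an assumption, not part of the original sample) isolating the
        // state mapping used above: the tracking column "sync_row_is_tombstone" drives
        // the row state, and any non-tombstone row read back is treated as Modified.
        private static DataRowState MapConflictRowState(object tombstoneColumnValue)
        {
            var isTombstone = Convert.ToInt64(tombstoneColumnValue) > 0;

            return isTombstone ? DataRowState.Deleted : DataRowState.Modified;
        }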
        /// <summary>
        /// Generate an empty BatchInfo
        /// </summary>
        internal (BatchInfo, DatabaseChangesSelected) GetEmptyChanges(MessageGetChangesBatch message)
        {
            // Get config
            var isBatched = message.BatchSize > 0;

            // create the in memory changes set
            var changesSet = new SyncSet(message.Schema.ScopeName);

            // Create a Schema set without readonly tables, attached to memory changes
            foreach (var table in message.Schema.Tables)
            {
                DbSyncAdapter.CreateChangesTable(message.Schema.Tables[table.TableName, table.SchemaName], changesSet);
            }

            // Create the batch info, in memory
            var batchInfo = new BatchInfo(!isBatched, changesSet, message.BatchDirectory);

            // add changes to batchInfo
            batchInfo.AddChanges(new SyncSet());

            // Return the empty in-memory batch info along with empty statistics
            return(batchInfo, new DatabaseChangesSelected());
        }
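        // A hedged usage sketch ("message" is assumed to be a prepared MessageGetChangesBatch):
        // even when nothing has to be uploaded, callers receive a valid empty BatchInfo plus
        // empty statistics instead of null, so downstream code never has to special-case it.
        internal void AssertEmptyChangesShape(MessageGetChangesBatch message)
        {
            var(emptyBatchInfo, emptyStats) = this.GetEmptyChanges(message);

            // No rows were selected, but the batch info still carries the writable schema.
            System.Diagnostics.Debug.Assert(emptyBatchInfo != null);
            System.Diagnostics.Debug.Assert(emptyStats.TotalChangesSelected == 0);
        }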
        /// <summary>
        /// Create a snapshot of all tables on disk, batched by the given batch size
        /// </summary>
        public virtual async Task <SyncContext> CreateSnapshotAsync(SyncContext context, SyncSet schema,
                                                                    DbConnection connection, DbTransaction transaction, string batchDirectory, int batchSize, long remoteClientTimestamp,
                                                                    CancellationToken cancellationToken, IProgress <ProgressArgs> progress = null)
        {
            // create local directory
            if (!Directory.Exists(batchDirectory))
            {
                Directory.CreateDirectory(batchDirectory);
            }

            // number of batch files generated
            var batchIndex = 0;

            // create the in memory changes set
            var changesSet = new SyncSet();

            // Create a Schema set without readonly tables, attached to memory changes
            foreach (var table in schema.Tables)
            {
                DbSyncAdapter.CreateChangesTable(schema.Tables[table.TableName, table.SchemaName], changesSet);
            }

            var sb         = new StringBuilder();
            var underscore = "";

            if (context.Parameters != null)
            {
                foreach (var p in context.Parameters.OrderBy(p => p.Name))
                {
                    var cleanValue = new string(p.Value.ToString().Where(char.IsLetterOrDigit).ToArray());
                    var cleanName  = new string(p.Name.Where(char.IsLetterOrDigit).ToArray());

                    sb.Append($"{underscore}{cleanName}_{cleanValue}");
                    underscore = "_";
                }
            }

            var directoryName = sb.ToString();

            directoryName = string.IsNullOrEmpty(directoryName) ? "ALL" : directoryName;

            var directoryFullPath = Path.Combine(batchDirectory, directoryName);

            if (Directory.Exists(directoryFullPath))
            {
                Directory.Delete(directoryFullPath, true);
            }

            // batchinfo generates a schema clone with scope columns if needed
            var batchInfo = new BatchInfo(false, changesSet, batchDirectory, directoryName);

            // Clear tables, we will add only the ones we need in the batch info
            changesSet.Clear();

            foreach (var syncTable in schema.Tables)
            {
                var tableBuilder = this.GetTableBuilder(syncTable);
                var syncAdapter  = tableBuilder.CreateSyncAdapter(connection, transaction);

                // raise before event
                context.SyncStage = SyncStage.TableChangesSelecting;
                var tableChangesSelectingArgs = new TableChangesSelectingArgs(context, syncTable.TableName, connection, transaction);
                // launch interceptor if any
                await this.InterceptAsync(tableChangesSelectingArgs).ConfigureAwait(false);

                // Get Select initialize changes command
                var selectIncrementalChangesCommand = this.GetSelectChangesCommand(context, syncAdapter, syncTable, true);

                // Set parameters
                this.SetSelectChangesCommonParameters(context, syncTable, null, true, 0, selectIncrementalChangesCommand);

                // Get the reader
                using (var dataReader = selectIncrementalChangesCommand.ExecuteReader())
                {
                    // memory size total
                    double rowsMemorySize = 0L;

                    // Create a changes table with scope columns
                    var changesSetTable = DbSyncAdapter.CreateChangesTable(schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                    while (dataReader.Read())
                    {
                        // Create a row from dataReader
                        var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                        // Add the row to the changes set
                        changesSetTable.Rows.Add(row);

                        var fieldsSize     = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                        var finalFieldSize = fieldsSize / 1024d;

                        if (finalFieldSize > batchSize)
                        {
                            throw new RowOverSizedException(finalFieldSize.ToString());
                        }

                        // Calculate the new memory size
                        rowsMemorySize += finalFieldSize;

                        // Move on to the next row if we haven't reached the batch size yet.
                        if (rowsMemorySize <= batchSize)
                        {
                            continue;
                        }

                        // add changes to batchinfo
                        batchInfo.AddChanges(changesSet, batchIndex, false);

                        // increment batch index
                        batchIndex++;

                        // we know the data is serialized here, so we can flush the set
                        changesSet.Clear();

                        // Recreate an empty ContainerSet and a ContainerTable
                        changesSet = new SyncSet();

                        changesSetTable = DbSyncAdapter.CreateChangesTable(schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                        // Init the row memory size
                        rowsMemorySize = 0L;
                    }
                }

                selectIncrementalChangesCommand.Dispose();
            }


            if (changesSet != null && changesSet.HasTables)
            {
                batchInfo.AddChanges(changesSet, batchIndex, true);
            }

            // Check the last index as the last batch
            batchInfo.EnsureLastBatch();

            batchInfo.Timestamp = remoteClientTimestamp;

            // Serialize on disk.
            var jsonConverter = new JsonConverter <BatchInfo>();

            var summaryFileName = Path.Combine(directoryFullPath, "summary.json");

            using (var f = new FileStream(summaryFileName, FileMode.CreateNew, FileAccess.ReadWrite))
            {
                var bytes = jsonConverter.Serialize(batchInfo);
                f.Write(bytes, 0, bytes.Length);
            }


            return(context);
        }
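        // A worked sketch (illustrative, not part of the original sample) of the batching
        // arithmetic above: row sizes are accumulated in KB and a batch part is flushed on
        // the first row that pushes the total strictly above batchSize. With batchSize = 500
        // and rows of ~2 KB each, a part is written roughly every 251 rows.
        private static int EstimateRowsPerBatchPart(double rowSizeKb, int batchSizeKb)
        {
            // The selection loop continues while rowsMemorySize <= batchSize,
            // so the flush happens one row after the threshold is reached.
            return (int)Math.Floor(batchSizeKb / rowSizeKb) + 1;
        }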
Example #5

        internal virtual async Task <(SyncContext context, long remoteClientTimestamp, BatchInfo serverBatchInfo, DatabaseChangesSelected databaseChangesSelected)>
        InternalGetSnapshotAsync(ServerScopeInfo serverScopeInfo, SyncContext context, DbConnection connection = default, DbTransaction transaction = default, CancellationToken cancellationToken = default, IProgress <ProgressArgs> progress = null)
        {
            try
            {
                await using var runner = await this.GetConnectionAsync(context, SyncMode.Reading, SyncStage.ScopeLoading, connection, transaction, cancellationToken, progress).ConfigureAwait(false);

                // Create the statistics object holding the selected changes
                var changesSelected = new DatabaseChangesSelected();

                BatchInfo serverBatchInfo = null;
                if (string.IsNullOrEmpty(this.Options.SnapshotsDirectory))
                {
                    return(context, 0, null, changesSelected);
                }

                // Direction set to Download
                context.SyncWay = SyncWay.Download;

                if (cancellationToken.IsCancellationRequested)
                {
                    cancellationToken.ThrowIfCancellationRequested();
                }

                // Get Schema from remote provider if no schema passed from args
                if (serverScopeInfo.Schema == null)
                {
                    (context, serverScopeInfo) = await this.InternalGetServerScopeInfoAsync(context, serverScopeInfo.Setup, false, runner.Connection, runner.Transaction, runner.CancellationToken, runner.Progress).ConfigureAwait(false);
                }

                // When we get the changes from the server, we create batches if the client requested them;
                // the batching decision comes from the client's batch size
                var(rootDirectory, nameDirectory) = await this.InternalGetSnapshotDirectoryPathAsync(serverScopeInfo.Name, context.Parameters, runner.CancellationToken, runner.Progress).ConfigureAwait(false);

                if (!string.IsNullOrEmpty(rootDirectory))
                {
                    var directoryFullPath = Path.Combine(rootDirectory, nameDirectory);

                    // a snapshot is present only if its directory exists
                    if (Directory.Exists(directoryFullPath))
                    {
                        // Deserialize the batch info summary from disk.
                        var jsonConverter = new Serialization.JsonConverter <BatchInfo>();

                        var summaryFileName = Path.Combine(directoryFullPath, "summary.json");

                        using (var fs = new FileStream(summaryFileName, FileMode.Open, FileAccess.Read))
                        {
                            serverBatchInfo = await jsonConverter.DeserializeAsync(fs).ConfigureAwait(false);
                        }

                        // Create the schema changeset
                        var changesSet = new SyncSet();

                        // Create a Schema set without readonly columns, attached to memory changes
                        foreach (var table in serverScopeInfo.Schema.Tables)
                        {
                            DbSyncAdapter.CreateChangesTable(serverScopeInfo.Schema.Tables[table.TableName, table.SchemaName], changesSet);

                            // Get all stats about this table
                            var bptis = serverBatchInfo.BatchPartsInfo.SelectMany(bpi => bpi.Tables.Where(t =>
                            {
                                var sc = SyncGlobalization.DataSourceStringComparison;

                                var sn      = t.SchemaName == null ? string.Empty : t.SchemaName;
                                var otherSn = table.SchemaName == null ? string.Empty : table.SchemaName;

                                return(table.TableName.Equals(t.TableName, sc) && sn.Equals(otherSn, sc));
                            }));

                            if (bptis != null)
                            {
                                // Statistics
                                var tableChangesSelected = new TableChangesSelected(table.TableName, table.SchemaName)
                                {
                                    // we are applying a snapshot, which can't contain any deletes
                                    Upserts = bptis.Sum(bpti => bpti.RowsCount)
                                };

                                if (tableChangesSelected.Upserts > 0)
                                {
                                    changesSelected.TableChangesSelected.Add(tableChangesSelected);
                                }
                            }
                        }
                        serverBatchInfo.SanitizedSchema = changesSet;
                    }
                }
                if (serverBatchInfo == null)
                {
                    return(context, 0, null, changesSelected);
                }


                await runner.CommitAsync().ConfigureAwait(false);

                return(context, serverBatchInfo.Timestamp, serverBatchInfo, changesSelected);
            }
            catch (Exception ex)
            {
                throw GetSyncError(context, ex);
            }
        }
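        // A minimal sketch (an assumption) of the table matching used above when gathering
        // snapshot statistics: schema names are normalized to string.Empty before comparing,
        // so a null schema and an empty schema are considered equal. The comparison value is
        // a stand-in for SyncGlobalization.DataSourceStringComparison.
        private static bool IsSameTable(string tableName, string schemaName, string otherTableName, string otherSchemaName)
        {
            var sc = StringComparison.InvariantCultureIgnoreCase;

            var sn      = schemaName == null ? string.Empty : schemaName;
            var otherSn = otherSchemaName == null ? string.Empty : otherSchemaName;

            return(tableName.Equals(otherTableName, sc) && sn.Equals(otherSn, sc));
        }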
Example #6
        /// <summary>
        /// Gets a batch of changes to synchronize when given batch size,
        /// destination knowledge, and change data retriever parameters.
        /// </summary>
        /// <returns>A (SyncContext, BatchInfo, DatabaseChangesSelected) tuple used to retrieve the modified data.</returns>
        internal virtual async Task <(SyncContext, BatchInfo, DatabaseChangesSelected)> InternalGetChangesAsync(
            SyncContext context, MessageGetChangesBatch message,
            DbConnection connection, DbTransaction transaction,
            CancellationToken cancellationToken, IProgress <ProgressArgs> progress)
        {
            // batch info containing changes
            BatchInfo batchInfo;


            // Statistics about changes that are selected
            DatabaseChangesSelected changesSelected;

            if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
            {
                (batchInfo, changesSelected) = await this.InternalGetEmptyChangesAsync(message).ConfigureAwait(false);

                return(context, batchInfo, changesSelected);
            }

            // Call interceptor
            await this.InterceptAsync(new DatabaseChangesSelectingArgs(context, message, connection, transaction), cancellationToken).ConfigureAwait(false);

            // create local directory
            if (message.BatchSize > 0 && !string.IsNullOrEmpty(message.BatchDirectory) && !Directory.Exists(message.BatchDirectory))
            {
                Directory.CreateDirectory(message.BatchDirectory);
            }

            changesSelected = new DatabaseChangesSelected();

            // number of batch files generated
            var batchIndex = 0;

            // Check if we are in batch mode
            var isBatch = message.BatchSize > 0;

            // Create a batch info in memory (if !isBatch) or serialized on disk (if isBatch)
            // batchinfo generates a schema clone with scope columns if needed
            batchInfo = new BatchInfo(!isBatch, message.Schema, message.BatchDirectory);

            // Clean SyncSet, we will add only tables we need in the batch info
            var changesSet = new SyncSet();

            var cptSyncTable    = 0;
            var currentProgress = context.ProgressPercentage;

            foreach (var syncTable in message.Schema.Tables)
            {
                // running count of tables, used to report the progress percentage
                cptSyncTable++;

                // Only table schema is replicated, no data is applied
                if (syncTable.SyncDirection == SyncDirection.None)
                {
                    continue;
                }

                // if we are in the upload stage, skip download-only tables
                if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
                {
                    continue;
                }

                // if we are in the download stage, skip upload-only tables
                if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
                {
                    continue;
                }

                // Get Command
                var selectIncrementalChangesCommand = await this.GetSelectChangesCommandAsync(context, syncTable, message.Setup, message.IsNew, connection, transaction);

                // Set parameters
                this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, selectIncrementalChangesCommand);

                // launch interceptor if any
                var args = new TableChangesSelectingArgs(context, syncTable, selectIncrementalChangesCommand, connection, transaction);
                await this.InterceptAsync(args, cancellationToken).ConfigureAwait(false);

                if (!args.Cancel && args.Command != null)
                {
                    // Statistics
                    var tableChangesSelected = new TableChangesSelected(syncTable.TableName, syncTable.SchemaName);

                    // Create a changes table with scope columns
                    var changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                    // Get the reader
                    using var dataReader = await args.Command.ExecuteReaderAsync().ConfigureAwait(false);

                    // memory size total
                    double rowsMemorySize = 0L;

                    while (dataReader.Read())
                    {
                        // Create a row from dataReader
                        var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                        // Add the row to the changes set
                        changesSetTable.Rows.Add(row);

                        // Set the correct state to be applied
                        if (row.RowState == DataRowState.Deleted)
                        {
                            tableChangesSelected.Deletes++;
                        }
                        else if (row.RowState == DataRowState.Modified)
                        {
                            tableChangesSelected.Upserts++;
                        }

                        // calculate row size if in batch mode
                        if (isBatch)
                        {
                            var fieldsSize     = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                            var finalFieldSize = fieldsSize / 1024d;

                            if (finalFieldSize > message.BatchSize)
                            {
                                throw new RowOverSizedException(finalFieldSize.ToString());
                            }

                            // Calculate the new memory size
                            rowsMemorySize += finalFieldSize;

                            // Move on to the next row if we haven't reached the batch size yet.
                            if (rowsMemorySize <= message.BatchSize)
                            {
                                continue;
                            }

                            // Check interceptor
                            var batchTableChangesSelectedArgs = new TableChangesSelectedArgs(context, changesSetTable, tableChangesSelected, connection, transaction);
                            await this.InterceptAsync(batchTableChangesSelectedArgs, cancellationToken).ConfigureAwait(false);

                            // add changes to batchinfo
                            await batchInfo.AddChangesAsync(changesSet, batchIndex, false, message.SerializerFactory, this).ConfigureAwait(false);

                            // increment batch index
                            batchIndex++;

                            // we know the data is serialized here, so we can flush the set
                            changesSet.Clear();

                            // Recreate an empty ContainerSet and a ContainerTable
                            changesSet = new SyncSet();

                            changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                            // Init the row memory size
                            rowsMemorySize = 0L;
                        }
                    }

                    dataReader.Close();

                    // We don't add the statistics if the table has no changes, to limit verbosity
                    if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
                    {
                        changesSelected.TableChangesSelected.Add(tableChangesSelected);
                    }

                    // raise the interceptor even if no rows were selected
                    var tableChangesSelectedArgs = new TableChangesSelectedArgs(context, changesSetTable, tableChangesSelected, connection, transaction);
                    await this.InterceptAsync(tableChangesSelectedArgs, cancellationToken).ConfigureAwait(false);

                    context.ProgressPercentage = currentProgress + (cptSyncTable * 0.2d / message.Schema.Tables.Count);

                    // only raise report progress if we have something
                    if (tableChangesSelectedArgs.TableChangesSelected.TotalChanges > 0)
                    {
                        this.ReportProgress(context, progress, tableChangesSelectedArgs);
                    }
                }
            }

            // We are in batch mode, and we are at the last batchpart info
            // Add the remaining changes set only if it still contains tables and rows
            if (changesSet != null && changesSet.HasTables && changesSet.HasRows)
            {
                await batchInfo.AddChangesAsync(changesSet, batchIndex, true, message.SerializerFactory, this).ConfigureAwait(false);
            }

            // Set the total rows count contained in the batch info
            batchInfo.RowsCount = changesSelected.TotalChangesSelected;

            // Check the last index as the last batch
            batchInfo.EnsureLastBatch();

            // Raise database changes selected
            if (changesSelected.TotalChangesSelected > 0 || changesSelected.TotalChangesSelectedDeletes > 0 || changesSelected.TotalChangesSelectedUpdates > 0)
            {
                var databaseChangesSelectedArgs = new DatabaseChangesSelectedArgs(context, message.LastTimestamp, batchInfo, changesSelected, connection);
                this.ReportProgress(context, progress, databaseChangesSelectedArgs);
                await this.InterceptAsync(databaseChangesSelectedArgs, cancellationToken).ConfigureAwait(false);
            }

            return(context, batchInfo, changesSelected);
        }
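        // A worked sketch of the progress arithmetic used above (an illustration, not part
        // of the original sample): change selection owns a 20% share of the overall progress,
        // spread evenly across tables. With 4 tables, finishing the 3rd table reports
        // currentProgress + 3 * 0.2 / 4 = currentProgress + 0.15.
        private static double SelectionProgress(double currentProgress, int tablesDone, int tableCount)
            => currentProgress + (tablesDone * 0.2d / tableCount);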
        /// <summary>
        /// Gets the snapshot batch info for the current scope and sync parameters,
        /// if a snapshot has been generated on disk.
        /// </summary>
        /// <returns>A (SyncContext, BatchInfo) tuple; BatchInfo is null when no snapshot exists.</returns>
        public virtual async Task <(SyncContext, BatchInfo)> GetSnapshotAsync(
            SyncContext context, SyncSet schema, string snapshotDirectory,
            CancellationToken cancellationToken, IProgress <ProgressArgs> progress = null)
        {
            // TODO : Get a snapshot based on scope name

            var sb         = new StringBuilder();
            var underscore = "";

            if (context.Parameters != null)
            {
                foreach (var p in context.Parameters.OrderBy(p => p.Name))
                {
                    var cleanValue = new string(p.Value.ToString().Where(char.IsLetterOrDigit).ToArray());
                    var cleanName  = new string(p.Name.Where(char.IsLetterOrDigit).ToArray());

                    sb.Append($"{underscore}{cleanName}_{cleanValue}");
                    underscore = "_";
                }
            }

            var directoryName = sb.ToString();

            directoryName = string.IsNullOrEmpty(directoryName) ? "ALL" : directoryName;

            this.Orchestrator.logger.LogDebug(SyncEventsId.GetSnapshot, new { DirectoryName = directoryName });

            // cleansing scope name
            var directoryScopeName = new string(context.ScopeName.Where(char.IsLetterOrDigit).ToArray());

            // Get full path
            var directoryFullPath = Path.Combine(snapshotDirectory, directoryScopeName, directoryName);

            // if no snapshot present, just return null value.
            if (!Directory.Exists(directoryFullPath))
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.DirectoryNotExists, new { DirectoryPath = directoryFullPath });
                return(context, null);
            }

            // Deserialize the batch info summary from disk.
            var jsonConverter = new JsonConverter <BatchInfo>();

            var summaryFileName = Path.Combine(directoryFullPath, "summary.json");

            BatchInfo batchInfo = null;

            // Create the schema changeset
            var changesSet = new SyncSet();

            // Create a Schema set without readonly columns, attached to memory changes
            foreach (var table in schema.Tables)
            {
                DbSyncAdapter.CreateChangesTable(schema.Tables[table.TableName, table.SchemaName], changesSet);
            }

            using (var fs = new FileStream(summaryFileName, FileMode.Open, FileAccess.Read))
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.LoadSnapshotSummary, new { FileName = summaryFileName });
                batchInfo = await jsonConverter.DeserializeAsync(fs).ConfigureAwait(false);

                this.Orchestrator.logger.LogDebug(SyncEventsId.LoadSnapshotSummary, batchInfo);
            }

            batchInfo.SanitizedSchema = changesSet;

            return(context, batchInfo);
        }
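        // A hedged usage sketch (the helper name and fallback behavior are assumptions):
        // callers probe for a snapshot first and fall back to a normal change selection
        // when no BatchInfo comes back.
        internal async Task <bool> TryUseSnapshotAsync(SyncContext context, SyncSet schema, string snapshotDirectory, CancellationToken cancellationToken)
        {
            var(_, batchInfo) = await this.GetSnapshotAsync(context, schema, snapshotDirectory, cancellationToken).ConfigureAwait(false);

            // Null means no snapshot folder exists for this scope / parameter set.
            return(batchInfo != null);
        }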
Example #8
        /// <summary>
        /// Apply changes internal method for one type of query: Insert, Update or Delete for every batch from a table
        /// </summary>
        private async Task <SyncContext> InternalApplyTableChangesAsync(IScopeInfo scopeInfo, SyncContext context, SyncTable schemaTable, MessageApplyChanges message,
                                                                        DbConnection connection, DbTransaction transaction, DataRowState applyType, DatabaseChangesApplied changesApplied,
                                                                        CancellationToken cancellationToken, IProgress <ProgressArgs> progress)
        {
            if (this.Provider == null)
            {
                return(context);
            }

            context.SyncStage = SyncStage.ChangesApplying;

            var setupTable = scopeInfo.Setup.Tables[schemaTable.TableName, schemaTable.SchemaName];

            if (setupTable == null)
            {
                return(context);
            }

            // Only table schema is replicated, no data is applied
            if (setupTable.SyncDirection == SyncDirection.None)
            {
                return(context);
            }

            // if we are in the upload stage, skip download-only tables
            if (context.SyncWay == SyncWay.Upload && setupTable.SyncDirection == SyncDirection.DownloadOnly)
            {
                return(context);
            }

            // if we are in the download stage, skip upload-only tables
            if (context.SyncWay == SyncWay.Download && setupTable.SyncDirection == SyncDirection.UploadOnly)
            {
                return(context);
            }

            var hasChanges = message.BatchInfo.HasData(schemaTable.TableName, schemaTable.SchemaName);

            // Each table in the message contains scope columns; don't forget it
            if (!hasChanges)
            {
                return(context);
            }

            // what kind of command to execute
            var           init          = message.IsNew || context.SyncType != SyncType.Normal;
            DbCommandType dbCommandType = applyType == DataRowState.Deleted ? DbCommandType.DeleteRows : (init ? DbCommandType.InsertRows : DbCommandType.UpdateRows);

            // tmp sync table with only writable columns
            var changesSet         = schemaTable.Schema.Clone(false);
            var schemaChangesTable = DbSyncAdapter.CreateChangesTable(schemaTable, changesSet);

            // get the executing sync adapter
            var syncAdapter = this.GetSyncAdapter(schemaChangesTable, scopeInfo);

            syncAdapter.ApplyType = applyType;

            // Get command
            var(command, isBatch) = await syncAdapter.GetCommandAsync(dbCommandType, connection, transaction);

            if (command == null)
            {
                return(context);
            }

            var bpiTables = message.BatchInfo.GetBatchPartsInfo(schemaTable);

            // launch interceptor if any
            var args = new TableChangesApplyingArgs(context, message.BatchInfo, bpiTables, schemaTable, applyType, command, connection, transaction);

            await this.InterceptAsync(args, progress, cancellationToken).ConfigureAwait(false);

            if (args.Cancel || args.Command == null)
            {
                return(context);
            }

            command = args.Command;
            var cmdText = command.CommandText;

            TableChangesApplied tableChangesApplied = null;

            // Conflicts that occurred when trying to apply rows
            var conflictRows = new List <SyncRow>();

            var localSerializer = new LocalJsonSerializer();

            // If someone has an interceptor on deserializing, we read the row and intercept
            var interceptorsReading = this.interceptors.GetInterceptors <DeserializingRowArgs>();

            if (interceptorsReading.Count > 0)
            {
                localSerializer.OnReadingRow(async(schemaTable, rowString) =>
                {
                    var args = new DeserializingRowArgs(context, schemaTable, rowString);
                    await this.InterceptAsync(args, progress, cancellationToken).ConfigureAwait(false);
                    return(args.Result);
                });
            }

            // Process every batch part file belonging to this table
            foreach (var batchPartInfo in bpiTables)
            {
                // Applied row for this particular BPI
                var appliedRowsTmp = 0;
                // Rows fetched from the BPI (whether in the expected state or not)
                var rowsFetched = 0;

                // Get full path of my batchpartinfo
                var fullPath = message.BatchInfo.GetBatchPartInfoPath(batchPartInfo).FullPath;

                // accumulating rows
                var batchRows = new List <SyncRow>();

                if (isBatch)
                {
                    foreach (var syncRow in localSerializer.ReadRowsFromFile(fullPath, schemaChangesTable))
                    {
                        rowsFetched++;

                        // Adding rows to the batch rows
                        if (batchRows.Count < this.Provider.BulkBatchMaxLinesCount)
                        {
                            if (syncRow.RowState == applyType)
                            {
                                batchRows.Add(syncRow);
                            }

                            if (rowsFetched < batchPartInfo.RowsCount && batchRows.Count < this.Provider.BulkBatchMaxLinesCount)
                            {
                                continue;
                            }
                        }
                        if (batchRows.Count <= 0)
                        {
                            continue;
                        }

                        var failedRows = schemaChangesTable.Schema.Clone().Tables[schemaChangesTable.TableName, schemaChangesTable.SchemaName];

                        command.CommandText = cmdText;
                        var batchArgs = new RowsChangesApplyingArgs(context, message.BatchInfo, batchRows, schemaChangesTable, applyType, command, connection, transaction);
                        await this.InterceptAsync(batchArgs, progress, cancellationToken).ConfigureAwait(false);

                        if (batchArgs.Cancel || batchArgs.Command == null || batchArgs.SyncRows == null || batchArgs.SyncRows.Count <= 0)
                        {
                            continue;
                        }

                        // get the correct pointer to the command from the interceptor, in case the user changed the whole instance
                        command = batchArgs.Command;

                        await this.InterceptAsync(new DbCommandArgs(context, command, connection, transaction), progress, cancellationToken).ConfigureAwait(false);

                        // execute the batch, through the provider
                        await syncAdapter.ExecuteBatchCommandAsync(command, message.SenderScopeId, batchArgs.SyncRows, schemaChangesTable, failedRows, message.LastTimestamp, connection, transaction).ConfigureAwait(false);

                        foreach (var failedRow in failedRows.Rows)
                        {
                            conflictRows.Add(failedRow);
                        }

                        // rows minus failed rows
                        appliedRowsTmp += batchRows.Count - failedRows.Rows.Count;
                        batchRows.Clear();
                    }
                }
                else
                {
                    foreach (var syncRow in localSerializer.ReadRowsFromFile(fullPath, schemaChangesTable))
                    {
                        rowsFetched++;

                        if (syncRow.RowState != applyType)
                        {
                            continue;
                        }

                        command.CommandText = cmdText;
                        var batchArgs = new RowsChangesApplyingArgs(context, message.BatchInfo, new List <SyncRow> {
                            syncRow
                        }, schemaChangesTable, applyType, command, connection, transaction);
                        await this.InterceptAsync(batchArgs, progress, cancellationToken).ConfigureAwait(false);

                        if (batchArgs.Cancel || batchArgs.Command == null || batchArgs.SyncRows == null || batchArgs.SyncRows.Count() <= 0)
                        {
                            continue;
                        }

                        // get the correct pointer to the command from the interceptor, in case the user changed the whole instance
                        command = batchArgs.Command;

                        // Set the parameters value from row
                        syncAdapter.SetColumnParametersValues(command, batchArgs.SyncRows.First());

                        // Set the special parameters for update
                        syncAdapter.AddScopeParametersValues(command, message.SenderScopeId, message.LastTimestamp, applyType == DataRowState.Deleted, false);

                        await this.InterceptAsync(new DbCommandArgs(context, command, connection, transaction), progress, cancellationToken).ConfigureAwait(false);

                        var rowAppliedCount = await command.ExecuteNonQueryAsync().ConfigureAwait(false);

                        // Check if we have a return value instead
                        var syncRowCountParam = DbSyncAdapter.GetParameter(command, "sync_row_count");

                        if (syncRowCountParam != null)
                        {
                            rowAppliedCount = (int)syncRowCountParam.Value;
                        }

                        if (rowAppliedCount > 0)
                        {
                            appliedRowsTmp++;
                        }
                        else
                        {
                            conflictRows.Add(syncRow);
                        }
                    }
                }


                // rows applied while resolving conflicts
                int rowsAppliedCount = 0;
                // resolved conflicts count
                int conflictsResolvedCount = 0;

                // If conflicts occurred
                if (conflictRows.Count > 0)
                {
                    foreach (var conflictRow in conflictRows)
                    {
                        int     conflictResolvedCount;
                        SyncRow resolvedRow;
                        int     rowAppliedCount;
                        (context, conflictResolvedCount, resolvedRow, rowAppliedCount) =
                            await this.HandleConflictAsync(scopeInfo, context, message.LocalScopeId, message.SenderScopeId, syncAdapter, conflictRow, schemaChangesTable,
                                                           message.Policy, message.LastTimestamp, connection, transaction, cancellationToken, progress).ConfigureAwait(false);

                        conflictsResolvedCount += conflictResolvedCount;
                        rowsAppliedCount       += rowAppliedCount;
                    }

                    // add rows with resolved conflicts
                    appliedRowsTmp += rowsAppliedCount;
                }

                // Any failures?
                var changedFailed = rowsFetched - conflictsResolvedCount - appliedRowsTmp;

                // Only update DatabaseChangesApplied if we applied an upsert/delete from the batch or resolved any conflict
                if (appliedRowsTmp > 0 || conflictsResolvedCount > 0)
                {
                    // We may have multiple batch files, so we can have multiple sync tables with the same name
                    // (a syncTable may be contained in several files).
                    // That's why we should reuse an applied-changes instance from a previous batch file, if one exists
                    tableChangesApplied = changesApplied.TableChangesApplied.FirstOrDefault(tca =>
                    {
                        var sc = SyncGlobalization.DataSourceStringComparison;

                        var sn      = tca.SchemaName == null ? string.Empty : tca.SchemaName;
                        var otherSn = schemaTable.SchemaName == null ? string.Empty : schemaTable.SchemaName;

                        return(tca.TableName.Equals(schemaTable.TableName, sc) &&
                               sn.Equals(otherSn, sc) &&
                               tca.State == applyType);
                    });

                    if (tableChangesApplied == null)
                    {
                        tableChangesApplied = new TableChangesApplied
                        {
                            TableName         = schemaTable.TableName,
                            SchemaName        = schemaTable.SchemaName,
                            Applied           = appliedRowsTmp,
                            ResolvedConflicts = conflictsResolvedCount,
                            Failed            = changedFailed,
                            State             = applyType,
                            TotalRowsCount    = message.BatchInfo.RowsCount,
                            TotalAppliedCount = changesApplied.TotalAppliedChanges + appliedRowsTmp
                        };
                        changesApplied.TableChangesApplied.Add(tableChangesApplied);
                    }
                    else
                    {
                        tableChangesApplied.Applied           += appliedRowsTmp;
                        tableChangesApplied.TotalAppliedCount  = changesApplied.TotalAppliedChanges;
                        tableChangesApplied.ResolvedConflicts += conflictsResolvedCount;
                        tableChangesApplied.Failed            += changedFailed;
                    }

                    // change application owns a 0.25 share of the overall progress; add this batch's portion
                    var progresspct = appliedRowsTmp * 0.25d / tableChangesApplied.TotalRowsCount;
                    context.ProgressPercentage += progresspct;
                }
            }

            schemaChangesTable.Dispose();
            schemaChangesTable = null;
            changesSet.Dispose();
            changesSet = null;

            // Report the overall changes applied for the current table
            if (tableChangesApplied != null)
            {
                var tableChangesAppliedArgs = new TableChangesAppliedArgs(context, tableChangesApplied, connection, transaction);
                // We don't report progress if we haven't applied any changes on the table, to limit the verbosity of Progress
                await this.InterceptAsync(tableChangesAppliedArgs, progress, cancellationToken).ConfigureAwait(false);
            }


            if (command != null)
            {
                command.Dispose();
            }

            return(context);
        }
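        // A worked sketch of the failure accounting above (illustrative numbers, not from
        // the original sample): every fetched row ends up applied, resolved as a conflict,
        // or failed. Fetching 100 rows, applying 90 and resolving 6 conflicts leaves 4 failed.
        private static int CountFailedRows(int rowsFetched, int conflictsResolvedCount, int appliedRows)
            => rowsFetched - conflictsResolvedCount - appliedRows;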
        /// <summary>
        /// Gets a batch of changes to synchronize when given batch size,
        /// destination knowledge, and change data retriever parameters.
        /// </summary>
        /// <returns>A (SyncContext, BatchInfo, DatabaseChangesSelected) tuple used to retrieve the modified data.</returns>
        public virtual async Task <(SyncContext, BatchInfo, DatabaseChangesSelected)> GetChangeBatchAsync(
            SyncContext context, MessageGetChangesBatch message,
            DbConnection connection, DbTransaction transaction,
            CancellationToken cancellationToken, IProgress <ProgressArgs> progress = null)
        {
            // batch info containing changes
            BatchInfo batchInfo;

            // Statistics about changes that are selected
            DatabaseChangesSelected changesSelected;

            if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
            {
                (batchInfo, changesSelected) = this.GetEmptyChanges(message);
                return(context, batchInfo, changesSelected);
            }

            // Check if the provider is not outdated
            var isOutdated = this.IsRemoteOutdated();

            // Give a chance to continue the sync even if it's outdated
            if (isOutdated)
            {
                var outdatedArgs = new OutdatedArgs(context, null, null);

                // Interceptor
                await this.InterceptAsync(outdatedArgs).ConfigureAwait(false);

                if (outdatedArgs.Action != OutdatedAction.Rollback)
                {
                    context.SyncType = outdatedArgs.Action == OutdatedAction.Reinitialize ? SyncType.Reinitialize : SyncType.ReinitializeWithUpload;
                }

                if (outdatedArgs.Action == OutdatedAction.Rollback)
                {
                    throw new OutOfDateException();
                }
            }

            // create local directory
            if (message.BatchSize > 0 && !string.IsNullOrEmpty(message.BatchDirectory) && !Directory.Exists(message.BatchDirectory))
            {
                Directory.CreateDirectory(message.BatchDirectory);
            }

            // number of batch files generated
            var batchIndex = 0;

            // Check if we are in batch mode
            var isBatch = message.BatchSize > 0;

            // Create stats object to store changes count
            var changes = new DatabaseChangesSelected();

            // create the in memory changes set
            var changesSet = new SyncSet(message.Schema.ScopeName);

            // Create a Schema set without readonly columns, attached to memory changes
            foreach (var table in message.Schema.Tables)
            {
                DbSyncAdapter.CreateChangesTable(message.Schema.Tables[table.TableName, table.SchemaName], changesSet);
            }

            // Create a batch info in memory (if !isBatch) or serialized on disk (if isBatch)
            // batchinfo generates a schema clone with scope columns if needed
            batchInfo = new BatchInfo(!isBatch, changesSet, message.BatchDirectory);

            // Clear tables, we will add only the ones we need in the batch info
            changesSet.Clear();

            foreach (var syncTable in message.Schema.Tables)
            {
                // if we are in the upload stage, skip download-only tables
                if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
                {
                    continue;
                }

                // if we are in the download stage, skip upload-only tables
                if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
                {
                    continue;
                }

                var tableBuilder = this.GetTableBuilder(syncTable);
                var syncAdapter  = tableBuilder.CreateSyncAdapter(connection, transaction);

                // raise before event
                context.SyncStage = SyncStage.TableChangesSelecting;
                var tableChangesSelectingArgs = new TableChangesSelectingArgs(context, syncTable.TableName, connection, transaction);
                // launch interceptor if any
                await this.InterceptAsync(tableChangesSelectingArgs).ConfigureAwait(false);

                // Get Command
                var selectIncrementalChangesCommand = this.GetSelectChangesCommand(context, syncAdapter, syncTable, message.IsNew);

                // Set parameters
                this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, selectIncrementalChangesCommand);

                // Statistics
                var tableChangesSelected = new TableChangesSelected(syncTable.TableName);

                // Get the reader
                using (var dataReader = selectIncrementalChangesCommand.ExecuteReader())
                {
                    // memory size total
                    double rowsMemorySize = 0L;

                    // Create a changes table with scope columns
                    var changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                    while (dataReader.Read())
                    {
                        // Create a row from dataReader
                        var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                        // Add the row to the changes set
                        changesSetTable.Rows.Add(row);

                        // Set the correct state to be applied
                        if (row.RowState == DataRowState.Deleted)
                        {
                            tableChangesSelected.Deletes++;
                        }
                        else if (row.RowState == DataRowState.Modified)
                        {
                            tableChangesSelected.Upserts++;
                        }

                        // calculate row size if in batch mode
                        if (isBatch)
                        {
                            var fieldsSize     = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                            var finalFieldSize = fieldsSize / 1024d;

                            if (finalFieldSize > message.BatchSize)
                            {
                                throw new RowOverSizedException(finalFieldSize.ToString());
                            }

                            // Calculate the new memory size
                            rowsMemorySize += finalFieldSize;

                            // Move on to the next row if we haven't reached the batch size yet.
                            if (rowsMemorySize <= message.BatchSize)
                            {
                                continue;
                            }

                            // add changes to batchinfo
                            batchInfo.AddChanges(changesSet, batchIndex, false);

                            // increment batch index
                            batchIndex++;

                            // we know the data is serialized here, so we can flush the set
                            changesSet.Clear();

                            // Recreate an empty ContainerSet and a ContainerTable
                            changesSet = new SyncSet(message.Schema.ScopeName);

                            changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                            // Init the row memory size
                            rowsMemorySize = 0L;
                        }
                    }
                }

                selectIncrementalChangesCommand.Dispose();

                context.SyncStage = SyncStage.TableChangesSelected;

                if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
                {
                    changes.TableChangesSelected.Add(tableChangesSelected);
                }

                // Event progress & interceptor
                context.SyncStage = SyncStage.TableChangesSelected;
                var tableChangesSelectedArgs = new TableChangesSelectedArgs(context, tableChangesSelected, connection, transaction);
                this.ReportProgress(context, progress, tableChangesSelectedArgs);
                await this.InterceptAsync(tableChangesSelectedArgs).ConfigureAwait(false);
            }

            // We are in batch mode, and we are at the last batchpart info
            // Even if we don't have rows inside, we return the changesSet, since it contains at least the schema
            if (changesSet != null && changesSet.HasTables)
            {
                batchInfo.AddChanges(changesSet, batchIndex, true);
            }

            // Check the last index as the last batch
            batchInfo.EnsureLastBatch();

            return(context, batchInfo, changes);
        }
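        // A minimal sketch (an assumption, mirroring the guard used in the selection loops
        // above) of the per-row size check: a single row whose serialized size in KB already
        // exceeds the configured batch size can never fit in any batch part, so selection
        // fails fast instead of producing an unsatisfiable batch.
        private static void EnsureRowFitsInBatch(double rowSizeBytes, int batchSizeKb)
        {
            var rowSizeKb = rowSizeBytes / 1024d;

            if (rowSizeKb > batchSizeKb)
            {
                throw new RowOverSizedException(rowSizeKb.ToString());
            }
        }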