/// <summary>
        /// Create a snapshot of the server database: select all rows for each table and serialize them on disk in the snapshot directory
        /// </summary>
        public virtual async Task <(SyncContext, BatchInfo)> CreateSnapshotAsync(SyncContext context, SyncSet schema, SyncSetup setup,
                                                                                 DbConnection connection, DbTransaction transaction, string snapshotDirectory, int batchSize, long remoteClientTimestamp,
                                                                                 CancellationToken cancellationToken, IProgress <ProgressArgs> progress = null)
        {
            // create local directory
            if (!Directory.Exists(snapshotDirectory))
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.CreateDirectory, new { SnapshotDirectory = snapshotDirectory });
                Directory.CreateDirectory(snapshotDirectory);
            }

            // cleansing scope name
            var directoryScopeName = new string(context.ScopeName.Where(char.IsLetterOrDigit).ToArray());

            var directoryFullPath = Path.Combine(snapshotDirectory, directoryScopeName);

            // create local directory with scope inside
            if (!Directory.Exists(directoryFullPath))
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.CreateDirectory, new { DirectoryFullPath = directoryFullPath });
                Directory.CreateDirectory(directoryFullPath);
            }

            // number of batch files generated
            var batchIndex = 0;

            // create the in memory changes set
            var changesSet = new SyncSet();

            var sb         = new StringBuilder();
            var underscore = "";

            if (context.Parameters != null)
            {
                foreach (var p in context.Parameters.OrderBy(p => p.Name))
                {
                    var cleanValue = new string(p.Value.ToString().Where(char.IsLetterOrDigit).ToArray());
                    var cleanName  = new string(p.Name.Where(char.IsLetterOrDigit).ToArray());

                    sb.Append($"{underscore}{cleanName}_{cleanValue}");
                    underscore = "_";
                }
            }

            var directoryName = sb.ToString();

            directoryName = string.IsNullOrEmpty(directoryName) ? "ALL" : directoryName;

            // BatchInfo generates a schema clone with scope columns if needed
            var batchInfo = new BatchInfo(false, schema, directoryFullPath, directoryName);

            // Delete directory if already exists
            directoryFullPath = Path.Combine(directoryFullPath, directoryName);

            if (Directory.Exists(directoryFullPath))
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.DropDirectory, new { DirectoryFullPath = directoryFullPath });
                Directory.Delete(directoryFullPath, true);
            }

            foreach (var syncTable in schema.Tables)
            {
                var tableBuilder = this.GetTableBuilder(syncTable, setup);
                var syncAdapter  = tableBuilder.CreateSyncAdapter();

                // launch interceptor if any
                await this.Orchestrator.InterceptAsync(new TableChangesSelectingArgs(context, syncTable, connection, transaction), cancellationToken).ConfigureAwait(false);

                // Get Select initialize changes command
                var selectIncrementalChangesCommand = await this.GetSelectChangesCommandAsync(context, syncAdapter, syncTable, true, connection, transaction);

                // Set parameters
                this.SetSelectChangesCommonParameters(context, syncTable, null, true, 0, selectIncrementalChangesCommand);

                // log
                this.Orchestrator.logger.LogDebug(SyncEventsId.CreateSnapshot, new
                {
                    SelectChangesCommandText = selectIncrementalChangesCommand.CommandText,
                    ExcludingScopeId         = Guid.Empty,
                    IsNew         = true,
                    LastTimestamp = 0
                });

                // Get the reader
                using (var dataReader = await selectIncrementalChangesCommand.ExecuteReaderAsync().ConfigureAwait(false))
                {
                    // memory size total
                    double rowsMemorySize = 0L;

                    // Create a changes table with scope columns
                    var changesSetTable = SyncAdapter.CreateChangesTable(schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                    while (dataReader.Read())
                    {
                        // Create a row from dataReader
                        var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                        // Add the row to the changes set
                        changesSetTable.Rows.Add(row);

                        // Log trace row
                        this.Orchestrator.logger.LogTrace(SyncEventsId.CreateSnapshot, row);

                        var fieldsSize     = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                        var finalFieldSize = fieldsSize / 1024d;

                        if (finalFieldSize > batchSize)
                        {
                            throw new RowOverSizedException(finalFieldSize.ToString());
                        }

                        // Calculate the new memory size
                        rowsMemorySize += finalFieldSize;

                        // Move to the next row if we haven't reached the batch size yet.
                        if (rowsMemorySize <= batchSize)
                        {
                            continue;
                        }

                        // add changes to batchinfo
                        await batchInfo.AddChangesAsync(changesSet, batchIndex, false, this.Orchestrator).ConfigureAwait(false);

                        this.Orchestrator.logger.LogDebug(SyncEventsId.CreateBatch, changesSet);

                        // increment batch index
                        batchIndex++;

                        // the data is serialized at this point, so we can flush the set
                        changesSet.Clear();

                        // Recreate an empty ContainerSet and a ContainerTable
                        changesSet = new SyncSet();

                        changesSetTable = SyncAdapter.CreateChangesTable(schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                        // Init the row memory size
                        rowsMemorySize = 0L;
                    }
                }

                //selectIncrementalChangesCommand.Dispose();
            }


            if (changesSet != null && changesSet.HasTables)
            {
                await batchInfo.AddChangesAsync(changesSet, batchIndex, true, this.Orchestrator).ConfigureAwait(false);

                this.Orchestrator.logger.LogDebug(SyncEventsId.CreateBatch, changesSet);
            }

            // Check the last index as the last batch
            batchInfo.EnsureLastBatch();

            batchInfo.Timestamp = remoteClientTimestamp;

            // Serialize on disk.
            var jsonConverter = new JsonConverter <BatchInfo>();

            var summaryFileName = Path.Combine(directoryFullPath, "summary.json");

            using (var f = new FileStream(summaryFileName, FileMode.CreateNew, FileAccess.ReadWrite))
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.CreateSnapshotSummary, batchInfo);
                var bytes = await jsonConverter.SerializeAsync(batchInfo).ConfigureAwait(false);

                f.Write(bytes, 0, bytes.Length);
            }

            return(context, batchInfo);
        }
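
        // Illustrative sketch (not part of the library): a standalone version of the snapshot folder
        // naming convention used above. Parameters are ordered by name, names and values are stripped
        // to alphanumeric characters and joined as Name_Value pairs; with no parameters the folder is
        // named "ALL". The helper name is hypothetical.
        private static string BuildSnapshotDirectoryName(IEnumerable<SyncParameter> parameters)
        {
            if (parameters == null)
                return "ALL";

            var parts = parameters
                .OrderBy(p => p.Name)
                .Select(p => new string(p.Name.Where(char.IsLetterOrDigit).ToArray())
                             + "_"
                             + new string(p.Value.ToString().Where(char.IsLetterOrDigit).ToArray()));

            var directoryName = string.Join("_", parts);

            return string.IsNullOrEmpty(directoryName) ? "ALL" : directoryName;
        }
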
        /// <summary>
        /// Gets an existing snapshot for the current scope and sync parameters,
        /// if one has been generated in the snapshot directory.
        /// </summary>
        /// <returns>The sync context and the snapshot BatchInfo, or null if no snapshot exists.</returns>
        public virtual async Task <(SyncContext, BatchInfo)> GetSnapshotAsync(
            SyncContext context, SyncSet schema, string snapshotDirectory,
            CancellationToken cancellationToken, IProgress <ProgressArgs> progress = null)
        {
            // TODO : Get a snapshot based on scope name

            var sb         = new StringBuilder();
            var underscore = "";

            if (context.Parameters != null)
            {
                foreach (var p in context.Parameters.OrderBy(p => p.Name))
                {
                    var cleanValue = new string(p.Value.ToString().Where(char.IsLetterOrDigit).ToArray());
                    var cleanName  = new string(p.Name.Where(char.IsLetterOrDigit).ToArray());

                    sb.Append($"{underscore}{cleanName}_{cleanValue}");
                    underscore = "_";
                }
            }

            var directoryName = sb.ToString();

            directoryName = string.IsNullOrEmpty(directoryName) ? "ALL" : directoryName;

            this.Orchestrator.logger.LogDebug(SyncEventsId.GetSnapshot, new { DirectoryName = directoryName });

            // cleansing scope name
            var directoryScopeName = new string(context.ScopeName.Where(char.IsLetterOrDigit).ToArray());

            // Get full path
            var directoryFullPath = Path.Combine(snapshotDirectory, directoryScopeName, directoryName);

            // if no snapshot present, just return null value.
            if (!Directory.Exists(directoryFullPath))
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.DirectoryNotExists, new { DirectoryPath = directoryFullPath });
                return(context, null);
            }

            // Deserialize from disk.
            var jsonConverter = new JsonConverter <BatchInfo>();

            var summaryFileName = Path.Combine(directoryFullPath, "summary.json");

            BatchInfo batchInfo = null;

            // Create the schema changeset
            var changesSet = new SyncSet();

            // Create a Schema set without readonly columns, attached to memory changes
            foreach (var table in schema.Tables)
            {
                SyncAdapter.CreateChangesTable(schema.Tables[table.TableName, table.SchemaName], changesSet);
            }

            using (var fs = new FileStream(summaryFileName, FileMode.Open, FileAccess.Read))
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.LoadSnapshotSummary, new { FileName = summaryFileName });
                batchInfo = await jsonConverter.DeserializeAsync(fs).ConfigureAwait(false);

                this.Orchestrator.logger.LogDebug(SyncEventsId.LoadSnapshotSummary, batchInfo);
            }

            batchInfo.SanitizedSchema = changesSet;

            return(context, batchInfo);
        }
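
        // Minimal usage sketch (assumptions: the scope is named "DefaultScope" and no sync parameters
        // are used, so the parameter folder is "ALL"; the helper name is hypothetical). GetSnapshotAsync
        // returns (context, null) when the snapshot folder does not exist, and otherwise reads the
        // summary.json written by CreateSnapshotAsync.
        private static bool SnapshotExists(string snapshotDirectory)
        {
            var expectedSummary = Path.Combine(snapshotDirectory, "DefaultScope", "ALL", "summary.json");
            return File.Exists(expectedSummary);
        }
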
        /// <summary>
        /// Handle a conflict.
        /// Returns the number of conflicts resolved, the resolved row and the number of rows applied.
        /// </summary>
        internal async Task <(int conflictResolvedCount, SyncRow resolvedRow, int rowAppliedCount)> HandleConflictAsync(
            Guid localScopeId, Guid senderScopeId,
            SyncAdapter syncAdapter, SyncContext context, SyncConflict conflict,
            ConflictResolutionPolicy policy, long lastTimestamp,
            DbConnection connection, DbTransaction transaction)
        {
            SyncRow     finalRow;
            ApplyAction conflictApplyAction;
            int         rowAppliedCount       = 0;
            Guid?       nullableSenderScopeId = senderScopeId;


            (conflictApplyAction, finalRow, nullableSenderScopeId) = await this.GetConflictActionAsync(context, conflict, policy, senderScopeId, connection, transaction).ConfigureAwait(false);

            // Conflict rolled back by user
            if (conflictApplyAction == ApplyAction.Rollback)
            {
                throw new RollbackException("Rollback action taken on conflict");
            }

            // Local provider wins, update metadata
            if (conflictApplyAction == ApplyAction.Continue)
            {
                var isMergeAction = finalRow != null;
                var row           = isMergeAction ? finalRow : conflict.LocalRow;

                // Conflict on a row that is not present in the datasource
                if (row == null)
                {
                    return(conflictResolvedCount : 1, finalRow, rowAppliedCount : 0);
                }

                // if we have a merge action, we apply the row on the server
                if (isMergeAction)
                {
                    // if merge, we update the row locally and leave update_scope_id set to null
                    var isUpdated = await syncAdapter.ApplyUpdateAsync(row, lastTimestamp, null, true, connection, transaction);

                    // We don't update the metadata, so the row is updated (on the server side)
                    // and marked as updated locally.
                    // It will be returned to the sender, since it's a merge and the client needs it.

                    if (!isUpdated)
                    {
                        throw new Exception("Can't update the merge row.");
                    }
                }

                finalRow = isMergeAction ? row : conflict.LocalRow;

                // We don't do anything, since we keep the original row, so we resolved one conflict but applied no rows
                return(conflictResolvedCount : 1, finalRow, rowAppliedCount : 0);
            }

            // We are going to force-apply the row
            if (conflictApplyAction == ApplyAction.RetryWithForceWrite)
            {
                // TODO : Should Raise an error ?
                if (conflict.RemoteRow == null)
                {
                    return(0, finalRow, 0);
                }

                bool operationComplete = false;

                switch (conflict.Type)
                {
                // Remote and local both have the row: remote wins, so apply the remote update
                case ConflictType.RemoteExistsLocalExists:
                    operationComplete = await syncAdapter.ApplyUpdateAsync(conflict.RemoteRow, lastTimestamp, nullableSenderScopeId, true, connection, transaction);

                    rowAppliedCount = 1;
                    break;

                case ConflictType.RemoteExistsLocalNotExists:
                case ConflictType.RemoteExistsLocalIsDeleted:
                case ConflictType.UniqueKeyConstraint:
                    operationComplete = await syncAdapter.ApplyUpdateAsync(conflict.RemoteRow, lastTimestamp, nullableSenderScopeId, true, connection, transaction);

                    rowAppliedCount = 1;
                    break;

                // Conflict, but both sides have deleted the row, so just update the metadata to the right winner
                case ConflictType.RemoteIsDeletedLocalIsDeleted:
                    operationComplete = await syncAdapter.UpdateMetadatasAsync(conflict.RemoteRow, nullableSenderScopeId, true, connection, transaction);

                    rowAppliedCount = 0;
                    break;

                // The row does not exist locally, and since it's coming from a deleted state, we can ignore it
                case ConflictType.RemoteIsDeletedLocalNotExists:
                    operationComplete = true;
                    rowAppliedCount   = 0;
                    break;

                // The remote has deleted the row, and the local side has inserted or updated it,
                // so delete the local row
                case ConflictType.RemoteIsDeletedLocalExists:
                    operationComplete = await syncAdapter.ApplyDeleteAsync(conflict.RemoteRow, lastTimestamp, nullableSenderScopeId, true, connection, transaction);

                    rowAppliedCount = 1;
                    break;

                case ConflictType.ErrorsOccurred:
                    return(0, finalRow, 0);
                }

                finalRow = conflict.RemoteRow;

                // A force update should always succeed; if it didn't, raise an exception
                if (!operationComplete)
                {
                    throw new UnknownException("Force update should always work.. contact the author :)");
                    //finalRow = null;
                    //return (0, finalRow, rowAppliedCount);
                }

                return(1, finalRow, rowAppliedCount);
            }

            return(0, finalRow, 0);
        }
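
        // Illustrative sketch (not part of the library): how the RetryWithForceWrite branch above
        // resolves each ConflictType. Only the values handled in the switch above are listed; the
        // helper itself is hypothetical.
        private static string DescribeForceWriteResolution(ConflictType type) => type switch
        {
            ConflictType.RemoteExistsLocalExists       => "apply the remote update",
            ConflictType.RemoteExistsLocalNotExists    => "apply the remote update",
            ConflictType.RemoteExistsLocalIsDeleted    => "apply the remote update",
            ConflictType.UniqueKeyConstraint           => "apply the remote update",
            ConflictType.RemoteIsDeletedLocalIsDeleted => "update metadata only, no row applied",
            ConflictType.RemoteIsDeletedLocalNotExists => "nothing to apply",
            ConflictType.RemoteIsDeletedLocalExists    => "apply the remote delete",
            ConflictType.ErrorsOccurred                => "abort, no row applied",
            _                                          => "not handled by the force-write branch"
        };
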
        private async Task <(int rowsAppliedCount, int conflictsResolvedCount, int syncErrorsCount)> ResolveConflictsAsync(SyncContext context, Guid localScopeId, Guid senderScopeId, SyncAdapter syncAdapter, List <SyncConflict> conflicts, MessageApplyChanges message, DbConnection connection, DbTransaction transaction)
        {
            int rowsAppliedCount       = 0;
            int conflictsResolvedCount = 0;
            int syncErrorsCount        = 0;

            // If conflicts occurred
            // Eventually, conflicts are resolved on the server side.
            if (conflicts == null || conflicts.Count <= 0)
            {
                return(rowsAppliedCount, conflictsResolvedCount, syncErrorsCount);
            }


            foreach (var conflict in conflicts)
            {
                var fromScopeLocalTimeStamp = message.LastTimestamp;

                this.Orchestrator.logger.LogDebug(SyncEventsId.ResolveConflicts, conflict);
                this.Orchestrator.logger.LogDebug(SyncEventsId.ResolveConflicts, new
                {
                    LocalScopeId            = localScopeId,
                    SenderScopeId           = senderScopeId,
                    FromScopeLocalTimeStamp = fromScopeLocalTimeStamp,
                    message.Policy
                });


                var (conflictResolvedCount, resolvedRow, rowAppliedCount) =
                    await this.HandleConflictAsync(localScopeId, senderScopeId, syncAdapter, context, conflict,
                                                   message.Policy,
                                                   fromScopeLocalTimeStamp, connection, transaction).ConfigureAwait(false);

                conflictsResolvedCount += conflictResolvedCount;
                rowsAppliedCount       += rowAppliedCount;
            }

            return(rowsAppliedCount, conflictsResolvedCount, syncErrorsCount);
        }
        /// <summary>
        /// Get the correct Select changes command
        /// Can be either
        /// - SelectInitializedChanges              : All changes for first sync
        /// - SelectChanges                         : All changes filtered by timestamp
        /// - SelectInitializedChangesWithFilters   : All changes for first sync with filters
        /// - SelectChangesWithFilters              : All changes filtered by timestamp with filters
        /// </summary>
        private async Task <DbCommand> GetSelectChangesCommandAsync(SyncContext context, SyncAdapter syncAdapter, SyncTable syncTable, bool isNew, DbConnection connection, DbTransaction transaction)
        {
            DbCommand     command;
            DbCommandType dbCommandType;

            SyncFilter tableFilter = null;

            // Check if we have parameters specified

            // Sqlite does not have any filter, since it can't be used as a server-side provider
            if (this.CanBeServerProvider)
            {
                tableFilter = syncTable.GetFilter();
            }

            var hasFilters = tableFilter != null;

            // Determine the correct DbCommandType
            if (isNew && hasFilters)
            {
                dbCommandType = DbCommandType.SelectInitializedChangesWithFilters;
            }
            else if (isNew && !hasFilters)
            {
                dbCommandType = DbCommandType.SelectInitializedChanges;
            }
            else if (!isNew && hasFilters)
            {
                dbCommandType = DbCommandType.SelectChangesWithFilters;
            }
            else
            {
                dbCommandType = DbCommandType.SelectChanges;
            }

            // Get correct Select incremental changes command
            command = await syncAdapter.PrepareCommandAsync(dbCommandType, connection, transaction, tableFilter);

            return(command);
        }
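
        // Illustrative sketch (not part of the library): the (isNew, hasFilters) -> DbCommandType
        // mapping implemented by GetSelectChangesCommandAsync above. The helper name is hypothetical.
        private static DbCommandType GetSelectChangesCommandType(bool isNew, bool hasFilters) =>
            (isNew, hasFilters) switch
            {
                (true, true)   => DbCommandType.SelectInitializedChangesWithFilters,
                (true, false)  => DbCommandType.SelectInitializedChanges,
                (false, true)  => DbCommandType.SelectChangesWithFilters,
                (false, false) => DbCommandType.SelectChanges
            };
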
        /// <summary>
        /// Gets a batch of changes to synchronize when given batch size,
        /// destination knowledge, and change data retriever parameters.
        /// </summary>
        /// <returns>The sync context, the batch info containing the changes and the selected changes statistics.</returns>
        public virtual async Task <(SyncContext, BatchInfo, DatabaseChangesSelected)> GetChangeBatchAsync(
            SyncContext context, MessageGetChangesBatch message,
            DbConnection connection, DbTransaction transaction,
            CancellationToken cancellationToken, IProgress <ProgressArgs> progress)
        {
            // batch info containing changes
            BatchInfo batchInfo;

            this.Orchestrator.logger.LogDebug(SyncEventsId.GetChanges, message);

            // Statistics about changes that are selected
            DatabaseChangesSelected changesSelected;

            if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
            {
                (batchInfo, changesSelected) = await this.GetEmptyChangesAsync(message).ConfigureAwait(false);

                return(context, batchInfo, changesSelected);
            }

            // create local directory
            if (message.BatchSize > 0 && !string.IsNullOrEmpty(message.BatchDirectory) && !Directory.Exists(message.BatchDirectory))
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.CreateDirectory, new { DirectoryPath = message.BatchDirectory });
                Directory.CreateDirectory(message.BatchDirectory);
            }

            // number of batch files generated
            var batchIndex = 0;

            // Check if we are in batch mode
            var isBatch = message.BatchSize > 0;

            // Create stats object to store changes count
            var changes = new DatabaseChangesSelected();

            // Create a batch info in memory (if !isBatch) or serialized on disk (if isBatch)
            // batchinfo generate a schema clone with scope columns if needed
            batchInfo = new BatchInfo(!isBatch, message.Schema, message.BatchDirectory);

            // A clean SyncSet; we will only add the tables we need to the batch info
            var changesSet = new SyncSet();

            foreach (var syncTable in message.Schema.Tables)
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.GetChanges, syncTable);

                // if we are in the upload stage, skip tables that are download only
                if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
                {
                    continue;
                }

                // if we are in the download stage, skip tables that are upload only
                if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
                {
                    continue;
                }

                var tableBuilder = this.GetTableBuilder(syncTable, message.Setup);
                var syncAdapter  = tableBuilder.CreateSyncAdapter();

                // launch interceptor if any
                await this.Orchestrator.InterceptAsync(new TableChangesSelectingArgs(context, syncTable, connection, transaction), cancellationToken).ConfigureAwait(false);

                // Get Command
                var selectIncrementalChangesCommand = await this.GetSelectChangesCommandAsync(context, syncAdapter, syncTable, message.IsNew, connection, transaction);

                // Set parameters
                this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, selectIncrementalChangesCommand);

                // log
                this.Orchestrator.logger.LogDebug(SyncEventsId.GetChanges, selectIncrementalChangesCommand);

                // Statistics
                var tableChangesSelected = new TableChangesSelected(syncTable.TableName, syncTable.SchemaName);

                // Create a changes table with scope columns
                var changesSetTable = SyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                // Get the reader
                using (var dataReader = await selectIncrementalChangesCommand.ExecuteReaderAsync().ConfigureAwait(false))
                {
                    // memory size total
                    double rowsMemorySize = 0L;

                    while (dataReader.Read())
                    {
                        // Create a row from dataReader
                        var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                        // Add the row to the changes set
                        changesSetTable.Rows.Add(row);

                        // Log trace row
                        this.Orchestrator.logger.LogTrace(SyncEventsId.GetChanges, row);

                        // Set the correct state to be applied
                        if (row.RowState == DataRowState.Deleted)
                        {
                            tableChangesSelected.Deletes++;
                        }
                        else if (row.RowState == DataRowState.Modified)
                        {
                            tableChangesSelected.Upserts++;
                        }

                        // calculate row size if in batch mode
                        if (isBatch)
                        {
                            var fieldsSize     = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                            var finalFieldSize = fieldsSize / 1024d;

                            if (finalFieldSize > message.BatchSize)
                            {
                                throw new RowOverSizedException(finalFieldSize.ToString());
                            }

                            // Calculate the new memory size
                            rowsMemorySize += finalFieldSize;

                            // Move to the next row if we haven't reached the batch size yet.
                            if (rowsMemorySize <= message.BatchSize)
                            {
                                continue;
                            }

                            // Check interceptor
                            var batchTableChangesSelectedArgs = new TableChangesSelectedArgs(context, changesSetTable, tableChangesSelected, connection, transaction);
                            await this.Orchestrator.InterceptAsync(batchTableChangesSelectedArgs, cancellationToken).ConfigureAwait(false);

                            // add changes to batchinfo
                            await batchInfo.AddChangesAsync(changesSet, batchIndex, false, this.Orchestrator).ConfigureAwait(false);

                            this.Orchestrator.logger.LogDebug(SyncEventsId.GetChanges, changesSet);

                            // increment batch index
                            batchIndex++;

                            // the data is serialized at this point, so we can flush the set
                            changesSet.Clear();

                            // Recreate an empty ContainerSet and a ContainerTable
                            changesSet = new SyncSet();

                            changesSetTable = SyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                            // Init the row memory size
                            rowsMemorySize = 0L;
                        }
                    }
                }

                // be sure it's disposed
                // selectIncrementalChangesCommand.Dispose();

                // We don't report progress if the table has no changes, to limit verbosity
                if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
                {
                    changes.TableChangesSelected.Add(tableChangesSelected);
                    var tableChangesSelectedArgs = new TableChangesSelectedArgs(context, changesSetTable, tableChangesSelected, connection, transaction);

                    await this.Orchestrator.InterceptAsync(tableChangesSelectedArgs, cancellationToken).ConfigureAwait(false);

                    if (tableChangesSelectedArgs.TableChangesSelected.TotalChanges > 0)
                    {
                        this.Orchestrator.ReportProgress(context, progress, tableChangesSelectedArgs);
                    }
                }
            }

            // We are at the last batch part
            // If the changes set still contains tables and rows, add it as the last batch
            if (changesSet != null && changesSet.HasTables && changesSet.HasRows)
            {
                await batchInfo.AddChangesAsync(changesSet, batchIndex, true, this.Orchestrator).ConfigureAwait(false);

                this.Orchestrator.logger.LogDebug(SyncEventsId.GetChanges, changesSet);
            }

            // Check the last index as the last batch
            batchInfo.EnsureLastBatch();

            return(context, batchInfo, changes);
        }
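
        // Illustrative sketch (not part of the library): the batch-size arithmetic used above.
        // The per-row size returned by ContainerTable.GetRowSizeFromDataRow is divided by 1024 (KB);
        // a single row larger than the batch size throws RowOverSizedException, otherwise rows are
        // buffered until the accumulated size exceeds the batch size and a batch part file is written.
        // The helper and its parameter names are hypothetical.
        private static string DescribeBatchDecision(double accumulatedSizeKb, double rowSizeBytes, int batchSizeKb)
        {
            var rowSizeKb = rowSizeBytes / 1024d;

            if (rowSizeKb > batchSizeKb)
                return "row is larger than the batch size -> RowOverSizedException";

            if (accumulatedSizeKb + rowSizeKb > batchSizeKb)
                return "batch size reached -> write the current changes set as a new batch part file";

            return "keep buffering rows in memory";
        }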