Example #1
        /// <summary>
        /// Generate an empty BatchInfo
        /// </summary>
        internal Task <(BatchInfo, DatabaseChangesSelected)> GetEmptyChangesAsync(MessageGetChangesBatch message)
        {
            // Get config
            var isBatched = message.BatchSize > 0;

            // Create the batch info, in memory
            var batchInfo = new BatchInfo(!isBatched, message.Schema, message.BatchDirectory);

            this.Orchestrator.logger.LogDebug(SyncEventsId.GetChanges, new DatabaseChangesSelected());

            // Create a new empty in-memory batch info
            return Task.FromResult((batchInfo, new DatabaseChangesSelected()));
        }
        /// <summary>
        /// Generate an empty BatchInfo
        /// </summary>
        internal (BatchInfo, DatabaseChangesSelected) GetEmptyChanges(MessageGetChangesBatch message)
        {
            // Get config
            var isBatched = message.BatchSize > 0;

            // create the in memory changes set
            var changesSet = new SyncSet(message.Schema.ScopeName);

            // Create a Schema set without readonly tables, attached to memory changes
            foreach (var table in message.Schema.Tables)
            {
                DbSyncAdapter.CreateChangesTable(message.Schema.Tables[table.TableName, table.SchemaName], changesSet);
            }

            // Create the batch info, in memory
            var batchInfo = new BatchInfo(!isBatched, changesSet, message.BatchDirectory);

            // add changes to batchInfo
            batchInfo.AddChanges(new SyncSet());

            // Create a new empty in-memory batch info
            return (batchInfo, new DatabaseChangesSelected());
        }
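
Both empty-changes helpers above hinge on the same flag: a BatchSize greater than zero means the batch parts are serialized to the batch directory, otherwise the BatchInfo stays fully in memory. A minimal sketch of that decision, using a hypothetical BuildEmptyBatchInfo helper that only mirrors the constructor call shown above:

        // Hypothetical helper mirroring the pattern above: BatchSize > 0 means the
        // batch parts are serialized to disk, so the BatchInfo is NOT in-memory.
        internal static BatchInfo BuildEmptyBatchInfo(SyncSet schema, int batchSize, string batchDirectory)
        {
            var isBatched = batchSize > 0;

            // The first constructor argument is the "in memory" flag, hence the negation.
            return new BatchInfo(!isBatched, schema, batchDirectory);
        }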
        /// <summary>
        /// Gets a batch of changes to synchronize when given batch size,
        /// destination knowledge, and change data retriever parameters.
        /// </summary>
        /// <returns>A DbSyncContext object that will be used to retrieve the modified data.</returns>
        public virtual async Task <(SyncContext, BatchInfo, ChangesSelected)> GetChangeBatchAsync(
            SyncContext context, MessageGetChangesBatch message)
        {
            try
            {
                if (message.ScopeInfo == null)
                {
                    throw new ArgumentNullException("scopeInfo", "Client scope info is null");
                }

                // Check if the provider is not outdated
                var isOutdated = this.IsRemoteOutdated();

                // Get a chance to make the sync even if it's outdated
                if (isOutdated)
                {
                    var outdatedEventArgs = new OutdatedEventArgs();
                    this.SyncOutdated?.Invoke(this, outdatedEventArgs);

                    if (outdatedEventArgs.Action != OutdatedSyncAction.Rollback)
                    {
                        context.SyncType = outdatedEventArgs.Action == OutdatedSyncAction.Reinitialize ? SyncType.Reinitialize : SyncType.ReinitializeWithUpload;
                    }

                    if (outdatedEventArgs.Action == OutdatedSyncAction.Rollback)
                    {
                        throw new OutOfDateException("The provider is out of date ! Try to make a Reinitialize sync");
                    }
                }

                // create local directory
                if (message.DownloadBatchSizeInKB > 0 && !String.IsNullOrEmpty(message.BatchDirectory) && !Directory.Exists(message.BatchDirectory))
                {
                    Directory.CreateDirectory(message.BatchDirectory);
                }

                // batch info containing changes
                BatchInfo batchInfo;

                // Statistics about changes that are selected
                ChangesSelected changesSelected;

                // if we try a Reinitialize action, don't get any changes from client
                // else get changes from batch or in memory methods
                if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
                {
                    (batchInfo, changesSelected) = this.GetEmptyChanges(context, message.ScopeInfo, message.DownloadBatchSizeInKB, message.BatchDirectory);
                }
                else if (message.DownloadBatchSizeInKB == 0)
                {
                    (batchInfo, changesSelected) = await this.EnumerateChangesInternal(context, message.ScopeInfo, message.Schema, message.BatchDirectory, message.Policy, message.Filters);
                }
                else
                {
                    (batchInfo, changesSelected) = await this.EnumerateChangesInBatchesInternal(context, message.ScopeInfo, message.DownloadBatchSizeInKB, message.Schema, message.BatchDirectory, message.Policy, message.Filters);
                }

                return (context, batchInfo, changesSelected);
            }
            catch (Exception ex)
            {
                throw new SyncException(ex, SyncStage.TableChangesSelecting, this.ProviderTypeName);
            }
        }
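
GetChangeBatchAsync above picks one of three strategies depending on the sync way, the sync type and the download batch size. The enum and helper below are hypothetical and only summarize that dispatch:

        // Hypothetical summary of the three branches above.
        internal enum ChangeSelectionStrategy { Empty, InMemory, Batched }

        internal static ChangeSelectionStrategy PickStrategy(SyncWay syncWay, SyncType syncType, int downloadBatchSizeInKB)
        {
            // Reinitialize during upload: the client sends nothing, so an empty batch is enough.
            if (syncWay == SyncWay.Upload && syncType == SyncType.Reinitialize)
                return ChangeSelectionStrategy.Empty;

            // No batch size configured: enumerate all changes in memory.
            if (downloadBatchSizeInKB == 0)
                return ChangeSelectionStrategy.InMemory;

            // Otherwise enumerate changes in batches serialized to the batch directory.
            return ChangeSelectionStrategy.Batched;
        }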
        // NOTE: return type inferred from the tuple returned at the end of this method;
        // the access modifiers are assumed, as the snippet omitted them.
        public virtual async Task<(long RemoteClientTimestamp, BatchInfo ServerBatchInfo, ConflictResolutionPolicy ServerPolicy,
                                   DatabaseChangesApplied ClientChangesApplied, DatabaseChangesSelected ServerChangesSelected)>
        ApplyThenGetChangesAsync(ScopeInfo clientScope, BatchInfo clientBatchInfo,
                                 CancellationToken cancellationToken = default, IProgress <ProgressArgs> progress = null)
        {
            if (!this.StartTime.HasValue)
            {
                this.StartTime = DateTime.UtcNow;
            }

            // Get context or create a new one
            var ctx = this.GetContext();

            long remoteClientTimestamp = 0L;
            DatabaseChangesSelected serverChangesSelected = null;
            DatabaseChangesApplied  clientChangesApplied  = null;
            BatchInfo serverBatchInfo = null;
            SyncSet   schema          = null;

            using var connection = this.Provider.CreateConnection();

            try
            {
                ctx.SyncStage = SyncStage.ChangesApplying;

                //Direction set to Upload
                ctx.SyncWay = SyncWay.Upload;

                // Open connection
                await this.OpenConnectionAsync(connection, cancellationToken).ConfigureAwait(false);

                DbTransaction transaction;

                // Create two transactions
                // First one to commit changes
                // Second one to get changes now that everything is committed
                using (transaction = connection.BeginTransaction())
                {
                    await this.InterceptAsync(new TransactionOpenedArgs(ctx, connection, transaction), cancellationToken).ConfigureAwait(false);

                    // Maybe here, get the schema from the server, issued from the client scope name
                    // Maybe then compare the schema version from the client scope with the schema version issued from the server
                    // Maybe if different, raise an error?
                    // Get scope if exists

                    // Getting server scope assumes we have already created the schema on server

                    var scopeBuilder = this.GetScopeBuilder(this.Options.ScopeInfoTableName);

                    var serverScopeInfo = await this.InternalGetScopeAsync <ServerScopeInfo>(ctx, DbScopeType.Server, clientScope.Name, scopeBuilder, connection, transaction, cancellationToken, progress).ConfigureAwait(false);

                    // Should we ?
                    if (serverScopeInfo.Schema == null)
                    {
                        throw new MissingRemoteOrchestratorSchemaException();
                    }

                    // Deserialize schema
                    schema = serverScopeInfo.Schema;

                    // Create message containing everything we need to apply on server side
                    var applyChanges = new MessageApplyChanges(Guid.Empty, clientScope.Id, false, clientScope.LastServerSyncTimestamp, schema, this.Setup, this.Options.ConflictResolutionPolicy,
                                                               this.Options.DisableConstraintsOnApplyChanges, this.Options.UseBulkOperations, this.Options.CleanMetadatas, this.Options.CleanFolder, false, clientBatchInfo, this.Options.SerializerFactory);

                    // Call provider to apply changes
                    (ctx, clientChangesApplied) = await this.InternalApplyChangesAsync(ctx, applyChanges, connection, transaction, cancellationToken, progress).ConfigureAwait(false);

                    await this.InterceptAsync(new TransactionCommitArgs(ctx, connection, transaction), cancellationToken).ConfigureAwait(false);

                    // commit first transaction
                    transaction.Commit();
                }

                ctx.SyncStage          = SyncStage.ChangesSelecting;
                ctx.ProgressPercentage = 0.55;


                using (transaction = connection.BeginTransaction())
                {
                    await this.InterceptAsync(new TransactionOpenedArgs(ctx, connection, transaction), cancellationToken).ConfigureAwait(false);

                    //Direction set to Download
                    ctx.SyncWay = SyncWay.Download;

                    // JUST before getting changes, get the timestamp, to be sure to
                    // get rows inserted / updated elsewhere since the sync is not over
                    remoteClientTimestamp = await this.InternalGetLocalTimestampAsync(ctx, connection, transaction, cancellationToken, progress);

                    // Get if we need to get all rows from the datasource
                    var fromScratch = clientScope.IsNewScope || ctx.SyncType == SyncType.Reinitialize || ctx.SyncType == SyncType.ReinitializeWithUpload;

                    var message = new MessageGetChangesBatch(clientScope.Id, Guid.Empty, fromScratch, clientScope.LastServerSyncTimestamp, schema, this.Setup, this.Options.BatchSize, this.Options.BatchDirectory, this.Options.SerializerFactory);

                    // Call interceptor
                    await this.InterceptAsync(new DatabaseChangesSelectingArgs(ctx, message, connection, transaction), cancellationToken).ConfigureAwait(false);

                    // When we get the changes from the server, we create the batches if requested by the client
                    // the batch decision comes from the batch size set by the client
                    (ctx, serverBatchInfo, serverChangesSelected) =
                        await this.InternalGetChangesAsync(ctx, message, connection, transaction, cancellationToken, progress).ConfigureAwait(false);

                    if (cancellationToken.IsCancellationRequested)
                    {
                        cancellationToken.ThrowIfCancellationRequested();
                    }

                    // generate the new scope item
                    this.CompleteTime = DateTime.UtcNow;

                    var scopeHistory = new ServerHistoryScopeInfo
                    {
                        Id   = clientScope.Id,
                        Name = clientScope.Name,
                        LastSyncTimestamp = remoteClientTimestamp,
                        LastSync          = this.CompleteTime,
                        LastSyncDuration  = this.CompleteTime.Value.Subtract(this.StartTime.Value).Ticks,
                    };

                    // Write scopes locally
                    var scopeBuilder = this.GetScopeBuilder(this.Options.ScopeInfoTableName);

                    await this.InternalSaveScopeAsync(ctx, DbScopeType.ServerHistory, scopeHistory, scopeBuilder, connection, transaction, cancellationToken, progress).ConfigureAwait(false);

                    // Commit second transaction for getting changes
                    await this.InterceptAsync(new TransactionCommitArgs(ctx, connection, transaction), cancellationToken).ConfigureAwait(false);

                    transaction.Commit();
                }

                // Event progress & interceptor
                await this.CloseConnectionAsync(connection, cancellationToken).ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                RaiseError(ex);
            }
            finally
            {
                await this.CloseConnectionAsync(connection, cancellationToken).ConfigureAwait(false);
            }
            return (remoteClientTimestamp, serverBatchInfo, this.Options.ConflictResolutionPolicy, clientChangesApplied, serverChangesSelected);
        }
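
ApplyThenGetChangesAsync runs two consecutive transactions on one connection: the first commits the changes uploaded by the client, the second reads a fresh timestamp and selects server changes once everything is committed. A library-agnostic sketch of that pattern with plain ADO.NET; the two delegates are placeholders for the apply and select steps:

        // Library-agnostic sketch of the two-transaction pattern used above
        // (System.Data.Common types only; the delegates stand in for the real work).
        internal static async Task RunTwoPhaseSyncAsync(
            DbConnection connection,
            Func<DbTransaction, Task> applyClientChangesAsync,
            Func<DbTransaction, Task> selectServerChangesAsync)
        {
            await connection.OpenAsync().ConfigureAwait(false);

            try
            {
                // First transaction: commit the changes uploaded by the client.
                using (var transaction = connection.BeginTransaction())
                {
                    await applyClientChangesAsync(transaction).ConfigureAwait(false);
                    transaction.Commit();
                }

                // Second transaction: now that uploads are committed, select the changes to send back.
                using (var transaction = connection.BeginTransaction())
                {
                    await selectServerChangesAsync(transaction).ConfigureAwait(false);
                    transaction.Commit();
                }
            }
            finally
            {
                connection.Close();
            }
        }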
Example #5
        /// <summary>
        /// Generate an empty BatchInfo
        /// </summary>
        internal Task <(BatchInfo, DatabaseChangesSelected)> InternalGetEmptyChangesAsync(MessageGetChangesBatch message)
        {
            // Get config
            var isBatched = message.BatchSize > 0;

            // Create the batch info, in memory
            var batchInfo = new BatchInfo(!isBatched, message.Schema, message.BatchDirectory);

            // Create a new empty in-memory batch info
            return Task.FromResult((batchInfo, new DatabaseChangesSelected()));
        }
Example #6
        /// <summary>
        /// Gets a batch of changes to synchronize when given batch size,
        /// destination knowledge, and change data retriever parameters.
        /// </summary>
        /// <returns>A DbSyncContext object that will be used to retrieve the modified data.</returns>
        internal virtual async Task <(SyncContext, BatchInfo, DatabaseChangesSelected)> InternalGetChangesAsync(
            SyncContext context, MessageGetChangesBatch message,
            DbConnection connection, DbTransaction transaction,
            CancellationToken cancellationToken, IProgress <ProgressArgs> progress)
        {
            // batch info containing changes
            BatchInfo batchInfo;


            // Statistics about changes that are selected
            DatabaseChangesSelected changesSelected;

            if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
            {
                (batchInfo, changesSelected) = await this.InternalGetEmptyChangesAsync(message).ConfigureAwait(false);

                return (context, batchInfo, changesSelected);
            }

            // Call interceptor
            await this.InterceptAsync(new DatabaseChangesSelectingArgs(context, message, connection, transaction), cancellationToken).ConfigureAwait(false);

            // create local directory
            if (message.BatchSize > 0 && !string.IsNullOrEmpty(message.BatchDirectory) && !Directory.Exists(message.BatchDirectory))
            {
                Directory.CreateDirectory(message.BatchDirectory);
            }

            changesSelected = new DatabaseChangesSelected();

            // numbers of batch files generated
            var batchIndex = 0;

            // Check if we are in batch mode
            var isBatch = message.BatchSize > 0;

            // Create a batch info in memory (if !isBatch) or serialized on disk (if isBatch)
            // BatchInfo generates a schema clone with scope columns if needed
            batchInfo = new BatchInfo(!isBatch, message.Schema, message.BatchDirectory);

            // Clean SyncSet, we will add only tables we need in the batch info
            var changesSet = new SyncSet();

            var cptSyncTable    = 0;
            var currentProgress = context.ProgressPercentage;

            foreach (var syncTable in message.Schema.Tables)
            {
                // tmp count of table for report progress pct
                cptSyncTable++;

                // Only the table schema is replicated, no data is applied
                if (syncTable.SyncDirection == SyncDirection.None)
                {
                    continue;
                }

                // if we are in the upload stage, check that the table is not download only
                if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
                {
                    continue;
                }

                // if we are in the download stage, check that the table is not upload only
                if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
                {
                    continue;
                }

                // Get Command
                var selectIncrementalChangesCommand = await this.GetSelectChangesCommandAsync(context, syncTable, message.Setup, message.IsNew, connection, transaction);

                // Set parameters
                this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, selectIncrementalChangesCommand);

                // launch interceptor if any
                var args = new TableChangesSelectingArgs(context, syncTable, selectIncrementalChangesCommand, connection, transaction);
                await this.InterceptAsync(args, cancellationToken).ConfigureAwait(false);

                if (!args.Cancel && args.Command != null)
                {
                    // Statistics
                    var tableChangesSelected = new TableChangesSelected(syncTable.TableName, syncTable.SchemaName);

                    // Create a changes table with scope columns
                    var changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                    // Get the reader
                    using var dataReader = await args.Command.ExecuteReaderAsync().ConfigureAwait(false);

                    // memory size total
                    double rowsMemorySize = 0L;

                    while (dataReader.Read())
                    {
                        // Create a row from dataReader
                        var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                        // Add the row to the changes set
                        changesSetTable.Rows.Add(row);

                        // Set the correct state to be applied
                        if (row.RowState == DataRowState.Deleted)
                        {
                            tableChangesSelected.Deletes++;
                        }
                        else if (row.RowState == DataRowState.Modified)
                        {
                            tableChangesSelected.Upserts++;
                        }

                        // calculate row size if in batch mode
                        if (isBatch)
                        {
                            var fieldsSize     = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                            var finalFieldSize = fieldsSize / 1024d;

                            if (finalFieldSize > message.BatchSize)
                            {
                                throw new RowOverSizedException(finalFieldSize.ToString());
                            }

                            // Calculate the new memory size
                            rowsMemorySize += finalFieldSize;

                            // Move to the next row if we haven't reached the batch size yet.
                            if (rowsMemorySize <= message.BatchSize)
                            {
                                continue;
                            }

                            // Check interceptor
                            var batchTableChangesSelectedArgs = new TableChangesSelectedArgs(context, changesSetTable, tableChangesSelected, connection, transaction);
                            await this.InterceptAsync(batchTableChangesSelectedArgs, cancellationToken).ConfigureAwait(false);

                            // add changes to batchinfo
                            await batchInfo.AddChangesAsync(changesSet, batchIndex, false, message.SerializerFactory, this).ConfigureAwait(false);

                            // increment batch index
                            batchIndex++;

                            // we know the data is serialized here, so we can flush the set
                            changesSet.Clear();

                            // Recreate an empty ContainerSet and a ContainerTable
                            changesSet = new SyncSet();

                            changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                            // Init the row memory size
                            rowsMemorySize = 0L;
                        }
                    }

                    dataReader.Close();

                    // We don't report progress if the table has no changes, to limit verbosity
                    if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
                    {
                        changesSelected.TableChangesSelected.Add(tableChangesSelected);
                    }

                    // even if there are no rows, raise the interceptor
                    var tableChangesSelectedArgs = new TableChangesSelectedArgs(context, changesSetTable, tableChangesSelected, connection, transaction);
                    await this.InterceptAsync(tableChangesSelectedArgs, cancellationToken).ConfigureAwait(false);

                    context.ProgressPercentage = currentProgress + (cptSyncTable * 0.2d / message.Schema.Tables.Count);

                    // only raise report progress if we have something
                    if (tableChangesSelectedArgs.TableChangesSelected.TotalChanges > 0)
                    {
                        this.ReportProgress(context, progress, tableChangesSelectedArgs);
                    }
                }
            }

            // We are in batch mode, and we are at the last batchpart info
            // Even if we don't have rows inside, we return the changesSet, since it contains at least schema
            if (changesSet != null && changesSet.HasTables && changesSet.HasRows)
            {
                await batchInfo.AddChangesAsync(changesSet, batchIndex, true, message.SerializerFactory, this).ConfigureAwait(false);
            }

            //Set the total rows count contained in the batch info
            batchInfo.RowsCount = changesSelected.TotalChangesSelected;

            // Check the last index as the last batch
            batchInfo.EnsureLastBatch();

            // Raise database changes selected
            if (changesSelected.TotalChangesSelected > 0 || changesSelected.TotalChangesSelectedDeletes > 0 || changesSelected.TotalChangesSelectedUpdates > 0)
            {
                var databaseChangesSelectedArgs = new DatabaseChangesSelectedArgs(context, message.LastTimestamp, batchInfo, changesSelected, connection);
                this.ReportProgress(context, progress, databaseChangesSelectedArgs);
                await this.InterceptAsync(databaseChangesSelectedArgs, cancellationToken).ConfigureAwait(false);
            }

            return (context, batchInfo, changesSelected);
        }
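
The batching in InternalGetChangesAsync is driven by size accounting: each row's serialized size is converted to KB and accumulated, and once the running total exceeds BatchSize the current set is flushed to a new batch part and the accumulator resets; a single row larger than the whole batch is rejected. A simplified sketch of that loop; getRowSizeInBytes and flushBatchAsync are placeholders for ContainerTable.GetRowSizeFromDataRow and BatchInfo.AddChangesAsync:

        // Simplified sketch of the size accounting loop above. The delegates are
        // placeholders; a generic exception replaces RowOverSizedException here.
        internal static async Task<int> SplitRowsIntoBatchesAsync(
            IEnumerable<object[]> rows,
            int batchSizeInKB,
            Func<object[], long> getRowSizeInBytes,
            Func<int, Task> flushBatchAsync)
        {
            var batchIndex = 0;
            var rowsMemorySizeInKB = 0d;

            foreach (var row in rows)
            {
                var rowSizeInKB = getRowSizeInBytes(row) / 1024d;

                // A single row bigger than the whole batch can never fit.
                if (rowSizeInKB > batchSizeInKB)
                    throw new InvalidOperationException($"Row of {rowSizeInKB:F1} KB exceeds the {batchSizeInKB} KB batch size.");

                rowsMemorySizeInKB += rowSizeInKB;

                // Keep accumulating while the running total stays at or below the batch size.
                if (rowsMemorySizeInKB <= batchSizeInKB)
                    continue;

                // Flush the current batch part and start a new, empty one.
                await flushBatchAsync(batchIndex).ConfigureAwait(false);
                batchIndex++;
                rowsMemorySizeInKB = 0d;
            }

            return batchIndex;
        }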
Example #7
        /// <summary>
        /// Gets changes rows count estimation,
        /// </summary>
        internal virtual async Task <(SyncContext, DatabaseChangesSelected)> InternalGetEstimatedChangesCountAsync(
            SyncContext context, MessageGetChangesBatch message,
            DbConnection connection, DbTransaction transaction,
            CancellationToken cancellationToken, IProgress <ProgressArgs> progress)
        {
            // Call interceptor
            await this.InterceptAsync(new DatabaseChangesSelectingArgs(context, message, connection, transaction), cancellationToken).ConfigureAwait(false);

            // Create stats object to store changes count
            var changes = new DatabaseChangesSelected();

            if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
            {
                return (context, changes);
            }

            foreach (var syncTable in message.Schema.Tables)
            {
                // Only the table schema is replicated, no data is applied
                if (syncTable.SyncDirection == SyncDirection.None)
                {
                    continue;
                }

                // if we are in the upload stage, check that the table is not download only
                if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
                {
                    continue;
                }

                // if we are in the download stage, check that the table is not upload only
                if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
                {
                    continue;
                }

                // Get Command
                var command = await this.GetSelectChangesCommandAsync(context, syncTable, message.Setup, message.IsNew, connection, transaction);

                // Set parameters
                this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, command);

                // launch interceptor if any
                var args = new TableChangesSelectingArgs(context, syncTable, command, connection, transaction);
                await this.InterceptAsync(args, cancellationToken).ConfigureAwait(false);

                if (args.Cancel || args.Command == null)
                {
                    continue;
                }

                // Statistics
                var tableChangesSelected = new TableChangesSelected(syncTable.TableName, syncTable.SchemaName);

                // Get the reader
                using var dataReader = await args.Command.ExecuteReaderAsync().ConfigureAwait(false);

                while (dataReader.Read())
                {
                    bool isTombstone = false;
                    for (var i = 0; i < dataReader.FieldCount; i++)
                    {
                        if (dataReader.GetName(i) == "sync_row_is_tombstone")
                        {
                            isTombstone = Convert.ToInt64(dataReader.GetValue(i)) > 0;
                            break;
                        }
                    }

                    // Set the correct state to be applied
                    if (isTombstone)
                    {
                        tableChangesSelected.Deletes++;
                    }
                    else
                    {
                        tableChangesSelected.Upserts++;
                    }
                }

                dataReader.Close();

                // Check interceptor
                var changesArgs = new TableChangesSelectedArgs(context, null, tableChangesSelected, connection, transaction);
                await this.InterceptAsync(changesArgs, cancellationToken).ConfigureAwait(false);

                if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
                {
                    changes.TableChangesSelected.Add(tableChangesSelected);
                }
            }

            // Raise database changes selected
            var databaseChangesSelectedArgs = new DatabaseChangesSelectedArgs(context, message.LastTimestamp, null, changes, connection);

            this.ReportProgress(context, progress, databaseChangesSelectedArgs);
            await this.InterceptAsync(databaseChangesSelectedArgs, cancellationToken).ConfigureAwait(false);

            return (context, changes);
        }
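
The estimation above never materializes rows; it only walks the data reader and classifies each row as a delete or an upsert based on the sync_row_is_tombstone column. The same classification, isolated into a standalone ADO.NET helper (hypothetical, for illustration):

        // Standalone sketch of the tombstone counting loop above, using plain ADO.NET.
        internal static async Task<(int Upserts, int Deletes)> CountChangesAsync(DbCommand selectChangesCommand)
        {
            int upserts = 0, deletes = 0;

            using var dataReader = await selectChangesCommand.ExecuteReaderAsync().ConfigureAwait(false);

            while (dataReader.Read())
            {
                var isTombstone = false;

                // Look for the marker column; any non-zero value flags a deleted row.
                for (var i = 0; i < dataReader.FieldCount; i++)
                {
                    if (dataReader.GetName(i) == "sync_row_is_tombstone")
                    {
                        isTombstone = Convert.ToInt64(dataReader.GetValue(i)) > 0;
                        break;
                    }
                }

                if (isTombstone)
                    deletes++;
                else
                    upserts++;
            }

            return (upserts, deletes);
        }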
Example #8
        // NOTE: return type inferred from the tuple returned at the end of this method;
        // the access modifiers are assumed, as the snippet omitted them.
        public virtual async Task<(long ClientTimestamp, DatabaseChangesSelected ClientChangesSelected)>
        GetEstimatedChangesCountAsync(ScopeInfo localScopeInfo = null, CancellationToken cancellationToken = default, IProgress <ProgressArgs> progress = null)
        {
            if (!this.StartTime.HasValue)
            {
                this.StartTime = DateTime.UtcNow;
            }

            // Output
            long clientTimestamp = 0L;
            DatabaseChangesSelected clientChangesSelected = null;

            // Get context or create a new one
            var ctx = this.GetContext();

            using (var connection = this.Provider.CreateConnection())
            {
                try
                {
                    ctx.SyncStage = SyncStage.ChangesSelecting;

                    // Open connection
                    await this.OpenConnectionAsync(connection, cancellationToken).ConfigureAwait(false);

                    using (var transaction = connection.BeginTransaction())
                    {
                        await this.InterceptAsync(new TransactionOpenedArgs(ctx, connection, transaction), cancellationToken).ConfigureAwait(false);

                        // Get local scope, if not provided
                        if (localScopeInfo == null)
                        {
                            ctx = await this.Provider.EnsureClientScopeAsync(ctx, this.Options.ScopeInfoTableName, connection, transaction, cancellationToken, progress).ConfigureAwait(false);

                            (ctx, localScopeInfo) = await this.Provider.GetClientScopeAsync(ctx, this.Options.ScopeInfoTableName, this.ScopeName, connection, transaction, cancellationToken, progress).ConfigureAwait(false);
                        }

                        // If there is no schema in the client scope: maybe the client scope table does not exist, or we never got the schema from the server
                        if (localScopeInfo.Schema == null)
                        {
                            throw new MissingLocalOrchestratorSchemaException();
                        }

                        this.logger.LogInformation(SyncEventsId.GetClientScope, localScopeInfo);

                        // Locally, we don't want to chase rows from "others"
                        // We just want our local rows, so we don't exclude any remote scope id, by setting the scope id to NULL
                        Guid? remoteScopeId = null;
                        // lastSyncTS : get lines inserted / updated / deleted after the last committed sync
                        var lastSyncTS = localScopeInfo.LastSyncTimestamp;
                        // isNew : If isNew, the last timestamp is not correct, so grab all rows
                        var isNew = localScopeInfo.IsNewScope;
                        // Direction set to Upload
                        ctx.SyncWay = SyncWay.Upload;

                        // JUST before the whole process, get the timestamp, to be sure to
                        // get rows inserted / updated elsewhere since the sync is not over
                        clientTimestamp = await this.Provider.GetLocalTimestampAsync(ctx, connection, transaction, cancellationToken, progress);

                        // Creating the message
                        // Since it's an estimated count, we don't need to create batches, so we hard code the batchsize to 0
                        var message = new MessageGetChangesBatch(remoteScopeId, localScopeInfo.Id, isNew, lastSyncTS, localScopeInfo.Schema, this.Setup, 0, this.Options.BatchDirectory);

                        // Call interceptor
                        await this.InterceptAsync(new DatabaseChangesSelectingArgs(ctx, message, connection, transaction), cancellationToken).ConfigureAwait(false);

                        this.logger.LogDebug(SyncEventsId.GetChanges, message);

                        // Locally, if we are new, no need to get changes
                        if (isNew)
                        {
                            clientChangesSelected = new DatabaseChangesSelected();
                        }
                        else
                        {
                            (ctx, clientChangesSelected) = await this.Provider.GetEstimatedChangesCountAsync(ctx, message, connection, transaction, cancellationToken, progress).ConfigureAwait(false);
                        }

                        await this.InterceptAsync(new TransactionCommitArgs(ctx, connection, transaction), cancellationToken).ConfigureAwait(false);

                        transaction.Commit();
                    }

                    // Event progress & interceptor
                    ctx.SyncStage = SyncStage.ChangesSelected;

                    await this.CloseConnectionAsync(connection, cancellationToken).ConfigureAwait(false);
                }
                catch (Exception ex)
                {
                    RaiseError(ex);
                }
                finally
                {
                    if (connection != null && connection.State != ConnectionState.Closed)
                    {
                        connection.Close();
                    }
                }

                this.logger.LogInformation(SyncEventsId.GetChanges, new { ClientTimestamp = clientTimestamp, ClientChangesSelected = clientChangesSelected });

                return (clientTimestamp, clientChangesSelected);
            }
        }
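
A possible caller for the method above; the delegate stands in for a call such as orchestrator.GetEstimatedChangesCountAsync() on an already configured orchestrator, since only the returned tuple matters here:

        // Hypothetical caller: logs the estimated change count without transferring any rows.
        internal static async Task LogEstimatedChangesAsync(
            Func<Task<(long ClientTimestamp, DatabaseChangesSelected ClientChangesSelected)>> getEstimatedChangesCountAsync)
        {
            var (clientTimestamp, clientChangesSelected) = await getEstimatedChangesCountAsync();

            // TotalChangesSelected aggregates upserts and deletes across all tables.
            Console.WriteLine($"Timestamp {clientTimestamp}: ~{clientChangesSelected.TotalChangesSelected} change(s) estimated.");
        }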
Example #9
        /// <summary>
        /// Gets changes rows count estimation,
        /// </summary>
        public virtual async Task <(SyncContext, DatabaseChangesSelected)> GetEstimatedChangesCountAsync(
            SyncContext context, MessageGetChangesBatch message,
            DbConnection connection, DbTransaction transaction,
            CancellationToken cancellationToken, IProgress <ProgressArgs> progress)
        {
            this.Orchestrator.logger.LogDebug(SyncEventsId.GetChanges, message);

            // Create stats object to store changes count
            var changes = new DatabaseChangesSelected();

            if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
            {
                return (context, changes);
            }

            foreach (var syncTable in message.Schema.Tables)
            {
                this.Orchestrator.logger.LogDebug(SyncEventsId.GetChanges, syncTable);

                // if we are in the upload stage, check that the table is not download only
                if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
                {
                    continue;
                }

                // if we are in the download stage, check that the table is not upload only
                if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
                {
                    continue;
                }

                var tableBuilder = this.GetTableBuilder(syncTable, message.Setup);
                var syncAdapter  = tableBuilder.CreateSyncAdapter();

                // launch interceptor if any
                await this.Orchestrator.InterceptAsync(new TableChangesSelectingArgs(context, syncTable, connection, transaction), cancellationToken).ConfigureAwait(false);

                // Get Command
                var selectIncrementalChangesCommand = await this.GetSelectChangesCommandAsync(context, syncAdapter, syncTable, message.IsNew, connection, transaction);

                // Set parameters
                this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, selectIncrementalChangesCommand);

                // log
                this.Orchestrator.logger.LogDebug(SyncEventsId.GetChanges, selectIncrementalChangesCommand);

                // Statistics
                var tableChangesSelected = new TableChangesSelected(syncTable.TableName, syncTable.SchemaName);

                // Get the reader
                using (var dataReader = await selectIncrementalChangesCommand.ExecuteReaderAsync().ConfigureAwait(false))
                {
                    while (dataReader.Read())
                    {
                        bool isTombstone = false;
                        for (var i = 0; i < dataReader.FieldCount; i++)
                        {
                            if (dataReader.GetName(i) == "sync_row_is_tombstone")
                            {
                                isTombstone = Convert.ToInt64(dataReader.GetValue(i)) > 0;
                                break;
                            }
                        }

                        // Set the correct state to be applied
                        if (isTombstone)
                        {
                            tableChangesSelected.Deletes++;
                        }
                        else
                        {
                            tableChangesSelected.Upserts++;
                        }
                    }
                }

                if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
                {
                    changes.TableChangesSelected.Add(tableChangesSelected);
                }
            }

            return (context, changes);
        }
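
Every variant above repeats the same direction filter before selecting or counting changes. A hypothetical predicate that isolates those checks:

        // Hypothetical predicate mirroring the direction checks repeated above:
        // tables with SyncDirection.None never carry rows, download-only tables are
        // skipped on upload, and upload-only tables are skipped on download.
        internal static bool ShouldSelectChanges(SyncWay syncWay, SyncDirection tableDirection)
        {
            if (tableDirection == SyncDirection.None)
                return false;

            if (syncWay == SyncWay.Upload && tableDirection == SyncDirection.DownloadOnly)
                return false;

            if (syncWay == SyncWay.Download && tableDirection == SyncDirection.UploadOnly)
                return false;

            return true;
        }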
Example #10
 public DatabaseChangesSelectingArgs(SyncContext context, MessageGetChangesBatch changesRequest, DbConnection connection, DbTransaction transaction)
     : base(context, connection, transaction)
 {
     this.ChangesRequest = changesRequest;
 }
        /// <summary>
        /// Gets a batch of changes to synchronize when given batch size,
        /// destination knowledge, and change data retriever parameters.
        /// </summary>
        /// <returns>A DbSyncContext object that will be used to retrieve the modified data.</returns>
        public virtual async Task <(SyncContext, BatchInfo, DatabaseChangesSelected)> GetChangeBatchAsync(
            SyncContext context, MessageGetChangesBatch message,
            DbConnection connection, DbTransaction transaction,
            CancellationToken cancellationToken, IProgress <ProgressArgs> progress = null)
        {
            // batch info containing changes
            BatchInfo batchInfo;

            // Statistics about changes that are selected
            DatabaseChangesSelected changesSelected;

            if (context.SyncWay == SyncWay.Upload && context.SyncType == SyncType.Reinitialize)
            {
                (batchInfo, changesSelected) = this.GetEmptyChanges(message);
                return (context, batchInfo, changesSelected);
            }

            // Check if the provider is not outdated
            var isOutdated = this.IsRemoteOutdated();

            // Get a chance to make the sync even if it's outdated
            if (isOutdated)
            {
                var outdatedArgs = new OutdatedArgs(context, null, null);

                // Interceptor
                await this.InterceptAsync(outdatedArgs).ConfigureAwait(false);

                if (outdatedArgs.Action != OutdatedAction.Rollback)
                {
                    context.SyncType = outdatedArgs.Action == OutdatedAction.Reinitialize ? SyncType.Reinitialize : SyncType.ReinitializeWithUpload;
                }

                if (outdatedArgs.Action == OutdatedAction.Rollback)
                {
                    throw new OutOfDateException();
                }
            }

            // create local directory
            if (message.BatchSize > 0 && !string.IsNullOrEmpty(message.BatchDirectory) && !Directory.Exists(message.BatchDirectory))
            {
                Directory.CreateDirectory(message.BatchDirectory);
            }

            // numbers of batch files generated
            var batchIndex = 0;

            // Check if we are in batch mode
            var isBatch = message.BatchSize > 0;

            // Create stats object to store changes count
            var changes = new DatabaseChangesSelected();

            // create the in memory changes set
            var changesSet = new SyncSet(message.Schema.ScopeName);

            // Create a Schema set without readonly columns, attached to memory changes
            foreach (var table in message.Schema.Tables)
            {
                DbSyncAdapter.CreateChangesTable(message.Schema.Tables[table.TableName, table.SchemaName], changesSet);
            }

            // Create a batch info in memory (if !isBatch) or serialized on disk (if isBatch)
            // BatchInfo generates a schema clone with scope columns if needed
            batchInfo = new BatchInfo(!isBatch, changesSet, message.BatchDirectory);

            // Clear tables, we will add only the ones we need in the batch info
            changesSet.Clear();

            foreach (var syncTable in message.Schema.Tables)
            {
                // if we are in the upload stage, check that the table is not download only
                if (context.SyncWay == SyncWay.Upload && syncTable.SyncDirection == SyncDirection.DownloadOnly)
                {
                    continue;
                }

                // if we are in the download stage, check that the table is not upload only
                if (context.SyncWay == SyncWay.Download && syncTable.SyncDirection == SyncDirection.UploadOnly)
                {
                    continue;
                }

                var tableBuilder = this.GetTableBuilder(syncTable);
                var syncAdapter  = tableBuilder.CreateSyncAdapter(connection, transaction);

                // raise before event
                context.SyncStage = SyncStage.TableChangesSelecting;
                var tableChangesSelectingArgs = new TableChangesSelectingArgs(context, syncTable.TableName, connection, transaction);
                // launch interceptor if any
                await this.InterceptAsync(tableChangesSelectingArgs).ConfigureAwait(false);

                // Get Command
                var selectIncrementalChangesCommand = this.GetSelectChangesCommand(context, syncAdapter, syncTable, message.IsNew);

                // Set parameters
                this.SetSelectChangesCommonParameters(context, syncTable, message.ExcludingScopeId, message.IsNew, message.LastTimestamp, selectIncrementalChangesCommand);

                // Statistics
                var tableChangesSelected = new TableChangesSelected(syncTable.TableName);

                // Get the reader
                using (var dataReader = selectIncrementalChangesCommand.ExecuteReader())
                {
                    // memory size total
                    double rowsMemorySize = 0L;

                    // Create a changes table with scope columns
                    var changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                    while (dataReader.Read())
                    {
                        // Create a row from dataReader
                        var row = CreateSyncRowFromReader(dataReader, changesSetTable);

                        // Add the row to the changes set
                        changesSetTable.Rows.Add(row);

                        // Set the correct state to be applied
                        if (row.RowState == DataRowState.Deleted)
                        {
                            tableChangesSelected.Deletes++;
                        }
                        else if (row.RowState == DataRowState.Modified)
                        {
                            tableChangesSelected.Upserts++;
                        }

                        // calculate row size if in batch mode
                        if (isBatch)
                        {
                            var fieldsSize     = ContainerTable.GetRowSizeFromDataRow(row.ToArray());
                            var finalFieldSize = fieldsSize / 1024d;

                            if (finalFieldSize > message.BatchSize)
                            {
                                throw new RowOverSizedException(finalFieldSize.ToString());
                            }

                            // Calculate the new memory size
                            rowsMemorySize += finalFieldSize;

                            // Move to the next row if we haven't reached the batch size yet.
                            if (rowsMemorySize <= message.BatchSize)
                            {
                                continue;
                            }

                            // add changes to batchinfo
                            batchInfo.AddChanges(changesSet, batchIndex, false);

                            // increment batch index
                            batchIndex++;

                            // we know the data is serialized here, so we can flush the set
                            changesSet.Clear();

                            // Recreate an empty ContainerSet and a ContainerTable
                            changesSet = new SyncSet(message.Schema.ScopeName);

                            changesSetTable = DbSyncAdapter.CreateChangesTable(message.Schema.Tables[syncTable.TableName, syncTable.SchemaName], changesSet);

                            // Init the row memory size
                            rowsMemorySize = 0L;
                        }
                    }
                }

                selectIncrementalChangesCommand.Dispose();

                context.SyncStage = SyncStage.TableChangesSelected;

                if (tableChangesSelected.Deletes > 0 || tableChangesSelected.Upserts > 0)
                {
                    changes.TableChangesSelected.Add(tableChangesSelected);
                }

                // Event progress & interceptor
                context.SyncStage = SyncStage.TableChangesSelected;
                var tableChangesSelectedArgs = new TableChangesSelectedArgs(context, tableChangesSelected, connection, transaction);
                this.ReportProgress(context, progress, tableChangesSelectedArgs);
                await this.InterceptAsync(tableChangesSelectedArgs).ConfigureAwait(false);
            }

            // We are in batch mode, and we are at the last batch part info
            // Even if we don't have rows inside, we return the changesSet, since it contains at least the schema
            if (changesSet != null && changesSet.HasTables)
            {
                batchInfo.AddChanges(changesSet, batchIndex, true);
            }

            // Check the last index as the last batch
            batchInfo.EnsureLastBatch();

            return (context, batchInfo, changes);
        }
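
To make the KB arithmetic in the batch loops concrete, a small worked example assuming a 500 KB BatchSize and rows that serialize to exactly 40 KB each:

        // Worked example of the KB accounting above, assuming BatchSize = 500 (KB).
        // A single row above 500 KB would be rejected outright (RowOverSizedException above).
        internal static int BatchSizeArithmeticExample()
        {
            const double batchSizeInKB = 500d;
            const double rowSizeInKB = 40_960 / 1024d;   // a 40 960-byte row weighs exactly 40 KB

            var accumulatedKB = 0d;
            var rowsInCurrentBatch = 0;

            while (true)
            {
                accumulatedKB += rowSizeInKB;
                rowsInCurrentBatch++;

                // 12 rows = 480 KB (still <= 500); the 13th row reaches 520 KB and triggers a flush.
                if (accumulatedKB > batchSizeInKB)
                    return rowsInCurrentBatch;   // 13
            }
        }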