/// <summary>
/// Applies a batch of remote changes against the local store inside a metadata-store
/// transaction, so item changes and knowledge updates commit atomically.
/// </summary>
public override void ProcessChangeBatch(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallback, SyncSessionStatistics sessionStatistics)
{
    _metadataStore.BeginTransaction();

    // Local versions of the incoming items; the applier compares them against the
    // remote versions to detect conflicts.
    IEnumerable<ItemChange> localVersions = _metadataStore.Metadata.GetLocalVersions(sourceChanges);

    var applier = new NotifyingChangeApplier(_idFormats);
    applier.ApplyChanges(
        resolutionPolicy,
        sourceChanges,
        changeDataRetriever as IChangeDataRetriever,
        localVersions,
        _metadataStore.Metadata.GetKnowledge(),
        _metadataStore.Metadata.GetForgottenKnowledge(),
        _changeApplier,
        _currentSessionContext,
        syncCallback);

    _metadataStore.CommitTransaction();
}
/// <summary>
/// Create a new instance of options with default values.
/// </summary>
public SyncOptions()
{
    BatchDirectory = GetDefaultUserBatchDiretory();
    BatchSize = 0;
    CleanMetadatas = true;
    UseBulkOperations = true;
    UseVerboseErrors = false;
    DisableConstraintsOnApplyChanges = true;
    ScopeInfoTableName = DefaultScopeInfoTableName;
    ConflictResolutionPolicy = ConflictResolutionPolicy.ServerWins;
}
/// <summary>
/// Processes a batch of source changes: collects the matching local versions from the
/// metadata store, then lets a <c>NotifyingChangeApplier</c> apply them, all within a
/// single metadata-store transaction.
/// </summary>
public override void ProcessChangeBatch(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallback, SyncSessionStatistics sessionStatistics)
{
    _metadataStore.BeginTransaction();

    IEnumerable<ItemChange> knownVersions = _metadataStore.Metadata.GetLocalVersions(sourceChanges);

    // The applier calls back into _changeApplier when it needs data retrieved or saved.
    NotifyingChangeApplier notifyingApplier = new NotifyingChangeApplier(_idFormats);
    notifyingApplier.ApplyChanges(resolutionPolicy, sourceChanges,
        changeDataRetriever as IChangeDataRetriever, knownVersions,
        _metadataStore.Metadata.GetKnowledge(),
        _metadataStore.Metadata.GetForgottenKnowledge(),
        _changeApplier, _currentSessionContext, syncCallback);

    _metadataStore.CommitTransaction();
}
/// <summary>
/// Forwards a full-enumeration change batch to the resolved provider and returns the
/// updated change-applier state blob.
/// </summary>
public byte[] ProcessFullEnumerationChangeBatch(
    ConflictResolutionPolicy resolutionPolicy,
    FullEnumerationChangeBatch sourceChanges,
    CachedChangeDataRetriever changeDataRetriever,
    byte[] changeApplierInfo)
{
    var provider = FindProvider();
    return provider.ProcessRemoteFullEnumerationChangeBatch(
        resolutionPolicy, sourceChanges, changeDataRetriever, changeApplierInfo);
}
/// <summary>
/// Proxies a full-enumeration change batch over the WCF channel and returns the
/// updated change-applier state blob.
/// </summary>
public byte[] ProcessFullEnumerationChangeBatch(
    ConflictResolutionPolicy resolutionPolicy,
    FullEnumerationChangeBatch sourceChanges,
    Sync101.CachedChangeDataRetriever changeDataRetriever,
    byte[] changeApplierInfo)
{
    return base.Channel.ProcessFullEnumerationChangeBatch(
        resolutionPolicy,
        sourceChanges,
        changeDataRetriever,
        changeApplierInfo);
}
/// <summary>
/// Forwards a change batch to the provider for remote processing and returns the
/// updated change-applier state blob.
/// </summary>
public byte[] ProcessChangeBatch(
    ConflictResolutionPolicy resolutionPolicy,
    ChangeBatch sourceChanges,
    CachedChangeDataRetriever changeDataRetriever,
    byte[] changeApplierInfo)
{
    return provider.ProcessRemoteChangeBatch(
        resolutionPolicy, sourceChanges, changeDataRetriever, changeApplierInfo);
}
/// <summary>
/// Initializes the request processor from the service configuration, caching the first
/// configured scope name plus the connection string and conflict policy it will use.
/// </summary>
protected SyncRequestProcessorBase(SyncServiceConfiguration configuration, HttpContextServiceHost serviceHost)
{
    WebUtil.CheckArgumentNull(configuration, "configuration");
    Debug.Assert(configuration.ScopeNames.Count != 0);

    _configuration = configuration;
    _serviceHost = serviceHost;

    // Only the first scope is serviced by this processor.
    _scopeName = configuration.ScopeNames[0];
    _serverConnectionString = configuration.ServerConnectionString;
    _conflictResolutionPolicy = configuration.ConflictResolutionPolicy;
}
/// <summary>
/// Constructs the processor, validating the configuration and snapshotting the
/// settings (scope, connection string, conflict policy) used for the request.
/// </summary>
protected SyncRequestProcessorBase(SyncServiceConfiguration configuration, HttpContextServiceHost serviceHost)
{
    // Configuration is mandatory and must expose at least one scope.
    WebUtil.CheckArgumentNull(configuration, "configuration");
    Debug.Assert(0 != configuration.ScopeNames.Count);

    _configuration = configuration;
    _scopeName = _configuration.ScopeNames[0];
    _serviceHost = serviceHost;
    _serverConnectionString = _configuration.ServerConnectionString;
    _conflictResolutionPolicy = _configuration.ConflictResolutionPolicy;
}
/// <summary>
/// Applies a batch of incoming changes: fetches the local versions, then delegates
/// comparison, application, and conflict handling to a change applier.
/// </summary>
public override void ProcessChangeBatch(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallbacks, SyncSessionStatistics sessionStatistics)
{
    // Versions we already know locally for the incoming items.
    IEnumerable<ItemChange> knownLocalVersions = _sync.GetChanges(sourceChanges);

    // The change applier compares local and remote versions, applies the
    // non-conflicting changes, and reacts to detected conflicts as specified.
    var forgotten = new ForgottenKnowledge(_sync.IdFormats, _sync.SyncKnowledge);
    var applier = new NotifyingChangeApplier(_sync.IdFormats);
    applier.ApplyChanges(resolutionPolicy, sourceChanges,
        changeDataRetriever as IChangeDataRetriever, knownLocalVersions,
        _sync.SyncKnowledge.Clone(), forgotten, this, SyncSessionContext, syncCallbacks);
}
/// <summary>
/// Maps a <c>ConflictResolutionPolicy</c> onto the corresponding
/// <c>SyncConflictResolution</c>; any other policy is an internal server error.
/// </summary>
internal static SyncConflictResolution GetSyncConflictResolution(ConflictResolutionPolicy conflictResolutionPolicy)
{
    if (conflictResolutionPolicy == ConflictResolutionPolicy.ClientWins)
    {
        return SyncConflictResolution.ClientWins;
    }

    if (conflictResolutionPolicy == ConflictResolutionPolicy.ServerWins)
    {
        return SyncConflictResolution.ServerWins;
    }

    throw SyncServiceException.CreateInternalServerError(Strings.UnsupportedConflictResolutionPolicy);
}
/// <summary>
/// Processes a set of changes for a full enumeration by serializing the batch and the
/// cached change data, sending both to the sync service, and storing the returned
/// change-applier state for the session.
/// </summary>
/// <param name="resolutionPolicy">The conflict resolution policy to use when this method applies changes.</param>
/// <param name="sourceChanges">A batch of changes from the source provider to be applied locally.</param>
/// <param name="changeDataRetriever">An object that can be used to retrieve change data. It can be an <see cref="T:Microsoft.Synchronization.IChangeDataRetriever"/> object or a provider-specific object.</param>
/// <param name="syncCallbacks">An object that receives event notifications during change application.</param>
/// <param name="sessionStatistics">Tracks change statistics. For a provider that uses custom change application, this object must be updated with the results of the change application.</param>
public override void ProcessFullEnumerationChangeBatch(ConflictResolutionPolicy resolutionPolicy, FullEnumerationChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallbacks, SyncSessionStatistics sessionStatistics)
{
    // Capture the change data up front so it can travel with the serialized batch.
    var cachedRetriever = new CachedChangeDataRetriever(
        changeDataRetriever as IChangeDataRetriever, sourceChanges);

    byte[] serializedChanges = sourceChanges.Serialize();
    byte[] serializedRetriever = SerializerHelper.BinarySerialize(cachedRetriever);

    // The service returns the updated change-applier state, which we keep on the
    // session context for subsequent batches.
    _syncSessionContext.ChangeApplierInfo = _syncService.ProcessFullEnumerationChangeBatch(
        (int)resolutionPolicy,
        serializedChanges,
        serializedRetriever,
        _syncSessionContext.ChangeApplierInfo);
}
/// <summary>
/// Applying changes message.
/// Be careful: the policy could be different from the schema's (especially on the
/// client side, where it is the reverse one by default).
/// </summary>
public MessageApplyChanges(Guid localScopeId, Guid senderScopeId, bool isNew, long? lastTimestamp, SyncSet schema, ConflictResolutionPolicy policy, bool disableConstraintsOnApplyChanges, bool cleanMetadatas, bool cleanFolder, bool snapshotApplied, BatchInfo changes)
{
    // Schema and batch info are mandatory; everything else is copied verbatim.
    this.Schema = schema ?? throw new ArgumentNullException(nameof(schema));
    this.BatchInfo = changes ?? throw new ArgumentNullException(nameof(changes));

    this.LocalScopeId = localScopeId;
    this.SenderScopeId = senderScopeId;
    this.IsNew = isNew;
    this.LastTimestamp = lastTimestamp;
    this.Policy = policy;
    this.DisableConstraintsOnApplyChanges = disableConstraintsOnApplyChanges;
    this.CleanMetadatas = cleanMetadatas;
    this.CleanFolder = cleanFolder;
    this.SnapshoteApplied = snapshotApplied;
}
/// <summary>
/// Configures both providers (batch size and conflict policy) and runs a sync session
/// between them in the requested direction.
/// </summary>
public void Synchronize(KnowledgeSyncProvider destinationProvider, KnowledgeSyncProvider sourceProvider, ConflictResolutionPolicy destinationPol, ConflictResolutionPolicy sourcePol, SyncDirectionOrder SyncOrder, uint batchSize, string scopeName)
{
    // Both stores honor the same requested batch size.
    ((LocalStore)destinationProvider).RequestedBatchSize = batchSize;
    ((RemoteStore)sourceProvider).RequestedBatchSize = batchSize;

    destinationProvider.Configuration.ConflictResolutionPolicy = destinationPol;
    sourceProvider.Configuration.ConflictResolutionPolicy = sourcePol;

    var orchestrator = new SyncOrchestrator
    {
        LocalProvider = destinationProvider,
        RemoteProvider = sourceProvider,
        Direction = SyncOrder
    };
    orchestrator.Synchronize();
}
/// <summary>
/// Called by the engine when a full enumeration is required because this provider is
/// out of date due to tombstone cleanup. Applies the full-enumeration batch inside a
/// metadata-store transaction.
/// </summary>
public override void ProcessFullEnumerationChangeBatch(ConflictResolutionPolicy resolutionPolicy, FullEnumerationChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallback, SyncSessionStatistics sessionStatistics)
{
    _metadataStore.BeginTransaction();

    // All local change versions known for this full enumeration.
    IEnumerable<ItemChange> localVersions = _metadata.GetFullEnumerationLocalVersions(sourceChanges);

    // The change applier calls back into this provider when it needs data
    // retrieved or saved.
    var applier = new NotifyingChangeApplier(_idFormats);
    applier.ApplyFullEnumerationChanges(resolutionPolicy, sourceChanges,
        changeDataRetriever as IChangeDataRetriever, localVersions,
        _metadata.GetKnowledge(), _metadata.GetForgottenKnowledge(),
        this, _currentSessionContext, syncCallback);

    _metadataStore.CommitTransaction();
}
/// <summary>
/// Applying changes message.
/// Be careful: the policy could be different from the schema's (especially on the
/// client side, where it is the reverse one by default).
/// </summary>
public MessageApplyChanges(Guid localScopeId, Guid senderScopeId, bool isNew, long lastTimestamp, SyncSet schema, SyncSetup setup, ConflictResolutionPolicy policy, bool disableConstraintsOnApplyChanges, bool useBulkOperations, bool cleanMetadatas, bool cleanFolder, BatchInfo changes)
{
    // Required references first — fail fast on null.
    this.Schema = schema ?? throw new ArgumentNullException(nameof(schema));
    this.Setup = setup ?? throw new ArgumentNullException(nameof(setup));
    this.Changes = changes ?? throw new ArgumentNullException(nameof(changes));

    this.LocalScopeId = localScopeId;
    this.SenderScopeId = senderScopeId;
    this.IsNew = isNew;
    this.LastTimestamp = lastTimestamp;
    this.Policy = policy;
    this.DisableConstraintsOnApplyChanges = disableConstraintsOnApplyChanges;
    this.UseBulkOperations = useBulkOperations;
    this.CleanMetadatas = cleanMetadatas;
    this.CleanFolder = cleanFolder;
}
/// <summary>
/// Converts a PowerShell-model conflict resolution policy to the SDK model, copying
/// only the field relevant to the selected mode (path for LastWriterWins, stored
/// procedure for Custom).
/// </summary>
public static ConflictResolutionPolicy ConvertPSConflictResolutionPolicyToConflictResolutionPolicy(PSConflictResolutionPolicy pSConflictResolutionPolicy)
{
    var result = new ConflictResolutionPolicy
    {
        Mode = pSConflictResolutionPolicy.Mode
    };

    if (pSConflictResolutionPolicy.Mode.Equals(ConflictResolutionMode.LastWriterWins, StringComparison.OrdinalIgnoreCase))
    {
        result.ConflictResolutionPath = pSConflictResolutionPolicy.ConflictResolutionPath;
    }
    else if (pSConflictResolutionPolicy.Mode.Equals(ConflictResolutionMode.Custom, StringComparison.OrdinalIgnoreCase))
    {
        result.ConflictResolutionProcedure = pSConflictResolutionPolicy.ConflictResolutionProcedure;
    }

    return result;
}
/// <summary>
/// Applies the current definition to the parent.
/// </summary>
/// <returns>An instance of the parent.</returns>
public virtual ContainerBuilder Attach()
{
    var policy = new ConflictResolutionPolicy();

    if (this.conflictResolutionPath != null)
    {
        policy.Mode = ConflictResolutionMode.LastWriterWins;
        policy.ConflictResolutionPath = this.conflictResolutionPath;
    }

    // Checked second on purpose: if both a path and a procedure were configured,
    // the procedure takes precedence and the mode ends up Custom.
    if (this.conflictResolutionProcedure != null)
    {
        policy.Mode = ConflictResolutionMode.Custom;
        policy.ConflictResolutionProcedure = this.conflictResolutionProcedure;
    }

    this.attachCallback(policy);
    return this.parent;
}
/// <summary>
/// Sends a full-enumeration batch (with its cached change data) to the remote client
/// and stores the returned change-applier state on the session context.
/// </summary>
public override void ProcessFullEnumerationChangeBatch(
    ConflictResolutionPolicy resolutionPolicy,
    FullEnumerationChangeBatch sourceChanges,
    object changeDataRetriever,
    SyncCallbacks syncCallback,
    SyncSessionStatistics sessionStatistics)
{
    // Snapshot the change data so it can be shipped with the batch.
    var cachedRetriever = new CachedChangeDataRetriever(
        changeDataRetriever as IChangeDataRetriever, sourceChanges);

    this.syncSessionContext.ChangeApplierInfo = this.client.ProcessFullEnumerationChangeBatch(
        resolutionPolicy,
        sourceChanges,
        cachedRetriever,
        this.syncSessionContext.ChangeApplierInfo);
}
/// <summary>
/// Applies a batch of incoming changes: fetches local versions via the proxy, then
/// delegates comparison, application, and conflict handling to a change applier.
/// </summary>
public override void ProcessChangeBatch(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallbacks, SyncSessionStatistics sessionStatistics)
{
    // BUGFIX: the original wrapped this call in `catch (Exception ex) { throw ex; }`,
    // which only reset the stack trace. Let any exception propagate untouched.
    ItemsChangeInfo localVersions = Proxy.GetChanges(Path, sourceChanges, _filters);

    // The change applier compares local and remote versions, applies the
    // non-conflicting changes, and reacts to detected conflicts as specified.
    ForgottenKnowledge = new ForgottenKnowledge(IdFormats, SyncKnowledge);
    NotifyingChangeApplier changeApplier = new NotifyingChangeApplier(IdFormats);
    changeApplier.ApplyChanges(resolutionPolicy, sourceChanges,
        changeDataRetriever as IChangeDataRetriever,
        RemoteSyncDetails.GenerateChanges(localVersions),
        SyncKnowledge.Clone(), ForgottenKnowledge, this, SyncSessionContext, syncCallbacks);
}
/// <summary>
/// Applies a batch of changes via the remote service proxy. When the change data is
/// batched to a file, ensures the batch file has been uploaded to the service first
/// (keyed by file name plus the enumerating peer's replica id).
/// </summary>
public override void ProcessChangeBatch(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallbacks, SyncSessionStatistics sessionStatistics)
{
    var context = changeDataRetriever as DbSyncContext;
    if (context != null && context.IsDataBatched)
    {
        var fileName = new FileInfo(context.BatchFileName).Name;

        // MadeWithKnowledge is the local knowledge of the peer enumerating the
        // changes; its ReplicaId identifies the remote peer.
        var peerId = context.MadeWithKnowledge.ReplicaId.ToString();

        // Upload the batch file only if the service does not already have it.
        if (!_proxy.HasUploadedBatchFile(fileName, peerId))
        {
            // BUGFIX: the original used a single Stream.Read, which is not
            // guaranteed to fill the buffer. File.ReadAllBytes reads the whole
            // file reliably and disposes the handle.
            byte[] contents = File.ReadAllBytes(context.BatchFileName);
            _proxy.UploadBatchFile(fileName, contents, peerId);
        }

        // The service resolves the bare file name back to its local copy.
        context.BatchFileName = fileName;
    }

    var stats = _proxy.ApplyChanges(resolutionPolicy, sourceChanges, changeDataRetriever);
    sessionStatistics.ChangesApplied += stats.ChangesApplied;
    sessionStatistics.ChangesFailed += stats.ChangesFailed;
}
/// <summary>
/// Applies a batch of incoming changes through the peer provider. When the data is
/// batched, the matching batch file must already have been uploaded to this service.
/// </summary>
public SyncSessionStatistics ApplyChanges(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeData)
{
    var context = changeData as DbSyncContext;
    if (context != null && context.IsDataBatched)
    {
        string remotePeerId = context.MadeWithKnowledge.ReplicaId.ToString();

        // Batched data must be uploaded before ApplyChanges is called; BatchFileName
        // (the bare file name, no path) is the id we look up.
        string localBatchFileName;
        if (!this.batchIdToFileMapper.TryGetValue(context.BatchFileName, out localBatchFileName))
        {
            // The batch file never arrived — surface a sync fault to the caller.
            throw new FaultException<WebSyncFaultException>(new WebSyncFaultException("No batch file uploaded for id " + context.BatchFileName, null));
        }
        context.BatchFileName = localBatchFileName;
    }

    var sessionStatistics = new SyncSessionStatistics();
    this.peerProvider.ProcessChangeBatch(resolutionPolicy, sourceChanges, changeData, new SyncCallbacks(), sessionStatistics);
    return sessionStatistics;
}
/// <summary>
/// Creates a FolderSync bound to a freshly-created sync provider for this sync state.
/// Returns null when the underlying object backing the provider no longer exists.
/// </summary>
/// <param name="policy">Conflict resolution policy to use; must be a valid enum value.</param>
/// <param name="creator">Optional factory; when non-null it is used instead of the FolderSync constructor.</param>
public FolderSync GetFolderSync(ConflictResolutionPolicy policy, Func <ISyncProvider, FolderSyncState, ConflictResolutionPolicy, bool, FolderSync> creator)
{
    base.CheckDisposed("GetFolderSync");
    EnumValidator.ThrowIfInvalid <ConflictResolutionPolicy>(policy, "policy");
    this.syncLogger.Information <int, ConflictResolutionPolicy>(ExTraceGlobals.SyncTracer, (long)this.GetHashCode(), "SyncState::GetFolderSync. Hashcode = {0}, Policy = {1}.", this.GetHashCode(), policy);

    // A provider factory must have been supplied before a FolderSync can be built.
    if (this.syncProviderFactory == null)
    {
        throw new InvalidOperationException("Must set a sync provider factory before calling GetFolderSync");
    }

    try
    {
        this.syncProvider = this.syncProviderFactory.CreateSyncProvider(null);
    }
    catch (ObjectNotFoundException)
    {
        // The backing object is gone — signal "no sync possible" with null rather
        // than propagating the exception.
        return(null);
    }

    // Prefer the caller-supplied factory when one is given.
    if (creator != null)
    {
        return(creator(this.syncProvider, this, policy, true));
    }
    return(new FolderSync(this.syncProvider, this, policy, true));
}
/// <summary>
/// Sends a change batch (with its cached change data) to the remote client and stores
/// the returned change-applier state on the session context.
/// </summary>
public override void ProcessChangeBatch(
    ConflictResolutionPolicy resolutionPolicy,
    ChangeBatch sourceChanges,
    object changeDataRetriever,
    SyncCallbacks syncCallback,
    SyncSessionStatistics sessionStatistics)
{
    try
    {
        // Snapshot the change data so it can travel with the batch.
        CachedChangeDataRetriever cachedChangeDataRetriever = new CachedChangeDataRetriever(
            changeDataRetriever as IChangeDataRetriever,
            sourceChanges);

        byte[] newChangeApplierInfo = this.client.ProcessChangeBatch(
            resolutionPolicy,
            sourceChanges,
            cachedChangeDataRetriever,
            this.syncSessionContext.ChangeApplierInfo);

        this.syncSessionContext.ChangeApplierInfo = newChangeApplierInfo;
    }
    catch (Exception ex)
    {
        // NOTE(review): this swallows every exception after printing only the
        // message, so a failed batch leaves ChangeApplierInfo stale and the sync
        // engine is never told the batch failed — confirm this is intentional
        // (sample/demo code?) before relying on it.
        Console.WriteLine(ex.Message);
    }
}
/// <summary>
/// Applies an incoming change batch through the provider. Batched change data is
/// resolved to the batch file previously uploaded to this service.
/// </summary>
public SyncSessionStatistics ApplyChanges(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeData)
{
    DbSyncContext context = changeData as DbSyncContext;

    // Check to see if data is batched
    if (context != null && context.IsDataBatched)
    {
        string remoteId = context.MadeWithKnowledge.ReplicaId.ToString();

        // The client should have uploaded the batch file prior to calling
        // ApplyChanges; the id is DbSyncContext.BatchFileName (file name only).
        string localBatchFilename;
        bool uploaded = batchIdToFileMapper.TryGetValue(context.BatchFileName, out localBatchFilename);
        if (!uploaded)
        {
            // Service did not receive this file.
            throw new FaultException <WebSyncFaultException>(new WebSyncFaultException(string.Format("No batch file uploaded for the id {0}.", context.BatchFileName), null));
        }
        context.BatchFileName = localBatchFilename;
    }

    var stats = new SyncSessionStatistics();
    Provider.ProcessChangeBatch(resolutionPolicy, sourceChanges, changeData, new SyncCallbacks(), stats);
    return stats;
}
/// <summary>
/// Applies a change batch via the peer provider, first mapping a batched payload's
/// upload id back to the local batch file the client uploaded earlier.
/// </summary>
public SyncSessionStatistics ApplyChanges(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeData)
{
    DbSyncContext dataRetriever = changeData as DbSyncContext;
    bool isBatched = dataRetriever != null && dataRetriever.IsDataBatched;

    if (isBatched)
    {
        string remotePeerId = dataRetriever.MadeWithKnowledge.ReplicaId.ToString();

        // BatchFileName holds the bare file name the client used as an upload id;
        // the client must have uploaded that file before calling ApplyChanges.
        string localBatchFileName = null;
        if (!this.batchIdToFileMapper.TryGetValue(dataRetriever.BatchFileName, out localBatchFileName))
        {
            // Missing upload — report a sync fault instead of proceeding.
            throw new FaultException <WebSyncFaultException>(new WebSyncFaultException("No batch file uploaded for id " + dataRetriever.BatchFileName, null));
        }
        dataRetriever.BatchFileName = localBatchFileName;
    }

    SyncSessionStatistics sessionStatistics = new SyncSessionStatistics();
    this.peerProvider.ProcessChangeBatch(resolutionPolicy, sourceChanges, changeData, new SyncCallbacks(), sessionStatistics);
    return(sessionStatistics);
}
/// <summary>
/// A conflict has occurred; determine the action to take, giving the user's
/// interceptor a chance to override the policy-derived default resolution.
/// </summary>
internal async Task <(ApplyAction, SyncRow)> GetConflictActionAsync(SyncContext context, SyncConflict conflict, ConflictResolutionPolicy policy, DbConnection connection, DbTransaction transaction = null, CancellationToken cancellationToken = default)
{
    // Seed the default resolution from the configured policy.
    var defaultResolution = policy == ConflictResolutionPolicy.ClientWins
        ? ConflictResolution.ClientWins
        : ConflictResolution.ServerWins;

    // Interceptor: let user code inspect the conflict and change the resolution.
    var args = new ApplyChangesFailedArgs(context, conflict, defaultResolution, connection, transaction);
    this.Orchestrator.logger.LogDebug(SyncEventsId.ResolveConflicts, args);
    await this.Orchestrator.InterceptAsync(args, cancellationToken).ConfigureAwait(false);

    // ServerWins / MergeRow keep the default Continue; only ClientWins and
    // Rollback change the apply action.
    var action = ApplyAction.Continue;
    switch (args.Resolution)
    {
        case ConflictResolution.ClientWins:
            action = ApplyAction.RetryWithForceWrite;
            break;
        case ConflictResolution.Rollback:
            action = ApplyAction.Rollback;
            break;
    }

    // Only a merge carries a final row back to the caller.
    var mergedRow = args.Resolution == ConflictResolution.MergeRow ? args.FinalRow : null;
    return (action, mergedRow);
}
/// <summary>
/// Handle a conflict.
/// Returns (conflicts resolved, the resolved row if any, rows actually applied).
/// </summary>
internal async Task <(int conflictResolvedCount, SyncRow resolvedRow, int rowAppliedCount)> HandleConflictAsync(
    Guid localScopeId, Guid senderScopeId, DbSyncAdapter syncAdapter, SyncContext context,
    SyncConflict conflict, ConflictResolutionPolicy policy, long lastTimestamp,
    DbConnection connection, DbTransaction transaction)
{
    SyncRow finalRow;
    ApplyAction conflictApplyAction;
    int rowAppliedCount = 0;

    // Ask the policy (and any user interceptor) how to resolve this conflict.
    (conflictApplyAction, finalRow) = await this.GetConflictActionAsync(context, conflict, policy, connection, transaction).ConfigureAwait(false);

    // Conflict rolled back by the user: abort the whole apply.
    if (conflictApplyAction == ApplyAction.Rollback)
    {
        throw new RollbackException("Rollback action taken on conflict");
    }

    // Local provider wins (or a merged row was produced).
    if (conflictApplyAction == ApplyAction.Continue)
    {
        var isMergeAction = finalRow != null;
        var row = isMergeAction ? finalRow : conflict.LocalRow;

        // Conflict on a line that is not present on the datasource.
        if (row == null)
        {
            return(0, finalRow, 0);
        }

        // If we have a merge action, apply the merged row locally with
        // update_scope_id left null so the row is marked updated locally and
        // flows back to the sender on the next exchange.
        if (isMergeAction)
        {
            var isUpdated = await syncAdapter.ApplyUpdateAsync(row, lastTimestamp, null, true);
            if (!isUpdated)
            {
                throw new Exception("Can't update the merge row.");
            }
        }

        finalRow = isMergeAction ? row : conflict.LocalRow;

        // We don't do anything else, since we keep the original row:
        // we resolved one conflict but applied no rows.
        return(conflictResolvedCount : 1, finalRow, rowAppliedCount : 0);
    }

    // Remote wins: force-apply the remote row.
    if (conflictApplyAction == ApplyAction.RetryWithForceWrite)
    {
        // TODO : Should Raise an error ?
        if (conflict.RemoteRow == null)
        {
            return(0, finalRow, 0);
        }

        bool operationComplete = false;

        switch (conflict.Type)
        {
            // Remote source has the row (local may or may not): force an update.
            case ConflictType.RemoteExistsLocalExists:
            case ConflictType.RemoteExistsLocalNotExists:
            case ConflictType.RemoteExistsLocalIsDeleted:
            case ConflictType.UniqueKeyConstraint:
                operationComplete = await syncAdapter.ApplyUpdateAsync(conflict.RemoteRow, lastTimestamp, senderScopeId, true);
                rowAppliedCount = 1;
                break;

            // Both sides deleted the row: nothing to do.
            case ConflictType.RemoteIsDeletedLocalIsDeleted:
            case ConflictType.RemoteIsDeletedLocalNotExists:
                operationComplete = true;
                rowAppliedCount = 0;
                break;

            // Remote deleted the row while local inserted/updated it: delete locally.
            case ConflictType.RemoteIsDeletedLocalExists:
                operationComplete = await syncAdapter.ApplyDeleteAsync(conflict.RemoteRow, lastTimestamp, senderScopeId, true);
                rowAppliedCount = 1;
                break;

            // Unresolvable cases: resolve nothing, apply nothing.
            case ConflictType.RemoteCleanedupDeleteLocalUpdate:
            case ConflictType.ErrorsOccurred:
                return(0, finalRow, 0);
        }

        finalRow = conflict.RemoteRow;

        // The force write itself failed — report no resolution and no final row.
        if (!operationComplete)
        {
            finalRow = null;
            return(0, finalRow, rowAppliedCount);
        }

        return(1, finalRow, rowAppliedCount);
    }

    return(0, finalRow, 0);
}
/// <summary>
/// Handle a conflict.
/// Returns (action for the engine, conflicts counted, the resolved row if any).
/// </summary>
internal async Task <(ChangeApplicationAction, int, DmRow)> HandleConflictAsync(DbSyncAdapter syncAdapter, SyncContext context, SyncConflict conflict, ConflictResolutionPolicy policy, ScopeInfo scope, long fromScopeLocalTimeStamp, DbConnection connection, DbTransaction transaction)
{
    DmRow finalRow = null;
    var conflictApplyAction = ApplyAction.Continue;

    // Ask the policy (and any user interceptor) how to resolve this conflict.
    (conflictApplyAction, finalRow) = await this.GetConflictActionAsync(context, conflict, policy, connection, transaction);

    // Default behavior and an error occured: rollback requested.
    if (conflictApplyAction == ApplyAction.Rollback)
    {
        conflict.ErrorMessage = "Rollback action taken on conflict";
        conflict.Type = ConflictType.ErrorsOccurred;
        return(ChangeApplicationAction.Rollback, 0, null);
    }

    // Local provider wins (or a merged row was produced): update metadata.
    if (conflictApplyAction == ApplyAction.Continue)
    {
        var isMergeAction = finalRow != null;
        var row = isMergeAction ? finalRow : conflict.LocalRow;

        // Conflict on a line that is not present on the datasource.
        if (row == null)
        {
            return(ChangeApplicationAction.Continue, 0, finalRow);
        }

        if (row != null)
        {
            // If we have a merge action, we apply the merged row on the server.
            if (isMergeAction)
            {
                bool isUpdated = false;
                bool isInserted = false;

                // Insert metadata is a merge, actually.
                var commandType = DbCommandType.UpdateMetadata;

                isUpdated = syncAdapter.ApplyUpdate(row, scope, true);
                if (!isUpdated)
                {
                    // Row did not exist: insert it, then update it so it is marked
                    // as updated from the server and flows back to the client.
                    isInserted = syncAdapter.ApplyInsert(row, scope, true);
                    isUpdated = syncAdapter.ApplyUpdate(row, scope, true);
                    commandType = DbCommandType.InsertMetadata;
                }

                if (!isUpdated && !isInserted)
                {
                    throw new Exception("Can't update the merge row.");
                }

                // If we inserted/updated the row server-side to resolve the
                // conflict, the tracking metadata must be fixed up to match.
                if (isUpdated || isInserted)
                {
                    using (var metadataCommand = syncAdapter.GetCommand(commandType))
                    {
                        // Re-read the row as it now exists on the server.
                        var dmTableRow = syncAdapter.GetRow(row);
                        row = dmTableRow.Rows[0];

                        // Deriving Parameters
                        syncAdapter.SetCommandParameters(commandType, metadataCommand);

                        // Set the id parameter
                        syncAdapter.SetColumnParametersValues(metadataCommand, row);

                        var version = row.RowState == DmRowState.Deleted ? DmRowVersion.Original : DmRowVersion.Current;

                        Guid? create_scope_id = row["create_scope_id"] != DBNull.Value ? (Guid?)row["create_scope_id"] : null;
                        long createTimestamp = row["create_timestamp", version] != DBNull.Value ? Convert.ToInt64(row["create_timestamp", version]) : 0;

                        // The trick is to force the row to be "created before last
                        // sync", even if we just inserted it, so it is later seen
                        // in state Updated (and not Added).
                        row["create_scope_id"] = create_scope_id;
                        row["create_timestamp"] = fromScopeLocalTimeStamp - 1;

                        // Update scope id is set to server side (null).
                        Guid? update_scope_id = row["update_scope_id"] != DBNull.Value ? (Guid?)row["update_scope_id"] : null;
                        long updateTimestamp = row["update_timestamp", version] != DBNull.Value ? Convert.ToInt64(row["update_timestamp", version]) : 0;

                        row["update_scope_id"] = null;
                        row["update_timestamp"] = updateTimestamp;

                        // Apply the metadata row; scope id null because it was
                        // applied locally.
                        var rowsApplied = syncAdapter.InsertOrUpdateMetadatas(metadataCommand, row, null);
                        if (!rowsApplied)
                        {
                            throw new Exception("No metadatas rows found, can't update the server side");
                        }
                    }
                }
            }

            finalRow = isMergeAction ? row : conflict.LocalRow;

            // We don't do anything on the local provider, so we do not need to
            // return a +1 on syncConflicts count.
            return(ChangeApplicationAction.Continue, 0, finalRow);
        }

        return(ChangeApplicationAction.Rollback, 0, finalRow);
    }

    // Remote wins: force-apply the remote row.
    if (conflictApplyAction == ApplyAction.RetryWithForceWrite)
    {
        if (conflict.RemoteRow == null)
        {
            // TODO : Should Raise an error ?
            return(ChangeApplicationAction.Rollback, 0, finalRow);
        }

        bool operationComplete = false;

        // Create a local scope to override values during the forced apply.
        var localScope = new ScopeInfo
        {
            Name = scope.Name,
            Timestamp = fromScopeLocalTimeStamp
        };

        DbCommandType commandType = DbCommandType.InsertMetadata;
        bool needToUpdateMetadata = true;

        switch (conflict.Type)
        {
            // Remote source has the row, local does not: insert it.
            case ConflictType.RemoteUpdateLocalNoRow:
            case ConflictType.RemoteInsertLocalNoRow:
                operationComplete = syncAdapter.ApplyInsert(conflict.RemoteRow, localScope, true);
                commandType = DbCommandType.InsertMetadata;
                break;

            // Both sides deleted the row: nothing to do.
            case ConflictType.RemoteDeleteLocalDelete:
            case ConflictType.RemoteDeleteLocalNoRow:
                operationComplete = true;
                needToUpdateMetadata = false;
                break;

            // Remote deleted the row, local inserted/updated it: delete locally.
            case ConflictType.RemoteDeleteLocalUpdate:
            case ConflictType.RemoteDeleteLocalInsert:
                operationComplete = syncAdapter.ApplyDelete(conflict.RemoteRow, localScope, true);
                commandType = DbCommandType.UpdateMetadata;
                break;

            // Remote inserted, local deleted: insert again locally, but the
            // tracking line already exists, so update its metadata.
            case ConflictType.RemoteInsertLocalDelete:
            case ConflictType.RemoteUpdateLocalDelete:
                operationComplete = syncAdapter.ApplyInsert(conflict.RemoteRow, localScope, true);
                commandType = DbCommandType.UpdateMetadata;
                break;

            // Remote insert/update vs local insert/update: take the remote row.
            case ConflictType.RemoteUpdateLocalInsert:
            case ConflictType.RemoteUpdateLocalUpdate:
            case ConflictType.RemoteInsertLocalInsert:
            case ConflictType.RemoteInsertLocalUpdate:
                operationComplete = syncAdapter.ApplyUpdate(conflict.RemoteRow, localScope, true);
                commandType = DbCommandType.UpdateMetadata;
                break;

            // Unresolvable cases: rollback.
            case ConflictType.RemoteCleanedupDeleteLocalUpdate:
            case ConflictType.ErrorsOccurred:
                return(ChangeApplicationAction.Rollback, 0, finalRow);
        }

        if (needToUpdateMetadata)
        {
            using (var metadataCommand = syncAdapter.GetCommand(commandType))
            {
                // Deriving Parameters
                syncAdapter.SetCommandParameters(commandType, metadataCommand);

                // Force applying the client row, so apply scope.Id (client scope here).
                var rowsApplied = syncAdapter.InsertOrUpdateMetadatas(metadataCommand, conflict.RemoteRow, scope.Id);
                if (!rowsApplied)
                {
                    throw new Exception("No metadatas rows found, can't update the server side");
                }
            }
        }

        finalRow = conflict.RemoteRow;

        // The force operation failed.
        // NOTE(review): `ex` below is built but never thrown or logged — the
        // failure is silently reported as "0 conflicts resolved"; confirm intent.
        if (!operationComplete)
        {
            var ex = $"Can't force operation for applyType {syncAdapter.ApplyType}";
            finalRow = null;
            return(ChangeApplicationAction.Continue, 0, finalRow);
        }

        // tableProgress.ChangesApplied += 1;
        return(ChangeApplicationAction.Continue, 1, finalRow);
    }

    return(ChangeApplicationAction.Rollback, 0, finalRow);
}
/// <summary>
/// A conflict has occurred; compute the action to take, letting the interceptor
/// override the policy-derived default resolution.
/// </summary>
internal async Task <(ApplyAction, DmRow)> GetConflictActionAsync(SyncContext context, SyncConflict conflict, ConflictResolutionPolicy policy, DbConnection connection, DbTransaction transaction = null)
{
    // Policy decides the default: client wins or server wins.
    var defaultResolution = policy == ConflictResolutionPolicy.ClientWins
        ? ConflictResolution.ClientWins
        : ConflictResolution.ServerWins;

    // Interceptor: user code may change the resolution (or supply a merged row).
    var args = new ApplyChangesFailedArgs(context, conflict, defaultResolution, connection, transaction);
    await this.InterceptAsync(args);

    // ServerWins / MergeRow map to Continue; only ClientWins forces a write.
    var action = args.Resolution == ConflictResolution.ClientWins
        ? ApplyAction.RetryWithForceWrite
        : ApplyAction.Continue;

    // Only a merge carries a final row back to the caller.
    var mergedRow = args.Resolution == ConflictResolution.MergeRow ? args.FinalRow : null;
    return (action, mergedRow);
}
// Stores the conflict resolution policy to apply for subsequent operations.
private void AddConflictResolution(ConflictResolutionPolicy conflictResolutionPolicy)
    => this.conflictResolutionPolicy = conflictResolutionPolicy;
/// <summary>
/// Reads the existing CosmosDB SQL container, applies the requested updates from the
/// cmdlet parameters, and writes the updated container back to the service.
/// </summary>
public override void ExecuteCmdlet()
{
    // Resolve ResourceGroup/Account/Database/Name from the piped object, depending on the parameter set.
    if (ParameterSetName.Equals(ParentObjectParameterSet, StringComparison.Ordinal))
    {
        ResourceIdentifier resourceIdentifier = new ResourceIdentifier(ParentObject.Id);
        ResourceGroupName = resourceIdentifier.ResourceGroupName;
        DatabaseName = resourceIdentifier.ResourceName;
        AccountName = ResourceIdentifierExtensions.GetDatabaseAccountName(resourceIdentifier);
    }
    else if (ParameterSetName.Equals(ObjectParameterSet, StringComparison.Ordinal))
    {
        ResourceIdentifier resourceIdentifier = new ResourceIdentifier(InputObject.Id);
        ResourceGroupName = resourceIdentifier.ResourceGroupName;
        Name = resourceIdentifier.ResourceName;
        DatabaseName = ResourceIdentifierExtensions.GetSqlDatabaseName(resourceIdentifier);
        AccountName = ResourceIdentifierExtensions.GetDatabaseAccountName(resourceIdentifier);
    }

    SqlContainerGetResults readSqlContainerGetResults = null;
    try
    {
        readSqlContainerGetResults = CosmosDBManagementClient.SqlResources.GetSqlContainer(ResourceGroupName, AccountName, DatabaseName, Name);
    }
    catch (CloudException e)
    {
        if (e.Response.StatusCode == System.Net.HttpStatusCode.NotFound)
        {
            throw new ResourceNotFoundException(message: string.Format(ExceptionMessage.NotFound, Name), innerException: e);
        }

        // FIX: previously any non-NotFound CloudException was silently swallowed here,
        // leaving readSqlContainerGetResults null and causing a NullReferenceException
        // at the .Resource dereference below. Rethrow so the real error surfaces.
        throw;
    }

    SqlContainerResource sqlContainerResource = UpdateAzCosmosDBSqlContainer.PopulateSqlContainerResource(readSqlContainerGetResults.Resource);

    if (PartitionKeyPath != null)
    {
        List<string> Paths = new List<string>(PartitionKeyPath);
        sqlContainerResource.PartitionKey = new ContainerPartitionKey
        {
            Kind = PartitionKeyKind,
            Paths = Paths,
            Version = PartitionKeyVersion
        };
    }

    if (UniqueKeyPolicy != null)
    {
        sqlContainerResource.UniqueKeyPolicy = PSUniqueKeyPolicy.ToSDKModel(UniqueKeyPolicy);
    }

    if (TtlInSeconds != null)
    {
        sqlContainerResource.DefaultTtl = TtlInSeconds;
    }

    // An explicit policy object takes precedence over the individual Mode/Path/Procedure parameters.
    if (ConflictResolutionPolicy != null)
    {
        sqlContainerResource.ConflictResolutionPolicy = PSConflictResolutionPolicy.ToSDKModel(ConflictResolutionPolicy);
    }
    else if (ConflictResolutionPolicyMode != null)
    {
        ConflictResolutionPolicy conflictResolutionPolicy = new ConflictResolutionPolicy
        {
            Mode = ConflictResolutionPolicyMode
        };

        // LastWriterWins uses a document path; Custom uses a stored procedure.
        if (ConflictResolutionPolicyMode.Equals(ConflictResolutionMode.LastWriterWins, StringComparison.OrdinalIgnoreCase))
        {
            conflictResolutionPolicy.ConflictResolutionPath = ConflictResolutionPolicyPath;
        }
        else if (ConflictResolutionPolicyMode.Equals(ConflictResolutionMode.Custom, StringComparison.OrdinalIgnoreCase))
        {
            conflictResolutionPolicy.ConflictResolutionProcedure = ConflictResolutionPolicyProcedure;
        }

        sqlContainerResource.ConflictResolutionPolicy = conflictResolutionPolicy;
    }

    if (IndexingPolicy != null)
    {
        sqlContainerResource.IndexingPolicy = PSIndexingPolicy.ToSDKModel(IndexingPolicy);
    }

    if (AnalyticalStorageTtl != null)
    {
        sqlContainerResource.AnalyticalStorageTtl = AnalyticalStorageTtl;
    }

    CreateUpdateOptions options = ThroughputHelper.PopulateCreateUpdateOptions(Throughput, AutoscaleMaxThroughput);

    SqlContainerCreateUpdateParameters sqlContainerCreateUpdateParameters = new SqlContainerCreateUpdateParameters
    {
        Resource = sqlContainerResource,
        Options = options
    };

    if (ShouldProcess(Name, "Updating an existing CosmosDB Sql Container"))
    {
        SqlContainerGetResults sqlContainerGetResults = CosmosDBManagementClient.SqlResources.CreateUpdateSqlContainer(ResourceGroupName, AccountName, DatabaseName, Name, sqlContainerCreateUpdateParameters);
        WriteObject(new PSSqlContainerGetResults(sqlContainerGetResults));
    }

    return;
}
/// <summary>
/// Applies a batch of source changes via the remote proxy service. When the change
/// enumeration was spooled to a batch file, the file is uploaded to the service first
/// (unless it already has it), then referenced by name only.
/// </summary>
/// <param name="resolutionPolicy">Conflict resolution policy forwarded to the service.</param>
/// <param name="sourceChanges">Batch of changes from the source provider.</param>
/// <param name="changeDataRetriever">Change data retriever; a DbSyncContext when batching is in use.</param>
/// <param name="syncCallbacks">Change-application callbacks (unused here; application happens remotely).</param>
/// <param name="sessionStatistics">Session statistics, updated with the service's applied/failed counts.</param>
public override void ProcessChangeBatch(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallbacks, SyncSessionStatistics sessionStatistics)
{
    DbSyncContext context = changeDataRetriever as DbSyncContext;

    if (context != null && context.IsDataBatched)
    {
        string fileName = new FileInfo(context.BatchFileName).Name;

        // Retrieve the remote peer id from MadeWithKnowledge.ReplicaId. MadeWithKnowledge is the
        // local knowledge of the peer that is enumerating the changes.
        string peerId = context.MadeWithKnowledge.ReplicaId.ToString();

        // Only upload the batch file if the service does not already have it.
        if (!this.proxy.HasUploadedBatchFile(fileName, peerId))
        {
            // FIX: the original used a single Stream.Read call, which is not guaranteed to
            // fill the buffer (partial reads would silently truncate the upload).
            // File.ReadAllBytes always returns the complete file contents.
            byte[] contents = File.ReadAllBytes(context.BatchFileName);
            this.proxy.UploadBatchFile(fileName, contents, peerId);
        }

        // The service addresses the batch by file name only.
        context.BatchFileName = fileName;
    }

    SyncSessionStatistics stats = this.proxy.ApplyChanges(resolutionPolicy, sourceChanges, changeDataRetriever);
    sessionStatistics.ChangesApplied += stats.ChangesApplied;
    sessionStatistics.ChangesFailed += stats.ChangesFailed;
}
/// <summary>
/// Creates a conflict handler bound to a mobile service client and a resolution policy.
/// </summary>
/// <param name="client">Client used when resolving conflicts.</param>
/// <param name="conflictResolutionPolicy">Policy applied to detected conflicts.</param>
public ConflictHandler(MobileServiceClient client, ConflictResolutionPolicy conflictResolutionPolicy)
{
    _client = client;
    _conflictResolutionPolicy = conflictResolutionPolicy;
}
/// <summary>
/// Upload mechanism: applies a batch of incoming changes against this provider's store
/// through a NotifyingChangeApplier, tracking forgotten knowledge for the session.
/// </summary>
/// <param name="resolutionPolicy">Conflict resolution policy to apply.</param>
/// <param name="sourceChanges">Local file changes to apply.</param>
/// <param name="changeDataRetriever">Retriever used by the applier to fetch item data.</param>
/// <param name="syncCallback">Callbacks raised during change application.</param>
/// <param name="sessionStatistics">Session statistics (not updated here).</param>
public override void ProcessChangeBatch(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallback, SyncSessionStatistics sessionStatistics)
{
    // Track forgotten knowledge derived from the current local knowledge for this session.
    myForgottenKnowledge = new ForgottenKnowledge(IdFormats, myKnowledge);

    // Let the applier drive the change application, calling back into this provider.
    var applier = new NotifyingChangeApplier(IdFormats);
    applier.ApplyChanges(
        resolutionPolicy,
        sourceChanges,
        (IChangeDataRetriever)changeDataRetriever,
        myKnowledge.Clone(),
        myForgottenKnowledge,
        this,
        currentSessionContext,
        syncCallback);
}
/// <summary>
/// Processes a full-enumeration change batch by delegating change application to the
/// remote sync service: the batch and a cached data retriever are serialized, shipped
/// across, and the returned change-applier state is stored back on the session context.
/// </summary>
/// <param name="resolutionPolicy">Conflict resolution policy to use when applying changes.</param>
/// <param name="sourceChanges">Batch of changes from the source provider to be applied.</param>
/// <param name="changeDataRetriever">Object used to retrieve change data (an IChangeDataRetriever or provider-specific object).</param>
/// <param name="syncCallbacks">Receives event notifications during change application.</param>
/// <param name="sessionStatistics">Change statistics for the session.</param>
public override void ProcessFullEnumerationChangeBatch(ConflictResolutionPolicy resolutionPolicy, FullEnumerationChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallbacks, SyncSessionStatistics sessionStatistics)
{
    // Snapshot the change data so the service can apply it without calling back here.
    var cachedRetriever = new CachedChangeDataRetriever(
        changeDataRetriever as IChangeDataRetriever,
        sourceChanges);

    // NOTE(review): SerializerHelper.BinarySerialize looks like binary serialization of
    // an in-process object graph — confirm it does not rely on BinaryFormatter for
    // untrusted input (deserialization of untrusted data is unsafe).
    byte[] serializedBatch = sourceChanges.Serialize();
    byte[] serializedRetriever = SerializerHelper.BinarySerialize(cachedRetriever);

    byte[] updatedApplierInfo = _syncService.ProcessFullEnumerationChangeBatch(
        (int)resolutionPolicy,
        serializedBatch,
        serializedRetriever,
        _syncSessionContext.ChangeApplierInfo);

    // Persist the applier state returned by the service for the next round-trip.
    _syncSessionContext.ChangeApplierInfo = updatedApplierInfo;
}
/// <summary>
/// Full-enumeration change application is not supported by this provider.
/// </summary>
/// <exception cref="NotImplementedException">Always thrown.</exception>
public override void ProcessFullEnumerationChangeBatch(ConflictResolutionPolicy resolutionPolicy, FullEnumerationChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallback, SyncSessionStatistics sessionStatistics)
    => throw new NotImplementedException("The method or operation is not implemented.");
/// <summary>
/// Change the default conflict resolution policy. The default value is ClientWins.
/// </summary>
/// <param name="policy">The new conflict resolution policy.</param>
public void SetConflictResolutionPolicy(ConflictResolutionPolicy policy)
    => ConflictResolutionPolicy = policy;
/// <summary>
/// Enumerate all internal changes, no batch mode: selects inserted/updated/deleted rows
/// per table inside a single transaction and returns one in-memory BatchInfo plus
/// per-table selection statistics.
/// </summary>
/// <param name="context">Current sync context (sync way, parameters, stage).</param>
/// <param name="scopeInfo">Scope used to compute each row's change state.</param>
/// <param name="configTables">Schema of the tables to enumerate.</param>
/// <param name="batchDirectory">Directory used when generating the batch info.</param>
/// <param name="policy">Conflict resolution policy, mapped to the adapter's apply action.</param>
/// <param name="filters">Optional filter clauses (only honored on the server side with parameters).</param>
internal async Task <(BatchInfo, ChangesSelected)> EnumerateChangesInternal(
    SyncContext context, ScopeInfo scopeInfo, DmSet configTables, string batchDirectory, ConflictResolutionPolicy policy, ICollection <FilterClause> filters)
{
    // create the in memory changes set
    DmSet changesSet = new DmSet(SyncConfiguration.DMSET_NAME);

    // Create the batch info, in memory
    var batchInfo = new BatchInfo
    {
        InMemory = true
    };

    using (var connection = this.CreateConnection())
    {
        // Open the connection
        await connection.OpenAsync();

        using (var transaction = connection.BeginTransaction())
        {
            try
            {
                // changes that will be returned as selected changes
                ChangesSelected changes = new ChangesSelected();

                foreach (var tableDescription in configTables.Tables)
                {
                    // if we are in upload stage, so check if table is not download only
                    if (context.SyncWay == SyncWay.Upload && tableDescription.SyncDirection == SyncDirection.DownloadOnly)
                    {
                        continue;
                    }

                    // if we are in download stage, so check if table is not upload only
                    if (context.SyncWay == SyncWay.Download && tableDescription.SyncDirection == SyncDirection.UploadOnly)
                    {
                        continue;
                    }

                    var builder = this.GetDatabaseBuilder(tableDescription);
                    var syncAdapter = builder.CreateSyncAdapter(connection, transaction);
                    // Map the resolution policy to the adapter-level apply action.
                    syncAdapter.ConflictApplyAction = SyncConfiguration.GetApplyAction(policy);

                    // raise before event
                    context.SyncStage = SyncStage.TableChangesSelecting;
                    var beforeArgs = new TableChangesSelectingEventArgs(this.ProviderTypeName, context.SyncStage, tableDescription.TableName);
                    this.TryRaiseProgressEvent(beforeArgs, this.TableChangesSelecting);

                    // selected changes for the current table
                    TableChangesSelected tableSelectedChanges = new TableChangesSelected
                    {
                        TableName = tableDescription.TableName
                    };

                    // Get Command: use the filtered select only on the server side, when
                    // parameters and matching filter clauses are present.
                    DbCommand selectIncrementalChangesCommand;
                    DbCommandType dbCommandType;
                    if (this.CanBeServerProvider && context.Parameters != null && context.Parameters.Count > 0 && filters != null && filters.Count > 0)
                    {
                        var filtersName = filters
                            .Where(f => f.TableName.Equals(tableDescription.TableName, StringComparison.InvariantCultureIgnoreCase))
                            .Select(f => f.ColumnName);

                        if (filtersName != null && filtersName.Count() > 0)
                        {
                            dbCommandType = DbCommandType.SelectChangesWitFilters;
                            selectIncrementalChangesCommand = syncAdapter.GetCommand(dbCommandType, filtersName);
                        }
                        else
                        {
                            dbCommandType = DbCommandType.SelectChanges;
                            selectIncrementalChangesCommand = syncAdapter.GetCommand(dbCommandType);
                        }
                    }
                    else
                    {
                        dbCommandType = DbCommandType.SelectChanges;
                        selectIncrementalChangesCommand = syncAdapter.GetCommand(dbCommandType);
                    }

                    if (selectIncrementalChangesCommand == null)
                    {
                        var exc = "Missing command 'SelectIncrementalChangesCommand' ";
                        throw new Exception(exc);
                    }

                    // Deriving Parameters
                    syncAdapter.SetCommandParameters(dbCommandType, selectIncrementalChangesCommand);

                    // Get a clone of the table with tracking columns
                    var dmTableChanges = BuildChangesTable(tableDescription.TableName, configTables);

                    SetSelectChangesCommonParameters(context, scopeInfo, selectIncrementalChangesCommand);

                    // Set filter parameters if any (server side only)
                    if (this.CanBeServerProvider && context.Parameters != null && context.Parameters.Count > 0 && filters != null && filters.Count > 0)
                    {
                        var tableFilters = filters.Where(f => f.TableName.Equals(tableDescription.TableName, StringComparison.InvariantCultureIgnoreCase)).ToList();
                        if (tableFilters != null && tableFilters.Count > 0)
                        {
                            foreach (var filter in tableFilters)
                            {
                                // Match the sync parameter to the filter on both column and table name.
                                var parameter = context.Parameters.FirstOrDefault(p => p.ColumnName.Equals(filter.ColumnName, StringComparison.InvariantCultureIgnoreCase) && p.TableName.Equals(filter.TableName, StringComparison.InvariantCultureIgnoreCase));
                                if (parameter != null)
                                {
                                    DbManager.SetParameterValue(selectIncrementalChangesCommand, parameter.ColumnName, parameter.Value);
                                }
                            }
                        }
                    }

                    // Temporary tracking column used to detect tombstones while reading.
                    this.AddTrackingColumns <int>(dmTableChanges, "sync_row_is_tombstone");

                    // Get the reader
                    using (var dataReader = selectIncrementalChangesCommand.ExecuteReader())
                    {
                        while (dataReader.Read())
                        {
                            DmRow dataRow = CreateRowFromReader(dataReader, dmTableChanges);

                            // assuming the row is not inserted / modified
                            DmRowState state = DmRowState.Unchanged;

                            // get if the current row is inserted, modified, deleted
                            state = GetStateFromDmRow(dataRow, scopeInfo);

                            // Skip anything that is not an actual change.
                            if (state != DmRowState.Deleted && state != DmRowState.Modified && state != DmRowState.Added)
                            {
                                continue;
                            }

                            // add row
                            dmTableChanges.Rows.Add(dataRow);

                            // acceptchanges before modifying
                            dataRow.AcceptChanges();
                            tableSelectedChanges.TotalChanges++;

                            // Set the correct state to be applied
                            if (state == DmRowState.Deleted)
                            {
                                dataRow.Delete();
                                tableSelectedChanges.Deletes++;
                            }
                            else if (state == DmRowState.Added)
                            {
                                dataRow.SetAdded();
                                tableSelectedChanges.Inserts++;
                            }
                            else if (state == DmRowState.Modified)
                            {
                                dataRow.SetModified();
                                tableSelectedChanges.Updates++;
                            }
                        }

                        // Since we dont need this column anymore, remove it
                        this.RemoveTrackingColumns(dmTableChanges, "sync_row_is_tombstone");

                        // add it to the DmSet
                        changesSet.Tables.Add(dmTableChanges);
                    }

                    // add the stats to global stats
                    changes.TableChangesSelected.Add(tableSelectedChanges);

                    // Raise event for this table
                    context.SyncStage = SyncStage.TableChangesSelected;
                    var args = new TableChangesSelectedEventArgs(this.ProviderTypeName, SyncStage.TableChangesSelected, tableSelectedChanges);
                    this.TryRaiseProgressEvent(args, this.TableChangesSelected);
                }

                transaction.Commit();

                // generate the batchpartinfo (single part, index 0, since everything is in memory)
                batchInfo.GenerateBatchInfo(0, changesSet, batchDirectory);

                // Create a new in-memory batch info with the changes DmSet
                return (batchInfo, changes);
            }
            catch (Exception)
            {
                throw;
            }
            finally
            {
                if (connection != null && connection.State == ConnectionState.Open)
                {
                    connection.Close();
                }
            }
        }
    }
}
// Called by the engine when a full enumeration is required because this provider is
// out of date due to tombstone cleanup.
public override void ProcessFullEnumerationChangeBatch(ConflictResolutionPolicy resolutionPolicy, FullEnumerationChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallback, SyncSessionStatistics sessionStatistics)
{
    _metadataStore.BeginTransaction();

    // Look up our local versions for every item in the incoming batch.
    IEnumerable<ItemChange> localVersions = _metadata.GetFullEnumerationLocalVersions(sourceChanges);

    // A change applier drives the application: it calls back into this provider
    // when it needs data and when results should be saved.
    var applier = new NotifyingChangeApplier(_idFormats);
    applier.ApplyFullEnumerationChanges(
        resolutionPolicy,
        sourceChanges,
        changeDataRetriever as IChangeDataRetriever,
        localVersions,
        _metadata.GetKnowledge(),
        _metadata.GetForgottenKnowledge(),
        this,
        _currentSessionContext,
        syncCallback);

    _metadataStore.CommitTransaction();
}
/// <summary>
/// Enumerate all internal changes in batch mode: selects inserted/updated/deleted rows
/// per table inside a single transaction, spooling them to disk-based batch files
/// whenever the accumulated row size exceeds <paramref name="downloadBatchSizeInKB"/>.
/// </summary>
/// <param name="context">Current sync context (sync way, parameters, stage).</param>
/// <param name="scopeInfo">Scope used to compute each row's change state.</param>
/// <param name="downloadBatchSizeInKB">Maximum batch size in KB before a batch part is flushed.</param>
/// <param name="configTables">Schema of the tables to enumerate.</param>
/// <param name="batchDirectory">Root directory where batch part files are written.</param>
/// <param name="policy">Conflict resolution policy, mapped to the adapter's apply action.</param>
/// <param name="filters">Optional filter clauses (only honored on the server side with parameters).</param>
/// <returns>The on-disk batch info and the per-table selection statistics.</returns>
internal async Task<(BatchInfo, ChangesSelected)> EnumerateChangesInBatchesInternal(SyncContext context, ScopeInfo scopeInfo, int downloadBatchSizeInKB, DmSet configTables, string batchDirectory, ConflictResolutionPolicy policy, ICollection<FilterClause> filters)
{
    DmTable dmTable = null;

    // running memory size of the rows accumulated in the current batch
    double memorySizeFromDmRows = 0d;
    int batchIndex = 0;

    // this batch info won't be in memory, it will be batched to files
    BatchInfo batchInfo = new BatchInfo
    {
        // directory where all files will be stored
        Directory = BatchInfo.GenerateNewDirectoryName(),
        // not in memory since we serialize all files in the tmp directory
        InMemory = false
    };

    // Create stats object to store changes count
    ChangesSelected changes = new ChangesSelected();

    using (var connection = this.CreateConnection())
    {
        try
        {
            // Open the connection
            await connection.OpenAsync();

            using (var transaction = connection.BeginTransaction())
            {
                // create the in memory changes set
                DmSet changesSet = new DmSet(configTables.DmSetName);

                foreach (var tableDescription in configTables.Tables)
                {
                    // if we are in upload stage, skip download-only tables
                    if (context.SyncWay == SyncWay.Upload && tableDescription.SyncDirection == SyncDirection.DownloadOnly)
                    {
                        continue;
                    }

                    // if we are in download stage, skip upload-only tables
                    if (context.SyncWay == SyncWay.Download && tableDescription.SyncDirection == SyncDirection.UploadOnly)
                    {
                        continue;
                    }

                    var builder = this.GetDatabaseBuilder(tableDescription);
                    var syncAdapter = builder.CreateSyncAdapter(connection, transaction);
                    // Map the resolution policy to the adapter-level apply action.
                    syncAdapter.ConflictApplyAction = SyncConfiguration.GetApplyAction(policy);

                    // raise before event
                    context.SyncStage = SyncStage.TableChangesSelecting;
                    var beforeArgs = new TableChangesSelectingEventArgs(this.ProviderTypeName, context.SyncStage, tableDescription.TableName);
                    this.TryRaiseProgressEvent(beforeArgs, this.TableChangesSelecting);

                    // Get Command: use the filtered select only on the server side, when
                    // parameters and matching filter clauses are present.
                    DbCommand selectIncrementalChangesCommand;
                    DbCommandType dbCommandType;
                    if (this.CanBeServerProvider && context.Parameters != null && context.Parameters.Count > 0 && filters != null && filters.Count > 0)
                    {
                        var filtersName = filters
                            .Where(f => f.TableName.Equals(tableDescription.TableName, StringComparison.InvariantCultureIgnoreCase))
                            .Select(f => f.ColumnName);

                        if (filtersName != null && filtersName.Count() > 0)
                        {
                            dbCommandType = DbCommandType.SelectChangesWitFilters;
                            selectIncrementalChangesCommand = syncAdapter.GetCommand(dbCommandType, filtersName);
                        }
                        else
                        {
                            dbCommandType = DbCommandType.SelectChanges;
                            selectIncrementalChangesCommand = syncAdapter.GetCommand(dbCommandType);
                        }
                    }
                    else
                    {
                        dbCommandType = DbCommandType.SelectChanges;
                        selectIncrementalChangesCommand = syncAdapter.GetCommand(dbCommandType);
                    }

                    if (selectIncrementalChangesCommand == null)
                    {
                        var exc = "Missing command 'SelectIncrementalChangesCommand' ";
                        throw new Exception(exc);
                    }

                    // Deriving Parameters.
                    // FIX: previously this passed the hard-coded DbCommandType.SelectChanges
                    // even when the command was SelectChangesWitFilters, deriving the wrong
                    // parameter set for filtered commands, and it ran before the null check.
                    // Now matches the non-batched EnumerateChangesInternal implementation.
                    syncAdapter.SetCommandParameters(dbCommandType, selectIncrementalChangesCommand);

                    // Get a clone of the table with tracking columns
                    dmTable = BuildChangesTable(tableDescription.TableName, configTables);

                    // Set commons parameters
                    SetSelectChangesCommonParameters(context, scopeInfo, selectIncrementalChangesCommand);

                    // Set filter parameters if any (server side only)
                    if (this.CanBeServerProvider && context.Parameters != null && context.Parameters.Count > 0 && filters != null && filters.Count > 0)
                    {
                        var filterTable = filters.Where(f => f.TableName.Equals(tableDescription.TableName, StringComparison.InvariantCultureIgnoreCase)).ToList();
                        if (filterTable != null && filterTable.Count > 0)
                        {
                            foreach (var filter in filterTable)
                            {
                                // Match the sync parameter to the filter on both column and table name.
                                var parameter = context.Parameters.FirstOrDefault(p => p.ColumnName.Equals(filter.ColumnName, StringComparison.InvariantCultureIgnoreCase) && p.TableName.Equals(filter.TableName, StringComparison.InvariantCultureIgnoreCase));
                                if (parameter != null)
                                {
                                    DbManager.SetParameterValue(selectIncrementalChangesCommand, parameter.ColumnName, parameter.Value);
                                }
                            }
                        }
                    }

                    // Temporary tracking column used to detect tombstones while reading.
                    this.AddTrackingColumns<int>(dmTable, "sync_row_is_tombstone");

                    // Statistics
                    TableChangesSelected tableChangesSelected = new TableChangesSelected
                    {
                        TableName = tableDescription.TableName
                    };
                    changes.TableChangesSelected.Add(tableChangesSelected);

                    // Get the reader
                    using (var dataReader = selectIncrementalChangesCommand.ExecuteReader())
                    {
                        while (dataReader.Read())
                        {
                            DmRow dmRow = CreateRowFromReader(dataReader, dmTable);

                            // get if the current row is inserted, modified, deleted
                            DmRowState state = DmRowState.Unchanged;
                            state = GetStateFromDmRow(dmRow, scopeInfo);

                            // If the row is not deleted, inserted or modified, go next
                            if (state != DmRowState.Deleted && state != DmRowState.Modified && state != DmRowState.Added)
                            {
                                continue;
                            }

                            var fieldsSize = DmTableSurrogate.GetRowSizeFromDataRow(dmRow);
                            var dmRowSize = fieldsSize / 1024d;

                            // A single row larger than the batch size can never be spooled: abort.
                            if (dmRowSize > downloadBatchSizeInKB)
                            {
                                var exc = $"Row is too big ({dmRowSize} kb.) for the current Configuration.DownloadBatchSizeInKB ({downloadBatchSizeInKB} kb.) Aborting Sync...";
                                throw new Exception(exc);
                            }

                            // Calculate the new memory size
                            memorySizeFromDmRows = memorySizeFromDmRows + dmRowSize;

                            // add row
                            dmTable.Rows.Add(dmRow);
                            tableChangesSelected.TotalChanges++;

                            // acceptchanges before modifying
                            dmRow.AcceptChanges();

                            // Set the correct state to be applied
                            if (state == DmRowState.Deleted)
                            {
                                dmRow.Delete();
                                tableChangesSelected.Deletes++;
                            }
                            else if (state == DmRowState.Added)
                            {
                                dmRow.SetAdded();
                                tableChangesSelected.Inserts++;
                            }
                            else if (state == DmRowState.Modified)
                            {
                                dmRow.SetModified();
                                tableChangesSelected.Updates++;
                            }

                            // We exceeded the batch size, so flush the current rows to a batch part
                            if (memorySizeFromDmRows > downloadBatchSizeInKB)
                            {
                                // Since we dont need this column anymore, remove it
                                this.RemoveTrackingColumns(dmTable, "sync_row_is_tombstone");

                                changesSet.Tables.Add(dmTable);

                                // generate the batch part info
                                batchInfo.GenerateBatchInfo(batchIndex, changesSet, batchDirectory);

                                // increment batch index
                                batchIndex++;
                                changesSet.Clear();

                                // Recreate an empty DmSet, then a dmTable clone to keep reading into
                                changesSet = new DmSet(configTables.DmSetName);
                                dmTable = dmTable.Clone();
                                this.AddTrackingColumns<int>(dmTable, "sync_row_is_tombstone");

                                // Init the row memory size
                                memorySizeFromDmRows = 0d;

                                // add stats for a SyncProgress event
                                context.SyncStage = SyncStage.TableChangesSelected;
                                var args2 = new TableChangesSelectedEventArgs(this.ProviderTypeName, SyncStage.TableChangesSelected, tableChangesSelected);
                                this.TryRaiseProgressEvent(args2, this.TableChangesSelected);
                            }
                        }

                        // Since we dont need this column anymore, remove it
                        this.RemoveTrackingColumns(dmTable, "sync_row_is_tombstone");

                        context.SyncStage = SyncStage.TableChangesSelected;

                        // Keep the remaining rows of this table for the final batch part.
                        changesSet.Tables.Add(dmTable);

                        // Init the row memory size
                        memorySizeFromDmRows = 0d;

                        // Event progress
                        context.SyncStage = SyncStage.TableChangesSelected;
                        var args = new TableChangesSelectedEventArgs(this.ProviderTypeName, SyncStage.TableChangesSelected, tableChangesSelected);
                        this.TryRaiseProgressEvent(args, this.TableChangesSelected);
                    }
                }

                // We are in batch mode: flush the last (possibly partial) batch part and mark it last
                if (changesSet != null && changesSet.HasTables && changesSet.HasChanges())
                {
                    var batchPartInfo = batchInfo.GenerateBatchInfo(batchIndex, changesSet, batchDirectory);
                    if (batchPartInfo != null)
                    {
                        batchPartInfo.IsLastBatch = true;
                    }
                }

                transaction.Commit();
            }
        }
        finally
        {
            // The useless `catch (Exception) { throw; }` wrappers were removed: a bare
            // rethrow with no logging is a no-op, and this finally runs either way.
            if (connection != null && connection.State == ConnectionState.Open)
            {
                connection.Close();
            }
        }
    }

    return (batchInfo, changes);
}
/// <summary>
/// Download mechanism: applies a batch of incoming changes through a NotifyingChangeApplier
/// with merge-based collision resolution, logging conflicts to the in-memory conflict log.
/// </summary>
/// <param name="resolutionPolicy">Conflict resolution policy to apply.</param>
/// <param name="sourceChanges">Batch of changes from the source.</param>
/// <param name="changeDataRetriever">Retriever used by the applier to fetch item data.</param>
/// <param name="syncCallback">Callbacks raised during change application.</param>
/// <param name="sessionStatistics">Session statistics (not updated here).</param>
public override void ProcessChangeBatch(ConflictResolutionPolicy resolutionPolicy, ChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallback, SyncSessionStatistics sessionStatistics)
{
    // Our local versions for the items in the incoming batch.
    ChangeBatch localKnownVersions = sync.GetChanges(sourceChanges);

    // Forgotten knowledge for the destination, derived from the current local knowledge.
    var forgotten = new ForgottenKnowledge(sync.IdFormats, sync.SyncKnowledge);

    // Merge colliding changes; conflicts are recorded in the in-memory conflict log.
    var applier = new NotifyingChangeApplier(sync.IdFormats);
    applier.ApplyChanges(
        resolutionPolicy,
        CollisionConflictResolutionPolicy.Merge,
        sourceChanges,
        (IChangeDataRetriever)changeDataRetriever,
        localKnownVersions,
        sync.SyncKnowledge.Clone(),
        forgotten,
        this,
        _memConflictLog,
        currentSessionContext,
        syncCallback);
}
/// <summary>
/// Enumerates all changes in batches and zips the resulting batch directory into an
/// archive, so a new client can be initialized from the archive instead of a full sync.
/// </summary>
/// <param name="tables">Tables to include in the archive.</param>
/// <param name="downloadBatchSizeInKB">Batch size in KB; values &lt;= 0 default to 10000.</param>
/// <param name="batchDirectory">Directory where batch files and the archive are written.</param>
/// <param name="policy">Conflict resolution policy used during enumeration.</param>
/// <param name="filters">Optional filter clauses.</param>
/// <returns>The elapsed time taken to prepare the archive.</returns>
/// <exception cref="SyncException">Wraps any failure during preparation.</exception>
public virtual async Task<TimeSpan> PrepareArchiveAsync(string[] tables, int downloadBatchSizeInKB, string batchDirectory, ConflictResolutionPolicy policy, ICollection<FilterClause> filters)
{
    try
    {
        // We time the whole preparation: the last timestamp at which the zip was generated
        // lets a new client launch a sync from this point (see SyncConfiguration.Archive).
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.Start();

        SyncContext context;
        ScopeInfo scopeInfo;

        context = new SyncContext(Guid.NewGuid())
        {
            SyncType = SyncType.Normal,
            SyncWay = SyncWay.Download,
        };

        scopeInfo = new ScopeInfo
        {
            IsNewScope = true
        };

        // Read configuration
        var config = await this.ReadSchemaAsync(tables);

        // We want a batched zip; fall back to a sane default batch size.
        if (downloadBatchSizeInKB <= 0)
        {
            downloadBatchSizeInKB = 10000;
        }

        (var batchInfo, var changesSelected) = await this.EnumerateChangesInBatchesInternal(context, scopeInfo, downloadBatchSizeInKB, config.Schema, batchDirectory, policy, filters);

        var dir = Path.Combine(batchDirectory, batchInfo.Directory);

        // FIX: the archive path was built with String.Concat(batchDirectory, "\\", ...),
        // which hard-codes the Windows separator and can produce doubled separators.
        // Path.Combine handles separators correctly on every platform.
        var archiveFullName = Path.Combine(batchDirectory, Path.GetRandomFileName());

        ZipFile.CreateFromDirectory(dir, archiveFullName, CompressionLevel.Fastest, false);

        stopwatch.Stop();
        return stopwatch.Elapsed;
    }
    catch (Exception ex)
    {
        throw new SyncException(ex, SyncStage.TableChangesSelecting, this.ProviderTypeName);
    }
}
/// <summary>
/// Creates a new instance of the <see cref="SqlSyncProviderService" /> class. Also uses the batch handler passed as parameter if it is not null.
/// </summary>
/// <param name="configuration">Sync configuration</param>
/// <param name="serverScope">Server scope/template</param>
/// <param name="filterParams">Filter parameters. Pass null for no filter parameters.</param>
/// <param name="operationContext">SyncOperationContext object to create the SyncConflictContext object.</param>
/// <param name="batchHandler">Batch Handler for spooling and retrieving batches. Pass null to use the default file-based handler.</param>
internal SqlSyncProviderService(SyncServiceConfiguration configuration, string serverScope, List<SqlSyncProviderFilterParameterInfo> filterParams, SyncOperationContext operationContext, IBatchHandler batchHandler)
{
    // FIX: configuration is dereferenced immediately below but was never null-checked,
    // while serverScope was — validate both for a clear ArgumentNullException.
    WebUtil.CheckArgumentNull(configuration, "configuration");
    WebUtil.CheckArgumentNull(serverScope, "serverScope");

    _configuration = configuration;
    _serverConnectionString = _configuration.ServerConnectionString;
    _scopeName = serverScope;
    _conflictResolutionPolicy = _configuration.ConflictResolutionPolicy;
    _filterParams = filterParams;
    _converter = new DataSetToEntitiesConverter(_configuration.TableGlobalNameToTypeMapping, _configuration.TypeToTableGlobalNameMapping, _configuration.TypeToTableLocalNameMapping);

    // Fall back to the default file-based spooling handler when none is supplied.
    _batchHandler = batchHandler ?? new FileBasedBatchHandler(_configuration.BatchSpoolDirectory);

    if (operationContext != null)
    {
        _conflictContext = new SyncConflictContext()
        {
            ScopeName = serverScope,
            Operation = SyncOperations.Upload,
            RequestHeaders = operationContext.RequestHeaders,
            ResponseHeaders = operationContext.ResponseHeaders,
            QueryString = operationContext.QueryString
        };
    }
}
/// <inheritdoc />
/// <remarks>Full-enumeration change application is not implemented by this provider.</remarks>
/// <exception cref="NotImplementedException">Always thrown.</exception>
public override void ProcessFullEnumerationChangeBatch(ConflictResolutionPolicy resolutionPolicy, FullEnumerationChangeBatch sourceChanges, object changeDataRetriever, SyncCallbacks syncCallbacks, SyncSessionStatistics sessionStatistics)
    => throw new NotImplementedException();