/// <summary>
/// Apply changes sent by a client to the server.
/// </summary>
/// <param name="serverBlob">Blob sent in the incoming request</param>
/// <param name="entities">Changes from the client</param>
/// <returns>Response containing the new knowledge and conflict/error information.</returns>
public ApplyChangesResponse ApplyChanges(byte[] serverBlob, List<IOfflineEntity> entities)
{
    WebUtil.CheckArgumentNull(serverBlob, "serverBlob");
    WebUtil.CheckArgumentNull(entities, "entities");

    if (0 == serverBlob.Length)
    {
        throw new InvalidOperationException("serverBlob is empty");
    }

    var syncBlob = new SyncBlob();

    SyncBlob incomingBlob = SyncBlob.DeSerialize(serverBlob);
    PopulateClientScopeNameAndSyncId(incomingBlob);

    // Set the scope name in the response blob.
    syncBlob.ClientScopeName = incomingBlob.ClientScopeName;

    // If the requested scope does not exist, throw an error since we
    // don't initialize scopes on upload requests.
    if (!CheckIfScopeExists())
    {
        throw SyncServiceException.CreateResourceNotFound("Scope does not exist");
    }

    byte[] clientKnowledgeBlob = incomingBlob.ClientKnowledge;

    // Initialize a SqlSyncProvider object.
    _sqlSyncProvider = CreateSqlSyncProviderInstance(_clientScopeName, _serverConnectionString, _configuration.SyncObjectSchema);

    var response = new ApplyChangesResponse();

    // Deserialize the knowledge or create new empty knowledge.
    SyncKnowledge clientKnowledge = GetSyncKnowledgeFromBlob(clientKnowledgeBlob);

    // If there are no entities to upload, return the client knowledge as-is.
    if (entities.Count == 0)
    {
        response.Conflicts = new List<SyncConflict>();
        response.Errors = new List<SyncError>();

        syncBlob.ClientKnowledge = clientKnowledge.Serialize();
        response.ServerBlob = syncBlob.Serialize();

        return response;
    }

    // The client never has any forgotten knowledge, so create a new one.
    var forgottenKnowledge = new ForgottenKnowledge(_sqlSyncProvider.IdFormats, clientKnowledge);

    // Convert the entities to a DataSet using the custom converter.
    DataSet changesDS = _converter.ConvertEntitiesToDataSet(entities);

    var stats = new SyncSessionStatistics();
    var sessionContext = new SyncSessionContext(_sqlSyncProvider.IdFormats, new SyncCallbacks());

    _sqlSyncProvider.BeginSession(SyncProviderPosition.Remote, sessionContext);

    ulong tickCount = 0;
    SyncKnowledge updatedClientKnowldege;

    try
    {
        uint batchSize;
        SyncKnowledge serverKnowledge;

        // This gives us the server knowledge.
        _sqlSyncProvider.GetSyncBatchParameters(out batchSize, out serverKnowledge);

        var changeBatch = new ChangeBatch(_sqlSyncProvider.IdFormats, clientKnowledge, forgottenKnowledge);
        changeBatch.SetLastBatch();

        // Note: There is a possibility of negative item exceptions between two uploads from the
        // same client (for example, in case of RI failures). This would result in an incorrect value if
        // FindMinTickCountForReplica is used to get the last tickcount. So we need to ignore the negative
        // item exceptions when finding the tickcount for the client replica from the server knowledge.

        /* Logic:
         * SyncKnowledge.GetKnowledgeForItemId could be used for item id zero, and then we could find the
         * minimum tickcount for the client replica id. However, this does not seem to work, so we use the
         * KnowledgeInspector, enumerate over each ClockVector, find the client clock vector and read its tickcount.
         *
         * Assumption: The above approach assumes that we don't have any positive exceptions in the knowledge.
         */
        try
        {
            // Check if the client replica key exists.
            uint clientReplicaKey = serverKnowledge.ReplicaKeyMap.LookupReplicaKey(_clientSyncId);

            var ki = new KnowledgeInspector(1, serverKnowledge);
            var clockVector = (ClockVector)ki.ScopeClockVector;
            int noOfReplicaKeys = clockVector.Count;

            for (int i = noOfReplicaKeys - 1; i >= 0; i--)
            {
                if (clockVector[i].ReplicaKey == clientReplicaKey)
                {
                    tickCount = clockVector[i].TickCount;
                    break;
                }
            }
        }
        catch (ReplicaNotFoundException exception)
        {
            SyncTracer.Info("ReplicaNotFoundException. NEW CLIENT. Exception details: {0}", WebUtil.GetExceptionMessage(exception));

            // If the knowledge does not contain the client replica (first apply), initialize the tickcount to zero.
            tickCount = 0;
        }

        // Increment the tickcount.
        tickCount++;

        // Update the made-with knowledge to include the new tickcount.
        updatedClientKnowldege = new SyncKnowledge(_sqlSyncProvider.IdFormats, _clientSyncId, tickCount);
        updatedClientKnowldege.Combine(clientKnowledge);

        // The incoming data does not have metadata for each item, so we need to create it at this point.
        AddSyncColumnsToDataSet(changesDS, tickCount);

        // Make the DbSyncContext.
        var dbSyncContext = new DbSyncContext
        {
            IsDataBatched = false,
            IsLastBatch = true,
            DataSet = changesDS,
            MadeWithKnowledge = updatedClientKnowldege,
            MadeWithForgottenKnowledge = forgottenKnowledge,
            ScopeProgress = new DbSyncScopeProgress()
        };

        _conflicts = new List<SyncConflict>();
        _syncErrors = new List<SyncError>();

        // Subscribe to the ApplyChangeFailed event to handle conflicts.
        _sqlSyncProvider.ApplyChangeFailed += SqlSyncProviderApplyChangeFailed;

        // Subscribe to the ChangesApplied event to read the server tickcount in case there are any conflicts.
        _sqlSyncProvider.ChangesApplied += SqlSyncProviderChangesApplied;

        // NOTE: The ConflictResolutionPolicy passed into the method is IGNORED.
        // Conflicts can be logged by subscribing to the failed events.
        _sqlSyncProvider.ProcessChangeBatch(Microsoft.Synchronization.ConflictResolutionPolicy.DestinationWins,
                                            changeBatch, dbSyncContext, new SyncCallbacks(), stats);

        if (0 != _conflicts.Count)
        {
            _sqlSyncProvider.GetSyncBatchParameters(out batchSize, out serverKnowledge);

            // The way the current P2P provider works, versions are bumped up when conflicts are resolved on the server.
            // This would result in us sending those changes back to the client on the next download request. We don't
            // want to enumerate that change again on the next request from the same client.
            // The solution is to get the server knowledge after all changes are applied, then project the knowledge
            // of each conflicting item and add it as a positive exception to the updated client knowledge.
            AddConflictItemsKnowledgeToClientKnowledge(updatedClientKnowldege, serverKnowledge);
        }
    }
    finally
    {
        _sqlSyncProvider.EndSession(sessionContext);
    }

    // Don't send any updates to the server knowledge since the client has not received any updates yet.
    // This updated knowledge will only include an update to the client tickcount.
    // The client will obtain the server knowledge when it does a GetChanges.
    // If we included the server knowledge, the client would never get any items that are
    // between the current server knowledge and the server knowledge the client already knows about.
    syncBlob.ClientKnowledge = updatedClientKnowldege.Serialize();
    response.ServerBlob = syncBlob.Serialize();

    response.Conflicts = _conflicts;
    response.Errors = _syncErrors;

    return response;
}
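
// Illustrative usage sketch (not part of the service): a caller is expected to send the ServerBlob
// returned by its previous GetChanges call together with the locally modified entities, and to persist
// the blob returned here for the next request. Conflicts and errors recorded by the failed-event
// handlers above are surfaced on the response. The names service, lastServerBlob and pendingChanges
// are hypothetical.
//
//   ApplyChangesResponse uploadResult = service.ApplyChanges(lastServerBlob, pendingChanges);
//   lastServerBlob = uploadResult.ServerBlob;      // carries the updated client knowledge
//   ReviewConflicts(uploadResult.Conflicts);       // hypothetical helper
//   ReviewErrors(uploadResult.Errors);             // hypothetical helper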
/// <summary>
/// Get changes for a client using the knowledge that is passed in.
/// </summary>
/// <param name="serverBlob">Client knowledge as byte[]</param>
/// <returns>Response containing the new knowledge and the list of changes.</returns>
public GetChangesResponse GetChanges(byte[] serverBlob)
{
    bool isNewClient = false;

    var response = new GetChangesResponse();
    var syncBlob = new SyncBlob();

    byte[] clientKnowledgeBlob = null;

    // If the incoming knowledge blob is null, then we need to initialize a new scope
    // for this request.
    if (null == serverBlob || 0 == serverBlob.Length)
    {
        // Create a new Guid and use that as the client Id.
        Guid clientId = Guid.NewGuid();

        _clientScopeName = String.Format(CultureInfo.InvariantCulture, "{0}_{1}", _scopeName, clientId);
        _clientSyncId = new SyncId(clientId);

        CreateNewScopeForClient();

        isNewClient = true;

        syncBlob.ClientScopeName = clientId.ToString();
    }
    else
    {
        SyncBlob incomingBlob = SyncBlob.DeSerialize(serverBlob);

        PopulateClientScopeNameAndSyncId(incomingBlob);

        syncBlob.ClientScopeName = incomingBlob.ClientScopeName;
        clientKnowledgeBlob = incomingBlob.ClientKnowledge;

        if (null != incomingBlob.BatchCode && null != incomingBlob.NextBatch)
        {
            // This is a batched request, so handle it separately.
            return GetChanges(incomingBlob.ClientKnowledge, incomingBlob.BatchCode.Value, incomingBlob.NextBatch.Value);
        }
    }

    // Initialize a SqlSyncProvider object.
    _sqlSyncProvider = CreateSqlSyncProviderInstance(_clientScopeName, _serverConnectionString, _configuration.SyncObjectSchema);

    var sessionContext = new SyncSessionContext(_sqlSyncProvider.IdFormats, new SyncCallbacks());

    _sqlSyncProvider.BeginSession(SyncProviderPosition.Remote, sessionContext);

    try
    {
        // Get the SyncKnowledge from the blob. If the blob is null, initialize a default SyncKnowledge object.
        SyncKnowledge clientKnowledge = GetSyncKnowledgeFromBlob(clientKnowledgeBlob);

        DbSyncContext dbSyncContext;

        uint changeBatchSize = (_configuration.IsBatchingEnabled) ? (uint)_configuration.DownloadBatchSizeInKB : 0;

        RowSorter rowSorter = null;

        do
        {
            object changeDataRetriever;

            // Get the next batch.
            _sqlSyncProvider.GetChangeBatch(changeBatchSize, clientKnowledge, out changeDataRetriever);

            dbSyncContext = (DbSyncContext)changeDataRetriever;

            // Only initialize the RowSorter if the data is batched.
            if (null == rowSorter && _configuration.IsBatchingEnabled)
            {
                // Clone the client knowledge.
                var clonedClientKnowledge = clientKnowledge.Clone();

                // Combine it with the MadeWithKnowledge of the server.
                clonedClientKnowledge.Combine(dbSyncContext.MadeWithKnowledge);

                // Use the new knowledge to get an instance of the RowSorter class.
                rowSorter = GetRowSorter(clonedClientKnowledge);
            }

            // Remove version information from the result dataset.
            RemoveSyncVersionColumns(dbSyncContext.DataSet);

            // For a new client we don't want to send tombstones. This reduces the amount of data
            // transferred, and the client doesn't care about tombstones anyway.
            if (isNewClient)
            {
                RemoveTombstoneRowsFromDataSet(dbSyncContext.DataSet);
            }

            // Add the dataset to the row sorter. Only use this if batching is enabled.
            if (_configuration.IsBatchingEnabled)
            {
                rowSorter.AddUnsortedDataSet(dbSyncContext.DataSet);

                // Delete the batch file generated by the provider, since we have read it.
                // Otherwise we will keep accumulating files which are not needed.
                if (!String.IsNullOrEmpty(dbSyncContext.BatchFileName) && File.Exists(dbSyncContext.BatchFileName))
                {
                    File.Delete(dbSyncContext.BatchFileName);
                }
            }
        } while (!dbSyncContext.IsLastBatch && dbSyncContext.IsDataBatched);

        List<IOfflineEntity> entities;

        if (_configuration.IsBatchingEnabled)
        {
            // Batching is enabled.
            Batch batch = SaveBatchesAndReturnFirstBatch(rowSorter);

            if (null == batch)
            {
                entities = new List<IOfflineEntity>();
            }
            else
            {
                // Convert to entities.
                entities = _converter.ConvertDataSetToEntities(batch.Data);

                // Only combine the knowledge of this batch.
                clientKnowledge.Combine(SyncKnowledge.Deserialize(_sqlSyncProvider.IdFormats, batch.LearnedKnowledge));

                response.IsLastBatch = batch.IsLastBatch;
                syncBlob.IsLastBatch = batch.IsLastBatch;

                if (batch.IsLastBatch)
                {
                    syncBlob.NextBatch = null;
                    syncBlob.BatchCode = null;
                }
                else
                {
                    syncBlob.NextBatch = batch.NextBatch;
                    syncBlob.BatchCode = batch.BatchCode;
                }
            }
        }
        else
        {
            // No batching.
            response.IsLastBatch = true;

            entities = _converter.ConvertDataSetToEntities(dbSyncContext.DataSet);

            // Combine the client and the server knowledge.
            // The server may have an updated knowledge from the last time the client synced.
            clientKnowledge.Combine(dbSyncContext.MadeWithKnowledge);
        }

        // Save data in the response object.
        syncBlob.ClientKnowledge = clientKnowledge.Serialize();
        response.ServerBlob = syncBlob.Serialize();
        response.EntityList = entities;
    }
    finally
    {
        _sqlSyncProvider.EndSession(sessionContext);
    }

    return response;
}
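
// Illustrative usage sketch (not part of the service): when batching is enabled a client is expected
// to keep calling GetChanges with the blob from the previous response until IsLastBatch is true; the
// blob carries the BatchCode/NextBatch bookmarks that route follow-up requests to the batched overload
// below. The names service, lastServerBlob and ApplyToLocalStore are hypothetical.
//
//   GetChangesResponse downloadResult;
//   do
//   {
//       downloadResult = service.GetChanges(lastServerBlob);
//       ApplyToLocalStore(downloadResult.EntityList);    // hypothetical client-side apply
//       lastServerBlob = downloadResult.ServerBlob;      // knowledge plus batching bookmarks
//   } while (!downloadResult.IsLastBatch);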
/// <summary>
/// Gets the next batch of changes for a client.
/// </summary>
/// <param name="serverBlob">Client knowledge as byte[]</param>
/// <param name="batchCode">Batch code for the batch</param>
/// <param name="nextBatchSequenceNumber">Sequence number of the next batch</param>
/// <returns>Response containing the new knowledge and the list of changes.</returns>
private GetChangesResponse GetChanges(byte[] serverBlob, Guid batchCode, Guid nextBatchSequenceNumber)
{
    WebUtil.CheckArgumentNull(serverBlob, "clientKnowledgeBlob");

    // Get the next batch using the batch handler implementation.
    Batch batch = _batchHandler.GetNextBatch(batchCode, nextBatchSequenceNumber);

    if (null == batch)
    {
        // Since we didn't get a batch, fall back to the full GetChanges call.
        return GetChanges(serverBlob);
    }

    // Initialize a SqlSyncProvider object.
    _sqlSyncProvider = CreateSqlSyncProviderInstance(_clientScopeName, _serverConnectionString, _configuration.SyncObjectSchema);

    SyncKnowledge clientKnowledge = GetSyncKnowledgeFromBlob(serverBlob);

    List<IOfflineEntity> entities = _converter.ConvertDataSetToEntities(batch.Data);

    // Only combine the knowledge of this batch.
    clientKnowledge.Combine(SyncKnowledge.Deserialize(_sqlSyncProvider.IdFormats, batch.LearnedKnowledge));

    var syncBlob = new SyncBlob
    {
        ClientScopeName = _clientSyncId.GetGuidId().ToString(),
        ClientKnowledge = clientKnowledge.Serialize(),
        BatchCode = batch.BatchCode,
        IsLastBatch = batch.IsLastBatch,
        NextBatch = batch.NextBatch
    };

    // Save data in the response object.
    var response = new GetChangesResponse
    {
        EntityList = entities,
        IsLastBatch = batch.IsLastBatch,
        ServerBlob = syncBlob.Serialize()
    };

    return response;
}
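
// Illustrative usage sketch (not part of the service): callers never invoke this overload directly.
// The public GetChanges(byte[]) entry point routes to it whenever the incoming blob carries both a
// BatchCode and a NextBatch value, and a missing or expired batch falls back to a full enumeration,
// so the client-side loop sketched above keeps working unchanged. The names service and lastServerBlob
// are hypothetical.
//
//   // Second and subsequent requests of a batched download; the blob decides the code path on the server.
//   GetChangesResponse next = service.GetChanges(lastServerBlob);
//   lastServerBlob = next.ServerBlob;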