/// <summary>
/// Apply changes sent by a client to the server.
/// </summary>
/// <param name="serverBlob">Blob sent in the incoming request</param>
/// <param name="entities">Changes from the client</param>
/// <returns>Response containing the new knowledge and conflict/error information.</returns>
public ApplyChangesResponse ApplyChanges(byte[] serverBlob, List<IOfflineEntity> entities)
{
    WebUtil.CheckArgumentNull(serverBlob, "serverBlob");
    WebUtil.CheckArgumentNull(entities, "entities");

    // An upload must carry the knowledge blob from a previous exchange;
    // an empty blob means the client skipped the handshake.
    if (0 == serverBlob.Length)
    {
        throw new InvalidOperationException("serverBlob is empty");
    }

    var syncBlob = new SyncBlob();

    SyncBlob incomingBlob = SyncBlob.DeSerialize(serverBlob);

    PopulateClientScopeNameAndSyncId(incomingBlob);

    // Set the scope name in the response blob.
    syncBlob.ClientScopeName = incomingBlob.ClientScopeName;

    // If the requested scope does not exist, then throw an error since we
    // don't initialize scopes on upload requests.
    if (!CheckIfScopeExists())
    {
        throw SyncServiceException.CreateResourceNotFound("Scope does not exist");
    }

    byte[] clientKnowledgeBlob = incomingBlob.ClientKnowledge;

    // Initialize a SqlSyncProvider object.
    _sqlSyncProvider = CreateSqlSyncProviderInstance(_clientScopeName, _serverConnectionString, _configuration.SyncObjectSchema);

    var response = new ApplyChangesResponse();

    // Deserialize the knowledge or create new empty knowledge.
    SyncKnowledge clientKnowledge = GetSyncKnowledgeFromBlob(clientKnowledgeBlob);

    // If there are no entities to upload, then return the client knowledge as is.
    if (entities.Count == 0)
    {
        response.Conflicts = new List<SyncConflict>();
        response.Errors = new List<SyncError>();
        syncBlob.ClientKnowledge = clientKnowledge.Serialize();
        response.ServerBlob = syncBlob.Serialize();
        return response;
    }

    // Client never has any forgotten knowledge. So create a new one.
    var forgottenKnowledge = new ForgottenKnowledge(_sqlSyncProvider.IdFormats, clientKnowledge);

    // Convert the entities to dataset using the custom converter.
    DataSet changesDS = _converter.ConvertEntitiesToDataSet(entities);

    var stats = new SyncSessionStatistics();
    var sessionContext = new SyncSessionContext(_sqlSyncProvider.IdFormats, new SyncCallbacks());

    _sqlSyncProvider.BeginSession(SyncProviderPosition.Remote, sessionContext);

    ulong tickCount = 0;
    SyncKnowledge updatedClientKnowldege;
    try
    {
        uint batchSize;
        SyncKnowledge serverKnowledge;

        // This gives us the server knowledge.
        _sqlSyncProvider.GetSyncBatchParameters(out batchSize, out serverKnowledge);

        var changeBatch = new ChangeBatch(_sqlSyncProvider.IdFormats, clientKnowledge, forgottenKnowledge);
        changeBatch.SetLastBatch();

        // Note: There is a possibility of (-ve) item exceptions between two uploads from the
        // same client (for example: in case of RI failures). This would result in an incorrect value if the function
        // FindMinTickCountForReplica is used to get the last tickcount. So, we need to ignore the -ve item exceptions
        // when finding the tickcount for the client replica from the server knowledge.
        /* Logic:
         * SyncKnowledge.GetKnowledgeForItemId could be used for itemid Zero and then we can find the mintickcount for client replica id.
         * This does not however seem to work, so we use the KnowledgeInspector and enumerate over each ClockVector
         * and find the client clockvector and get its tickcount.
         *
         * Assumption: The above approach assumes that we don't have any positive exceptions in the knowledge.
         */
        try
        {
            // Check if the client replica key exists.
            uint clientReplicaKey = serverKnowledge.ReplicaKeyMap.LookupReplicaKey(_clientSyncId);

            var ki = new KnowledgeInspector(1, serverKnowledge);
            var clockVector = (ClockVector)ki.ScopeClockVector;
            int noOfReplicaKeys = clockVector.Count;

            // Scan backwards for the clock-vector entry that belongs to this client.
            for (int i = noOfReplicaKeys - 1; i >= 0; i--)
            {
                if (clockVector[i].ReplicaKey == clientReplicaKey)
                {
                    tickCount = clockVector[i].TickCount;
                    break;
                }
            }
        }
        catch (ReplicaNotFoundException exception)
        {
            SyncTracer.Info("ReplicaNotFoundException. NEW CLIENT. Exception details: {0}", WebUtil.GetExceptionMessage(exception));

            // If the knowledge does not contain the client replica (first apply), initialize tickcount to zero.
            tickCount = 0;
        }

        // Increment the tickcount
        tickCount++;

        // Update the made-with knowledge to include the new tickcount.
        // (local name keeps the original spelling)
        updatedClientKnowldege = new SyncKnowledge(_sqlSyncProvider.IdFormats, _clientSyncId, tickCount);
        updatedClientKnowldege.Combine(clientKnowledge);

        // The incoming data does not have metadata for each item, so we need to create it at this point.
        AddSyncColumnsToDataSet(changesDS, tickCount);

        // Make DbSyncContext
        var dbSyncContext = new DbSyncContext
        {
            IsDataBatched = false,
            IsLastBatch = true,
            DataSet = changesDS,
            MadeWithKnowledge = updatedClientKnowldege,
            MadeWithForgottenKnowledge = forgottenKnowledge,
            ScopeProgress = new DbSyncScopeProgress()
        };

        _conflicts = new List<SyncConflict>();
        _syncErrors = new List<SyncError>();

        // Subscribe to the ApplyChangeFailed event to handle conflicts.
        _sqlSyncProvider.ApplyChangeFailed += SqlSyncProviderApplyChangeFailed;

        // Subscribe to the ChangesApplied event to read the server tickcount in case there are any conflicts.
        _sqlSyncProvider.ChangesApplied += SqlSyncProviderChangesApplied;

        // NOTE: The ConflictResolutionPolicy passed into the method is IGNORED.
        // Conflicts can be logged by subscribing to the failed events.
        _sqlSyncProvider.ProcessChangeBatch(Microsoft.Synchronization.ConflictResolutionPolicy.DestinationWins, changeBatch, dbSyncContext, new SyncCallbacks(), stats);

        if (0 != _conflicts.Count)
        {
            _sqlSyncProvider.GetSyncBatchParameters(out batchSize, out serverKnowledge);

            // The way the current P2P provider works, versions are bumped up when conflicts are resolved on the server.
            // This would result in us sending the changes to the client on the next download request. We want
            // to not enumerate that change again on the next request from the same client.
            // The solution is to get the server knowledge after all changes are applied and then
            // project the knowledge of each conflicting item and add it as a positive exception to the updated client knowledge.
            AddConflictItemsKnowledgeToClientKnowledge(updatedClientKnowldege, serverKnowledge);
        }
    }
    finally
    {
        _sqlSyncProvider.EndSession(sessionContext);
    }

    // Don't send any updates to the server knowledge since the client has not got any updates yet.
    // This updated knowledge will only include an update to the client tickcount.
    // The client would obtain the server knowledge when it does a get changes.
    // If we include the serverknowledge, the client would never get any items that are
    // between the current server knowledge and the client known server knowledge.
    syncBlob.ClientKnowledge = updatedClientKnowldege.Serialize();
    response.ServerBlob = syncBlob.Serialize();
    response.Conflicts = _conflicts;
    response.Errors = _syncErrors;

    return response;
}
/// <summary>
/// Runs an interactive sync loop between a remote web orchestrator and a local
/// SQL Server database, printing progress, until the user presses Escape.
/// </summary>
private static async Task SynchronizeAsync()
{
    // Database script used for this sample : https://github.com/Mimetis/Dotmim.Sync/blob/master/CreateAdventureWorks.sql
    var serverOrchestrator = new WebClientOrchestrator("https://localhost.fiddler:44342/api/sync");

    // Second provider is using plain old Sql Server provider, relying on triggers and tracking tables to create the sync environment
    var clientProvider = new SqlSyncProvider(clientConnectionString);

    // Creating an agent that will handle all the process
    var agent = new SyncAgent(clientProvider, serverOrchestrator);

    do
    {
        try
        {
            // FIX: property name was misspelled "PogressPercentageString" (a typo that
            // shipped in older Dotmim.Sync versions); current versions expose it as
            // ProgressPercentageString.
            var progress = new SynchronousProgress<ProgressArgs>(args =>
                Console.WriteLine($"{args.ProgressPercentageString}:\t{args.Message}"));

            // Launch the sync process
            var s1 = await agent.SynchronizeAsync(progress);

            // Write results
            Console.WriteLine(s1);
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.Message);
        }
    } while (Console.ReadKey().Key != ConsoleKey.Escape);

    Console.WriteLine("End");
}
/// <summary>
/// Generates a server-side snapshot (batch files on disk) that new clients can
/// be initialized from, instead of performing a full first sync.
/// </summary>
private static async Task CreateSnapshotAsync()
{
    // Create 2 Sql Sync providers
    var serverProvider = new SqlSyncProvider(DBHelper.GetDatabaseConnectionString(serverDbName));

    // specific Setup with only 2 tables, and one filtered
    var setup = new SyncSetup(allTables);

    // Using the Progress pattern to handle progression during the synchronization
    // NOTE(review): this local 'progress' handler is never used below — only
    // remoteProgress is passed to CreateSnapshotAsync; confirm whether it can be removed.
    var progress = new SynchronousProgress<ProgressArgs>(s =>
    {
        Console.ForegroundColor = ConsoleColor.Green;
        Console.WriteLine($"{s.Context.SyncStage}:\t{s.Message}");
        Console.ResetColor();
    });

    // Yellow output distinguishes server-side (remote) progress messages.
    var remoteProgress = new SynchronousProgress<ProgressArgs>(s =>
    {
        Console.ForegroundColor = ConsoleColor.Yellow;
        Console.WriteLine($"{s.Context.SyncStage}:\t{s.Message}");
        Console.ResetColor();
    });

    // snapshot directory
    var directory = Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.UserProfile), "Snapshots");

    var options = new SyncOptions
    {
        SnapshotsDirectory = directory,
        BatchSize = 2000
    };

    var remoteOrchestrator = new RemoteOrchestrator(serverProvider, options, setup);

    // Write the snapshot files to the snapshots directory.
    await remoteOrchestrator.CreateSnapshotAsync(null, default, remoteProgress);
}
/// <summary>
/// Runs one synchronization pass for the given scope and direction.
/// Returns the orchestrator statistics, or null when the sync failed.
/// </summary>
static SyncOperationStatistics sync(string scope, SyncDirectionOrder order)
{
    using (SqlSyncProvider remoteProvider = new SqlSyncProvider { ScopeName = scope },
                           localProvider = new SqlSyncProvider { ScopeName = scope })
    {
        using (SqlConnection remoteConnection = new SqlConnection(Settings.Default.ServerConnectionString),
                             localConnection = new SqlConnection(Settings.Default.ClientConnectionString))
        {
            remoteProvider.Connection = remoteConnection;
            localProvider.Connection = localConnection;

            // Client is the local side, server the remote side; direction comes from the caller.
            SyncOrchestrator orchestrator = new SyncOrchestrator
            {
                LocalProvider = localProvider,
                RemoteProvider = remoteProvider,
                Direction = order
            };

            // Conflict diagnostics are only wired up for the one-way scope.
            if (scope == "OneWay")
            {
                localProvider.ApplyChangeFailed += new EventHandler<Microsoft.Synchronization.Data.DbApplyChangeFailedEventArgs>(slaveProvider_ApplyChangeFailed);
            }

            try
            {
                return orchestrator.Synchronize();
            }
            catch (Exception ex)
            {
                Console.Error.WriteLine(ex.Message);
                return null;
            }
        }
    }
}
/// <summary>
/// Runs a full upload-and-download synchronization of the "FullScope" scope
/// between the default client and server databases, then prints statistics.
/// </summary>
public void Synchronize()
{
    LogUtilities.LogTracingLevels();

    // Client side, with conflict/selection diagnostics attached.
    var localProvider = new SqlSyncProvider("FullScope", SqlConnectionFactory.DefaultClientConnection);
    localProvider.ApplyChangeFailed += ClientApplyChangeFailed;
    localProvider.ChangesSelected += ClientSyncProviderOnChangesSelected;

    // Server side, with the matching diagnostics.
    var remoteProvider = new SqlSyncProvider("FullScope", SqlConnectionFactory.DefaultServerConnection);
    remoteProvider.ApplyChangeFailed += ServerApplyChangeFailed;
    remoteProvider.ChangesSelected += ServerSyncProviderOnChangesSelected;

    // The orchestrator drives both providers in both directions.
    var orchestrator = new SyncOrchestrator
    {
        LocalProvider = localProvider,
        RemoteProvider = remoteProvider,
        Direction = SyncDirectionOrder.UploadAndDownload
    };

    var syncStats = orchestrator.Synchronize();

    // TODO: can individual columns be synchronized?
    // Print statistics.
    Console.WriteLine("Start Time: " + syncStats.SyncStartTime);
    Console.WriteLine("Total Changes Uploaded: " + syncStats.UploadChangesTotal);     // Changes from Client to Server (LocalSyncProvider to RemoteSyncProvider)
    Console.WriteLine("Total Changes Downloaded: " + syncStats.DownloadChangesTotal); // Changes from Server to Client (RemoteSyncProvider to LocalSyncProvider)
    Console.WriteLine("Complete Time: " + syncStats.SyncEndTime);
    Console.WriteLine(string.Empty);

    Console.ReadLine();
}
/// <summary>
/// Test fixture: deletes any stale client Sqlite file, creates and seeds the
/// server database, and builds the SyncAgent used by the tests.
/// </summary>
public SqliteSyncAllColumnsFixture()
{
    var builder = new SqliteConnectionStringBuilder { DataSource = ClientSqliteFilePath };
    this.ClientSqliteConnectionString = builder.ConnectionString;

    // Force pending finalizers to run first — presumably to release any
    // lingering Sqlite file handles so the Delete below cannot fail.
    // TODO(review): confirm this is still needed.
    GC.Collect();
    GC.WaitForPendingFinalizers();

    if (File.Exists(ClientSqliteFilePath))
        File.Delete(ClientSqliteFilePath);

    // create databases
    helperDb.CreateDatabase(serverDbName);
    // create table
    helperDb.ExecuteScript(serverDbName, createTableScript);
    // insert table
    helperDb.ExecuteScript(serverDbName, datas);

    var serverProvider = new SqlSyncProvider(ServerConnectionString);
    var clientProvider = new SqliteSyncProvider(ClientSqliteFilePath);

    // Configuration targeting the "AllColumns" table/scope.
    var simpleConfiguration = new SyncConfiguration(new[] { "AllColumns" });

    Agent = new SyncAgent(clientProvider, serverProvider, simpleConfiguration);
}
/// <summary>
/// Test setup: self-hosts the sync Web API endpoint in-process (OWIN) and
/// wires a Sqlite client provider against it through a web proxy client.
/// </summary>
public SqliteSyncHttpTests(SqliteSyncHttpFixture fixture)
{
    this.fixture = fixture;

    // Each activation builds a fresh configuration from the fixture tables.
    configurationProvider = () => new SyncConfiguration(fixture.Tables);

    serverProvider = new SqlSyncProvider(fixture.ServerConnectionString);
    proxyServerProvider = new WebProxyServerProvider(serverProvider);

    webApp = WebApp.Start(fixture.BaseAddress.OriginalString, (appBuilder) =>
    {
        // Configure Web API for self-host.
        var config = new HttpConfiguration();

        config.Routes.MapHttpRoute(
            name: "DefaultApi",
            routeTemplate: "api/{controller}/{actionid}/{id}",
            defaults: new { actionid = RouteParameter.Optional, id = RouteParameter.Optional }
            );

        // The controller activator hands out the shared proxy provider,
        // refreshing its configuration on every activation.
        config.Services.Replace(typeof(IHttpControllerActivator), new TestControllerActivator(() =>
        {
            proxyServerProvider.Configuration = configurationProvider();
            return proxyServerProvider;
        }));

        appBuilder.UseWebApi(config);
    });

    clientProvider = new SqliteSyncProvider(fixture.ClientSqliteFilePath);
    proxyClientProvider = new WebProxyClientProvider(new Uri(fixture.BaseAddress, "api/values"));

    agent = new SyncAgent(clientProvider, proxyClientProvider);
}
/// <summary>
/// Provisioning must fail with MissingTableException when the setup references
/// a table that does not exist in the database.
/// </summary>
public async Task BaseOrchestrator_Provision_ShouldFails_If_SetupTable_DoesNotExist()
{
    // Arrange: fresh database with the AdventureWorks schema created.
    var databaseName = HelperDatabase.GetRandomName("tcp_lo_");
    await HelperDatabase.CreateDatabaseAsync(ProviderType.Sql, databaseName, true);
    var connectionString = HelperDatabase.GetConnectionString(ProviderType.Sql, databaseName);

    var provider = new SqlSyncProvider(connectionString);
    var context = new AdventureWorksContext((databaseName, ProviderType.Sql, provider), true, false);
    await context.Database.EnsureCreatedAsync();

    // Setup references a table that is not part of the schema.
    var orchestrator = new LocalOrchestrator(
        provider,
        new SyncOptions(),
        new SyncSetup(new string[] { "SalesLT.badTable" }),
        "scope");

    var provision = SyncProvision.Table | SyncProvision.TrackingTable | SyncProvision.StoredProcedures | SyncProvision.Triggers;

    // Act + Assert: provisioning throws, tagged with the missing-table error.
    var se = await Assert.ThrowsAsync<SyncException>(async () => await orchestrator.ProvisionAsync(provision));

    Assert.Equal(SyncStage.Provisioning, se.SyncStage);
    Assert.Equal(SyncSide.ClientSide, se.Side);
    Assert.Equal("MissingTableException", se.TypeName);

    HelperDatabase.DropDatabase(ProviderType.Sql, databaseName);
}
/// <summary>
/// Updates one row directly on the server, then verifies a sync downloads
/// exactly that single change to the MySql client and uploads nothing.
/// </summary>
public async Task UpdateFromServer(SyncConfiguration conf)
{
    var guid = Guid.NewGuid();

    // Touch the first ticket's title so exactly one server row changes.
    var updateRowScript =
        $@"
        Declare @id uniqueidentifier;
        Select top 1 @id = ServiceTicketID from ServiceTickets;
        Update [ServiceTickets] Set [Title] = 'Updated from server {guid.ToString()}' Where ServiceTicketId = @id";

    using (var sqlConnection = new SqlConnection(fixture.ServerConnectionString))
    using (var sqlCmd = new SqlCommand(updateRowScript, sqlConnection))
    {
        sqlConnection.Open();
        sqlCmd.ExecuteNonQuery();
        sqlConnection.Close();
    }

    var serverProvider = new SqlSyncProvider(fixture.ServerConnectionString);
    var clientProvider = new MySqlSyncProvider(fixture.ClientMySqlConnectionString);

    var agent = new SyncAgent(clientProvider, serverProvider);

    // Apply the caller-supplied configuration knobs, then register the tables.
    agent.Configuration.DownloadBatchSizeInKB = conf.DownloadBatchSizeInKB;
    agent.Configuration.UseBulkOperations = conf.UseBulkOperations;
    agent.Configuration.SerializationFormat = conf.SerializationFormat;
    agent.Configuration.Add(fixture.Tables);

    var session = await agent.SynchronizeAsync();

    // One change down (the updated row), nothing up.
    Assert.Equal(1, session.TotalChangesDownloaded);
    Assert.Equal(0, session.TotalChangesUploaded);
}
/// <summary>
/// Configure the SqlSyncProvider. Note that this method assumes you have a direct connection
/// to the server, as this is more of a design-time use case vs. a runtime use case. We think
/// of provisioning the server as something that occurs before an application is deployed, whereas
/// provisioning the client is something that happens during runtime (on initial sync) after the
/// application is deployed.
/// </summary>
/// <param name="scopeName">Scope name assigned to the provider.</param>
/// <param name="hostName">Currently unused — TODO(review): confirm whether it should feed the connection string.</param>
/// <returns>A connected SqlSyncProvider whose "CardsScope" scope has been provisioned if needed.</returns>
public SqlSyncProvider ConfigureSqlSyncProvider(string scopeName, string hostName)
{
    SqlSyncProvider provider = new SqlSyncProvider();
    provider.ApplyChangeFailed += new EventHandler<DbApplyChangeFailedEventArgs>(Provider_ApplyingChanges);
    provider.ScopeName = scopeName;

    SqlConn conn = new SqlConn();
    provider.Connection = new SqlConnection(conn.connString);

    MakeBackUp();

    // create a new scope description and add the appropriate tables to this scope
    DbSyncScopeDescription scopeDesc = new DbSyncScopeDescription("CardsScope");

    // class to be used to provision the scope defined above
    SqlSyncScopeProvisioning serverConfig = new SqlSyncScopeProvisioning((SqlConnection)provider.Connection);

    // determine if this scope already exists on the server and if not go ahead and provision
    if (!serverConfig.ScopeExists("CardsScope"))
    {
        // add the appropriate tables to this scope
        scopeDesc.Tables.Add(SqlSyncDescriptionBuilder.GetDescriptionForTable("[" + conn.schema + "].[cards]", (System.Data.SqlClient.SqlConnection)provider.Connection));

        // note that it is important to call this after the tables have been added to the scope
        serverConfig.PopulateFromScopeDescription(scopeDesc);

        // indicate that the base table already exists and does not need to be created
        serverConfig.SetCreateTableDefault(DbSyncCreationOption.Skip);

        // provision the server
        serverConfig.Apply();
    }

    conn.close();

    return (provider);
}
/// <summary>
/// Syncs a set of AdventureWorks tables from SQL Server down to a local
/// Sqlite file, repeating until the user presses Escape.
/// </summary>
private static async Task SyncAdvAsync()
{
    // Sql Server provider, the master.
    // SECURITY NOTE(review): sample-only hard-coded 'sa' credentials — never
    // ship credentials in source; load them from configuration or a secret store.
    var serverProvider = new SqlSyncProvider(
        @"Data Source=.;Initial Catalog=AdventureWorks;User Id=sa;Password=Password12!;");

    // Sqlite Client provider for a Sql Server <=> Sqlite sync
    var clientProvider = new SqliteSyncProvider("advworks2.db");

    // Tables involved in the sync process:
    var tables = new string[]
    {
        "ProductCategory", "ProductDescription", "ProductModel", "Product", "ProductModelProductDescription",
        "Address", "Customer", "CustomerAddress", "SalesOrderHeader", "SalesOrderDetail"
    };

    // Sync orchestrator
    var agent = new SyncAgent(clientProvider, serverProvider, tables);

    do
    {
        var s = await agent.SynchronizeAsync();
        Console.WriteLine($"Total Changes downloaded : {s.TotalChangesDownloaded}");
    } while (Console.ReadKey().Key != ConsoleKey.Escape);
}
/// <summary>
/// Launch a simple sync over the TCP network; both SQL Servers (client and server) are reachable directly.
/// </summary>
/// <returns>A task that completes when the user exits the sync loop.</returns>
private static async Task SynchronizeAsync()
{
    // Create 2 Sql Sync providers
    var serverProvider = new SqlSyncProvider(DbHelper.GetDatabaseConnectionString(serverDbName));
    var clientProvider = new SqlSyncProvider(DbHelper.GetDatabaseConnectionString(clientDbName));

    // Tables involved in the sync process:
    var tables = allTables;

    // Creating an agent that will handle all the process
    var agent = new SyncAgent(clientProvider, serverProvider, tables);

    // Using the Progress pattern to handle progression during the synchronization
    var progress = new Progress<ProgressArgs>(s => Console.WriteLine($"[client]: {s.Context.SyncStage}:\t{s.Message}"));

    // Configuration: naming of the sync metadata objects (scope table,
    // stored procedures, tracking tables) and the wire serialization format.
    agent.SetConfiguration(s =>
    {
        s.ScopeInfoTableName = "tscopeinfo";
        s.SerializationFormat = Dotmim.Sync.Enumerations.SerializationFormat.Binary;
        s.StoredProceduresPrefix = "s";
        s.StoredProceduresSuffix = "";
        s.TrackingTablesPrefix = "t";
        s.TrackingTablesSuffix = "";
    });

    // Runtime options: batching directory/size, metadata cleanup, bulk operations.
    agent.SetOptions(opt =>
    {
        opt.BatchDirectory = Path.Combine(SyncOptions.GetDefaultUserBatchDiretory(), "sync");
        opt.BatchSize = 100;
        opt.CleanMetadatas = true;
        opt.UseBulkOperations = true;
        opt.UseVerboseErrors = false;
    });

    do
    {
        Console.Clear();
        Console.WriteLine("Sync Start");
        try
        {
            // Launch the sync process
            var s1 = await agent.SynchronizeAsync(progress);

            // Write results
            Console.WriteLine(s1);
        }
        catch (Exception e)
        {
            Console.WriteLine(e.Message);
        }

        //Console.WriteLine("Sync Ended. Press a key to start again, or Escape to end");
    } while (Console.ReadKey().Key != ConsoleKey.Escape);

    Console.WriteLine("End");
}
/// <summary>
/// Synchronizes against a JWT-protected web endpoint: builds a bearer token,
/// attaches it to an HttpClient, then loops sync passes until Escape is pressed.
/// </summary>
private static async Task SynchronizeAsync()
{
    // Database script used for this sample : https://github.com/Mimetis/Dotmim.Sync/blob/master/CreateAdventureWorks.sql

    // Getting a JWT token
    // This sample is NOT SECURE at all
    // You should get a Jwt Token from an identity provider like Azure, Google, AWS or other.
    var token = GenerateJwtToken("*****@*****.**", "SPERTUS01");

    var httpClient = new HttpClient();
    httpClient.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", token);

    // Adding the HttpClient instance to the web client orchestrator
    var remoteOrchestrator = new WebRemoteOrchestrator("https://localhost:44342/api/sync", client: httpClient);

    // Local side is a plain Sql Server provider (triggers + tracking tables).
    var localProvider = new SqlSyncProvider(clientConnectionString);

    // The agent drives the whole process.
    var agent = new SyncAgent(localProvider, remoteOrchestrator);

    do
    {
        var result = await agent.SynchronizeAsync();
        Console.WriteLine(result);
    } while (Console.ReadKey().Key != ConsoleKey.Escape);

    Console.WriteLine("End");
}
/// <summary>
/// Syncs a set of AdventureWorks tables from SQL Server down to a local Sqlite
/// file, repeating until Escape is pressed.
/// </summary>
private static async Task SynchronizeAsync()
{
    // Database script used for this sample : https://github.com/Mimetis/Dotmim.Sync/blob/master/CreateAdventureWorks.sql

    // Server side. To use the SQL change tracking feature instead of
    // triggers/tracking tables, switch to 'SqlSyncChangeTrackingProvider' and
    // enable change tracking on the database first, e.g.:
    // ALTER DATABASE AdventureWorks SET CHANGE_TRACKING = ON (CHANGE_RETENTION = 10 DAYS, AUTO_CLEANUP = ON)
    var remoteProvider = new SqlSyncProvider(serverConnectionString);

    // Client side: plain old Sql provider over a local Sqlite file.
    var localProvider = new SqliteSyncProvider("adv.db");

    // Tables involved in the sync process:
    var tables = new string[]
    {
        "ProductCategory", "ProductModel", "Product",
        "Address", "Customer", "CustomerAddress",
        "SalesOrderHeader", "SalesOrderDetail"
    };

    var agent = new SyncAgent(localProvider, remoteProvider, tables);

    do
    {
        var result = await agent.SynchronizeAsync();
        Console.WriteLine(result);
    } while (Console.ReadKey().Key != ConsoleKey.Escape);

    Console.WriteLine("End");
}
/// <summary>
/// Test setup: builds the server and client SQL providers from the shared fixture's connection strings.
/// </summary>
/// <param name="fixture">Shared fixture supplying server and client connection strings.</param>
public SyncProvAndDeprovTests(SyncProvAndDeprovTestsFixture fixture)
{
    this.fixture = fixture;
    serverProvider = new SqlSyncProvider(fixture.ServerConnectionString);
    clientProvider = new SqlSyncProvider(fixture.Client1ConnectionString);
}
/// <summary>
/// Test fixture: deletes any stale client SQLite file, creates and seeds the
/// server database, and builds the SyncAgent used by the tests.
/// </summary>
public SQLiteSyncSimpleFixture()
{
    var connectionStringBuilder = new SQLiteConnectionStringBuilder { DataSource = ClientSQLiteFilePath };
    this.ClientSQLiteConnectionString = connectionStringBuilder.ConnectionString;

    // Start from a clean client database file.
    if (File.Exists(ClientSQLiteFilePath))
    {
        File.Delete(ClientSQLiteFilePath);
    }

    // Server side: database, then schema, then seed data.
    helperDb.CreateDatabase(serverDbName);
    helperDb.ExecuteScript(serverDbName, createTableScript);
    helperDb.ExecuteScript(serverDbName, datas);

    var remoteProvider = new SqlSyncProvider(ServerConnectionString);
    var localProvider = new SQLiteSyncProvider(ClientSQLiteFilePath);
    var configuration = new SyncConfiguration(Tables);

    Agent = new SyncAgent(localProvider, remoteProvider, configuration);
}
/// <summary>
/// CLI entry point: synchronizes either the logs scope or the default scope
/// against the legacy sync endpoint.
/// </summary>
/// <param name="app">Command-line application context (not used by the body).</param>
/// <returns>
/// 1 on success, 0 on failure.
/// NOTE(review): this is inverted from the usual process exit-code convention
/// (0 = success, non-zero = failure); confirm whether callers rely on these values.
/// </returns>
public async Task<int> OnExecuteAsync(CommandLineApplication app)
{
    var serverOrchestrator = new WebClientOrchestrator(ApiOptions.SyncAddressOld);

    // Second provider is using plain old Sql Server provider, relying on triggers and tracking tables to create the sync environment
    var connectionString = Configuration.GetConnectionString(ProviderType.Sql, "Client");
    var clientProvider = new SqlSyncProvider(connectionString);

    try
    {
        // Scope selects which synchronization routine to run.
        if (Scope == ScopeNames.Logs)
        {
            await SyncServices.SynchronizeLogsAsync(serverOrchestrator, clientProvider, new SyncOptions(), Reinitialize);
        }
        else
        {
            await SyncServices.SynchronizeDefaultAsync(serverOrchestrator, clientProvider, new SyncOptions(), Reinitialize);
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.Message);
        return (0);
    }

    return (1);
}
/// <summary>
/// Create the tables in each database, drop any leftover sync metadata and
/// rows from a previous run, then seed fresh rows on both sides.
/// </summary>
private static async Task SetupDatabasesAsync(SqlSyncProvider serverProvider, SqliteSyncProvider clientProvider)
{
    // Create the ServiceTickets table on both sides.
    var serverConnection = serverProvider.CreateConnection();
    await Helper.CreateSqlServerServiceTicketsTableAsync(serverConnection);

    var clientConnection = clientProvider.CreateConnection();
    await Helper.CreateSqliteServiceTicketsTableAsync(clientConnection);

    // The agent gives us access to both orchestrators for cleanup.
    var agent = new SyncAgent(clientProvider, serverProvider);

    // Remove any sync setup left over from a previous run.
    await agent.LocalOrchestrator.DropAllAsync();
    await agent.RemoteOrchestrator.DropAllAsync();

    // Remove any rows left over from a previous run.
    await Helper.DropRowsAsync(serverConnection);
    await Helper.DropRowsAsync(clientConnection);

    // Seed fresh rows on both sides.
    await Helper.AddRowsAsync(serverConnection);
    await Helper.AddRowsAsync(clientConnection);
}
/// <summary>
/// ExistTrackingTableAsync must report true only for tracking tables that
/// have actually been created.
/// </summary>
public async Task TrackingTable_Exists()
{
    // Arrange: fresh database with the AdventureWorks schema.
    var databaseName = HelperDatabase.GetRandomName("tcp_lo_");
    await HelperDatabase.CreateDatabaseAsync(ProviderType.Sql, databaseName, true);
    var connectionString = HelperDatabase.GetConnectionString(ProviderType.Sql, databaseName);

    var provider = new SqlSyncProvider(connectionString);
    var context = new AdventureWorksContext((databaseName, ProviderType.Sql, provider), true, false);
    await context.Database.EnsureCreatedAsync();

    var setup = new SyncSetup(new string[] { "SalesLT.Product", "SalesLT.ProductCategory" })
    {
        TrackingTablesPrefix = "t_",
        TrackingTablesSuffix = "_t"
    };

    var orchestrator = new LocalOrchestrator(provider, new SyncOptions(), setup);

    // Act: create the tracking table for the first setup table only.
    await orchestrator.CreateTrackingTableAsync(setup.Tables[0]);

    // Assert: first tracking table exists, second does not.
    Assert.True(await orchestrator.ExistTrackingTableAsync(setup.Tables[0]));
    Assert.False(await orchestrator.ExistTrackingTableAsync(setup.Tables[1]));

    HelperDatabase.DropDatabase(ProviderType.Sql, databaseName);
}
/// <summary>
/// BeginSessionAsync must move the context to the BeginSession stage and raise
/// OnSessionBegin with no open connection or transaction.
/// </summary>
public async Task LocalOrchestrator_BeginSession_ShouldIncrement_SyncStage()
{
    var orchestrator = new LocalOrchestrator(new SqlSyncProvider(), new SyncOptions(), new SyncSetup());
    var context = orchestrator.GetContext();

    var sessionBeginRaised = false;

    orchestrator.OnSessionBegin(args =>
    {
        // The interceptor fires at the BeginSession stage, before any
        // connection or transaction has been opened.
        Assert.Equal(SyncStage.BeginSession, args.Context.SyncStage);
        Assert.IsType<SessionBeginArgs>(args);
        Assert.Null(args.Connection);
        Assert.Null(args.Transaction);
        sessionBeginRaised = true;
    });

    await orchestrator.BeginSessionAsync();

    Assert.Equal(SyncStage.BeginSession, context.SyncStage);
    Assert.True(sessionBeginRaised);
}
/// <summary>
/// DropTrackingTablesAsync must raise the dropping/dropped interceptors once
/// per tracking table created from the setup.
/// </summary>
public async Task TrackingTable_Drop_All()
{
    // Arrange: fresh database with the AdventureWorks schema.
    var databaseName = HelperDatabase.GetRandomName("tcp_lo_");
    await HelperDatabase.CreateDatabaseAsync(ProviderType.Sql, databaseName, true);
    var connectionString = HelperDatabase.GetConnectionString(ProviderType.Sql, databaseName);

    var provider = new SqlSyncProvider(connectionString);
    var context = new AdventureWorksContext((databaseName, ProviderType.Sql, provider), true, false);
    await context.Database.EnsureCreatedAsync();

    var setup = new SyncSetup(new string[] { "SalesLT.ProductCategory", "SalesLT.ProductModel", "SalesLT.Product", "dbo.Sql", "Posts" })
    {
        TrackingTablesPrefix = "t_",
        TrackingTablesSuffix = "_t"
    };

    var orchestrator = new LocalOrchestrator(provider, new SyncOptions(), setup);

    // Count interceptor invocations for both phases of the drop.
    var droppingCount = 0;
    var droppedCount = 0;
    orchestrator.OnTrackingTableDropping(_ => droppingCount++);
    orchestrator.OnTrackingTableDropped(_ => droppedCount++);

    // Act: create then drop all tracking tables.
    await orchestrator.CreateTrackingTablesAsync();
    await orchestrator.DropTrackingTablesAsync();

    // Assert: one dropping + one dropped event per setup table.
    Assert.Equal(5, droppingCount);
    Assert.Equal(5, droppedCount);

    HelperDatabase.DropDatabase(ProviderType.Sql, databaseName);
}
/// <summary>
/// Syncs the same server scope down to two different download-only clients
/// (Sqlite and SQL Server), repeating until Escape is pressed.
/// </summary>
private static async Task SynchronizeAsync()
{
    // Database script used for this sample : https://github.com/Mimetis/Dotmim.Sync/blob/master/CreateAdventureWorks.sql
    var serverProvider = new SqlSyncProvider(serverConnectionString);

    // Tables involved in the sync process:
    var tables = new string[]
    {
        "ProductCategory", "ProductModel", "Product",
        "Address", "Customer", "CustomerAddress",
        "SalesOrderHeader", "SalesOrderDetail"
    };

    // First DownloadOnly provider for Sqlite
    var sqliteClientProvider = new SqliteSyncDownloadOnlyProvider("adv.db");

    // Second DownloadOnly provider for SqlServer
    var sqlServerClientProvider = new SqlSyncDownloadOnlyProvider(clientConnectionString);

    do
    {
        // Run the Sqlite client first, then the SQL Server client.
        var sqliteAgent = new SyncAgent(sqliteClientProvider, serverProvider);
        Console.WriteLine(await sqliteAgent.SynchronizeAsync(tables));

        var sqlServerAgent = new SyncAgent(sqlServerClientProvider, serverProvider);
        Console.WriteLine(await sqlServerAgent.SynchronizeAsync(tables));
    } while (Console.ReadKey().Key != ConsoleKey.Escape);

    Console.WriteLine("End");
}
/// <summary>
/// End-to-end HTTP sync test: hosts the server provider behind an in-process
/// Kestrel endpoint and verifies a first sync downloads the seeded rows.
/// </summary>
public async Task SyncThroughHttp()
{
    using (var server = new KestrellTestServer())
    {
        // Server side: wrap the SQL provider in a web proxy and handle each request.
        var serverHandler = new RequestDelegate(async context =>
        {
            SqlSyncProvider serverProvider = new SqlSyncProvider(this.fixture.ServerConnectionString);

            SyncConfiguration configuration = new SyncConfiguration(this.fixture.Tables);
            // Cap the download batch size at 500 KB.
            configuration.DownloadBatchSizeInKB = 500;

            WebProxyServerProvider proxyServerProvider = new WebProxyServerProvider(serverProvider);
            proxyServerProvider.Configuration = configuration;

            await proxyServerProvider.HandleRequestAsync(context);
        });

        // Client side: sync through the proxy and check the change counts.
        var clientHandler = new ResponseDelegate(async (serviceUri) =>
        {
            var proxyProvider = new WebProxyClientProvider(new Uri(serviceUri));
            var clientProvider = new SqlSyncProvider(this.fixture.Client1ConnectionString);

            SyncAgent agent = new SyncAgent(clientProvider, proxyProvider);

            var session = await agent.SynchronizeAsync();

            Assert.Equal(5, session.TotalChangesDownloaded);
            Assert.Equal(0, session.TotalChangesUploaded);
        });

        await server.Run(serverHandler, clientHandler);
    }
}
/// <summary>
/// Provisioning an empty database must fail with MissingTableException rather
/// than silently creating the schema.
/// </summary>
public async Task BaseOrchestrator_Provision_SchemaNotCreated_If_SetupHasTables_AndDbIsEmpty()
{
    // Arrange: brand new, completely empty database (no EnsureCreated call here).
    var databaseName = HelperDatabase.GetRandomName("tcp_lo_");
    await HelperDatabase.CreateDatabaseAsync(ProviderType.Sql, databaseName, true);
    var connectionString = HelperDatabase.GetConnectionString(ProviderType.Sql, databaseName);

    var provider = new SqlSyncProvider(connectionString);

    var orchestrator = new LocalOrchestrator(
        provider,
        new SyncOptions(),
        new SyncSetup(new string[] { "SalesLT.Product" }),
        "scope");

    var provision = SyncProvision.Table | SyncProvision.TrackingTable | SyncProvision.StoredProcedures | SyncProvision.Triggers;

    // Act + Assert: provisioning throws, tagged with the missing-table error.
    var se = await Assert.ThrowsAsync<SyncException>(async () => await orchestrator.ProvisionAsync(provision));

    Assert.Equal(SyncStage.Provisioning, se.SyncStage);
    Assert.Equal(SyncSide.ClientSide, se.Side);
    Assert.Equal("MissingTableException", se.TypeName);

    HelperDatabase.DropDatabase(ProviderType.Sql, databaseName);
}
/// <summary>
/// First sync must download the seven seeded product rows and recreate all
/// five foreign keys on the client database.
/// </summary>
public async Task Initialize()
{
    serverProvider = new SqlSyncProvider(fixture.ServerConnectionString);
    clientProvider = new SqlSyncProvider(fixture.Client1ConnectionString);

    var configuration = new SyncConfiguration(new[]
    {
        "ProductCategory",
        "ProductDescription",
        "ProductModel",
        "ProductModelProductDescription",
        "Product"
    });

    agent = new SyncAgent(clientProvider, serverProvider, configuration);

    var session = await agent.SynchronizeAsync();

    Assert.Equal(7, session.TotalChangesDownloaded);
    Assert.Equal(0, session.TotalChangesUploaded);

    // Count the foreign keys provisioned on the client side.
    int foreignKeyCount;
    using (var connection = new SqlConnection(fixture.Client1ConnectionString))
    using (var command = new SqlCommand("select count(*) from sys.foreign_keys", connection))
    {
        connection.Open();
        foreignKeyCount = (int)command.ExecuteScalar();
        connection.Close();
    }

    Assert.Equal(5, foreignKeyCount);
}
/// <summary>
/// Reading the schema for a setup containing a non-existing table must fail
/// with a MissingTableException wrapped in a SyncException.
/// </summary>
public async Task BaseOrchestrator_GetSchema_NonExistingTables_ShouldFail()
{
    var databaseName = HelperDatabase.GetRandomName("tcp_lo_");
    await HelperDatabase.CreateDatabaseAsync(ProviderType.Sql, databaseName, true);

    var connectionString = HelperDatabase.GetConnectionString(ProviderType.Sql, databaseName);
    var provider = new SqlSyncProvider(connectionString);

    var context = new AdventureWorksContext((databaseName, ProviderType.Sql, provider), true, false);
    await context.Database.EnsureCreatedAsync();

    // "WRONGTABLE" does not exist in the AdventureWorks schema.
    var setup = new SyncSetup(new string[]
    {
        "SalesLT.ProductCategory", "SalesLT.ProductModel", "SalesLT.Product",
        "Employee", "Customer", "Address", "CustomerAddress", "EmployeeAddress",
        "SalesLT.SalesOrderHeader", "SalesLT.SalesOrderDetail",
        "Posts", "Tags", "PostTag",
        "PricesList", "PricesListCategory", "PricesListDetail",
        "WRONGTABLE"
    });

    var orchestrator = new LocalOrchestrator(provider, new SyncOptions(), setup);

    var thrown = await Assert.ThrowsAsync<SyncException>(
        async () => await orchestrator.GetSchemaAsync());

    Assert.Equal(SyncStage.SchemaReading, thrown.SyncStage);
    Assert.Equal(SyncSide.ClientSide, thrown.Side);
    Assert.Equal("MissingTableException", thrown.TypeName);

    HelperDatabase.DropDatabase(ProviderType.Sql, databaseName);
}
/// <summary>
/// Provisioning a setup whose table exists must succeed and return the
/// schema describing the provisioned table.
/// </summary>
public async Task BaseOrchestrator_Provision_SchemaCreated_If_SetupHasTables()
{
    var databaseName = HelperDatabase.GetRandomName("tcp_lo_");
    await HelperDatabase.CreateDatabaseAsync(ProviderType.Sql, databaseName, true);

    var connectionString = HelperDatabase.GetConnectionString(ProviderType.Sql, databaseName);
    var provider = new SqlSyncProvider(connectionString);

    var context = new AdventureWorksContext((databaseName, ProviderType.Sql, provider), true, false);
    await context.Database.EnsureCreatedAsync();

    var orchestrator = new LocalOrchestrator(
        provider,
        new SyncOptions(),
        new SyncSetup(new string[] { "SalesLT.Product" }),
        "scope");

    var provisionFlags = SyncProvision.Table
        | SyncProvision.TrackingTable
        | SyncProvision.StoredProcedures
        | SyncProvision.Triggers;

    var schema = await orchestrator.ProvisionAsync(provisionFlags);

    Assert.Equal(SyncStage.Provisioning, orchestrator.GetContext().SyncStage);
    Assert.Single(schema.Tables);
    Assert.Equal("SalesLT.Product", schema.Tables[0].GetFullName());
    Assert.Equal(17, schema.Tables[0].Columns.Count);

    HelperDatabase.DropDatabase(ProviderType.Sql, databaseName);
}
/// <summary>
/// When a setup table declares an explicit column list, the returned schema
/// must contain only those columns; unfiltered tables keep all columns.
/// </summary>
public async Task BaseOrchestrator_GetSchema_SetupColumnsDefined_ShouldReturn_SchemaWithSetupColumnsOnly()
{
    var databaseName = HelperDatabase.GetRandomName("tcp_lo_");
    await HelperDatabase.CreateDatabaseAsync(ProviderType.Sql, databaseName, true);

    var connectionString = HelperDatabase.GetConnectionString(ProviderType.Sql, databaseName);
    var provider = new SqlSyncProvider(connectionString);

    var context = new AdventureWorksContext((databaseName, ProviderType.Sql, provider), true, false);
    await context.Database.EnsureCreatedAsync();

    // Restrict the Customer table to four explicit columns.
    var setup = new SyncSetup(new string[] { "Customer", "Address", "CustomerAddress" });
    setup.Tables["Customer"].Columns.AddRange(new string[] { "CustomerID", "FirstName", "LastName", "CompanyName" });

    var orchestrator = new LocalOrchestrator(provider, new SyncOptions(), setup);

    var schema = await orchestrator.GetSchemaAsync();

    Assert.Equal(SyncStage.SchemaReading, orchestrator.GetContext().SyncStage);
    Assert.Equal(3, schema.Tables.Count);

    // Only the 4 requested columns should be part of the Customer table.
    Assert.Equal(4, schema.Tables["Customer"].Columns.Count);
    Assert.Equal(9, schema.Tables["Address"].Columns.Count);
    Assert.Equal(5, schema.Tables["CustomerAddress"].Columns.Count);

    HelperDatabase.DropDatabase(ProviderType.Sql, databaseName);
}
/// <summary>
/// Declaring a non-existing column on a setup table must make schema reading
/// fail with a MissingColumnException wrapped in a SyncException.
/// </summary>
public async Task BaseOrchestrator_GetSchema_NonExistingColumns_ShouldFail()
{
    var databaseName = HelperDatabase.GetRandomName("tcp_lo_");
    await HelperDatabase.CreateDatabaseAsync(ProviderType.Sql, databaseName, true);

    var connectionString = HelperDatabase.GetConnectionString(ProviderType.Sql, databaseName);
    var provider = new SqlSyncProvider(connectionString);

    var context = new AdventureWorksContext((databaseName, ProviderType.Sql, provider), true, false);
    await context.Database.EnsureCreatedAsync();

    // "BADCOLUMN" does not exist on the Customer table.
    var setup = new SyncSetup(new string[] { "Customer", "Address", "CustomerAddress" });
    setup.Tables["Customer"].Columns.AddRange(new string[] { "FirstName", "LastName", "CompanyName", "BADCOLUMN" });

    var orchestrator = new LocalOrchestrator(provider, new SyncOptions(), setup);

    var thrown = await Assert.ThrowsAsync<SyncException>(
        async () => await orchestrator.GetSchemaAsync());

    Assert.Equal(SyncStage.SchemaReading, thrown.SyncStage);
    Assert.Equal(SyncSide.ClientSide, thrown.Side);
    Assert.Equal("MissingColumnException", thrown.TypeName);

    HelperDatabase.DropDatabase(ProviderType.Sql, databaseName);
}
/// <summary>
/// Runs the Test2 scenario between two freshly migrated SQL Server databases
/// (local and remote), each syncing the Users/Posts/Comments tables.
/// </summary>
public async Task Test2_SqlServer_SqlServer()
{
    using (var localDb = new SqlServerBlogDbContext(ConnectionString + ";Initial Catalog=Test2_Local"))
    using (var remoteDb = new SqlServerBlogDbContext(ConnectionString + ";Initial Catalog=Test2_Remote"))
    {
        // Start from a clean, fully migrated schema on both sides.
        await localDb.Database.EnsureDeletedAsync();
        await remoteDb.Database.EnsureDeletedAsync();
        await localDb.Database.MigrateAsync();
        await remoteDb.Database.MigrateAsync();

        var remoteSyncProvider = new SqlSyncProvider(
            new SqlSyncConfigurationBuilder(remoteDb.ConnectionString)
                .Table("Users")
                .Table("Posts")
                .Table("Comments")
                .Configuration);

        var localSyncProvider = new SqlSyncProvider(
            new SqlSyncConfigurationBuilder(localDb.ConnectionString)
                .Table("Users")
                .Table("Posts")
                .Table("Comments")
                .Configuration);

        await Test2(localDb, localSyncProvider, remoteDb, remoteSyncProvider);
    }
}
/// <summary>
/// Manually deprovisions the client database: drops all sync objects
/// (including base tables) and clears the persisted setup/schema.
/// </summary>
private static async Task DeprovisionClientManuallyAsync()
{
    var provider = new SqlSyncProvider(clientConnectionString);
    var orchestrator = new LocalOrchestrator(
        provider,
        new SyncOptions(),
        new SyncSetup(new string[] { "Address", "Customer", "CustomerAddress" }));

    // Fetch the persisted local scope before tearing anything down.
    var scope = await orchestrator.GetClientScopeAsync();

    // Drop stored procedures, triggers, tracking tables and base tables.
    await orchestrator.DeprovisionAsync(
        SyncProvision.StoredProcedures
        | SyncProvision.Triggers
        | SyncProvision.TrackingTable
        | SyncProvision.Table);

    // Clear the obsolete definitions and persist the scope again.
    scope.Setup = null;
    scope.Schema = null;
    await orchestrator.SaveClientScopeAsync(scope);
}
/// <summary>
/// Manually deprovisions the server database: drops stored procedures,
/// triggers and tracking tables, then clears the persisted setup/schema.
/// </summary>
private static async Task DeprovisionServerManuallyAsync()
{
    var provider = new SqlSyncProvider(serverConnectionString);
    var orchestrator = new RemoteOrchestrator(
        provider,
        new SyncOptions(),
        new SyncSetup(new string[] { "Address", "Customer", "CustomerAddress" }));

    // Fetch the persisted server scope before tearing anything down.
    var scope = await orchestrator.GetServerScopeAsync();

    // Note: base tables are kept on the server (no SyncProvision.Table flag).
    await orchestrator.DeprovisionAsync(
        SyncProvision.StoredProcedures
        | SyncProvision.Triggers
        | SyncProvision.TrackingTable);

    // Clear the obsolete definitions and persist the scope again.
    scope.Setup = null;
    scope.Schema = null;
    await orchestrator.SaveServerScopeAsync(scope);
}
/// <summary>
/// Repeatedly synchronizes the client against a web proxy orchestrator
/// (custom MessagePack serializer + converter) until Escape is pressed.
/// </summary>
private static async Task SynchronizeAsync()
{
    // Database script used for this sample : https://github.com/Mimetis/Dotmim.Sync/blob/master/CreateAdventureWorks.sql

    // Web proxy orchestrator pointing at the sync endpoint, configured with
    // a custom serializer and a custom converter.
    var remoteOrchestrator = new WebClientOrchestrator("https://localhost:44342/api/sync")
    {
        SerializerFactory = new CustomMessagePackSerializerFactory(),
        Converter = new CustomConverter()
    };

    // Plain SQL Server provider on the client, relying on triggers and
    // tracking tables to create the sync environment.
    var localProvider = new SqlSyncProvider(clientConnectionString);

    var progress = new SynchronousProgress<ProgressArgs>(
        args => Console.WriteLine($"{args.Context.SyncStage}:\t{args.Message}"));

    var agent = new SyncAgent(localProvider, remoteOrchestrator);

    do
    {
        // Launch the sync process and print the results.
        var result = await agent.SynchronizeAsync(progress);
        Console.WriteLine(result);
    } while (Console.ReadKey().Key != ConsoleKey.Escape);

    Console.WriteLine("End");
}
/// <summary>
/// Synchronizes the per-client scope between the client database and the server.
/// </summary>
/// <param name="clientId">Id of the client whose scope is synchronized.</param>
/// <param name="clientConnectionString">Connection string of the client database.</param>
/// <returns>true when the sync completed; false when the sync service faulted.</returns>
public bool Sync(Guid clientId, string clientConnectionString)
{
    try
    {
        string serverConnectionString = ConfigurationSettings.AppSettings["ServerConnectionString"].ToString();
        string scopeName = "ClientScope-" + clientId;

        SqlSyncProvider serverProvider = synchronizationHelper.ConfigureSqlSyncProvider(scopeName, serverConnectionString, clientId);

        // FIX: dispose the client-side connection and providers on every path;
        // the original leaked them (and only disposed the proxy on success).
        using (SqlConnection cn = new SqlConnection())
        using (SqlSyncProvider destinationProvider = new SqlSyncProvider())
        {
            cn.ConnectionString = clientConnectionString;
            destinationProvider.Connection = cn;
            destinationProvider.ScopeName = scopeName;

            using (SqlSyncProvider destinationProxy = new SqlSyncProvider(scopeName, (SqlConnection)destinationProvider.Connection))
            {
                serverProvider.MemoryDataCacheSize = 100000;

                // When a spool location is configured, enable file-based batching on both ends.
                if (!string.IsNullOrEmpty(this.batchSpoolLocation))
                {
                    serverProvider.BatchingDirectory = this.batchSpoolLocation;
                    destinationProxy.BatchingDirectory = this.batchSpoolLocation;
                }

                SyncOperationStatistics statistics = synchronizationHelper.SynchronizeProviders(scopeName, clientConnectionString, serverConnectionString, serverProvider, destinationProxy);

                TimeSpan diff = statistics.SyncEndTime.Subtract(statistics.SyncStartTime);

                this.syncStats = string.Format("Batching: {4} - Total Time To Synchronize = {0}:{1}:{2}:{3}", diff.Hours, diff.Minutes, diff.Seconds, diff.Milliseconds, (this._batchSize > 0) ? "Enabled" : "Disabled");

                return true;
            }
        }
    }
    catch (FaultException)
    {
        // Best-effort: a service fault is reported to the caller as a failed sync.
        // NOTE(review): consider logging the fault details before returning.
        return false;
    }
}
/// <summary>
/// Configure the SqlSyncProvider. Note that this method assumes you have a direct connection
/// to the server as this is more of a design time use case vs. runtime use case. We think
/// of provisioning the server as something that occurs before an application is deployed whereas
/// provisioning the client is something that happens during runtime (on initial sync) after the
/// application is deployed.
/// </summary>
/// <param name="ScopeName">Name of the sync scope to provision or attach to.</param>
/// <param name="serverConnectionString">Connection string of the server database.</param>
/// <param name="clientId">Client id used in the scope's row filter.</param>
/// <returns>A configured <see cref="SqlSyncProvider"/> for the server.</returns>
public SqlSyncProvider ConfigureSqlSyncProvider(string ScopeName, string serverConnectionString, Guid clientId)
{
    SqlSyncProvider provider = new SqlSyncProvider();
    provider.ScopeName = ScopeName;
    provider.Connection = new SqlConnection();
    provider.Connection.ConnectionString = serverConnectionString;

    DbSyncScopeDescription scopeDesc = new DbSyncScopeDescription(ScopeName);
    SqlSyncScopeProvisioning serverConfig = new SqlSyncScopeProvisioning((System.Data.SqlClient.SqlConnection)provider.Connection);

    // Provision the scope only once; subsequent calls simply attach to it.
    if (!serverConfig.ScopeExists(ScopeName))
    {
        serverConfig.ObjectSchema = "dbo.";

        scopeDesc.Tables.Add(SqlSyncDescriptionBuilder.GetDescriptionForTable("Client", (System.Data.SqlClient.SqlConnection)provider.Connection));
        serverConfig.PopulateFromScopeDescription(scopeDesc);

        // Indicate that the base table already exists and does not need to be created.
        serverConfig.SetCreateTableDefault(DbSyncCreationOption.Skip);

        // Filter rows so each client only syncs its own data.
        // NOTE(review): clientId is a Guid, so embedding it in the filter
        // clause cannot inject SQL, but a parameterized filter would be tidier.
        serverConfig.Tables["Client"].AddFilterColumn("ClientId");
        serverConfig.Tables["Client"].FilterClause = "[side].[ClientId] = '" + clientId + "'";

        // Create new selectchanges procedure for our scope.
        serverConfig.SetCreateProceduresForAdditionalScopeDefault(DbSyncCreationOption.Create);

        // Provision the server.
        serverConfig.Apply();
    }

    // Register the BatchSpooled and BatchApplied events. These are fired when
    // a provider is either enumerating or applying changes in batches.
    provider.BatchApplied += new EventHandler<DbBatchAppliedEventArgs>(provider_BatchApplied);
    provider.BatchSpooled += new EventHandler<DbBatchSpooledEventArgs>(provider_BatchSpooled);

    return provider;
}
/// <summary>
/// Provisions the "test" scope on both the client and server databases
/// (skipping base-table creation locally), runs a sync, and prints
/// upload statistics.
/// </summary>
/// <param name="args">Command-line arguments (unused).</param>
private static void Main(string[] args)
{
    SyncOrchestrator sync = new SyncOrchestrator();
    string scopeName = "test";

    SqlConnection localData = new SqlConnection(@"Data Source=nipun;Initial Catalog=ClientData;Integrated Security=True;");
    SqlConnection serverData = new SqlConnection(@"Data Source=nipun;Initial Catalog=ServerData;Integrated Security=True;");

    SqlSyncProvider localProvider = new SqlSyncProvider(scopeName, localData);
    SqlSyncProvider serverProvider = new SqlSyncProvider(scopeName, serverData);

    // Provision the client scope from the local "abc" table, skipping
    // creation of the base table (it already exists).
    SqlSyncScopeProvisioning scopeProvisionLocal = new SqlSyncScopeProvisioning(localData);
    if (!scopeProvisionLocal.ScopeExists(scopeName))
    {
        DbSyncScopeDescription scopeDesc = new DbSyncScopeDescription(scopeName);
        scopeDesc.Tables.Add(SqlSyncDescriptionBuilder.GetDescriptionForTable("abc", localData));
        scopeProvisionLocal.PopulateFromScopeDescription(scopeDesc);
        scopeProvisionLocal.SetCreateTableDefault(DbSyncCreationOption.Skip);
        scopeProvisionLocal.Apply();
    }

    // Provision the server scope from the client's scope description.
    SqlSyncScopeProvisioning scopeProvisionRemote = new SqlSyncScopeProvisioning(serverData);
    if (!scopeProvisionRemote.ScopeExists(scopeName))
    {
        DbSyncScopeDescription scopeDesc = SqlSyncDescriptionBuilder.GetDescriptionForScope(scopeName, localData);
        scopeProvisionRemote.PopulateFromScopeDescription(scopeDesc);
        scopeProvisionRemote.Apply();
    }

    // FIX: removed the unused "romve" SqlSyncScopeProvisioning local
    // that was constructed and never used.

    sync.LocalProvider = localProvider;
    sync.RemoteProvider = serverProvider;

    SyncOperationStatistics stats = sync.Synchronize();

    Console.WriteLine("Update Data:\t\t {0}", stats.UploadChangesApplied);
    Console.WriteLine("Update Data ChangesFailed:\t\t {0}", stats.UploadChangesFailed);
    Console.WriteLine("Update Data Changes:\t\t {0}", stats.UploadChangesTotal);
    Console.ReadLine();
}
/// <summary>
/// Get changes for a client using the knowledge that is passed in.
/// A null/empty blob is treated as a brand-new client: a fresh scope is
/// created and tombstones are stripped from the result.
/// </summary>
/// <param name="serverBlob">Client knowledge as byte[]; null or empty for a new client.</param>
/// <returns>Response containing the new knowledge and the list of changes.</returns>
public GetChangesResponse GetChanges(byte[] serverBlob)
{
    bool isNewClient = false;
    var response = new GetChangesResponse();
    var syncBlob = new SyncBlob();
    byte[] clientKnowledgeBlob = null;

    // If the incoming knowledge blob is null, then we need to initialize a new scope
    // for this request.
    if (null == serverBlob || 0 == serverBlob.Length)
    {
        // Create a new Guid and use that as the client Id.
        Guid clientId = Guid.NewGuid();
        _clientScopeName = String.Format(CultureInfo.InvariantCulture, "{0}_{1}", _scopeName, clientId);
        _clientSyncId = new SyncId(clientId);
        CreateNewScopeForClient();
        isNewClient = true;
        syncBlob.ClientScopeName = clientId.ToString();
    }
    else
    {
        SyncBlob incomingBlob = SyncBlob.DeSerialize(serverBlob);
        PopulateClientScopeNameAndSyncId(incomingBlob);
        // Echo the scope name back to the client in the response blob.
        syncBlob.ClientScopeName = incomingBlob.ClientScopeName;
        clientKnowledgeBlob = incomingBlob.ClientKnowledge;
        if (null != incomingBlob.BatchCode && null != incomingBlob.NextBatch)
        {
            // This is a batched request, so handle it separately.
            return GetChanges(incomingBlob.ClientKnowledge, incomingBlob.BatchCode.Value, incomingBlob.NextBatch.Value);
        }
    }

    // Initialize a SqlSyncProvider object.
    _sqlSyncProvider = CreateSqlSyncProviderInstance(_clientScopeName, _serverConnectionString, _configuration.SyncObjectSchema);

    var sessionContext = new SyncSessionContext(_sqlSyncProvider.IdFormats, new SyncCallbacks());
    _sqlSyncProvider.BeginSession(SyncProviderPosition.Remote, sessionContext);

    try
    {
        // Get the SyncKnowledge from the blob. If the blob is null, initialize a default SyncKnowledge object.
        SyncKnowledge clientKnowledge = GetSyncKnowledgeFromBlob(clientKnowledgeBlob);

        DbSyncContext dbSyncContext;

        // A batch size of 0 tells the provider not to batch at all.
        uint changeBatchSize = (_configuration.IsBatchingEnabled) ? (uint)_configuration.DownloadBatchSizeInKB : 0;

        RowSorter rowSorter = null;

        // Drain every batch the provider produces for this knowledge.
        do
        {
            object changeDataRetriever;

            // Get the next batch.
            _sqlSyncProvider.GetChangeBatch(changeBatchSize, clientKnowledge, out changeDataRetriever);
            dbSyncContext = (DbSyncContext)changeDataRetriever;

            // Only initialize the RowSorter, if the data is batched.
            if (null == rowSorter && _configuration.IsBatchingEnabled)
            {
                // Clone the client knowledge.
                var clonedClientKnowledge = clientKnowledge.Clone();
                // Combine with the MadeWithKnowledge of the server.
                clonedClientKnowledge.Combine(dbSyncContext.MadeWithKnowledge);
                // Use the combined knowledge to get an instance of the RowSorter class.
                rowSorter = GetRowSorter(clonedClientKnowledge);
            }

            // Remove version information from the result dataset.
            RemoveSyncVersionColumns(dbSyncContext.DataSet);

            // For a new client, we don't want to send tombstones. This will reduce amount of data
            // transferred and the client doesn't care about tombstones anyways.
            if (isNewClient)
            {
                RemoveTombstoneRowsFromDataSet(dbSyncContext.DataSet);
            }

            // Add the dataset to the row sorter. Only use this if batching is enabled.
            if (_configuration.IsBatchingEnabled)
            {
                rowSorter.AddUnsortedDataSet(dbSyncContext.DataSet);

                // Delete the batch file generated by the provider, since we have read it.
                // Otherwise we will keep accumulating files which are not needed.
                if (!String.IsNullOrEmpty(dbSyncContext.BatchFileName) && File.Exists(dbSyncContext.BatchFileName))
                {
                    File.Delete(dbSyncContext.BatchFileName);
                }
            }
        } while (!dbSyncContext.IsLastBatch && dbSyncContext.IsDataBatched);

        List<IOfflineEntity> entities;

        if (_configuration.IsBatchingEnabled)
        {
            // Batching is enabled: persist the sorted batches and return the first one.
            Batch batch = SaveBatchesAndReturnFirstBatch(rowSorter);
            if (null == batch)
            {
                // No changes at all for this client.
                entities = new List<IOfflineEntity>();
            }
            else
            {
                // Convert to entities.
                entities = _converter.ConvertDataSetToEntities(batch.Data);

                // Only combine the knowledge of this batch.
                clientKnowledge.Combine(SyncKnowledge.Deserialize(_sqlSyncProvider.IdFormats, batch.LearnedKnowledge));

                response.IsLastBatch = batch.IsLastBatch;
                syncBlob.IsLastBatch = batch.IsLastBatch;
                if (batch.IsLastBatch)
                {
                    // Final batch: clear the continuation markers.
                    syncBlob.NextBatch = null;
                    syncBlob.BatchCode = null;
                }
                else
                {
                    // More batches follow; tell the client how to request the next one.
                    syncBlob.NextBatch = batch.NextBatch;
                    syncBlob.BatchCode = batch.BatchCode;
                }
            }
        }
        else
        {
            // No batching.
            response.IsLastBatch = true;
            entities = _converter.ConvertDataSetToEntities(dbSyncContext.DataSet);
            // combine the client and the server knowledge.
            // the server may have an updated knowledge from the last time the client sync'd.
            clientKnowledge.Combine(dbSyncContext.MadeWithKnowledge);
        }

        // Save data in the response object.
        syncBlob.ClientKnowledge = clientKnowledge.Serialize();
        response.ServerBlob = syncBlob.Serialize();
        response.EntityList = entities;
    }
    finally
    {
        // Always close the provider session, even on failure.
        _sqlSyncProvider.EndSession(sessionContext);
    }

    return response;
}
/// <summary>
/// Click handler: verifies the master and slave scope configurations match,
/// then runs an upload-and-download sync between the two databases and
/// reports the result in a message box.
/// </summary>
/// <param name="sender">Event source (the sync button).</param>
/// <param name="e">Click event arguments.</param>
private void syncButton_Click(object sender, EventArgs e)
{
    using (SqlSyncProvider masterProvider = new SqlSyncProvider { ScopeName = this.scopeNameTextBox.Text }, slaveProvider = new SqlSyncProvider { ScopeName = this.scopeNameTextBox.Text })
    {
        using (SqlConnection master = new SqlConnection(Settings.Default.MasterConnectionString), slave = new SqlConnection(Settings.Default.SlaveConnectionString))
        {
            string masterScopeConfig;
            string slaveScopeConfig;

            // Read the raw scope_config XML for the scope from the master database.
            // NOTE(review): TableName is interpolated into the SQL text; it appears
            // to be a constant here, but a parameterized query would be safer.
            using (SqlCommand command = master.CreateCommand())
            {
                master.Open();
                command.CommandText = string.Format("SELECT scope_config.config_data FROM scope_config INNER JOIN scope_info ON scope_config.config_id = scope_info.scope_config_id WHERE scope_info.sync_scope_name = N'{0}'", TableName);
                masterScopeConfig = command.ExecuteScalar() as string;
                master.Close();
            }

            // Read the same scope configuration from the slave database.
            using (SqlCommand command = slave.CreateCommand())
            {
                slave.Open();
                command.CommandText = string.Format("SELECT scope_config.config_data FROM scope_config INNER JOIN scope_info ON scope_config.config_id = scope_info.scope_config_id WHERE scope_info.sync_scope_name = N'{0}'", TableName);
                slaveScopeConfig = command.ExecuteScalar() as string;
                slave.Close();
            }

            // The two scope definitions must be identical before syncing.
            if (masterScopeConfig != slaveScopeConfig)
            {
                MessageBox.Show("The master scope does not match the slave scope", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                return;
            }

            masterProvider.Connection = master;
            slaveProvider.Connection = slave;

            // Slave is the local side, master is the remote side; changes flow both ways.
            SyncOrchestrator orchestrator = new SyncOrchestrator { LocalProvider = slaveProvider, RemoteProvider = masterProvider, Direction = SyncDirectionOrder.UploadAndDownload };

            try
            {
                SyncOperationStatistics stats = orchestrator.Synchronize();
                MessageBox.Show("Downloaded: " + stats.DownloadChangesTotal + "Uploaded: " + stats.UploadChangesApplied, "Complete", MessageBoxButtons.OK, MessageBoxIcon.Information);
            }
            catch (Exception ex)
            {
                // Surface any sync failure to the user rather than crashing the UI.
                MessageBox.Show(ex.Message, "Sync Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
            }
        }
    }
}
/// <summary>
/// Builds a SqlSyncProvider for the given scope and connection, optionally
/// enabling memory batching, and subscribes the sync event handlers.
/// </summary>
/// <param name="nombreDeAmbito">Scope name.</param>
/// <param name="conexionSql">SQL connection the provider will use.</param>
/// <param name="tamañoDeCache">Memory data cache size in KB; 0 disables batching.</param>
/// <param name="tamañoTransaccion">Application transaction size, applied only when batching is enabled.</param>
/// <returns>The configured provider.</returns>
private SqlSyncProvider ObtenerProveedor(String nombreDeAmbito, SqlConnection conexionSql, uint tamañoDeCache, uint tamañoTransaccion)
{
    SqlSyncProvider proveedor = new SqlSyncProvider(nombreDeAmbito, conexionSql, this.prefijoMetadataSyncFramework, this.esquemaMetadataSyncFramework);

    if (tamañoDeCache > 0)
    {
        // KB --> cache files are stored in BatchingDirectory (%tmp% by default).
        proveedor.MemoryDataCacheSize = tamañoDeCache;
        proveedor.ApplicationTransactionSize = tamañoTransaccion;
    }

    proveedor.Connection = conexionSql;

    // This handler contains important logic; do not remove.
    proveedor.ApplyChangeFailed += new EventHandler<DbApplyChangeFailedEventArgs>(proveedor_ApplyChangeFailed);

    // Optionally subscribe to every diagnostic/progress event.
    if (this.SuscribirATodosLosEventos)
    {
        proveedor.BatchApplied += new EventHandler<DbBatchAppliedEventArgs>(Proveedor_BatchApplied);
        proveedor.BatchSpooled += new EventHandler<DbBatchSpooledEventArgs>(Proveedor_BatchSpooled);
        proveedor.ChangesSelected += new EventHandler<DbChangesSelectedEventArgs>(proveedor_ChangesSelected);
        proveedor.ApplyingChanges += new EventHandler<DbApplyingChangesEventArgs>(proveedor_ApplyingChanges);
        proveedor.ApplyMetadataFailed += new EventHandler<ApplyMetadataFailedEventArgs>(proveedor_ApplyMetadataFailed);
        proveedor.ChangesApplied += new EventHandler<DbChangesAppliedEventArgs>(proveedor_ChangesApplied);
        proveedor.DbConnectionFailure += new EventHandler<DbConnectionFailureEventArgs>(proveedor_DbConnectionFailure);
        proveedor.SelectingChanges += new EventHandler<DbSelectingChangesEventArgs>(proveedor_SelectingChanges);
        proveedor.SyncPeerOutdated += new EventHandler<DbOutdatedEventArgs>(proveedor_SyncPeerOutdated);
        proveedor.SyncProgress += new EventHandler<DbSyncProgressEventArgs>(proveedor_SyncProgress);
    }

    return proveedor;
}
/// <summary>
/// Create a new instance of the SqlSyncProvider class.
/// </summary>
/// <param name="clientScopeName">Scope name</param>
/// <param name="serverConnectionString">Connection string</param>
/// <param name="syncObjectSchema">Schema for sync objects</param>
/// <returns>Instance of <see cref="SqlSyncProvider" /> class.</returns>
private static SqlSyncProvider CreateSqlSyncProviderInstance(string clientScopeName, string serverConnectionString, string syncObjectSchema)
{
    var provider = new SqlSyncProvider(clientScopeName, new SqlConnection(serverConnectionString));

    // Only override the object schema when one was configured.
    if (!String.IsNullOrEmpty(syncObjectSchema))
    {
        provider.ObjectSchema = syncObjectSchema;
    }

    return provider;
}
/// <summary>
/// Gets the next batch of changes for a client. Falls back to the full
/// GetChanges call when the requested batch is no longer available.
/// </summary>
/// <param name="serverBlob">Client knowledge as byte[]</param>
/// <param name="batchCode">Batch code for the batch</param>
/// <param name="nextBatchSequenceNumber">Sequence number of the next batch</param>
/// <returns>Response containing the new knowledge and the list of changes.</returns>
private GetChangesResponse GetChanges(byte[] serverBlob, Guid batchCode, Guid nextBatchSequenceNumber)
{
    // FIX: report the actual parameter name ("serverBlob") in the argument
    // check; the original passed the unrelated name "clientKnowledgeBlob".
    WebUtil.CheckArgumentNull(serverBlob, "serverBlob");

    // Get the next batch using the batch handler implementation.
    Batch batch = _batchHandler.GetNextBatch(batchCode, nextBatchSequenceNumber);

    if (null == batch)
    {
        // Since we didn't get a batch, default to the full get changes call.
        return GetChanges(serverBlob);
    }

    // Initialize a SqlSyncProvider object.
    _sqlSyncProvider = CreateSqlSyncProviderInstance(_clientScopeName, _serverConnectionString, _configuration.SyncObjectSchema);

    SyncKnowledge clientKnowledge = GetSyncKnowledgeFromBlob(serverBlob);

    List<IOfflineEntity> entities = _converter.ConvertDataSetToEntities(batch.Data);

    // Only combine the knowledge of this batch.
    clientKnowledge.Combine(SyncKnowledge.Deserialize(_sqlSyncProvider.IdFormats, batch.LearnedKnowledge));

    var syncBlob = new SyncBlob
    {
        ClientScopeName = _clientSyncId.GetGuidId().ToString(),
        ClientKnowledge = clientKnowledge.Serialize(),
        BatchCode = batch.BatchCode,
        IsLastBatch = batch.IsLastBatch,
        NextBatch = batch.NextBatch
    };

    // Save data in the response object.
    var response = new GetChangesResponse
    {
        EntityList = entities,
        IsLastBatch = batch.IsLastBatch,
        ServerBlob = syncBlob.Serialize()
    };

    return response;
}
/// <summary>
/// Create a SqlSyncProvider instance without provisioning its database.
/// </summary>
/// <param name="ScopeName">Name of the sync scope.</param>
/// <param name="sqlConnection">Connection the provider will use.</param>
/// <returns>The configured provider.</returns>
public SqlSyncProvider ConfigureSqlSyncProvider(string ScopeName, SqlConnection sqlConnection)
{
    // Scope name and connection are all the configuration this provider needs.
    SqlSyncProvider syncProvider = new SqlSyncProvider
    {
        ScopeName = ScopeName,
        Connection = sqlConnection
    };

    // Register the BatchSpooled and BatchApplied events. These are fired when
    // a provider is either enumerating or applying changes in batches.
    syncProvider.BatchApplied += provider_BatchApplied;
    syncProvider.BatchSpooled += provider_BatchSpooled;

    return syncProvider;
}
/// <summary>
/// Sync a single scope between a client and a server database.
/// When only downloading and the scopes differ, the client is reprovisioned;
/// a two-way sync with divergent scopes is rejected.
/// </summary>
/// <param name="server">Open connection to the server database.</param>
/// <param name="client">Open connection to the client database.</param>
/// <param name="scope">Name of the scope to sync.</param>
/// <param name="order">Direction of the sync (upload, download, or both).</param>
/// <param name="callback">Invoked while changes are being applied on the client (slave) side.</param>
/// <param name="mastercallback">Invoked while changes are being applied on the server (master) side.</param>
/// <returns>Statistics describing the completed sync session.</returns>
public static SyncOperationStatistics syncscope(SqlConnection server, SqlConnection client, string scope, SyncDirectionOrder order, Action<object, DbApplyingChangesEventArgs> callback, Action<object, DbApplyingChangesEventArgs> mastercallback)
{
    // If we are only doing a download and the scope on the database
    // is out of date then we need to reprovision the data, but for now just
    // error.
    if (order == SyncDirectionOrder.Download && ScopesDiffer(server, client, scope))
    {
        ProgressUpdate("Scope has changed on server. Reprovisoning client");
        Provisioning.ProvisionTable(server, client, scope, true);
    }
    else if (order != SyncDirectionOrder.Download && ScopesDiffer(server, client, scope))
    {
        // A two-way sync cannot proceed when the scope definitions have diverged.
        throw new DbSyncException("Can not sync twoway tables with changed scopes");
    }

    using (SqlSyncProvider masterProvider = new SqlSyncProvider(scope, server), slaveProvider = new SqlSyncProvider(scope, client))
    {
        // Slave is the local side, master is the remote side.
        SyncOrchestrator orchestrator = new SyncOrchestrator
        {
            LocalProvider = slaveProvider,
            RemoteProvider = masterProvider,
            Direction = order
        };

        slaveProvider.ApplyingChanges += new EventHandler<DbApplyingChangesEventArgs>(callback);
        masterProvider.ApplyingChanges += new EventHandler<DbApplyingChangesEventArgs>(mastercallback);
        slaveProvider.ApplyChangeFailed += slaveProvider_ApplyChangeFailed;

        return orchestrator.Synchronize();
    }
}
/// <summary>
/// Check to see if the passed in SqlSyncProvider needs Schema from server.
/// If the provider's scope does not exist locally, the scope description is
/// fetched from the server and the local database is provisioned with it.
/// </summary>
/// <param name="localProvider">Provider whose local scope is checked; ignored when null.</param>
/// <param name="serverConnectionString">Connection string used to reach the server for the scope description.</param>
private void CheckIfProviderNeedsSchema(SqlSyncProvider localProvider, string serverConnectionString)
{
    if (localProvider == null)
    {
        return;
    }

    SqlConnection conn = (SqlConnection)localProvider.Connection;
    SqlSyncScopeProvisioning sqlConfig = new SqlSyncScopeProvisioning(conn);
    string scopeName = localProvider.ScopeName;

    if (!sqlConfig.ScopeExists(scopeName))
    {
        // FIX: dispose the proxy even when GetScopeDescription throws;
        // the original only called Dispose() on the success path.
        SqlSyncProviderProxy serverProxy = new SqlSyncProviderProxy(scopeName, serverConnectionString);
        DbSyncScopeDescription scopeDesc;
        try
        {
            scopeDesc = serverProxy.GetScopeDescription(scopeName, serverConnectionString);
        }
        finally
        {
            serverProxy.Dispose();
        }

        // Provision the local database from the server's scope description.
        sqlConfig.PopulateFromScopeDescription(scopeDesc);
        sqlConfig.Apply();
    }
}