public static async Task TestGenarate()
{
    try
    {
        Console.WriteLine("TestGenarate Running.. at " + DateTime.Now.ToString() + " " + TimeZoneInfo.Local.ToString());
        var repSync = new SyncRepository(UtilsProvider.HostingEnvironment, UtilsProvider.Config);
        await repSync.GenerateReport(new CustomModel.ParamReportModel
        {
            UnitNo = "C01",
            TDefectId = 44489,
            ProjectCode = "10060",
            ProjectType = "H"
        });
    }
    catch (Exception ex)
    {
        // Unwrap to the innermost exception before logging and rethrowing.
        while (ex.InnerException != null)
        {
            ex = ex.InnerException;
        }
        var ilog = UtilsProvider.ApplicationLogging.CreateLogger<Startup>();
        ilog.LogError("TestGenarate Error :: " + ex.Message);
        throw ex;
    }
}
/// <summary>
/// Fire up the repos and service context in the constructor.
/// </summary>
/// <param name="oAuthorization"></param>
public SyncService(OAuthorizationdto oAuthorization)
{
    dataserviceFactory = new DataserviceFactory(oAuthorization);
    dataService = dataserviceFactory.getDataService();
    syncObjects = new Syncdto();
    syncRepository = new SyncRepository();
}
public async Task ListUpstreamOnlyMaster()
{
    using (var dir = new SelfDeletingDirectory(Path.GetFullPath(Path.Combine(basePath, nameof(this.ListUpstreamOnlyMaster)))))
    {
        var upstreamPath = Path.Combine(dir.Path, "Upstream");
        var authorPath = Path.Combine(dir.Path, "Author");
        var downstreamPath = Path.Combine(dir.Path, "Downstream");

        Repository.Init(upstreamPath, true);
        Repository.Clone(upstreamPath, authorPath);
        using (var repo = new Repository(authorPath))
        {
            var testFilePath = Path.Combine(dir.Path, "Author/test.txt");
            File.WriteAllText(testFilePath, "Some test data.");
            Commands.Stage(repo, testFilePath);
            var signature = new Signature("Test Bot", "*****@*****.**", DateTime.Now);
            repo.Commit("Added test data", signature, signature);
            var syncRepo = new SyncRepository(repo, mockup.Get<ICommitRepository>(), mockup.Get<IGitCredentialsProvider>(), mockup.Get<IProcessRunner>());
            await syncRepo.Push();
        }

        Repository.Clone(upstreamPath, downstreamPath);
        using (var repo = new Repository(downstreamPath))
        {
            var branchRepo = new BranchRepository(repo, mockup.Get<ICommitRepository>());
            var branches = branchRepo.List();
            Assert.Single(branches.Items);
            Assert.Equal("master", branches.Items.First().FriendlyName);
            Assert.Equal("refs/heads/master", branches.Items.First().CanonicalName);
        }
    }
}
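The test above exercises SyncRepository.Push without showing its body. For orientation only, a push of the checked-out branch with LibGit2Sharp might look like the sketch below; the "origin" remote, the credentials callback, and the helper name are assumptions, not this project's actual implementation.

using LibGit2Sharp;
using LibGit2Sharp.Handlers;

// Minimal sketch, not the project's SyncRepository.Push().
public static class PushSketch
{
    public static void PushCurrentBranch(Repository repo, CredentialsHandler credentials)
    {
        var remote = repo.Network.Remotes["origin"];
        var options = new PushOptions { CredentialsProvider = credentials };
        // Pushes the checked-out branch to the same-named upstream ref.
        repo.Network.Push(remote, repo.Head.CanonicalName, options);
    }
}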
private async Task<ISyncRepository> GetSyncRepository()
{
    var repo = new SyncRepository(LogManager.GetLogger("SyncRepository"), ServerConfigurationManager.ApplicationPaths);
    await repo.Initialize().ConfigureAwait(false);
    return repo;
}
public AuditMigrationProcessor(SyncRepository syncRepository, IGetOpenConnection targetDatabase, IConfigurationService configurationService, ILogService logService)
{
    _logService = logService;
    _syncRepository = syncRepository;
    _auditRepository = new AuditRepository(configurationService, logService);
    _candidateRepository = new CandidateRepository(targetDatabase);
    _applicationRepository = new ApplicationRepository(targetDatabase);
}
/// <summary>
/// Action: OnStateElection
/// Description: This is the error filter; it captures any error that occurs while performing the data sync.
/// If an error occurred, it updates the sync status to 3 (failed).
/// </summary>
/// <param name="context"></param>
void IElectStateFilter.OnStateElection(ElectStateContext context)
{
    Console.WriteLine(string.Format("Job `{0}` has been changed to state `{1}`", context.BackgroundJob?.Id, context.CandidateState.Name));
    //Get current state
    var failedState = context.CandidateState as FailedState;
    if (failedState != null)
    {
        Console.WriteLine(string.Format("Job `{0}` has been failed due to an exception `{1}`", context.BackgroundJob.Id, failedState.Exception));
        var ccid = context.BackgroundJob.Job.Args.ElementAt(2) as string;
        int connectorId = (int)context.BackgroundJob.Job.Args.ElementAt(1);
        if (!string.IsNullOrEmpty(ccid) && connectorId > 0)
        {
            if (GC.GetTotalMemory(false) >= 67108864) // >= 64 MB
            {
                Console.WriteLine($"GC.Generation: 2, max allocated memory: {GC.GetTotalMemory(false)}");
                GC.Collect(2);
                GC.WaitForPendingFinalizers();
                GC.Collect(2);
                Console.WriteLine($"Max allocated memory after GC.Collect: {GC.GetTotalMemory(false)}");
            }
            if (GC.GetTotalMemory(false) >= 33554432) // >= 32 MB
            {
                Console.WriteLine($"GC.Generation: 1, max allocated memory: {GC.GetTotalMemory(false)}");
                GC.Collect(1);
                GC.WaitForPendingFinalizers();
                GC.Collect(1);
                Console.WriteLine($"Max allocated memory after GC.Collect: {GC.GetTotalMemory(false)}");
            }
            if (GC.GetTotalMemory(false) >= 20971520) // >= 20 MB
            {
                Console.WriteLine($"GC.Generation: 0, max allocated memory: {GC.GetTotalMemory(false)}");
                GC.Collect();
                GC.WaitForPendingFinalizers();
                GC.Collect();
                Console.WriteLine($"Max allocated memory after GC.Collect: {GC.GetTotalMemory(false)}");
            }
            //set sync status to failed{3}
            var connectorLogs = new ConnectorLogs()
            {
                sync_ended_at = DateTime.UtcNow,
                sync_logs = new List<string>()
                {
                    HttpUtility.UrlEncode($"{DateTime.UtcNow:yyyy-MM-dd HH:mm:ss.fff zzz} [{LogLevel.Error}]: {failedState.Exception} {Environment.NewLine}")
                }
            };
            SyncRepository.UpdateSyncInfo(id: connectorId, ccid: ccid, status: 3, connectorLogs: connectorLogs);
        }
    }
}
public async Task ListUpstreamMultiple()
{
    using (var dir = new SelfDeletingDirectory(Path.GetFullPath(Path.Combine(basePath, nameof(this.ListUpstreamMultiple)))))
    {
        var upstreamPath = Path.Combine(dir.Path, "Upstream");
        var authorPath = Path.Combine(dir.Path, "Author");
        var downstreamPath = Path.Combine(dir.Path, "Downstream");
        var identity = new Identity("Test Bot", "*****@*****.**");

        Repository.Init(upstreamPath, true);
        Repository.Clone(upstreamPath, authorPath);
        using (var repo = new Repository(authorPath))
        {
            var testFilePath = Path.Combine(dir.Path, "Author/test.txt");
            File.WriteAllText(testFilePath, "Some test data.");
            Commands.Stage(repo, testFilePath);
            var signature = new Signature(identity, DateTime.Now);
            repo.Commit("Added test data", signature, signature);

            //Sync main branch
            var syncRepo = new SyncRepository(repo, mockup.Get<ICommitRepository>(), mockup.Get<IGitCredentialsProvider>(), mockup.Get<IProcessRunner>());
            await syncRepo.Push();
            var authorBranchRepo = new BranchRepository(repo, mockup.Get<ICommitRepository>());

            //Create side branch
            authorBranchRepo.Add("sidebranch");
            authorBranchRepo.Checkout("sidebranch", new Signature(identity, DateTime.Now));
            File.WriteAllText(testFilePath, "Some test data sidebranch.");
            Commands.Stage(repo, testFilePath);
            signature = new Signature(identity, DateTime.Now);
            repo.Commit("Added test data", signature, signature);
            await syncRepo.Push();

            //Create another branch
            authorBranchRepo.Add("another");
            authorBranchRepo.Checkout("another", new Signature(identity, DateTime.Now));
            File.WriteAllText(testFilePath, "Some test data another.");
            Commands.Stage(repo, testFilePath);
            signature = new Signature(identity, DateTime.Now);
            repo.Commit("Added test data", signature, signature);
            await syncRepo.Push();
        }

        Repository.Clone(upstreamPath, downstreamPath);
        using (var repo = new Repository(downstreamPath))
        {
            var branchRepo = new BranchRepository(repo, mockup.Get<ICommitRepository>());
            var branches = branchRepo.List();
            Assert.Equal(3, branches.Items.Count());
        }
    }
}
public static void StartSync(BackgroundWorker backgroundWorker, bool forceFullSync = false)
{
    try
    {
        LoggerUtility.InitializeLogger();
        DateTime syncStartTime = DateTime.Now.AddMinutes(-15);
        //LoggerUtility.Logger.Info($"POS sync started at {syncStartTime.ToLongTimeString()}");
        backgroundWorker?.ReportProgress(0, $"POS sync started at {syncStartTime.ToLongTimeString()}");
        SyncRepository syncRepository = new SyncRepository();
        CloudRepository cloudRepository = new CloudRepository();

        // down sync: cloud -> POS
        DataTable dtEntity = cloudRepository.GetEntityData(branchinfo.BranchCounterID, "FromCloud");
        foreach (DataRow entityRow in dtEntity.Rows)
        {
            string entityName = entityRow["ENTITYNAME"].ToString();
            //LoggerUtility.Logger.Info($"{entityName} down sync started");
            ReportText(backgroundWorker, $"{entityName} down sync started");
            DataTable dtEntityWiseData = cloudRepository.GetEntityWiseData(entityName, forceFullSync ? "01-01-1900" : entityRow["SYNCDATE"], branchinfo.BranchID);
            ReportText(backgroundWorker, $"Found {dtEntityWiseData.Rows.Count} records to down sync in entity : {entityName} ");
            syncRepository.SaveData(entityName, dtEntityWiseData);
            cloudRepository.UpdateEntitySyncStatus(entityRow["ENTITYSYNCSTATUSID"], syncStartTime);
            ReportText(backgroundWorker, $"{entityName} down sync completed");
        }

        // start up sync: POS -> cloud
        dtEntity = cloudRepository.GetEntityData(branchinfo.BranchCounterID, "ToCloud");
        foreach (DataRow entityRow in dtEntity.Rows)
        {
            string entityName = entityRow["ENTITYNAME"].ToString();
            ReportText(backgroundWorker, $"{entityName} up sync started");
            DataTable dtEntityWiseData = syncRepository.GetEntityWiseData(entityName, entityRow["SYNCDATE"]);
            ReportText(backgroundWorker, $"Found {dtEntityWiseData.Rows.Count} records to up sync in entity : {entityName} ");
            cloudRepository.SaveData(entityName, dtEntityWiseData);
            cloudRepository.UpdateEntitySyncStatus(entityRow["ENTITYSYNCSTATUSID"], syncStartTime);
            ReportText(backgroundWorker, $"{entityName} up sync completed");
        }

        // clear old data
        ReportText(backgroundWorker, $"clearing one month old data");
        syncRepository.ClearOldData();
        //LoggerUtility.Logger.Info($"POS sync completed");
        backgroundWorker?.ReportProgress(0, $"POS sync completed at {DateTime.Now.ToLongTimeString()}");
    }
    catch (Exception ex)
    {
        XtraMessageBox.Show($"Error while running sync : {ex.Message} {Environment.NewLine} {ex.StackTrace}");
    }
}
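StartSync reports progress through the optional BackgroundWorker argument. A minimal caller might wire it up as follows; the handler bodies are illustrative, not taken from the source.

using System.ComponentModel;

// Illustrative wiring for StartSync's progress reporting.
var worker = new BackgroundWorker { WorkerReportsProgress = true };
worker.DoWork += (s, e) => StartSync(worker, forceFullSync: false);
worker.ProgressChanged += (s, e) => Console.WriteLine(e.UserState); // status strings from ReportProgress/ReportText
worker.RunWorkerAsync();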
/// <summary>
/// Action: OnCreating
/// Description: Checks whether a sync for the current connector is already in progress.
/// If not, the job is allowed to proceed; otherwise the new schedule is cancelled. This is the entry filter for all sync tasks.
/// Sync status 1 means pending/ongoing.
/// </summary>
/// <param name="context"></param>
void IClientFilter.OnCreating(CreatingContext context)
{
    Console.WriteLine(string.Format("Creating a job based on method `{0}`", context.Job.Method.Name));
    var ccid = context.Job.Args.ElementAt(2) as string;
    int connectorId = (int)context.Job.Args.ElementAt(1);
    if (!string.IsNullOrEmpty(ccid) && connectorId > 0)
    {
        //Check connector status. If it is 1 then cancel the new schedule
        if (SyncRepository.GetSyncStatus(ccid: ccid, connectorId: connectorId) == 1)
        {
            context.Canceled = true;
        }
    }
}
/// <summary>
/// Action: OnPerformed
/// Description: This is the final filter; it updates the sync status to 2.
/// Sync status 2 means completed.
/// </summary>
/// <param name="context"></param>
void IServerFilter.OnPerformed(PerformedContext context)
{
    if (context.Canceled == false && context.Exception == null)
    {
        Console.WriteLine(string.Format("Job `{0}` has been performed", context.BackgroundJob?.Id));
        var ccid = context.BackgroundJob.Job.Args.ElementAt(2) as string;
        int connectorId = (int)context.BackgroundJob.Job.Args.ElementAt(1);
        if (!string.IsNullOrEmpty(ccid) && connectorId > 0)
        {
            if (GC.GetTotalMemory(false) >= 67108864) // >= 64 MB
            {
                Console.WriteLine($"GC.Generation: 2, max allocated memory: {GC.GetTotalMemory(false)}");
                GC.Collect(2);
                GC.WaitForPendingFinalizers();
                GC.Collect(2);
                Console.WriteLine($"Max allocated memory after GC.Collect: {GC.GetTotalMemory(false)}");
            }
            if (GC.GetTotalMemory(false) >= 33554432) // >= 32 MB
            {
                Console.WriteLine($"GC.Generation: 1, max allocated memory: {GC.GetTotalMemory(false)}");
                GC.Collect(1);
                GC.WaitForPendingFinalizers();
                GC.Collect(1);
                Console.WriteLine($"Max allocated memory after GC.Collect: {GC.GetTotalMemory(false)}");
            }
            if (GC.GetTotalMemory(false) >= 20971520) // >= 20 MB
            {
                Console.WriteLine($"GC.Generation: 0, max allocated memory: {GC.GetTotalMemory(false)}");
                GC.Collect();
                GC.WaitForPendingFinalizers();
                GC.Collect();
                Console.WriteLine($"Max allocated memory after GC.Collect: {GC.GetTotalMemory(false)}");
            }
            //set sync status to completed{2}
            var connectorLogs = new ConnectorLogs()
            {
                sync_ended_at = DateTime.UtcNow,
                sync_logs = new List<string>()
            };
            SyncRepository.UpdateSyncInfo(id: connectorId, ccid: ccid, status: 2, connectorLogs: connectorLogs);
        }
    }
}
/// <summary>
/// Action: OnPerforming
/// Description: This is the third filter; it cancels any job whose sync status is no longer 1 when it starts performing.
/// </summary>
/// <param name="context"></param>
void IServerFilter.OnPerforming(PerformingContext context)
{
    Console.WriteLine(string.Format("Job `{0}` has been performing", context.BackgroundJob?.Id));
    if (context.Canceled == false)
    {
        var ccid = context.BackgroundJob.Job.Args.ElementAt(2) as string;
        int connectorId = (int)context.BackgroundJob.Job.Args.ElementAt(1);
        if (!string.IsNullOrEmpty(ccid) && connectorId > 0)
        {
            //Check connector status. If it is not 1 then cancel it
            if (SyncRepository.GetSyncStatus(ccid: ccid, connectorId: connectorId) != 1)
            {
                context.Canceled = true;
            }
        }
    }
}
public CandidateMigrationProcessor(ICandidateMappers candidateMappers, SyncRepository syncRepository, IGenericSyncRespository genericSyncRespository, IGetOpenConnection targetDatabase, IConfigurationService configurationService, ILogService logService)
{
    _candidateMappers = candidateMappers;
    _syncRepository = syncRepository;
    _genericSyncRespository = genericSyncRespository;
    _targetDatabase = targetDatabase;
    _logService = logService;
    _vacancyRepository = new VacancyRepository(targetDatabase);
    _localAuthorityRepository = new LocalAuthorityRepository(targetDatabase);
    _candidateRepository = new CandidateRepository(targetDatabase);
    _schoolAttendedRepository = new SchoolAttendedRepository(targetDatabase);
    _candidateHistoryRepository = new CandidateHistoryRepository(targetDatabase);
    _candidateUserRepository = new CandidateUserRepository(configurationService, _logService);
    _userRepository = new UserRepository(configurationService, logService);
    var configuration = configurationService.Get<MigrateFromFaaToAvmsPlusConfiguration>();
    _anonymiseData = configuration.AnonymiseData;
}
/// <summary>
/// Action: OnCreated
/// Description: This is the second filter; it updates the sync status to 1 for every job the previous filter let through. The jobs themselves have not started yet.
/// Sync status 1 means pending/ongoing.
/// </summary>
/// <param name="context"></param>
void IClientFilter.OnCreated(CreatedContext context)
{
    if (context.Canceled == false && context.Exception == null)
    {
        Console.WriteLine(string.Format("Job based on method `{0}` has been created with id `{1}`", context.Job.Method.Name, context.BackgroundJob?.Id));
        var ccid = context.Job.Args.ElementAt(2) as string;
        int connectorId = (int)context.Job.Args.ElementAt(1);
        string jobId = context.BackgroundJob?.Id;
        if (!string.IsNullOrEmpty(ccid) && connectorId > 0)
        {
            //set sync status to progress{1}.
            var connectorLogs = new ConnectorLogs()
            {
                sync_started_at = DateTime.UtcNow,
                sync_ended_at = null,
                sync_logs = new List<string>()
            };
            SyncRepository.UpdateSyncInfo(id: connectorId, ccid: ccid, status: 1, count: 0, jobid: jobId, connectorLogs: connectorLogs, totaluniquecount: 0, sync_updated_count: 0, deduped_count: 0, total_records_count: 0);
        }
    }
}
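The five callbacks above (OnCreating, OnCreated, OnPerforming, OnPerformed, OnStateElection) only run once their filter is registered with Hangfire. Assuming they live on a single class, whose name SyncJobFilter is hypothetical here, registration at startup is one line:

using Hangfire;

// Hypothetical host class for the callbacks shown above:
// public class SyncJobFilter : JobFilterAttribute, IClientFilter, IServerFilter, IElectStateFilter { ... }

// Register once at startup so every background job passes through the filter:
GlobalJobFilters.Filters.Add(new SyncJobFilter());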
//public static async Task RecurringJobDaily_Master()
//{
//    try
//    {
//        Console.WriteLine("RecurringJobDaily_Master Running.. at " + DateTime.Now.ToString() + " " + TimeZoneInfo.Local.ToString());
//        var repSync = new SyncRepository(UtilsProvider.HostingEnvironment, UtilsProvider.Config);
//        await repSync.ReplicateMasterWebToMobileAsync();
//    }
//    catch (Exception ex)
//    {
//        while (ex.InnerException != null)
//            ex = ex.InnerException;
//        var ilog = UtilsProvider.ApplicationLogging.CreateLogger<Startup>();
//        ilog.LogError("RecurringJobDaily_Master Error :: " + ex.Message);
//        throw ex;
//    }
//}

public static async Task RecurringJobTestHangFire_Master()
{
    try
    {
        Console.WriteLine("RecurringJobTestHangFire_Master Running.. at " + DateTime.Now.ToString() + " " + TimeZoneInfo.Local.ToString());
        var repSync = new SyncRepository(UtilsProvider.HostingEnvironment, UtilsProvider.Config);
        await repSync.TanonchaiJobSample();
    }
    catch (Exception ex)
    {
        // Unwrap to the innermost exception before logging and rethrowing.
        while (ex.InnerException != null)
        {
            ex = ex.InnerException;
        }
        var ilog = UtilsProvider.ApplicationLogging.CreateLogger<Startup>();
        ilog.LogError("RecurringJobTestHangFire_Master Error :: " + ex.Message);
        throw ex;
    }
}
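For the recurring job method above to fire on a schedule, something must register it with Hangfire. A plausible registration follows; the job id and cron choice are assumptions, and the call is made from the class that declares the method.

// Illustrative registration (job id and schedule are assumptions).
RecurringJob.AddOrUpdate("test-hangfire-master", () => RecurringJobTestHangFire_Master(), Cron.Daily(), TimeZoneInfo.Local);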
public MigrationProcessor(IConfigurationService configurationService, ILogService logService)
{
    _logService = logService;
    var configuration = configurationService.Get<MigrateFromFaaToAvmsPlusConfiguration>();

    //Ensure date precision is honoured
    Dapper.SqlMapper.AddTypeMap(typeof(DateTime), System.Data.DbType.DateTime2);

    var sourceDatabase = new GetOpenConnectionFromConnectionString(configuration.SourceConnectionString);
    var targetDatabase = new GetOpenConnectionFromConnectionString(configuration.TargetConnectionString);
    var genericSyncRespository = new GenericSyncRespository(_logService, sourceDatabase, targetDatabase);
    _syncRepository = new SyncRepository(targetDatabase);
    var applicationMappers = new ApplicationMappers(_logService);

    _candidateMigrationProcessor = new CandidateMigrationProcessor(new CandidateMappers(_logService), _syncRepository, genericSyncRespository, targetDatabase, configurationService, _logService);
    _traineeshipApplicationsMigrationProcessor = new VacancyApplicationsMigrationProcessor(new TraineeshipApplicationsUpdater(_syncRepository), applicationMappers, genericSyncRespository, sourceDatabase, targetDatabase, configurationService, _logService);
    _apprenticeshipApplicationsMigrationProcessor = new VacancyApplicationsMigrationProcessor(new ApprenticeshipApplicationsUpdater(_syncRepository), applicationMappers, genericSyncRespository, sourceDatabase, targetDatabase, configurationService, _logService);
    _auditMigrationProcessor = new AuditMigrationProcessor(_syncRepository, targetDatabase, configurationService, _logService);

    _logService.Info("Initialisation");
}
/// <summary>
/// Gets the parameters for the service from the database.
/// </summary>
/// <returns></returns>
private RequestBodyContent GetBodyContent()
{
    RequestBodyContent _GetBodyContent = null;
    try
    {
        SyncRepository sync = new SyncRepository();
        using (var entity = new db_SeguimientoProtocolo_r2Entities())
        {
            var res = entity.spGetMaxTableCiRegistroRecurrent().FirstOrDefault();
            if (res != null)
            {
                _GetBodyContent = new RequestBodyContent()
                {
                    fechaActual = (long)res.FechaInicio, //sync.GetCurrentDate(),
                    fechaFin = (long)res.FechaFin,
                    LastModifiedDate = (long)res.LastModifiedDate,
                    ServerLastModifiedDate = (long)res.ServerLastModifiedDate
                };
            }
        }
    }
    catch (Exception)
    {
        throw; // rethrow without resetting the stack trace
    }
    return _GetBodyContent;
}
public async Task UpstreamWithCheckout()
{
    using (var dir = new SelfDeletingDirectory(Path.GetFullPath(Path.Combine(basePath, nameof(this.UpstreamWithCheckout)))))
    {
        var upstreamPath = Path.Combine(dir.Path, "Upstream");
        var authorPath = Path.Combine(dir.Path, "Author");
        var downstreamPath = Path.Combine(dir.Path, "Downstream");
        var contents = "Main Branch";
        var sideContents = "Side branch";
        var sideContentsRemoteChanges = "Side branch remote changes";
        var identity = new Identity("Test Bot", "*****@*****.**");

        Repository.Init(upstreamPath, true);
        Repository.Clone(upstreamPath, authorPath);
        using (var repo = new Repository(authorPath))
        {
            var testFilePath = Path.Combine(dir.Path, "Author/test.txt");

            //Create some test data on master
            File.WriteAllText(testFilePath, contents);
            Commands.Stage(repo, testFilePath);
            var sig = new Signature(identity, DateTime.Now);
            repo.Commit("Added test data", sig, sig);

            //Switch to side branch, and make update
            var authorBranchRepo = new BranchRepository(repo, mockup.Get<ICommitRepository>());
            authorBranchRepo.Add("sidebranch");
            authorBranchRepo.Checkout("sidebranch", new Signature(identity, DateTime.Now));
            File.WriteAllText(testFilePath, sideContents);
            Commands.Stage(repo, testFilePath);
            sig = new Signature(identity, DateTime.Now);
            repo.Commit("Updated branch data", sig, sig);
            var syncRepo = new SyncRepository(repo, mockup.Get<ICommitRepository>(), mockup.Get<IGitCredentialsProvider>(), mockup.Get<IProcessRunner>());

            //Push side branch
            await syncRepo.Push();

            //Back to master
            authorBranchRepo.Checkout("master", new Signature(identity, DateTime.Now));
            String masterText = File.ReadAllText(testFilePath);
            Assert.Equal(contents, masterText);
            await syncRepo.Push();
        }

        Repository.Clone(upstreamPath, downstreamPath);
        using (var repo = new Repository(downstreamPath))
        {
            var testFilePath = Path.Combine(dir.Path, "Downstream/test.txt");
            var branchRepo = new BranchRepository(repo, mockup.Get<ICommitRepository>());

            //First check master
            String masterText = File.ReadAllText(testFilePath);
            Assert.Equal(contents, masterText);

            //Switch to side branch and check
            branchRepo.Checkout("sidebranch", new Signature(identity, DateTime.Now));
            String sideText = File.ReadAllText(testFilePath);
            Assert.Equal(sideContents, sideText);

            //Now make some changes and send them back
            File.WriteAllText(testFilePath, sideContentsRemoteChanges);
            Commands.Stage(repo, testFilePath);
            var sig = new Signature(identity, DateTime.Now);
            repo.Commit("Updated branch remotely", sig, sig);
            var syncRepo = new SyncRepository(repo, mockup.Get<ICommitRepository>(), mockup.Get<IGitCredentialsProvider>(), mockup.Get<IProcessRunner>());
            await syncRepo.Push();
        }

        using (var repo = new Repository(authorPath))
        {
            var syncRepo = new SyncRepository(repo, mockup.Get<ICommitRepository>(), mockup.Get<IGitCredentialsProvider>(), mockup.Get<IProcessRunner>());
            await syncRepo.Pull(new Signature(identity, DateTime.Now));
            var testFilePath = Path.Combine(dir.Path, "Author/test.txt");
            var branchRepo = new BranchRepository(repo, mockup.Get<ICommitRepository>());
            branchRepo.Checkout("sidebranch", new Signature(identity, DateTime.Now));
            String text = File.ReadAllText(testFilePath);
            Assert.Equal(sideContentsRemoteChanges, text);
        }
    }
}
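UpstreamWithCheckout ends by calling SyncRepository.Pull to merge the downstream edits back into the author clone. The implementation is not shown in these snippets; with LibGit2Sharp a pull is essentially fetch-plus-merge, roughly as sketched below (all names are assumptions).

using LibGit2Sharp;

// Minimal sketch of a pull, not the project's SyncRepository.Pull().
public static class PullSketch
{
    public static MergeResult PullCurrentBranch(Repository repo, Signature merger)
    {
        var options = new PullOptions
        {
            FetchOptions = new FetchOptions(),
            MergeOptions = new MergeOptions { FastForwardStrategy = FastForwardStrategy.Default }
        };
        // Fetches from the tracked remote, then merges into the checked-out branch.
        return Commands.Pull(repo, merger, options);
    }
}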
public async Task ResetScheduledJobs()
{
    try
    {
        //cancel token
        var cancellationToken = (new CancellationTokenSource()).Token;
        await Task.Run(async () =>
        {
            Connectors[] connectors = null;
            Console.WriteLine("RSJ:Get connectors starts");
            //get sync connectors
            using (ConnectionFactory connectionFactory = new ConnectionFactory(ConfigVars.Instance.connectionString))
            {
                StringBuilder sb = new StringBuilder();
                sb.Append(string.Format("SELECT c.* FROM \"{0}\".\"Connectors\" c", Constants.ADDON_DB_DEFAULT_SCHEMA));
                sb.Append(string.Format(" LEFT JOIN \"{0}\".\"Resources\" r ON c.ccid=r.uuid", Constants.ADDON_DB_DEFAULT_SCHEMA));
                //sb.Append(" WHERE r.plan NOT IN (" + $"{string.Join(",", ConfigVars.Instance.addonPrivatePlanLevels.Select(p => $"'{p}'").ToArray())}" + ");");
                connectors = connectionFactory.DbConnection.Query<Connectors>(sb.ToString()).ToArray();
            }
            Console.WriteLine("RSJ:Get connectors ended");
            if (connectors != null && connectors.Length > 0)
            {
                Console.WriteLine("RSJ:Connectors Count: {0}", connectors.Length);
                for (int i = 0; i < connectors.Length; ++i)
                {
                    //Cancel current task if cancel requested (eg: when system getting shutdown)
                    if (cancellationToken != null && cancellationToken.IsCancellationRequested)
                    {
                        cancellationToken.ThrowIfCancellationRequested();
                        return;
                    }

                    var isRecurringJobNeedRestart = false;
                    var connector = connectors[i];
                    if (connector.sync_status == 1)
                    {
                        //one time schedule job delete
                        if (!string.IsNullOrEmpty(connector.job_id) && (connector.schedule_type == ScheduleType.MANUAL_SYNC))
                        {
                            //delete old jobs
                            DeleteJob(connector.job_id);
                        }

                        //set sync status to failed{3}
                        var connectorLogs = new ConnectorLogs()
                        {
                            sync_ended_at = DateTime.UtcNow,
                            sync_logs = new List<string>()
                            {
                                RestSharp.Extensions.MonoHttp.HttpUtility.UrlEncode($"Records synced: {connector.sync_count} {Environment.NewLine}"),
                                RestSharp.Extensions.MonoHttp.HttpUtility.UrlEncode($"{DateTime.UtcNow:yyyy-MM-dd HH:mm:ss.fff zzz} [{Microsoft.Extensions.Logging.LogLevel.Information}]: {"Restarted sync after Heroku Dyno Restart, no action needed on your part"} {Environment.NewLine}")
                            }
                        };

                        //Update job status
                        Console.WriteLine("RSJ:Update connector sync status starts");
                        if (connector.schedule_type == ScheduleType.MANUAL_SYNC)
                        {
                            SyncRepository.UpdateSyncInfo(id: connector.connector_id, ccid: connector.ccid, status: 9, connectorLogs: connectorLogs);
                            Console.WriteLine("RSJ:Newly schedule job manual sync:{0}-{1}", connector.ccid, connector.connector_id);
                            await ScheduleJob(connector.ccid, connector.connector_id, connector.connector_type, connector.schedule_type, connector.custom_schedule_in_minutes).ConfigureAwait(false);
                        }
                        else
                        {
                            isRecurringJobNeedRestart = true;
                            SyncRepository.UpdateSyncInfo(id: connector.connector_id, ccid: connector.ccid, status: 9, connectorLogs: connectorLogs);
                        }
                        Console.WriteLine("RSJ:Update connector sync status ended");
                    }

                    if (connector.schedule_type != ScheduleType.MANUAL_SYNC)
                    {
                        string recurringJobId = await IsScheduledJob(connector.ccid, connector.connector_id, connector.schedule_type, connector.custom_schedule_in_minutes).ConfigureAwait(false);
                        Console.WriteLine("RSJ:Recurring Job ID:{0}", recurringJobId);
                        Console.WriteLine("RSJ:isRecurringJobNeedRestart:{0}", isRecurringJobNeedRestart);
                        if (string.IsNullOrEmpty(recurringJobId))
                        {
                            Console.WriteLine("RSJ:Newly schedule job:{0}-{1}", connector.ccid, connector.connector_id);
                            await ScheduleJob(connector.ccid, connector.connector_id, connector.connector_type, connector.schedule_type, connector.custom_schedule_in_minutes).ConfigureAwait(false);
                        }
                        else if (isRecurringJobNeedRestart)
                        {
                            Console.WriteLine("RSJ:scheduled job triggered immediately:{0}", recurringJobId);
                            RecurringJob.Trigger(recurringJobId.Replace("recurring-job:", ""));
                        }
                    }
                }
            }
        }, cancellationToken);
    }
    catch (Exception ex)
    {
        Console.WriteLine("Error {0}", ex.Message);
    }
}
public TraineeshipApplicationsUpdater(SyncRepository syncRepository) { _syncRepository = syncRepository; }
public static T ToModel<T>(this List<Connectors> connectors, bool isSetConfig = true) where T : class
{
    if (typeof(T) == typeof(List<ConnectorConfig>))
    {
        List<ConnectorConfig> connectorConfigs = null;
        if (connectors != null)
        {
            DeDupSettings dedupSettings = isSetConfig ? connectors.FirstOrDefault().DeDupSetting : null;
            connectorConfigs = connectors.Select(c =>
            {
                ConnectorConfig conConfig;
                conConfig = new ConnectorConfig()
                {
                    ccid = c.ccid,
                    connectorId = c.connector_id,
                    connectorName = c.connector_name,
                    sourceObjectName = c.src_object_name,
                    destObjectName = c.dest_object_name,
                    scheduleType = c.schedule_type,
                    srcNewRecordFilter = c.src_new_record_filter,
                    srcUpdateRecordFilter = c.src_update_record_filter,
                    twoWaySyncPriority = c.two_way_sync_priority,
                    syncDestination = c.connector_type,
                    syncCount = c.sync_count,
                    syncStatus = c.sync_status,
                    dedup_type = c.dedup_type,
                    jobId = c.job_id,
                    syncStartedAt = c.sync_started_at,
                    syncEndedAt = c.sync_ended_at,
                    lastSyncAt = c.last_sync_at,
                    lastSyncStatus = c.last_sync_status,
                    dbSchema = c.src_schema,
                    dataSource = c.sync_src,
                    destDBSchema = c.dest_schema,
                    customScheduleInMinutes = c.custom_schedule_in_minutes,
                    dedupSourceType = c.dedup_source_type,
                    dedup_method = c.dedup_method,
                    review_before_delete = c.review_before_delete,
                    backup_before_delete = c.backup_before_delete,
                    simulation_count = c.simulation_count,
                    total_records_count = c.total_records_count,
                    deduped_count = c.deduped_count,
                    fuzzy_ratio = c.fuzzy_ratio * 100,
                    unique_records_count = c.unique_records_count
                };

                //if (!string.IsNullOrEmpty(c.src_object_fields_json))
                //{
                //    conConfig.sourceObjectFields = JsonConvert.DeserializeObject<List<string>>(c.compare_object_fields);
                //}

                if (!string.IsNullOrEmpty(c.compare_config_json) && (c.dedup_source_type == SourceType.Copy_Source_data_to_Destination_and_Remove_Duplicates_from_Destination || c.dedup_source_type == SourceType.Merge_Table_A_Data_to_Table_B_and_Remove_Duplicates_from_Table_B))
                {
                    conConfig.dbConfig_compare = JsonConvert.DeserializeObject<DatabaseConfig>(c.compare_config_json);
                }
                if (!string.IsNullOrEmpty(c.compare_object_fields))
                {
                    conConfig.compareObjectFieldsMapping = JsonConvert.DeserializeObject<List<string>>(c.compare_object_fields);
                    if (c.dedup_source_type == SourceType.Remove_Duplicates_from_a_Single_Table || conConfig.compareObjectFieldsMapping != null)
                    {
                        conConfig.sourceObjectFields = conConfig.compareObjectFieldsMapping;
                        conConfig.dbConfig_compare.compareObjectFields = conConfig.compareObjectFieldsMapping;
                    }
                }

                //if (!string.IsNullOrEmpty(c.sync_log_json))
                //{
                //    conConfig.connectorLogs = new ConnectorLogs()
                //    {
                //        sync_started_at = conConfig.syncStartedAt,
                //        sync_ended_at = conConfig.syncEndedAt,
                //        sync_count = conConfig.syncCount,
                //        sync_logs = JsonConvert.DeserializeObject<List<string>>(c.sync_log_json)
                //    };
                //}

                if (conConfig != null)
                {
                    if (conConfig.syncDestination == ConnectorType.Heroku_Postgres || conConfig.syncDestination == ConnectorType.Azure_Postgres || conConfig.syncDestination == ConnectorType.AWS_Postgres || conConfig.syncDestination == ConnectorType.Azure_SQL)
                    {
                        DatabaseType databaseType;
                        if (conConfig.syncDestination == ConnectorType.Azure_SQL)
                        {
                            databaseType = DatabaseType.Azure_SQL;
                        }
                        else if (conConfig.syncDestination == ConnectorType.Azure_Postgres)
                        {
                            databaseType = DatabaseType.Azure_Postgres;
                        }
                        else if (conConfig.syncDestination == ConnectorType.AWS_Postgres)
                        {
                            databaseType = DatabaseType.AWS_Postgres;
                        }
                        else
                        {
                            databaseType = DatabaseType.Heroku_Postgres;
                        }
                        if (!string.IsNullOrEmpty(c.dest_config_json))
                        {
                            conConfig.destDBConfig = JsonConvert.DeserializeObject<DatabaseConfig>(c.dest_config_json);
                            if (conConfig.destDBConfig.databaseType == DatabaseType.None)
                            {
                                conConfig.destDBConfig.databaseType = databaseType;
                            }
                        }
                        else if (c.DeDupSetting != null && !string.IsNullOrEmpty(c.DeDupSetting.database_config_json))
                        {
                            conConfig.destDBConfig = c.DeDupSetting.ToModel<DatabaseConfig>(databaseType: databaseType);
                        }
                    }

                    if (string.IsNullOrEmpty(conConfig.dbSchema))
                    {
                        if ((conConfig.dataSource == DataSource.Heroku_Postgres || conConfig.dataSource == DataSource.Azure_Postgres || conConfig.dataSource == DataSource.AWS_Postgres))
                        {
                            conConfig.dbSchema = Constants.POSTGRES_DEFAULT_SCHEMA;
                        }
                        else if (conConfig.dataSource == DataSource.Azure_SQL)
                        {
                            conConfig.dbSchema = Constants.MSSQL_DEFAULT_SCHEMA;
                        }
                    }
                    if (string.IsNullOrEmpty(conConfig.destDBSchema))
                    {
                        if ((conConfig.syncDestination == ConnectorType.Heroku_Postgres || conConfig.syncDestination == ConnectorType.Azure_Postgres || conConfig.syncDestination == ConnectorType.AWS_Postgres))
                        {
                            conConfig.destDBSchema = Constants.POSTGRES_DEFAULT_SCHEMA;
                        }
                        else if (conConfig.syncDestination == ConnectorType.Azure_SQL)
                        {
                            conConfig.destDBSchema = Constants.MSSQL_DEFAULT_SCHEMA;
                        }
                    }

                    if (isSetConfig)
                    {
                        if (conConfig.dataSource == DataSource.Heroku_Postgres || conConfig.dataSource == DataSource.Azure_Postgres || conConfig.dataSource == DataSource.AWS_Postgres || conConfig.dataSource == DataSource.Azure_SQL)
                        {
                            DatabaseType databaseType;
                            if (conConfig.syncDestination == ConnectorType.Azure_SQL)
                            {
                                databaseType = DatabaseType.Azure_SQL;
                            }
                            else if (conConfig.syncDestination == ConnectorType.Azure_Postgres)
                            {
                                databaseType = DatabaseType.Azure_Postgres;
                            }
                            else if (conConfig.syncDestination == ConnectorType.AWS_Postgres)
                            {
                                databaseType = DatabaseType.AWS_Postgres;
                            }
                            else
                            {
                                databaseType = DatabaseType.Heroku_Postgres;
                            }
                            if (!string.IsNullOrEmpty(c.compare_config_json) && (c.dedup_source_type == SourceType.Copy_Source_data_to_Destination_and_Remove_Duplicates_from_Destination || c.dedup_source_type == SourceType.Merge_Table_A_Data_to_Table_B_and_Remove_Duplicates_from_Table_B))
                            {
                                if (c.compare_config_json.StartsWith("["))
                                {
                                    conConfig.multipleDBConfigs.AddRange(JsonConvert.DeserializeObject<List<DatabaseConfig>>(c.compare_config_json));
                                    //when multiple sources are assigned then we need to use the below line
                                    //conConfig.dbConfig_compare = JsonConvert.DeserializeObject<DatabaseConfig>(connector.compare_config_json);
                                }
                                else
                                {
                                    conConfig.dbConfig_compare = JsonConvert.DeserializeObject<DatabaseConfig>(c.compare_config_json);
                                    conConfig.multipleDBConfigs.Add(conConfig.dbConfig_compare);
                                }
                            }
                            if (!string.IsNullOrEmpty(c.src_config_json))
                            {
                                conConfig.dbConfig = JsonConvert.DeserializeObject<DatabaseConfig>(c.src_config_json);
                                if (conConfig.dbConfig.databaseType == DatabaseType.None)
                                {
                                    conConfig.dbConfig.databaseType = databaseType;
                                }
                                conConfig.dbConfig.db_schema = conConfig.dbSchema;
                                conConfig.dbConfig.object_name = conConfig.sourceObjectName;
                                conConfig.multipleDBConfigs.Add(conConfig.dbConfig);
                            }
                            else if (dedupSettings != null && !string.IsNullOrEmpty(dedupSettings.database_config_json))
                            {
                                conConfig.dbConfig = dedupSettings.ToModel<DatabaseConfig>(databaseType: databaseType);
                                conConfig.dbConfig.db_schema = conConfig.dbSchema;
                                conConfig.dbConfig.object_name = conConfig.sourceObjectName;
                                conConfig.multipleDBConfigs.Add(conConfig.dbConfig);
                            }
                        }
                        if (conConfig.syncDestination == ConnectorType.Heroku_Postgres || conConfig.syncDestination == ConnectorType.Azure_Postgres || conConfig.syncDestination == ConnectorType.AWS_Postgres || conConfig.syncDestination == ConnectorType.Azure_SQL)
                        {
                            DatabaseType databaseType;
                            if (conConfig.syncDestination == ConnectorType.Azure_SQL)
                            {
                                databaseType = DatabaseType.Azure_SQL;
                            }
                            else if (conConfig.syncDestination == ConnectorType.Azure_Postgres)
                            {
                                databaseType = DatabaseType.Azure_Postgres;
                            }
                            else if (conConfig.syncDestination == ConnectorType.AWS_Postgres)
                            {
                                databaseType = DatabaseType.AWS_Postgres;
                            }
                            else
                            {
                                databaseType = DatabaseType.Heroku_Postgres;
                            }
                            if (!string.IsNullOrEmpty(c.dest_config_json))
                            {
                                conConfig.destDBConfig = JsonConvert.DeserializeObject<DatabaseConfig>(c.dest_config_json);
                                if (conConfig.destDBConfig.databaseType == DatabaseType.None)
                                {
                                    conConfig.destDBConfig.databaseType = databaseType;
                                }
                            }
                            else if (dedupSettings != null && !string.IsNullOrEmpty(dedupSettings.database_config_json))
                            {
                                conConfig.destDBConfig = dedupSettings.ToModel<DatabaseConfig>(databaseType: databaseType);
                            }
                        }
                    }
                    conConfig.child_record_count = SyncRepository.GetChildRecordsCount(conConfig);
                }
                return conConfig;
            }).ToList();
        }
        return connectorConfigs as T;
    }
    else if (typeof(T) == typeof(List<ConnectorLogs>))
    {
        List<ConnectorLogs> connectorLogs = null;
        if (connectors != null)
        {
            connectorLogs = connectors.Select(c =>
            {
                return new ConnectorLogs()
                {
                    sync_connector_name = c.connector_name,
                    sync_started_at = c.sync_started_at,
                    sync_ended_at = c.sync_ended_at,
                    sync_count = c.sync_count,
                    last_sync_at = c.last_sync_at,
                    last_sync_status = c.last_sync_status,
                    sync_status = c.sync_status,
                    sync_logs = Utilities.GetJsonPropertyValueByKeyPath<List<string>>(c.sync_log_json, "")
                };
            }).ToList();
        }
        return connectorLogs as T;
    }
    return null;
}
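Call sites select the projection through the type parameter. For example (variable names assumed):

// Full configs, with source/destination DB configs resolved:
List<ConnectorConfig> configs = connectors.ToModel<List<ConnectorConfig>>();
// Lightweight log view, skipping config resolution:
List<ConnectorLogs> logs = connectors.ToModel<List<ConnectorLogs>>(isSetConfig: false);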
public ApprenticeshipApplicationsUpdater(SyncRepository syncRepository) { _syncRepository = syncRepository; }
/// <summary>
/// Method: ScheduleJob
/// Description: Schedules the background sync process for a connector.
/// </summary>
/// <param name="ccid"></param>
/// <param name="id"></param>
/// <param name="connectorType"></param>
/// <param name="scheduleType"></param>
/// <param name="customScheduleInMinutes"></param>
/// <returns>status as int</returns>
public async Task<int> ScheduleJob(string ccid, int id, ConnectorType connectorType, ScheduleType scheduleType, int? customScheduleInMinutes = 1200)
{
    var syncStatus = 1;
    try
    {
        if (string.IsNullOrEmpty(ccid) || id == 0)
        {
            return 0;
        }
        IJobCancellationToken token = JobCancellationToken.Null;
        //token = new JobCancellationToken(true);
        var jobKey = string.Empty;
        if (scheduleType != ScheduleType.MANUAL_SYNC)// && scheduleType != ScheduleType.STREAMING_SYNC)
        {
            jobKey = Math.Abs(Guid.NewGuid().ToInt()).ToString();
        }
        if (ConnectorType.Azure_SQL == connectorType)
        {
            //switch (scheduleType)
            //{
            //    case ScheduleType.EVERY_15_MINS:
            //        RecurringJob.AddOrUpdate<DERepository>(jobKey, service => service.AddDataRowsToSqlConnector(token, id, ccid), Cron.MinuteInterval(15), null, "critical");
            //        break;
            //    case ScheduleType.CUSTOM:
            //        RecurringJob.AddOrUpdate<DERepository>(jobKey, service => service.AddDataRowsToSqlConnector(token, id, ccid), Cron.MinuteInterval(customScheduleInMinutes), null, "critical");
            //        break;
            //    case ScheduleType.EVERY_60_MINS:
            //        RecurringJob.AddOrUpdate<DERepository>(jobKey, service => service.AddDataRowsToSqlConnector(token, id, ccid), Cron.Hourly, null, "critical");
            //        break;
            //    case ScheduleType.ONCE_DAILY:
            //        RecurringJob.AddOrUpdate<DERepository>(jobKey, service => service.AddDataRowsToSqlConnector(token, id, ccid), Cron.Daily, null, "critical");
            //        break;
            //    case ScheduleType.TWICE_DAILY:
            //        RecurringJob.AddOrUpdate<DERepository>(jobKey, service => service.AddDataRowsToSqlConnector(token, id, ccid), Cron.HourInterval(11), null, "critical");
            //        break;
            //    case ScheduleType.TWICE_WEEKLY:
            //        RecurringJob.AddOrUpdate<DERepository>(jobKey, service => service.AddDataRowsToSqlConnector(token, id, ccid), Cron.DayInterval(2), null, "critical");
            //        break;
            //    case ScheduleType.ONCE_WEEKLY:
            //        RecurringJob.AddOrUpdate<DERepository>(jobKey, (service) => service.AddDataRowsToSqlConnector(token, id, ccid), Cron.Weekly, null, "critical");
            //        break;
            //    case ScheduleType.MANUAL_SYNC:
            //        jobKey = BackgroundJob.Enqueue<DERepository>(service => service.AddDataRowsToSqlConnector(token, id, ccid));
            //        break;
            //    case ScheduleType.STREAMING_SYNC:
            //        jobKey = string.Empty;
            //        break;
            //}
        }
        else
        {
            switch (scheduleType)
            {
                case ScheduleType.EVERY_15_MINS:
                    RecurringJob.AddOrUpdate<ISyncRepository>(jobKey, service => service.DeDupRowsFromDatabaseTable(token, id, ccid), DedupCron.MinuteInterval(15), TimeZoneInfo.Local, Constants.JOB_QUEUE_NAME);
                    break;
                case ScheduleType.CUSTOM:
                    RecurringJob.AddOrUpdate<ISyncRepository>(jobKey, service => service.DeDupRowsFromDatabaseTable(token, id, ccid), DedupCron.MinuteInterval((int)customScheduleInMinutes), TimeZoneInfo.Local, Constants.JOB_QUEUE_NAME);
                    break;
                case ScheduleType.EVERY_60_MINS:
                    RecurringJob.AddOrUpdate<ISyncRepository>(jobKey, service => service.DeDupRowsFromDatabaseTable(token, id, ccid), DedupCron.Hourly, TimeZoneInfo.Local, Constants.JOB_QUEUE_NAME);
                    break;
                case ScheduleType.ONCE_DAILY:
                    RecurringJob.AddOrUpdate<ISyncRepository>(jobKey, service => service.DeDupRowsFromDatabaseTable(token, id, ccid), DedupCron.Daily, TimeZoneInfo.Local, Constants.JOB_QUEUE_NAME);
                    break;
                case ScheduleType.TWICE_DAILY:
                    RecurringJob.AddOrUpdate<ISyncRepository>(jobKey, service => service.DeDupRowsFromDatabaseTable(token, id, ccid), DedupCron.HourInterval(11), TimeZoneInfo.Local, Constants.JOB_QUEUE_NAME);
                    break;
                case ScheduleType.TWICE_WEEKLY:
                    RecurringJob.AddOrUpdate<ISyncRepository>(jobKey, service => service.DeDupRowsFromDatabaseTable(token, id, ccid), DedupCron.DayInterval(2), TimeZoneInfo.Local, Constants.JOB_QUEUE_NAME);
                    break;
                case ScheduleType.ONCE_WEEKLY:
                    RecurringJob.AddOrUpdate<ISyncRepository>(jobKey, (service) => service.DeDupRowsFromDatabaseTable(token, id, ccid), DedupCron.Weekly, TimeZoneInfo.Local, Constants.JOB_QUEUE_NAME);
                    break;
                case ScheduleType.MANUAL_SYNC:
                    jobKey = BackgroundJob.Enqueue<ISyncRepository>(service => service.DeDupRowsFromDatabaseTable(token, id, ccid));
                    break;
                //case ScheduleType.STREAMING_SYNC:
                //    jobKey = string.Empty;
                //    break;
            }
        }
        if (!string.IsNullOrEmpty(jobKey))
        {
            SyncRepository.UpdateSyncInfo(id: id, ccid: ccid, jobid: jobKey);
        }
    }
    catch (Exception ex)
    {
        syncStatus = 0;
        Console.WriteLine("Error: {0}", ex.Message);
    }
    return await Task.FromResult(syncStatus);
}
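A typical call, with all argument values illustrative; later snippets reach this method via JobScheduler.Instance, and it returns 1 when scheduling succeeded and 0 on failure:

// Illustrative call; enum members are taken from the snippets above.
int status = await JobScheduler.Instance.ScheduleJob(
    ccid: "a1b2c3", id: 7,
    connectorType: ConnectorType.Heroku_Postgres,
    scheduleType: ScheduleType.ONCE_DAILY);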
/// <summary>
/// Method: AddEditExtensionsConfig
/// Description: Adds or updates a connector in the Connectors table.
/// </summary>
/// <param name="connectorConfig"></param>
public void AddEditExtensionsConfig(ConnectorConfig connectorConfig)
{
    using (var transaction = _context.Database.BeginTransaction())
    {
        Connectors entity;
        DeDupSettings dedupSettings = null;
        bool isNew = false;
        int isSyncTableExist = 0;
        int isSyncGoldenTableExist = 0;
        if (connectorConfig.connectorId.HasValue)
        {
            entity = Find(connectorConfig.ccid, connectorConfig.connectorId);
            if (entity != null)
            {
                //delete sync process if it is in progress while updating connector
                if (!string.IsNullOrEmpty(entity.job_id) || entity.schedule_type != ScheduleType.MANUAL_SYNC)
                {
                    //delete old jobs
                    JobScheduler.Instance.DeleteJob(ccid: entity.ccid, connectorId: entity.connector_id, jobId: entity.job_id, scheduleType: entity.schedule_type);
                    //reset job id once deleted
                    entity.job_id = string.Empty;
                    if (entity.sync_status == 1)
                    {
                        //set status interrupted
                        entity.sync_status = 10;
                    }
                    else if (entity.sync_status.HasValue)
                    {
                        //reset sync status
                        entity.sync_status = 0;
                    }
                }
                else if (entity.sync_status.HasValue)
                {
                    //reset sync status
                    entity.sync_status = 0;
                }
                if (entity.sync_status != 2 && entity.sync_status != 10)
                {
                    entity.sync_count = null;
                    entity.unique_records_count = null;
                    entity.sync_updated_count = null;
                    entity.total_records_count = null;
                    entity.sync_log_json = null;
                    entity.sync_started_at = null;
                    entity.sync_ended_at = null;
                    entity.last_sync_at = null;
                    entity.last_sync_status = null;
                }
                dedupSettings = entity.DeDupSetting;
                //check table exists or not; if not then create table and sync
                isSyncTableExist = SyncRepository.SyncTableIsExist(connectorConfig);
                isSyncGoldenTableExist = SyncRepository.SyncGoldenTableIsExist(connectorConfig);
            }
        }
        else
        {
            isNew = true;
            entity = new Connectors() { ccid = connectorConfig.ccid };
            //Set next connector id
            entity.connector_id = GetMaxID(entity.ccid, ref dedupSettings) + 1;
        }
        entity.connector_name = connectorConfig.connectorName;
        if (isNew)
        {
            entity.sync_src = connectorConfig.dataSource;
            entity.src_object_name = connectorConfig.sourceObjectName;
            //commented by Kathir on 12-Aug-2020
            //entity.src_object_fields_json = JsonConvert.SerializeObject(connectorConfig.sourceObjectFields);
            if (entity.sync_src == DataSource.Heroku_Postgres || entity.sync_src == DataSource.Azure_Postgres || entity.sync_src == DataSource.AWS_Postgres || entity.sync_src == DataSource.Azure_SQL)
            {
                entity.src_config_json = JsonConvert.SerializeObject(connectorConfig.dbConfig);
            }
            if (connectorConfig.dedup_type != DedupType.Full_Dedup)
            {
                entity.connector_type = (ConnectorType)connectorConfig.syncDestination;
                entity.dest_object_name = connectorConfig.destObjectName;
            }
            else
            {
                if (connectorConfig.dataSource == DataSource.Heroku_Postgres)
                {
                    entity.connector_type = ConnectorType.Heroku_Postgres;
                }
                if (connectorConfig.dataSource == DataSource.AWS_Postgres)
                {
                    entity.connector_type = ConnectorType.AWS_Postgres;
                }
                if (connectorConfig.dataSource == DataSource.Azure_Postgres)
                {
                    entity.connector_type = ConnectorType.Azure_Postgres;
                }
                entity.dest_object_name = string.Empty;
            }
            entity.src_schema = connectorConfig.dbSchema;
            entity.dedup_type = connectorConfig.dedup_type;
            entity.dedup_source_type = connectorConfig.dedupSourceType;
            if ((connectorConfig.dedupSourceType == SourceType.Copy_Source_data_to_Destination_and_Remove_Duplicates_from_Destination || connectorConfig.dedupSourceType == SourceType.Merge_Table_A_Data_to_Table_B_and_Remove_Duplicates_from_Table_B))
            {
                entity.compare_object_fields = JsonConvert.SerializeObject(connectorConfig.compareObjectFieldsMapping);
                entity.compare_config_json = JsonConvert.SerializeObject(connectorConfig.dbConfig_compare);
            }
            //if (IsMultipleConfigSupported)
            {
                if (entity.connector_type == ConnectorType.Heroku_Postgres || entity.connector_type == ConnectorType.Azure_Postgres || entity.connector_type == ConnectorType.AWS_Postgres || entity.connector_type == ConnectorType.Azure_SQL)
                {
                    entity.dest_config_json = JsonConvert.SerializeObject(connectorConfig.destDBConfig);
                }
            }
            //set global db setting
            if (dedupSettings != null)
            {
                //save sync database url
                if (connectorConfig.dbConfig != null && !string.IsNullOrEmpty(connectorConfig.dbConfig.syncDefaultDatabaseUrl))
                {
                    if (string.IsNullOrEmpty(dedupSettings.database_config_json))
                    {
                        dedupSettings.database_config_json = JsonConvert.SerializeObject((new List<DatabaseConfig>() { connectorConfig.dbConfig }));
                        _context.Entry(dedupSettings).State = EntityState.Modified;
                    }
                    else
                    {
                        var dbConfigs = dedupSettings.ToModel<List<DatabaseConfig>>();
                        if (dbConfigs != null && dbConfigs.FirstOrDefault(p => p.databaseType == connectorConfig.dbConfig.databaseType) == null)
                        {
                            dbConfigs.Add(connectorConfig.dbConfig);
                            dedupSettings.database_config_json = JsonConvert.SerializeObject(dbConfigs);
                            _context.Entry(dedupSettings).State = EntityState.Modified;
                        }
                    }
                }
            }
            else
            {
                dedupSettings = new DeDupSettings() { ccid = connectorConfig.ccid };
                //save sync database url
                if (connectorConfig.dbConfig != null && !string.IsNullOrEmpty(connectorConfig.dbConfig.syncDefaultDatabaseUrl))
                {
                    dedupSettings.database_config_json = JsonConvert.SerializeObject((new List<DatabaseConfig>() { connectorConfig.dbConfig }));
                }
                //add dedupsetting
                _context.Entry(dedupSettings).State = EntityState.Added;
            }
            entity.unique_records_count = null;
            entity.sync_updated_count = null;
            entity.sync_count = null;
            entity.total_records_count = null;
        }
        entity.schedule_type = connectorConfig.scheduleType;
        entity.src_new_record_filter = connectorConfig.srcNewRecordFilter;
        entity.src_update_record_filter = connectorConfig.srcUpdateRecordFilter;
        entity.two_way_sync_priority = connectorConfig.twoWaySyncPriority;
        entity.dest_schema = connectorConfig.destDBSchema;
        entity.custom_schedule_in_minutes = connectorConfig.customScheduleInMinutes;
        //Added by Kathir on 20-8-2020
        entity.dedup_method = connectorConfig.dedup_method;
        //if (connectorConfig.dedup_method == SimilarityType.Fuzzy_Compare)
        //{
        //    entity.fuzzy_ratio = connectorConfig.fuzzy_ratio;
        //}
        entity.fuzzy_ratio = connectorConfig.fuzzy_ratio;
        entity.review_before_delete = connectorConfig.review_before_delete;
        entity.backup_before_delete = connectorConfig.backup_before_delete;
        if (connectorConfig.dedup_type == DedupType.Full_Dedup)
        {
            entity.simulation_count = -1;
        }
        else
        {
            entity.simulation_count = connectorConfig.simulation_count;
        }
        if (string.IsNullOrEmpty(entity.src_object_name))
        {
            entity.src_object_name = connectorConfig.sourceObjectName;
        }
        if ((connectorConfig.dedupSourceType == SourceType.Copy_Source_data_to_Destination_and_Remove_Duplicates_from_Destination || connectorConfig.dedupSourceType == SourceType.Merge_Table_A_Data_to_Table_B_and_Remove_Duplicates_from_Table_B))
        {
            //entity.compare_object_fields = JsonConvert.SerializeObject(connectorConfig.compareObjectFieldsMapping);
            entity.compare_config_json = JsonConvert.SerializeObject(connectorConfig.dbConfig_compare);
        }
        //commented by Kathir on 12-Aug-2020
        //if (string.IsNullOrEmpty(entity.src_object_fields_json))
        //{
        //    entity.src_object_fields_json = JsonConvert.SerializeObject(connectorConfig.sourceObjectFields);
        //}
        if (string.IsNullOrEmpty(entity.dest_object_name))
        {
            if (connectorConfig.dedup_type == DedupType.Full_Dedup && connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table)
            {
                entity.dest_object_name = connectorConfig.sourceObjectName;
            }
            else if (connectorConfig.dedup_type == DedupType.Full_Dedup && (connectorConfig.dedupSourceType == SourceType.Copy_Source_data_to_Destination_and_Remove_Duplicates_from_Destination || connectorConfig.dedupSourceType == SourceType.Merge_Table_A_Data_to_Table_B_and_Remove_Duplicates_from_Table_B))
            {
                entity.dest_object_name = (connectorConfig.dbConfig_compare.table_type == SelectedTableType.Create_New_Table ? connectorConfig.dbConfig_compare.new_table_name : connectorConfig.dbConfig_compare.object_name);
            }
            else
            {
                entity.dest_object_name = connectorConfig.destObjectName;
            }
        }
        //Assign source object fields to compare object field json
        entity.compare_object_fields = JsonConvert.SerializeObject(connectorConfig.sourceObjectFields);
        //if (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table)
        //{
        //    if (connectorConfig.sourceObjectFields != null && connectorConfig.sourceObjectFields.Count > 0)
        //    {
        //        foreach (string sel_fields in connectorConfig.sourceObjectFields)
        //        {
        //            connectorConfig.compareObjectFieldsMapping.Add(new KeyValuePair<string, string>(sel_fields, sel_fields));
        //        }
        //    }
        //    entity.compare_object_fields = JsonConvert.SerializeObject(connectorConfig.sourceObjectFields);
        //}
        if (connectorConfig.dedup_type == DedupType.Full_Dedup)
        {
            entity.dest_object_name = (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table ? connectorConfig.sourceObjectName : (connectorConfig.dbConfig_compare.table_type == SelectedTableType.Create_New_Table ? connectorConfig.dbConfig_compare.new_table_name : connectorConfig.dbConfig_compare.object_name));
            connectorConfig.destDBConfig.syncDefaultDatabaseUrl = (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table ? connectorConfig.dbConfig.syncDefaultDatabaseUrl : connectorConfig.dbConfig_compare.syncDefaultDatabaseUrl);
            connectorConfig.destDBConfig.databaseType = (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table ? connectorConfig.dbConfig.databaseType : connectorConfig.dbConfig_compare.databaseType);
            connectorConfig.destDBConfig.dataSource = (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table ? connectorConfig.dbConfig.dataSource : connectorConfig.dbConfig_compare.dataSource);
            entity.dest_config_json = JsonConvert.SerializeObject(connectorConfig.destDBConfig);
            entity.dest_schema = (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table ? connectorConfig.dbSchema : connectorConfig.dbConfig_compare.db_schema);
        }
        //save extension setting
        _context.Entry(entity).State = isNew ? EntityState.Added : EntityState.Modified;
        _context.SaveChanges();

        //if (isSyncTableExist == 0 && connectorConfig.dedup_type != DedupType.Full_Dedup)
        //{
        //    var errorMessage = string.Empty;
        //    if (connectorConfig.syncDestination == ConnectorType.Heroku_Postgres
        //        || connectorConfig.syncDestination == ConnectorType.Azure_Postgres
        //        || connectorConfig.syncDestination == ConnectorType.AWS_Postgres)
        //    {
        //        var dbStatus = SyncRepository.CreatePostgresTable(connectorConfig, out errorMessage);
        //        if (dbStatus == -1)
        //        {
        //            transaction.Rollback();
        //            throw new Exception(message: (string.IsNullOrEmpty(errorMessage) ? "Not able to create postgres table." : errorMessage)) { };
        //        }
        //        else if (dbStatus == 2)
        //        {
        //            transaction.Rollback();
        //            throw new Exception(message: "The " + connectorConfig.destObjectName + " postgres table already exists.") { };
        //        }
        //    }
        //}
        //if (isSyncGoldenTableExist == 0 && connectorConfig.dbConfig_compare.table_type == SelectedTableType.Create_New_Table)
        //{
        //    var errorMessage = string.Empty;
        //    if (connectorConfig.dbConfig_compare.dataSource == DataSource.Heroku_Postgres
        //        || connectorConfig.dbConfig_compare.dataSource == DataSource.Azure_Postgres
        //        || connectorConfig.dbConfig_compare.dataSource == DataSource.AWS_Postgres)
        //    {
        //        var dbStatus = SyncRepository.CreatePostgresGoldenTable(connectorConfig, out errorMessage);
        //        if (dbStatus == -1)
        //        {
        //            transaction.Rollback();
        //            throw new Exception(message: (string.IsNullOrEmpty(errorMessage) ? "Not able to create postgres table." : errorMessage)) { };
        //        }
        //        else if (dbStatus == 2)
        //        {
        //            transaction.Rollback();
        //            throw new Exception(message: "The " + connectorConfig.dbConfig_compare.new_table_name + " postgres table already exists.") { };
        //        }
        //    }
        //}
        transaction.Commit();

        //Schedule job if scheduletype is not manual
        if (entity.schedule_type != ScheduleType.MANUAL_SYNC)
        {
            if (entity.schedule_type == ScheduleType.CUSTOM)
            {
                JobScheduler.Instance.ScheduleJob(entity.ccid, entity.connector_id, entity.connector_type, entity.schedule_type, (entity.custom_schedule_in_minutes.HasValue ? entity.custom_schedule_in_minutes.Value : 1200));
            }
            else
            {
                JobScheduler.Instance.ScheduleJob(entity.ccid, entity.connector_id, entity.connector_type, entity.schedule_type);
            }
        }
        //});
    }
}
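A minimal insert path through AddEditExtensionsConfig might look like the following; every value is illustrative, only a subset of the fields a real config carries is shown, and leaving connectorId unset makes the call an insert rather than an update.

// Illustrative only; field names come from the snippets above.
var config = new ConnectorConfig
{
    ccid = "a1b2c3",
    connectorName = "Contacts de-dup",
    dataSource = DataSource.Heroku_Postgres,
    sourceObjectName = "contacts",
    dbSchema = Constants.POSTGRES_DEFAULT_SCHEMA,
    scheduleType = ScheduleType.MANUAL_SYNC,
    dedup_type = DedupType.Full_Dedup,
    dedupSourceType = SourceType.Remove_Duplicates_from_a_Single_Table,
    dbConfig = new DatabaseConfig { syncDefaultDatabaseUrl = "postgres://user:pass@host:5432/db" },
    destDBConfig = new DatabaseConfig()
};
repository.AddEditExtensionsConfig(config); // repository: the instance hosting AddEditExtensionsConfig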
/// <summary>
/// Description: Changes the dedup type from Simulate to Full Dedup.
/// Users can simulate any number of times to verify the data before deduping;
/// once satisfied, they switch the connector to Full Dedup.
/// After the change to Full Dedup there is no way back to Simulate mode.
/// </summary>
/// <param name="connectorConfig"></param>
public string FinalizedForDedup_Repository(ConnectorConfig connectorConfig)
{
    try
    {
        //validate input
        if (connectorConfig == null || string.IsNullOrEmpty(connectorConfig.ccid) || !connectorConfig.connectorId.HasValue)
        {
            return "";
        }
        if ((connectorConfig.syncDestination == ConnectorType.Heroku_Postgres || connectorConfig.syncDestination == ConnectorType.Azure_Postgres || connectorConfig.syncDestination == ConnectorType.AWS_Postgres || connectorConfig.syncDestination == ConnectorType.Azure_SQL) && (connectorConfig.dbConfig == null || string.IsNullOrEmpty(connectorConfig.dbConfig.syncDefaultDatabaseUrl)))
        {
            return "";
        }
        var entity = Find(connectorConfig.ccid, connectorConfig.connectorId);

        //Drop the ctindex table & backup table before changing to Real Dedup mode
        StringBuilder sb = new StringBuilder();
        sb.Append($"DROP TABLE IF EXISTS \"{entity.dest_schema}\".\"{entity.dest_object_name}_ctindex\";");
        sb.Append($"DROP TABLE IF EXISTS \"{entity.dest_schema}\".\"{entity.dest_object_name}_deleted\";");
        using (ConnectionFactory connectionFactory = new ConnectionFactory(connectorConfig.destDBConfig.syncDefaultDatabaseUrl))
        {
            try
            {
                //execute synchronously so failures are observed by this catch block
                connectionFactory.DbConnection.ExecuteScalar<int>(sb.ToString());
            }
            catch (Exception ex)
            {
                Console.WriteLine("Error:{0}", ex.Message);
                return ex.Message;
            }
        }

        //Assign values to the destination object
        entity.dest_object_name = (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table ? connectorConfig.sourceObjectName : (connectorConfig.dbConfig_compare.table_type == SelectedTableType.Create_New_Table ? connectorConfig.dbConfig_compare.new_table_name : connectorConfig.dbConfig_compare.object_name));
        connectorConfig.destDBConfig.syncDefaultDatabaseUrl = (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table ? connectorConfig.dbConfig.syncDefaultDatabaseUrl : connectorConfig.dbConfig_compare.syncDefaultDatabaseUrl);
        connectorConfig.destDBConfig.dataSource = (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table ? connectorConfig.dbConfig.dataSource : connectorConfig.dbConfig_compare.dataSource);
        connectorConfig.destDBConfig.databaseType = (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table ? connectorConfig.dbConfig.databaseType : connectorConfig.dbConfig_compare.databaseType);
        entity.dest_config_json = JsonConvert.SerializeObject(connectorConfig.destDBConfig);
        entity.dest_schema = (connectorConfig.dedupSourceType == SourceType.Remove_Duplicates_from_a_Single_Table ? connectorConfig.dbSchema : connectorConfig.dbConfig_compare.db_schema);

        entity.sync_started_at = null;
        entity.sync_ended_at = null;
        entity.job_id = "";
        entity.simulation_count = -1;
        entity.sync_count = null;
        entity.dedup_type = DedupType.Full_Dedup;
        entity.sync_status = null;
        entity.unique_records_count = 0;
        entity.sync_updated_count = 0;
        entity.last_sync_at = null;
        entity.last_sync_status = null;

        //Delete destination table
        SyncRepository.RemovePGSyncTable(connectorConfig);
        _context.Entry(entity).State = EntityState.Modified;
        _context.SaveChanges();
    }
    catch (Exception ex)
    {
        Console.WriteLine("Error:{0}", ex.Message);
        return ex.Message;
    }
    return "success";
}
//Get the records for a single measurement point
public ObservableCollection<RegistroModel> GetPuntoMedicion(long idPuntoMedicion)
{
    ObservableCollection<RegistroModel> oc = new ObservableCollection<RegistroModel>();
    try
    {
        SyncRepository sync = new SyncRepository();
        long fechaActual = sync.GetCurrentDate();
        using (var entity = new db_SeguimientoProtocolo_r2Entities())
        {
            var res = (from o in entity.CI_REGISTRO
                       where o.IdPuntoMedicion == idPuntoMedicion
                       select o).ToList();
            if (res != null && res.Count > 0)
            {
                res.ForEach(row =>
                {
                    oc.Add(new RegistroModel()
                    {
                        IdRegistro = row.IdRegistro,
                        IdPuntoMedicion = row.IdPuntoMedicion,
                        FechaCaptura = row.FechaCaptura,
                        HoraRegistro = row.HoraRegistro,
                        DiaRegistro = row.DiaRegistro,
                        Valor = row.Valor,
                        AccionActual = row.AccionActual,
                        IsActive = row.IsActive,
                        IsModified = row.IsModified,
                        LastModifiedDate = row.LastModifiedDate,
                        IdCondicion = row.IdCondicion,
                        ServerLastModifiedDate = row.ServerLastModifiedDate,
                        FechaNumerica = row.FechaNumerica
                    });
                });
            }
        }
    }
    catch (Exception)
    {
        //swallow the exception and return whatever was collected so far
    }
    return oc;
}
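Reading the records for one measurement point (the repository instance name is assumed):

// Illustrative usage; returns an empty collection when the query fails.
var registros = repository.GetPuntoMedicion(idPuntoMedicion: 42);
foreach (RegistroModel r in registros)
{
    Console.WriteLine($"{r.FechaCaptura}: {r.Valor} ({r.AccionActual})");
}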