public void ProcessActivityTest()
{
    // Perform every pending task for user "ae"; the process should then reach the End state.
    Thread.CurrentPrincipal = new PrincipalUserAdapter("ae");
    ProcessExecutionService processExecutionService = new ProcessExecutionService();
    try
    {
        var taskLists = processExecutionService.GetTaskList("ae");
        IList flows = null;
        foreach (IFlow task in taskLists)
        {
            // Performing the activity drives the flow into the End state.
            flows = processExecutionService.PerformActivity(task.Id);
        }

        // Guard: if GetTaskList returned no tasks, `flows` is still null and the original
        // code crashed with a NullReferenceException at `flows.Count`. Fail clearly instead.
        Assert.IsNotNull(flows, "Expected at least one pending task for user 'ae'.");
        Assert.AreEqual(1, flows.Count);
        // The single resulting flow jumped to the End state (node id 2).
        Assert.AreEqual(2, ((FlowImpl)flows[0]).Node.Id);
    }
    catch (ExecutionException e)
    {
        Assert.Fail("ExcecutionException while starting a new holiday request: " + e.Message);
    }
    finally
    {
        // loginUtil.logout();
    }
}
public void ProcessActivityTest()
{
    // The request was filed by "af"; "ae" is af's supervisor, so the logged-in
    // principal is switched to ae before fetching the task list.
    Thread.CurrentPrincipal = new PrincipalUserAdapter("ae");
    var executionService = new ProcessExecutionService();
    try
    {
        var pendingTasks = executionService.GetTaskList("ae");

        IDictionary attributes = new Hashtable
        {
            { "evaluation result", Evaluation.APPROVE }
        };

        foreach (IFlow pendingTask in pendingTasks)
        {
            // An unrecoverable error surfaces at this call.
            executionService.PerformActivity(pendingTask.Id, attributes);
        }
    }
    catch (ExecutionException e)
    {
        Assert.Fail("ExcecutionException while starting a new holiday request: " + e.Message);
    }
    finally
    {
        // loginUtil.logout();
    }
}
public void StartProcessTest()
{
    // Starting process definition 1 as user "ae" should yield a live process
    // instance, which at this point sits in the First State.
    IProcessInstance instance = null;
    Thread.CurrentPrincipal = new PrincipalUserAdapter("ae");
    var executionService = new ProcessExecutionService();
    try
    {
        IDictionary attributes = new Hashtable();
        instance = executionService.StartProcessInstance(1L, attributes);

        Assert.IsNotNull(instance);
    }
    catch (ExecutionException e)
    {
        Assert.Fail("ExcecutionException while starting a new holiday request: " + e.Message);
    }
    finally
    {
        // loginUtil.logout();
    }
}
public void StartProcessTest()
{
    // Start process definition 1 as "ae" and verify the initial runtime state:
    // the instance exists, its root flow entered the Activity state (node id 3),
    // and the root flow is assigned to actor "ae".
    IProcessInstance processInstance = null;
    Thread.CurrentPrincipal = new PrincipalUserAdapter("ae");
    // NOTE(review): removed the unused `MyProcessDefinitionService` local — its value was
    // never read in this test. If its constructor performed required setup, restore it.
    ProcessExecutionService processExecutionService = new ProcessExecutionService();
    try
    {
        IDictionary attributeValues = new Hashtable();
        processInstance = processExecutionService.StartProcessInstance(1L, attributeValues);

        // The instance is now in the First State.
        Assert.IsNotNull(processInstance);
        // A basic root flow is created automatically.
        Assert.IsNotNull(processInstance.RootFlow);
        // The root flow entered the Activity state with node id 3.
        Assert.AreEqual(3, processInstance.RootFlow.Node.Id);
        // The root flow's actor is "ae".
        Assert.AreEqual("ae", processInstance.RootFlow.GetActor().Id);
        /*
         * Handy queries for manual inspection:
         * select * from [dbo].[NBPM_PROCESSINSTANCE]
         * select * from [dbo].[NBPM_FLOW]
         * select * from [dbo].[NBPM_LOG]
         * select * from [dbo].[NBPM_LOGDETAIL]
         */
    }
    catch (ExecutionException e)
    {
        Assert.Fail("ExcecutionException while starting a new holiday request: " + e.Message);
    }
    finally
    {
        // loginUtil.logout();
    }
}
public void StartProcessTest()
{
    // Start process definition 1 as "af" with holiday start/end attributes and
    // verify the instance and its root flow are created.
    IProcessInstance processInstance = null;
    Thread.CurrentPrincipal = new PrincipalUserAdapter("af");
    // NOTE(review): removed the unused `MyProcessDefinitionService` local — its value was
    // never read in this test. If its constructor performed required setup, restore it.
    ProcessExecutionService processExecutionService = new ProcessExecutionService();
    try
    {
        IDictionary attributeValues = new Hashtable();
        attributeValues.Add("start date", new DateTime(2016, 3, 1));
        attributeValues.Add("end date", new DateTime(2016, 3, 2));
        processInstance = processExecutionService.StartProcessInstance(1L, attributeValues);

        Assert.IsNotNull(processInstance);
        Assert.IsNotNull(processInstance.RootFlow);
        /*
         * Handy queries for manual inspection (expected row counts in comments):
         * select * from [dbo].[NBPM_PROCESSINSTANCE] --1
         * select * from [dbo].[NBPM_FLOW] --1
         * select * from [dbo].[NBPM_LOG] --3
         * select * from [dbo].[NBPM_LOGDETAIL] --6
         * select * from [dbo].[NBPM_ATTRIBUTEINSTANCE] --7
         */
    }
    catch (ExecutionException e)
    {
        Assert.Fail("ExcecutionException while starting a new holiday request: " + e.Message);
    }
    finally
    {
        // loginUtil.logout();
    }
}
protected override async Task Process()
{
    // For the enlightenment of other, later, readers:
    // ogr2ogr will be used to process not only obvious conversion sources (eg shape files) but also
    // geojson files. Why, you might ask, because tippecanoe can import GeoJSON directly? It's because
    // passing the GeoJSON through ogr2ogr will ensure that the final GeoJSON is in the correct projection
    // and that it should be valid GeoJSON as well.
    //
    // NOTE(review): all LogError/LogWarning calls previously passed the exception as a message-format
    // argument (`LogError($"...", ex)`), which the ILogger extension methods treat as an unused format
    // parameter — the stack trace was silently dropped. They now use the exception-first overload.
    // Assumes _logger is Microsoft.Extensions.Logging.ILogger — confirm.
    QueuedConversionJob queued = null;
    try
    {
        queued = await _gdConversionQueue.GetJob();
    }
    catch (Exception ex)
    {
        _logger.LogError(ex, "GdalConversion failed to retrieve queued job");
    }

    if (queued != null) // if no job queued, don't try
    {
        using (var workFolder = new TemporaryWorkFolder())
        {
            try
            {
                var job = queued.Content;
                // If the job has missing values, don't process it, just delete it from the queue.
                if (job?.DataLocation != null && job.LayerId != null && job.WorkspaceId != null)
                {
                    var timer = new Stopwatch();
                    timer.Start();
                    _logger.LogDebug($"Processing GDAL Conversion for Layer {queued.Content.LayerId} within Queue Message {queued.Id}");

                    // Keep source and dest separate in case of file name collision.
                    var sourcePath = workFolder.CreateSubFolder("source");
                    var destPath = workFolder.CreateSubFolder("dest");
                    var downloadedFilePath = await new Uri(job.DataLocation).DownloadToLocal(sourcePath);
                    var inputFilePath = GetGdalInputFileParameter(downloadedFilePath, workFolder);
                    var geoJsonFile = Path.Combine(destPath, $"{job.LayerId}.geojson");
                    var processArgument = GetProcessArgument(job.LayerName, geoJsonFile, inputFilePath);
                    _logger.LogDebug($"executing ogr2ogr process with argument {processArgument}");

                    var executionResult = ProcessExecutionService.ExecuteProcess(ConverterFileName, processArgument);
                    if (executionResult.success)
                    {
                        _logger.LogDebug($"ogr2ogr process successfully executed");
                    }
                    else
                    {
                        // Throw so the outer catch applies the retry / failure-status policy.
                        _logger.LogError($"ogr2ogr process failed: {executionResult.error}");
                        throw new Exception($"ogr2ogr process failed: {executionResult.error}");
                    }

                    // now we need to put the converted geojson file into storage
                    var location = await _geoJsonStorage.Store($"{job.WorkspaceId}/{job.LayerId}.geojson", geoJsonFile);
                    _logger.LogDebug("Upload of geojson file to storage complete.");

                    timer.Stop();
                    _logger.LogDebug($"GDAL Conversion finished for Layer {job.LayerId} in {timer.ElapsedMilliseconds} ms.");

                    // we created geoJson so we can put a request in for geojson to mvt conversion.
                    await _mbConversionQueue.Queue(new ConversionJobData
                    {
                        LayerId = job.LayerId,
                        WorkspaceId = job.WorkspaceId,
                        LayerName = job.LayerName,
                        Description = job.Description,
                        DataLocation = location
                    });
                }

                // we completed GDAL conversion and creation of MVT conversion request, so remove the GDAL request from the queue
                await _gdConversionQueue.DeleteJob(queued);
                _logger.LogDebug("GDAL Conversion message deleted ");
            }
            catch (Exception ex)
            {
                if (queued.DequeueCount >= RetryLimit)
                {
                    // Retry budget exhausted: drop the poison message and flag the layer as Failed.
                    try
                    {
                        await _gdConversionQueue.DeleteJob(queued);
                        if (queued.Content?.LayerId != null && queued.Content?.WorkspaceId != null)
                        {
                            await _statusTable.UpdateStatus(queued.Content.WorkspaceId, queued.Content.LayerId, LayerStatus.Failed);
                        }
                        _logger.LogError(ex, $"GDAL Conversion failed for layer {queued.Content?.LayerId} after reaching retry limit");
                    }
                    catch (Exception e)
                    {
                        _logger.LogError(e, $"GDAL Conversion failed to clear bad conversion for layer {queued.Content?.LayerId}");
                    }
                }
                else
                {
                    // Leave the message on the queue; it will be redelivered and retried.
                    _logger.LogWarning(ex, $"GDAL Conversion failed for layer {queued.Content?.LayerId} will retry later");
                }
            }
        }
    }
    else
    {
        // Nothing queued — back off before polling again.
        await Task.Delay(_serviceOptions.ConvertPolling);
    }
}
protected override async Task Process()
{
    // there's two types of conversion to consider.
    // 1. spatial source data arrives and is placed in storage, we get a message and convert it
    //    to geojson using gdal, and put the result in storage. We add a new req to the queue to
    //    convert the geojson to mbtile.
    // 2. the geojson from the previous step (or possibly geojson directly) is in storage, we get
    //    a message and convert to mbtile and place result in storage.
    //
    // NOTE(review): LogError/LogWarning calls now use the exception-first ILogger overload; the
    // exception was previously passed as an unused message-format argument, dropping stack traces.
    // Assumes _logger is Microsoft.Extensions.Logging.ILogger — confirm.
    QueuedConversionJob queued = null;
    try
    {
        queued = await _mbConversionQueue.GetJob();
    }
    catch (Exception ex)
    {
        _logger.LogError(ex, "MapBox Conversion failed to retrieve queued job");
    }

    if (queued != null) // if no job queued, don't try
    {
        using (var workFolder = new TemporaryWorkFolder())
        {
            try
            {
                var job = queued.Content;
                // If the job has missing values, don't process it, just delete it from the queue.
                if (job?.DataLocation != null && job?.LayerId != null && job?.WorkspaceId != null)
                {
                    // convert the geoJSON to a mapbox dataset
                    var timer = new Stopwatch();
                    timer.Start();

                    // retrieve the geoJSON file from the supplied URI
                    var inputFilePath = await new Uri(job.DataLocation).DownloadToLocal(workFolder.Path);
                    var mbTilesFilePath = Path.Combine(workFolder.Path, $"{job.LayerId}.mbtiles");
                    var processArgument = GetProcessArgument(job.LayerName, job.Description, mbTilesFilePath, inputFilePath);
                    _logger.LogDebug($"executing tippecanoe process with argument {processArgument}");

                    var executionResult = ProcessExecutionService.ExecuteProcess(ConverterFileName, processArgument);
                    if (executionResult.success)
                    {
                        _logger.LogDebug($"tippecanoe process successfully executed");
                    }
                    else
                    {
                        // BUGFIX(review): previously the failure was only logged and execution fell
                        // through, uploading a missing/incomplete mbtiles file and marking the layer
                        // Finished. Throw so the outer catch applies the retry / Failed-status policy,
                        // mirroring the GDAL conversion service's handling of ogr2ogr failures.
                        _logger.LogError($"tippecanoe process failed: {executionResult.error}");
                        throw new Exception($"tippecanoe process failed: {executionResult.error}");
                    }

                    // now we need to put the converted mbtile file into storage
                    await _tileStorage.Store($"{job.WorkspaceId}/{job.LayerId}.mbtiles", mbTilesFilePath);
                    _logger.LogDebug("Upload of mbtile file to storage complete.");

                    timer.Stop();
                    _logger.LogDebug($"MapBox Conversion finished for Layer {job.LayerId} in {timer.ElapsedMilliseconds} ms.");

                    try
                    {
                        await _statusTable.UpdateStatus(job.WorkspaceId, job.LayerId, LayerStatus.Finished);
                        _logger.LogDebug($"Layer {job.LayerId} status updated to Finished");
                    }
                    catch (Exception ex)
                    {
                        _logger.LogError(ex, $"Error when updating Layer {job.LayerId} status to Finished");
                        throw;
                    }

                    await _topicClient.SendMessage(new MapLayerUpdateData
                    {
                        MapLayerId = job.LayerId,
                        WorkspaceId = job.WorkspaceId,
                        Type = MapLayerUpdateType.Update
                    });
                }

                await _mbConversionQueue.DeleteJob(queued);
                _logger.LogDebug("Deleted MapBox Conversion message");
            }
            catch (Exception ex)
            {
                if (queued.DequeueCount >= RetryLimit)
                {
                    // Retry budget exhausted: drop the poison message and flag the layer as Failed.
                    try
                    {
                        await _mbConversionQueue.DeleteJob(queued);
                        if (queued.Content?.LayerId != null && queued.Content?.WorkspaceId != null)
                        {
                            await _statusTable.UpdateStatus(queued.Content.WorkspaceId, queued.Content.LayerId, LayerStatus.Failed);
                        }
                        _logger.LogError(ex, $"MapBox Conversion failed for layer {queued.Content?.LayerId} after reaching retry limit");
                    }
                    catch (Exception e)
                    {
                        _logger.LogError(e, $"MapBox Conversion failed to clear bad conversion for layer {queued.Content?.LayerId}");
                    }
                }
                else
                {
                    // Leave the message on the queue; it will be redelivered and retried.
                    _logger.LogWarning(ex, $"MapBox Conversion failed for layer {queued.Content?.LayerId} and will retry later");
                }
            }
        }
    }
    else
    {
        // Nothing queued — back off before polling again.
        await Task.Delay(_serviceOptions.ConvertPolling);
    }
}