/// <summary>
/// Begins the work: deserializes the job parameters from <c>BootParameters</c>,
/// resolves the dataset and total project document count, records the initial
/// job state, ensures an analytical index id exists (reusing the existing one
/// for rerun / add-additional-documents jobs), and creates the analytical index.
/// </summary>
protected override void BeginWork()
{
    BootParameters.ShouldNotBe(null);
    base.BeginWork();

    _jobParameter = (AnalyticsProjectInfo)XmlUtility.DeserializeObject(BootParameters, typeof(AnalyticsProjectInfo));
    _analyticProject = new AnalyticsProject();
    _documentBachSize = Convert.ToInt32(
        ApplicationConfigurationManager.GetValue("IncludeDocumentsIntoProjectInSubSystemJobBatchSize", "AnalyticsProject"));

    // DatasetId / MatterId are machine-generated identifiers, not user-entered text,
    // so parse them with the invariant culture (CA1305) rather than the current culture.
    _dataset = DataSetBO.GetDataSetDetailForDataSetId(Convert.ToInt64(_jobParameter.DatasetId, CultureInfo.InvariantCulture));
    _jobParameter.DocumentSource.CollectionId = _dataset.CollectionId;
    _totalDocumentCount = _analyticProject.GetProjectDocumentsCountByTaskId(
        Convert.ToInt64(_jobParameter.MatterId, CultureInfo.InvariantCulture),
        _jobParameter.ProjectCollectionId, _jobParameter.PrimarySystemJobId);

    // Record the initial job state in the job log: 0 processed out of the total.
    var jobSummaryKeyValuePairs = new EVKeyValuePairs();
    JobMgmtBO.UpdateJobResult(WorkAssignment.JobId, 0, _totalDocumentCount, jobSummaryKeyValuePairs);

    if (_jobParameter.IsRerunJob || _jobParameter.IsAddAdditionalDocuments)
    {
        // Rerun or add-additional-documents jobs must reuse the index id created
        // by the original run, if one already exists.
        _indexId = AnalyticsProject.GetIndexIdForProject(_jobParameter.MatterId, WorkAssignment.JobId,
            _dataset.CollectionId, _jobParameter.ProjectCollectionId, false);
    }

    if (string.IsNullOrEmpty(_indexId))
    {
        // Guid.ToString() uses the "D" format, which is guaranteed lowercase,
        // so no ToLowerInvariant() call is needed.
        _indexId = "idx-" + Guid.NewGuid().ToString();
        _analyticProject.InsertIndexId(_jobParameter.MatterId, WorkAssignment.JobId, _dataset.CollectionId,
            _jobParameter.ProjectCollectionId, _indexId);
    }

    // Create the index in Spark SVM.
    AnalyticsProject.CreateAnalyticalIndex(_jobParameter.MatterId, WorkAssignment.JobId, _indexId);

    // NOTE(review): marking the full document count as processed during BeginWork
    // looks intentional here (mirrors the original), but verify against the base
    // class's progress-reporting semantics.
    IncreaseProcessedDocumentsCount(_totalDocumentCount);
}
/// <summary>
/// Begins the work: loads the job parameters, looks up the dataset and the
/// project's document count, logs the initial job state, resolves or creates
/// the analytical index id, and creates the index.
/// </summary>
protected override void BeginWork()
{
    BootParameters.ShouldNotBe(null);
    base.BeginWork();

    _jobParameter = (AnalyticsProjectInfo)XmlUtility.DeserializeObject(BootParameters, typeof(AnalyticsProjectInfo));
    _analyticProject = new AnalyticsProject();

    var batchSizeSetting = ApplicationConfigurationManager.GetValue(
        "IncludeDocumentsIntoProjectInSubSystemJobBatchSize", "AnalyticsProject");
    _documentBachSize = Convert.ToInt32(batchSizeSetting);

    var datasetId = Convert.ToInt64(_jobParameter.DatasetId, CultureInfo.CurrentCulture);
    _dataset = DataSetBO.GetDataSetDetailForDataSetId(datasetId);
    _jobParameter.DocumentSource.CollectionId = _dataset.CollectionId;

    var matterId = Convert.ToInt64(_jobParameter.MatterId, CultureInfo.CurrentCulture);
    _totalDocumentCount = _analyticProject.GetProjectDocumentsCountByTaskId(
        matterId, _jobParameter.ProjectCollectionId, _jobParameter.PrimarySystemJobId);

    // Write the initial job state to the job log: 0 of the total documents processed.
    var initialSummaryPairs = new EVKeyValuePairs();
    JobMgmtBO.UpdateJobResult(WorkAssignment.JobId, 0, _totalDocumentCount, initialSummaryPairs);

    var mayReuseExistingIndex = _jobParameter.IsRerunJob || _jobParameter.IsAddAdditionalDocuments;
    if (mayReuseExistingIndex)
    {
        // Rerun / add-additional-documents jobs look up the index id created by
        // the original run, if one was already created.
        _indexId = AnalyticsProject.GetIndexIdForProject(_jobParameter.MatterId, WorkAssignment.JobId,
            _dataset.CollectionId, _jobParameter.ProjectCollectionId, false);
    }

    if (string.IsNullOrEmpty(_indexId))
    {
        // No existing index id: mint one and persist it for this project.
        _indexId = "idx-" + Guid.NewGuid().ToString().ToLowerInvariant();
        _analyticProject.InsertIndexId(_jobParameter.MatterId, WorkAssignment.JobId, _dataset.CollectionId,
            _jobParameter.ProjectCollectionId, _indexId);
    }

    // Create the index in Spark SVM.
    AnalyticsProject.CreateAnalyticalIndex(_jobParameter.MatterId, WorkAssignment.JobId, _indexId);
    IncreaseProcessedDocumentsCount(_totalDocumentCount);
}