/// <summary>
/// Updates the CP status and created/modified timestamps of a Sakai site record.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="system">Owning system; its PartitionKey locates the site rows.</param>
/// <param name="site">The site whose SITE_ID is the row key and whose dates are written.</param>
/// <param name="cpStatus">New CP status value to store.</param>
public static void UpdateSiteStatus(DataFactory.DataConfig providerConfig, POCO.System system, POCO.SakaiSite site, string cpStatus)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            throw new NotImplementedException();

        case "internal.mongodb":
            var collection = Utils.GetMongoCollection<SakaiSiteEntity>(providerConfig, MongoTableNames.SakaiSites);

            // Create the update filter: system partition + site row
            List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
            filters.Add(new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(system.PartitionKey), "eq"));
            filters.Add(new DataFactory.Filter("RowKey", Utils.CleanTableKey(site.SITE_ID), "eq"));
            FilterDefinition<SakaiSiteEntity> filter = Utils.GenerateMongoFilter<SakaiSiteEntity>(filters);

            // Use the typed update builder instead of string-concatenated JSON:
            // a quote in cpStatus (or the formatted dates) would previously have
            // broken BsonDocument.Parse or corrupted the update document.
            var update = Builders<SakaiSiteEntity>.Update
                .Set("TimeCreated", site.CREATEDON.ToUniversalTime().ToString(Utils.ISODateFormat))
                .Set("TimeLastModified", site.MODIFIEDON.ToUniversalTime().ToString(Utils.ISODateFormat))
                .Set("CPStatus", cpStatus);

            // Update the site status
            collection.UpdateOne(filter, update);
            break;

        default:
            // Consistent with the other provider-switch methods in this file
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }
}
/// <summary>
/// Data patch 001: rewrites the RowKey of record-association rows that match the
/// current Sakai file's RowKey, replacing it with the cleaned Sakai UI path.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="sakaiSite">Unused here; kept for patch-signature compatibility.</param>
/// <param name="sakaiResource">Unused here; kept for patch-signature compatibility.</param>
/// <param name="currentSakaiFile">Row(s) with this file's RowKey are rewritten.</param>
/// <param name="sakaiUIPath">New RowKey value (cleaned before use).</param>
/// <param name="recordAssociationSuffix">Suffix appended to the "recordassociation" table name.</param>
public static void Patch001_UpdateRecordAssociationDataRowKey(DataConfig providerConfig, POCO.SakaiSite sakaiSite, SakaiContentResource sakaiResource, SakaiFile currentSakaiFile, string sakaiUIPath, string recordAssociationSuffix)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            throw new NotImplementedException();

        case "internal.mongodb":
            // Create the update filter (match on RowKey only — patches every partition)
            List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
            filters.Add(new DataFactory.Filter("RowKey", Utils.CleanTableKey(currentSakaiFile.RowKey), "eq"));
            FilterDefinition<MongoSakaiUpdatePatch001> filter = Utils.GenerateMongoFilter<MongoSakaiUpdatePatch001>(filters);

            // Typed update builder instead of string-built JSON, so a quote in the
            // cleaned path cannot break BsonDocument parsing.
            var update = Builders<MongoSakaiUpdatePatch001>.Update
                .Set("RowKey", Utils.CleanTableKey(sakaiUIPath));

            // Update every matching association row
            IMongoCollection<MongoSakaiUpdatePatch001> collection = Utils.GetMongoCollection<MongoSakaiUpdatePatch001>(providerConfig, "recordassociation" + recordAssociationSuffix);
            UpdateResult result = collection.UpdateMany(filter, update);
            break;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }

    //TODO return id of new object if supported
    return;
}
/// <summary>
/// Returns the "files last updated" statistics entry, with its items sorted
/// most-recently-updated first, serialized as JSON.
/// </summary>
/// <returns>An ObjectResult whose value is the JSON payload, or an empty string
/// when the expected single stats row is not found.</returns>
public IActionResult GetItemStats()
{
    // Look up the single stats row keyed cpstat / fileslastupdated
    List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
    filters.Add(new DataFactory.Filter("PartitionKey", "cpstat", "eq"));
    filters.Add(new DataFactory.Filter("RowKey", "fileslastupdated", "eq"));

    DataFactory.DataConfig datacfg = Utils.GetDataConfig();
    List<POCO.Stat> stats = DataFactory.Stats.GetStats(datacfg, filters);

    // Exactly one entry is expected; anything else yields an empty payload.
    string entityAsJson = "";
    if (stats.Count == 1)
    {
        StatFilesLastUpdated statsSorted = JsonConvert.DeserializeObject<StatFilesLastUpdated>(stats[0].JsonStats);
        // Sort newest-first (y before x reverses the natural ascending order)
        statsSorted.stat.Sort((x, y) => DateTime.Compare(y.LastUpdatedDateTime, x.LastUpdatedDateTime));
        entityAsJson = JsonConvert.SerializeObject(statsSorted);
    }

    ObjectResult result = new ObjectResult(entityAsJson);
    return (result);
}
/// <summary>
/// Returns event-processing-time log entries, optionally filtered by the
/// JSON-serialized LogEventProcessingFilter supplied in the request header.
/// </summary>
/// <param name="filter">Optional JSON filter; null/empty means no filtering.</param>
/// <returns>JSON array of processing times, 400 on an unparseable filter,
/// 500 on any other failure.</returns>
public IActionResult GetLogEventProcessingByFilter([FromHeader] string filter)
{
    string entityAsJson = "";
    try
    {
        _logger.LogInformation("CPAPI: GetLogEventProcessingByFilter");

        // Deserialize the event-processing filter (header is optional)
        LogEventProcessingFilter oFilter = new LogEventProcessingFilter();
        if (filter != null && filter.Length > 0)
        {
            _logger.LogDebug("Deserializing filter of length: " + filter.Length);
            oFilter = JsonConvert.DeserializeObject<LogEventProcessingFilter>(filter);
        }

        // Validate the filter data
        if (oFilter == null)
        {
            return (StatusCode((int)System.Net.HttpStatusCode.BadRequest));
        }

        List<DataFactory.Filter> filters = new List<DataFactory.Filter>();

        // BUG FIX: the original condition was Count == 0, so supplied Event values
        // were never turned into data filters. Loop only when events are present.
        if (oFilter.Event != null && oFilter.Event.Count > 0)
        {
            foreach (string e in oFilter.Event)
            {
                DataFactory.Filter eventfilter = new DataFactory.Filter("Event", e, "eq");
                filters.Add(eventfilter);
            }
        }

        // Call the data factory
        DataFactory.DataConfig dataConfig = Utils.GetDataConfig();
        List<POCO.LogEventProcessingTime> recordKeyPhrases = DataFactory.Log.GetProcessingTime(dataConfig, filters, oFilter.YearMonth);
        entityAsJson = JsonConvert.SerializeObject(recordKeyPhrases, Formatting.Indented);
    }
    catch (Exception ex)
    {
        string exceptionMsg = "Log EventProcessing GET exception: " + ex.Message;
        if (ex.InnerException != null)
        {
            exceptionMsg = exceptionMsg + "[" + ex.InnerException.Message + "]";
        }
        _logger.LogError(exceptionMsg);
        return (StatusCode((int)System.Net.HttpStatusCode.InternalServerError));
    }
    ObjectResult result = new ObjectResult(entityAsJson);
    return (result);
}
/// <summary>
/// Persists a batch of regex matches, one write per match: insert-or-replace on
/// Azure, upsert (ReplaceOne with IsUpsert) on Mongo, keyed by PartitionKey/RowKey.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="matches">Matches to save; an empty list is a no-op that returns true.</param>
/// <returns>Always true once the provider branch completes without throwing.</returns>
public static bool AddRegexMatchs(DataConfig providerConfig, List<CaptureRegexMatch> matches)
{
    bool isSaved = false;
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            // (removed an unused List<TableOperation> that was never populated)
            CloudTable table = Utils.GetCloudTable(providerConfig, AzureTableNames.RecordAssociationRegexMatch);
            foreach (POCO.CaptureRegexMatch match in matches)
            {
                AzureRegexMatch az = new AzureRegexMatch(match);
                TableOperation operation = TableOperation.InsertOrReplace(az);
                Task tUpdate = table.ExecuteAsync(operation);
                tUpdate.Wait();
            }
            break;

        case "internal.mongodb":
            IMongoCollection<MongoRegexMatchUpsert> collection = Utils.GetMongoCollection<MongoRegexMatchUpsert>(providerConfig, MongoTableNames.RecordAssociationRegexMatch);
            foreach (POCO.CaptureRegexMatch match in matches)
            {
                MongoRegexMatchUpsert mongoObject = Utils.ConvertType<MongoRegexMatchUpsert>(match);

                // Create the update filter on the document's table keys
                List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
                filters.Add(new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(mongoObject.PartitionKey), "eq"));
                filters.Add(new DataFactory.Filter("RowKey", Utils.CleanTableKey(mongoObject.RowKey), "eq"));
                FilterDefinition<MongoRegexMatchUpsert> filter = Utils.GenerateMongoFilter<MongoRegexMatchUpsert>(filters);

                // Upsert: replace the document, inserting if absent
                MongoDB.Driver.ReplaceOptions options = new ReplaceOptions();
                options.IsUpsert = true;
                collection.ReplaceOne(filter, mongoObject, options);
            }
            break;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }
    isSaved = true;
    return (isSaved);
}
/// <summary>
/// Writes the ProcessStatus of an audit content file back to storage:
/// Azure merges a status-only entity; Mongo does a targeted $set.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="contentFile">Supplies the PartitionKey/RowKey and the new status.</param>
/// <returns>Always an empty string (no object id is available from either provider).</returns>
public static string UpdateAuditContentFileProcessStatus(DataConfig providerConfig, POCO.O365.AuditContentEntity contentFile)
{
    switch (providerConfig.ProviderType)
    {
        // AZURE: merge only the status columns into the existing row
        case "azure.tableservice":
            POCO.O365.AuditContentEntityUpdateStatus statusUpdate = new AuditContentEntityUpdateStatus();
            statusUpdate.PartitionKey = contentFile.PartitionKey;
            statusUpdate.RowKey = contentFile.RowKey;
            statusUpdate.ProcessStatus = contentFile.ProcessStatus;

            AzureAuditContentEntityUpdateStatus azureEntity = new AzureAuditContentEntityUpdateStatus(statusUpdate);
            azureEntity.ETag = "*";   // unconditional merge, ignore concurrency

            CloudTable auditTable = Utils.GetCloudTable(providerConfig, AzureTableNames.AuditContentFiles);
            Task mergeTask = auditTable.ExecuteAsync(TableOperation.InsertOrMerge(azureEntity));
            mergeTask.Wait();
            break;

        // MONGO: $set ProcessStatus on the document matching both table keys
        case "internal.mongodb":
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(contentFile.PartitionKey), "eq"),
                new DataFactory.Filter("RowKey", Utils.CleanTableKey(contentFile.RowKey), "eq")
            };
            FilterDefinition<MongoAuditContentEntityUpdateStatus> docFilter = Utils.GenerateMongoFilter<MongoAuditContentEntityUpdateStatus>(keyFilters);

            IMongoCollection<MongoAuditContentEntityUpdateStatus> auditCollection = Utils.GetMongoCollection<MongoAuditContentEntityUpdateStatus>(providerConfig, AzureTableNames.AuditContentFiles == null ? MongoTableNames.AuditContentFiles : MongoTableNames.AuditContentFiles);

            UpdateResult updateOutcome = auditCollection.UpdateOne(
                docFilter,
                Builders<MongoAuditContentEntityUpdateStatus>.Update.Set("ProcessStatus", contentFile.ProcessStatus));
            return (string.Empty);

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }

    //TODO return id of new object if supported
    return (string.Empty);
}
/// <summary>
/// Returns ontology match summaries, optionally restricted to the ontology URIs
/// listed in the JSON-serialized OntologyExplorerFilter query parameter.
/// </summary>
/// <param name="filter">Optional JSON filter; null/empty means no filtering.</param>
/// <returns>JSON array of match summaries, or 500 on failure.</returns>
public IActionResult GetByFilter([FromQuery] string filter)
{
    string entityAsJson = "";
    try
    {
        _logger.LogInformation("CPAPI: Get");

        // Deserialize the ontology filter (query parameter is optional)
        OntologyExplorerFilter oFilter = new OntologyExplorerFilter();
        if (filter != null && filter.Length > 0)
        {
            _logger.LogDebug("Deserializing ontology filter of length: " + filter.Length);
            oFilter = JsonConvert.DeserializeObject<OntologyExplorerFilter>(filter);
        }

        List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
        if (oFilter.ontology.Count > 0)
        {
            foreach (string of in oFilter.ontology)
            {
                // BUG FIX: the cleaned key was computed but the raw value was
                // passed to the filter; GetByMatchTerm filters on the cleaned key,
                // so do the same here. (Also dropped an unused combinedFilter local.)
                string cleanFilterPKey = Utils.CleanTableKey(of);
                DataFactory.Filter pkfilt = new DataFactory.Filter("PartitionKey", cleanFilterPKey, "eq");
                filters.Add(pkfilt);
            }
        }

        List<POCO.OntologyMatchSummary> matchsummaries = new List<POCO.OntologyMatchSummary>();
        matchsummaries = DataFactory.Ontology.GetOntologyMatchSummary(Utils.GetDataConfig(), filters);
        entityAsJson = JsonConvert.SerializeObject(matchsummaries, Formatting.Indented);
    }
    catch (Exception ex)
    {
        string exceptionMsg = "Ontology GET exception: " + ex.Message;
        if (ex.InnerException != null)
        {
            exceptionMsg = exceptionMsg + "[" + ex.InnerException.Message + "]";
        }
        _logger.LogError(exceptionMsg);
        return (StatusCode((int)System.Net.HttpStatusCode.InternalServerError));
    }
    ObjectResult result = new ObjectResult(entityAsJson);
    return (result);
}
/// <summary>
/// Upserts a file-processing exception into a per-system, per-result table whose
/// name is derived from the exception's PartitionKey and the system id.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="system">System whose SystemId is baked into the table name.</param>
/// <param name="fileException">The exception record to store.</param>
/// <returns>Always an empty string (no object id is available from either provider).</returns>
public static string AddFileProcessingException(DataConfig providerConfig, POCO.System system, POCO.FileProcessException fileException)
{
    string tableName = string.Empty;
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            // Table name: prefix + process result (partition key) + system id
            tableName = "stlpfileexception-" + fileException.PartitionKey + "-" + system.SystemId.ToString();

            AzureFileProcessException azureEntity = new AzureFileProcessException(fileException);
            CloudTable exceptionTable = Utils.GetCloudTable(providerConfig, tableName);
            Task writeTask = exceptionTable.ExecuteAsync(TableOperation.InsertOrReplace(azureEntity));
            writeTask.Wait();
            break;

        case "internal.mongodb":
            // Table name: prefix + process result (partition key) + system id
            tableName = "fileexception-" + fileException.PartitionKey + "-" + system.SystemId.ToString();
            IMongoCollection<MongoFileProcessException> exceptions = Utils.GetMongoCollection<MongoFileProcessException>(providerConfig, tableName);
            MongoFileProcessException document = Utils.ConvertType<MongoFileProcessException>(fileException);

            // NOTE(review): keys are used raw here, without Utils.CleanTableKey,
            // unlike the sibling upserts — confirm this is intentional.
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", fileException.PartitionKey, "eq"),
                new DataFactory.Filter("RowKey", fileException.RowKey, "eq")
            };
            FilterDefinition<MongoFileProcessException> filter = Utils.GenerateMongoFilter<MongoFileProcessException>(keyFilters);

            // Replace-or-insert the whole document
            MongoDB.Driver.ReplaceOptions upsertOptions = new ReplaceOptions();
            upsertOptions.IsUpsert = true;
            exceptions.ReplaceOne(filter, document, upsertOptions);
            return (string.Empty);

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }

    //TODO return id of new object if supported
    return (string.Empty);
}
/// <summary>
/// Stores the record-identification configuration JSON against a system row:
/// Azure merges a config-only entity; Mongo does a targeted $set.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="system">Supplies the PartitionKey/RowKey of the system row.</param>
/// <param name="config">The JSON configuration payload to store.</param>
/// <returns>True once the provider branch completes without throwing.</returns>
public static bool UpdateRecordConfig(DataConfig providerConfig, POCO.System system, string config)
{
    bool isUpdatedOk = false;

    // Carrier holding just the keys and the new configuration payload
    POCO.RecordConfigUpdate cfgUpdate = new RecordConfigUpdate();
    cfgUpdate.PartitionKey = system.PartitionKey;
    cfgUpdate.RowKey = system.RowKey;
    cfgUpdate.JsonRecordIdentificationConfig = config;

    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            AzureRecordIdentificationConfig azureEntity = new AzureRecordIdentificationConfig(cfgUpdate);
            CloudTable systemTable = Utils.GetCloudTable(providerConfig, System.TableNames.System);
            Task mergeTask = systemTable.ExecuteAsync(TableOperation.InsertOrMerge(azureEntity));
            mergeTask.Wait();
            isUpdatedOk = true;
            break;

        case "internal.mongodb":
            IMongoCollection<MongoRecordIdentificationConfig> systems = Utils.GetMongoCollection<MongoRecordIdentificationConfig>(providerConfig, System.TableNames.System);

            // Match the system document on both table keys
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", cfgUpdate.PartitionKey, "eq"),
                new DataFactory.Filter("RowKey", cfgUpdate.RowKey, "eq")
            };
            FilterDefinition<MongoRecordIdentificationConfig> filter = Utils.GenerateMongoFilter<MongoRecordIdentificationConfig>(keyFilters);

            // $set only the configuration column
            UpdateResult outcome = systems.UpdateOne(
                filter,
                Builders<MongoRecordIdentificationConfig>.Update.Set("JsonRecordIdentificationConfig", cfgUpdate.JsonRecordIdentificationConfig));
            isUpdatedOk = true;
            break;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }
    return (isUpdatedOk);
}
/// <summary>
/// Flips the Enabled flag on a system row: Azure merges a flag-only entity;
/// Mongo does a targeted $set.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="systemPartitionKey">Partition key of the system row.</param>
/// <param name="systemRowKey">Row key of the system row.</param>
/// <param name="enableSystem">New value for the Enabled flag.</param>
/// <returns>True once the provider branch completes without throwing.</returns>
public static bool SetConfigEnabled(DataConfig providerConfig, string systemPartitionKey, string systemRowKey, bool enableSystem)
{
    bool isUpdatedOk = false;

    // Carrier holding just the keys and the new flag value
    POCO.SystemEnabledUpdate flagUpdate = new POCO.SystemEnabledUpdate();
    flagUpdate.PartitionKey = systemPartitionKey;
    flagUpdate.RowKey = systemRowKey;
    flagUpdate.Enabled = enableSystem;

    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            AzureSystemEnabledUpdate azureEntity = new AzureSystemEnabledUpdate(flagUpdate);
            CloudTable systemTable = Utils.GetCloudTable(providerConfig, System.TableNames.System);
            Task mergeTask = systemTable.ExecuteAsync(TableOperation.InsertOrMerge(azureEntity));
            mergeTask.Wait();
            isUpdatedOk = true;
            break;

        case "internal.mongodb":
            IMongoCollection<MongoSystemEnabledUpdate> systems = Utils.GetMongoCollection<MongoSystemEnabledUpdate>(providerConfig, System.TableNames.System);

            // Match the system document on both table keys
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", flagUpdate.PartitionKey, "eq"),
                new DataFactory.Filter("RowKey", flagUpdate.RowKey, "eq")
            };
            FilterDefinition<MongoSystemEnabledUpdate> filter = Utils.GenerateMongoFilter<MongoSystemEnabledUpdate>(keyFilters);

            // $set only the Enabled column
            UpdateResult outcome = systems.UpdateOne(
                filter,
                Builders<MongoSystemEnabledUpdate>.Update.Set("Enabled", flagUpdate.Enabled));
            isUpdatedOk = true;
            break;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }
    return (isUpdatedOk);
}
/// <summary>
/// Data patch 003: rewrites ItemUri on the Sakai file document matching the
/// file's PartitionKey/RowKey to the file's SourceRelativeUrl.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="file">Supplies the keys and the new ItemUri value.</param>
/// <returns>Number of documents modified (0 when the count is unavailable).</returns>
public static long PATCH003_UpdateSakaiFileItemUri(DataConfig providerConfig, POCO.SakaiFile file)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            throw new NotImplementedException();

        case "internal.mongodb":
            IMongoCollection<MongoSakaiUpdatePatch003> collection = Utils.GetMongoCollection<MongoSakaiUpdatePatch003>(providerConfig, MongoTableNames.SakaiFiles);

            // Create the update filter on both table keys
            List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
            filters.Add(new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(file.PartitionKey), "eq"));
            filters.Add(new DataFactory.Filter("RowKey", Utils.CleanTableKey(file.RowKey), "eq"));
            FilterDefinition<MongoSakaiUpdatePatch003> filter = Utils.GenerateMongoFilter<MongoSakaiUpdatePatch003>(filters);

            var updateDoc = Builders<MongoSakaiUpdatePatch003>.Update
                .Set("ItemUri", file.SourceRelativeUrl.ToString());

            // (removed unreachable break / trailing return and stale commented-out code)
            UpdateResult result = collection.UpdateOne(filter, updateDoc);
            if (result.IsAcknowledged && result.IsModifiedCountAvailable)
            {
                return (result.ModifiedCount);
            }
            return (0);

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }
}
/// <summary>
/// Serializes the sentence statistics for a system and stores them in the
/// JsonSentenceStats column of the system row.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="system">System whose PartitionKey locates the row(s) to update.</param>
/// <param name="sentenceStats">Statistics to serialize and store.</param>
public static void UpdateSentenceStat(DataConfig providerConfig, POCO.System system, List<POCO.SentenceStat> sentenceStats)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            AzureSentenceStatUpdate azureUpdate = new AzureSentenceStatUpdate(system, sentenceStats);
            CloudTable systemTable = Utils.GetCloudTable(providerConfig, System.TableNames.System);
            Task mergeTask = systemTable.ExecuteAsync(TableOperation.InsertOrMerge(azureUpdate));
            mergeTask.Wait();
            break;

        case "internal.mongodb":
            // NOTE(review): collection name is hard-coded while sibling methods use
            // System.TableNames.System — confirm the constant resolves to "stlpsystems".
            IMongoCollection<MongoSystemStatUpdate> systems = Utils.GetMongoCollection<MongoSystemStatUpdate>(providerConfig, "stlpsystems");

            // Match on the partition only; the RowKey filter was deliberately left
            // out upstream (previously present but commented away).
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(system.PartitionKey), "eq")
            };
            FilterDefinition<MongoSystemStatUpdate> filter = Utils.GenerateMongoFilter<MongoSystemStatUpdate>(keyFilters);

            // Stats are stored as a single JSON string column
            string statsJson = JsonConvert.SerializeObject(sentenceStats);
            UpdateResult outcome = systems.UpdateOne(
                filter,
                Builders<MongoSystemStatUpdate>.Update.Set("JsonSentenceStats", statsJson));
            return;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }
    return;
}
/// <summary>
/// Upserts a file-processing log entry. Azure writes to a month-suffixed table
/// (to cap table size); Mongo upserts into a single collection keyed on
/// PartitionKey/RowKey.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="log">The log entry to store.</param>
/// <returns>Always an empty string (no object id is available from either provider).</returns>
public static string AddFileProcessingLog(DataConfig providerConfig, POCO.LogFileProcessing log)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            // Cycle the tables each month to manage size
            string monthSuffix = DateTime.UtcNow.ToString(Utils.TableSuffixDateFormatYM);
            string azureTableName = "stlplogfileprocessresult" + monthSuffix;

            AzureLogFileProcessing azureEntity = new AzureLogFileProcessing(log);
            CloudTable logTable = Utils.GetCloudTable(providerConfig, azureTableName);
            Task writeTask = logTable.ExecuteAsync(TableOperation.InsertOrReplace(azureEntity));
            writeTask.Wait();
            break;

        case "internal.mongodb":
            IMongoCollection<MongoLogFileProcessing> logs = Utils.GetMongoCollection<MongoLogFileProcessing>(providerConfig, "logfileprocessing");
            MongoLogFileProcessing document = Utils.ConvertType<MongoLogFileProcessing>(log);

            // Upsert filter on the entry's raw table keys
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", log.PartitionKey, "eq"),
                new DataFactory.Filter("RowKey", log.RowKey, "eq")
            };
            FilterDefinition<MongoLogFileProcessing> filter = Utils.GenerateMongoFilter<MongoLogFileProcessing>(keyFilters);

            // Replace-or-insert the whole document
            MongoDB.Driver.ReplaceOptions upsertOptions = new ReplaceOptions();
            upsertOptions.IsUpsert = true;
            logs.ReplaceOne(filter, document, upsertOptions);
            return (string.Empty);

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }

    //TODO return id of new object if supported
    return (string.Empty);
}
/// <summary>
/// Returns ontology term matches for each supplied (ontology URI, match term)
/// pair, aggregated across all pairs and serialized as JSON.
/// </summary>
/// <param name="filter">Body containing the list of filter terms to look up.</param>
/// <returns>JSON array of matches, or 422 with the error message on failure.</returns>
public IActionResult GetByMatchTerm([FromBody] FilterMatchTerms filter)
{
    string entityAsJson;
    try
    {
        _logger.LogInformation("CPAPI: Get");

        List<POCO.OntologyTermMatch> ontologyMatches = new List<POCO.OntologyTermMatch>();
        DataFactory.DataConfig datacfg = Utils.GetDataConfig();

        // One lookup per (ontology, term) pair; results are concatenated
        foreach (FilterMatchTerm filterterm in filter.filterterms)
        {
            string cleanFilterPKey = Utils.CleanTableKey(filterterm.ontologyuri);
            List<DataFactory.Filter> filters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", cleanFilterPKey, "eq"),
                new DataFactory.Filter("TermRowKey", filterterm.matchterm, "eq")
            };

            ontologyMatches.AddRange(DataFactory.Ontology.GetOntologyTermMatches(datacfg, filters));
        }

        entityAsJson = JsonConvert.SerializeObject(ontologyMatches, Formatting.Indented);
    }
    catch (Exception ex)
    {
        string exceptionMsg = "Ontology GET exception: " + ex.Message;
        if (ex.InnerException != null)
        {
            exceptionMsg = exceptionMsg + "[" + ex.InnerException.Message + "]";
        }
        _logger.LogError(exceptionMsg);
        return (new UnprocessableEntityObjectResult(exceptionMsg));
    }
    ObjectResult result = new ObjectResult(entityAsJson);
    return (result);
}
/// <summary>
/// Records the most recent occurrence of a named event: insert-or-replace on
/// Azure; upsert on Mongo keyed by the fixed "LastAction" partition and the
/// event name as row key.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="lastAction">The last-action entry; EventName becomes the row key.</param>
/// <returns>Always an empty string (no object id is available from either provider).</returns>
public static string AddLastAction(DataConfig providerConfig, POCO.LogLastAction lastAction)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            AzureLastAction az = new AzureLastAction(lastAction);
            CloudTable table = Utils.GetCloudTable(providerConfig, AzureTableNames.LogLastAction);
            TableOperation operation = TableOperation.InsertOrReplace(az);
            Task tUpdate = table.ExecuteAsync(operation);
            tUpdate.Wait();
            break;

        case "internal.mongodb":
            IMongoCollection<MongoLastAction> collection = Utils.GetMongoCollection<MongoLastAction>(providerConfig, MongoTableNames.LogLastAction);
            MongoLastAction mongoObject = Utils.ConvertType<MongoLastAction>(lastAction);

            // Upsert filter: fixed partition, event name as the row key
            List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
            filters.Add(new DataFactory.Filter("PartitionKey", Utils.CleanTableKey("LastAction"), "eq"));
            filters.Add(new DataFactory.Filter("RowKey", Utils.CleanTableKey(lastAction.EventName), "eq"));
            FilterDefinition<MongoLastAction> filter = Utils.GenerateMongoFilter<MongoLastAction>(filters);

            // Replace-or-insert the whole document.
            // (Removed two leftover debug locals that serialized the filter and the
            // document on every call without ever being used.)
            MongoDB.Driver.ReplaceOptions options = new ReplaceOptions();
            options.IsUpsert = true;
            collection.ReplaceOne(filter, mongoObject, options);
            return (string.Empty);

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }

    //TODO return id of new object if supported
    return (string.Empty);
}
// (Removed two large blocks of commented-out legacy code: GetFolders and
// UpdateSPOWebInfoLastProcessed — recover from version control if needed.)

/// <summary>
/// Writes folder metadata (timestamps, item count, name, CP status) back to the
/// EO folder store: Azure merges the entity; Mongo does a targeted $set.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="folderUpdate">Folder values to persist; keys identify the document.</param>
public static void UpdateEOFolder(DataConfig providerConfig, POCO.O365.EOFolderUpdate folderUpdate)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            AzureEOFolderUpdate az = new AzureEOFolderUpdate(folderUpdate);
            CloudTable table = Utils.GetCloudTable(providerConfig, AzureTableNames.EOFolder);
            TableOperation operation = TableOperation.InsertOrMerge(az);
            Task tUpdate = table.ExecuteAsync(operation);
            tUpdate.Wait();
            break;

        case "internal.mongodb":
            IMongoCollection<MongoEOFolderUpdate> collection = Utils.GetMongoCollection<MongoEOFolderUpdate>(providerConfig, MongoTableNames.EOFolder);
            MongoEOFolderUpdate mongoObject = Utils.ConvertType<MongoEOFolderUpdate>(folderUpdate);

            // Match the folder document on both table keys
            List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
            filters.Add(new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(mongoObject.PartitionKey), "eq"));
            filters.Add(new DataFactory.Filter("RowKey", Utils.CleanTableKey(mongoObject.RowKey), "eq"));
            FilterDefinition<MongoEOFolderUpdate> filter = Utils.GenerateMongoFilter<MongoEOFolderUpdate>(filters);

            // $set only the mutable folder columns
            var update = Builders<MongoEOFolderUpdate>.Update
                .Set("TimeCreated", folderUpdate.TimeCreated)
                .Set("TimeLastModified", folderUpdate.TimeLastModified)
                .Set("ItemCount", folderUpdate.ItemCount)
                .Set("Name", folderUpdate.Name)
                .Set("CPFolderStatus", folderUpdate.CPFolderStatus);

            UpdateResult result = collection.UpdateOne(filter, update);
            return;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }

    return;
}
/// <summary>
/// Persists a Sakai folder: insert-or-merge on Azure; full-document upsert on
/// Mongo keyed by the folder's PartitionKey/RowKey.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="ntfsFolder">The folder record to store.</param>
public static void UpdateFolder(DataConfig providerConfig, POCO.SakaiFolder ntfsFolder)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            AzureSakaiFolder azureFolder = new AzureSakaiFolder(ntfsFolder);
            CloudTable folderTable = Utils.GetCloudTable(providerConfig, SakaiResource.AzureTableNames.SakaiFolders);
            Task mergeTask = folderTable.ExecuteAsync(TableOperation.InsertOrMerge(azureFolder));
            mergeTask.Wait();
            break;

        case "internal.mongodb":
            IMongoCollection<MongoSakaiFolderUpsert> folders = Utils.GetMongoCollection<MongoSakaiFolderUpsert>(providerConfig, SakaiResource.MongoTableNames.SakaiFolders);
            MongoSakaiFolderUpsert document = Utils.ConvertType<MongoSakaiFolderUpsert>(ntfsFolder);

            // Upsert filter on the document's cleaned table keys
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(document.PartitionKey), "eq"),
                new DataFactory.Filter("RowKey", Utils.CleanTableKey(document.RowKey), "eq")
            };
            FilterDefinition<MongoSakaiFolderUpsert> filter = Utils.GenerateMongoFilter<MongoSakaiFolderUpsert>(keyFilters);

            // Replace-or-insert the whole document
            MongoDB.Driver.ReplaceOptions upsertOptions = new ReplaceOptions();
            upsertOptions.IsUpsert = true;
            folders.ReplaceOne(filter, document, upsertOptions);
            return;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }

    //TODO return id of new object if supported
    return;
}
/// <summary>
/// Data patch 002: rewrites every record-association row whose RowKey matches
/// the cleaned old key, replacing it with the cleaned new key.
/// </summary>
/// <param name="providerConfig">Data provider configuration (selects Azure vs Mongo).</param>
/// <param name="oldRowKey">Existing RowKey value (cleaned before matching).</param>
/// <param name="newRowKey">Replacement RowKey value (cleaned before writing).</param>
/// <param name="recordAssociationTable">Name of the association table to patch.</param>
/// <returns>Number of rows modified (0 when the count is unavailable).</returns>
public static long Patch002_UpdateRecordAssociationDataRowKey(DataConfig providerConfig, string oldRowKey, string newRowKey, string recordAssociationTable)
{
    long rowsUpdated = 0;
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            throw new NotImplementedException();

        case "internal.mongodb":
            // Create the update filter on the cleaned old row key
            List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
            filters.Add(new DataFactory.Filter("RowKey", Utils.CleanTableKey(oldRowKey), "eq"));
            FilterDefinition<MongoSakaiUpdatePatch002> filter = Utils.GenerateMongoFilter<MongoSakaiUpdatePatch002>(filters);

            // Typed update builder instead of string-built JSON, so a quote in the
            // cleaned key cannot break BsonDocument parsing.
            var update = Builders<MongoSakaiUpdatePatch002>.Update
                .Set("RowKey", Utils.CleanTableKey(newRowKey));

            // Update every matching association row
            IMongoCollection<MongoSakaiUpdatePatch002> collection = Utils.GetMongoCollection<MongoSakaiUpdatePatch002>(providerConfig, recordAssociationTable);
            UpdateResult result = collection.UpdateMany(filter, update);
            if (result.IsAcknowledged && result.IsModifiedCountAvailable)
            {
                rowsUpdated = result.ModifiedCount;
            }
            break;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }

    return (rowsUpdated);
}
/// <summary>
/// Inserts or updates the stored hash value for a file.
/// </summary>
public static void UpsertHashValueForFile(DataConfig providerConfig, POCO.FileHash hash)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            // InsertOrMerge behaves as an upsert.
            AzureFileHash azureHash = new AzureFileHash(hash);
            CloudTable hashTable = Utils.GetCloudTable(providerConfig, POCO.TableNames.Azure.FileHashTableNames.FileHash);
            TableOperation upsertOp = TableOperation.InsertOrMerge(azureHash);
            Task azureTask = hashTable.ExecuteAsync(upsertOp);
            azureTask.Wait();
            break;

        case "internal.mongodb":
            IMongoCollection<MongoFileHash> hashCollection = Utils.GetMongoCollection<MongoFileHash>(providerConfig, POCO.TableNames.Mongo.FileHashTableNames.FileHash);
            MongoFileHash hashDoc = Utils.ConvertType<MongoFileHash>(hash);

            // Match on the cleaned partition/row key pair.
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(hashDoc.PartitionKey), "eq"),
                new DataFactory.Filter("RowKey", Utils.CleanTableKey(hashDoc.RowKey), "eq")
            };
            FilterDefinition<MongoFileHash> mongoFilter = Utils.GenerateMongoFilter<MongoFileHash>(keyFilters);

            // Replace (or insert, via IsUpsert) the whole document.
            MongoDB.Driver.ReplaceOptions upsertOptions = new ReplaceOptions();
            upsertOptions.IsUpsert = true;
            hashCollection.ReplaceOne(mongoFilter, hashDoc, upsertOptions);
            return;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }
    return;
}
/// <summary>
/// Updates the status fields of a file batch record in the given table.
/// </summary>
public static void UpdateFileBatchStatus(DataConfig providerConfig, POCO.FileBatchStatus fileBatchStatus, string tableName)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            // InsertOrMerge acts as an upsert against the Azure table.
            AzureFileBatchStatusUpdate az = new AzureFileBatchStatusUpdate(fileBatchStatus);
            CloudTable table = Utils.GetCloudTable(providerConfig, tableName);
            TableOperation operation = TableOperation.InsertOrMerge(az);
            Task tUpdate = table.ExecuteAsync(operation);
            tUpdate.Wait();
            break;

        case "internal.mongodb":
            IMongoCollection<MongoFileBatchStatusUpdate> collection = Utils.GetMongoCollection<MongoFileBatchStatusUpdate>(providerConfig, tableName);
            MongoFileBatchStatusUpdate mongoObject = Utils.ConvertType<MongoFileBatchStatusUpdate>(fileBatchStatus);

            // Create the update filter (match on the cleaned key pair)
            List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
            filters.Add(new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(mongoObject.PartitionKey), "eq"));
            filters.Add(new DataFactory.Filter("RowKey", Utils.CleanTableKey(mongoObject.RowKey), "eq"));
            FilterDefinition<MongoFileBatchStatusUpdate> filter = Utils.GenerateMongoFilter<MongoFileBatchStatusUpdate>(filters);

            // BUG FIX: the update was previously built by concatenating values
            // into a JSON string and calling BsonDocument.Parse, which failed
            // (and was injectable) whenever JsonFileProcessResult contained
            // quotes or braces. A typed update builder escapes values correctly
            // and matches the style used by the other update methods here.
            var update = Builders<MongoFileBatchStatusUpdate>.Update
                .Set("BatchGuid", fileBatchStatus.BatchGuid.ToString())
                .Set("BatchStatus", fileBatchStatus.BatchStatus)
                .Set("JsonFileProcessResult", fileBatchStatus.JsonFileProcessResult);

            // Update the batch status
            collection.UpdateOne(filter, update);
            return;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }
    return;
}
/// <summary>
/// Updates the SizeInBytes field of an existing file record.
/// NOTE(review): the Mongo collection is typed MongoFileMIMEType even though
/// this updates file size — presumably both map onto the same table schema;
/// confirm against the entity definitions.
/// </summary>
internal static void UpdateFileSize(DataConfig providerConfig, string tableName, POCO.CPFileSize fileSize)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            AzureFileSize azureSize = new AzureFileSize(fileSize);
            CloudTable sizeTable = Utils.GetCloudTable(providerConfig, tableName);
            TableOperation mergeOp = TableOperation.InsertOrMerge(azureSize);
            Task azureTask = sizeTable.ExecuteAsync(mergeOp);
            azureTask.Wait();
            break;

        case "internal.mongodb":
            IMongoCollection<MongoFileMIMEType> sizeCollection = Utils.GetMongoCollection<MongoFileMIMEType>(providerConfig, tableName);

            // Match on the cleaned partition/row key pair.
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(fileSize.PartitionKey), "eq"),
                new DataFactory.Filter("RowKey", Utils.CleanTableKey(fileSize.RowKey), "eq")
            };
            FilterDefinition<MongoFileMIMEType> mongoFilter = Utils.GenerateMongoFilter<MongoFileMIMEType>(keyFilters);

            // Set only the size field on the matched document.
            var sizeUpdate = Builders<MongoFileMIMEType>.Update.Set("SizeInBytes", fileSize.SizeInBytes);
            UpdateResult updateResult = sizeCollection.UpdateOne(mongoFilter, sizeUpdate);
            return;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }
    return;
}
/// <summary>
/// Updates the last-modified tracking timestamps for an SPO web info entry.
/// </summary>
public static void UpdateSPOWebInfoLastProcessed(DataConfig providerConfig, POCO.O365.SPOWebInfoEntity webInfo)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            AzureSPOWebInfoEntity azureWebInfo = new AzureSPOWebInfoEntity(webInfo);
            CloudTable trackingTable = Utils.GetCloudTable(providerConfig, AzureTableNames.SPOTracking);
            TableOperation mergeOp = TableOperation.InsertOrMerge(azureWebInfo);
            Task azureTask = trackingTable.ExecuteAsync(mergeOp);
            azureTask.Wait();
            break;

        case "internal.mongodb":
            IMongoCollection<MongoSPOWebInfoEntity> trackingCollection = Utils.GetMongoCollection<MongoSPOWebInfoEntity>(providerConfig, MongoTableNames.SPOTracking);
            MongoSPOWebInfoEntity webInfoDoc = Utils.ConvertType<MongoSPOWebInfoEntity>(webInfo);

            // Match on the cleaned partition/row key pair.
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(webInfoDoc.PartitionKey), "eq"),
                new DataFactory.Filter("RowKey", Utils.CleanTableKey(webInfoDoc.RowKey), "eq")
            };
            FilterDefinition<MongoSPOWebInfoEntity> mongoFilter = Utils.GenerateMongoFilter<MongoSPOWebInfoEntity>(keyFilters);

            // Set only the two tracking timestamps.
            var timestampUpdate = Builders<MongoSPOWebInfoEntity>.Update
                .Set("LastItemModifiedDate", webInfo.LastItemModifiedDate)
                .Set("LastItemUserModifiedDate", webInfo.LastItemUserModifiedDate);
            UpdateResult updateResult = trackingCollection.UpdateOne(mongoFilter, timestampUpdate);
            return;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }
    return;
}
/// <summary>
/// Persists a record authority match. The Azure branch performs an
/// InsertOrMerge upsert; the Mongo branch replaces the matching document.
/// NOTE(review): the Mongo ReplaceOne has no IsUpsert option, so a missing
/// document is not inserted — confirm this asymmetry with Azure is intended.
/// Returns an empty string (no object id is currently surfaced).
/// </summary>
public static string UpdateRecordAuthorityMatch(DataConfig providerConfig, POCO.RecordAuthorityMatch raMatch)
{
    switch (providerConfig.ProviderType)
    {
        case "azure.tableservice":
            AzureRecordAuthorityMatch azureMatch = new AzureRecordAuthorityMatch(raMatch);
            CloudTable matchTable = Utils.GetCloudTable(providerConfig, AzureTableNames.RecordAssociations);
            TableOperation mergeOp = TableOperation.InsertOrMerge(azureMatch);
            Task azureTask = matchTable.ExecuteAsync(mergeOp);
            azureTask.Wait();
            break;

        case "internal.mongodb":
            IMongoCollection<MongoRecordAuthorityMatch> matchCollection = Utils.GetMongoCollection<MongoRecordAuthorityMatch>(providerConfig, MongoTableNames.RecordAssociations);
            MongoRecordAuthorityMatch matchDoc = Utils.ConvertType<MongoRecordAuthorityMatch>(raMatch);

            // Match on the cleaned partition/row key pair.
            List<DataFactory.Filter> keyFilters = new List<DataFactory.Filter>
            {
                new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(raMatch.PartitionKey), "eq"),
                new DataFactory.Filter("RowKey", Utils.CleanTableKey(raMatch.RowKey), "eq")
            };
            FilterDefinition<MongoRecordAuthorityMatch> mongoFilter = Utils.GenerateMongoFilter<MongoRecordAuthorityMatch>(keyFilters);

            // Replace the current document wholesale.
            matchCollection.ReplaceOne(mongoFilter, matchDoc);
            return string.Empty;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }

    //TODO return id of new object if supported
    return string.Empty;
}
/// <summary>
/// Persists updated auditable-event factors for a log entry. The target table
/// depends on the lookup type ("byuser" / "byday" / default actionable table).
/// Returns an empty string (no object id is currently surfaced).
/// </summary>
public static string UpdateAuditableEventFactors(DataConfig providerConfig, AuditLogEntry logEntry, string factors, string auditLookupType)
{
    string tableName = string.Empty;
    switch (providerConfig.ProviderType)
    {
        // AZURE
        case "azure.tableservice":
            switch (auditLookupType)
            {
                case "byuser": { tableName = AzureTableNames.O365AuditLogEntryActionableByUser; break; }
                case "byday": { tableName = AzureTableNames.O365AuditLogEntryActionableByDay; break; }
                default: { tableName = AzureTableNames.O365AuditLogEntryActionable; break; }
            }

            // Create a factors update object matching our LogEntry keys
            POCO.O365.AuditLogEntryFactorsUpdate factorsObject = new AuditLogEntryFactorsUpdate();
            factorsObject.PartitionKey = logEntry.PartitionKey;
            factorsObject.RowKey = logEntry.RowKey;
            factorsObject.Factors = factors;

            AzureAuditLogEntryFactorsUpdate az = new AzureAuditLogEntryFactorsUpdate(factorsObject);
            az.ETag = "*"; // unconditional merge — ignore optimistic concurrency
            CloudTable table = Utils.GetCloudTable(providerConfig, tableName);
            TableOperation merge = TableOperation.InsertOrMerge(az);

            // Execute the merge operation.
            Task tResult = table.ExecuteAsync(merge);
            tResult.Wait();
            break;

        // MONGO
        case "internal.mongodb":
            switch (auditLookupType)
            {
                case "byuser": { tableName = MongoTableNames.O365AuditLogEntryActionableByUser; break; }
                case "byday": { tableName = MongoTableNames.O365AuditLogEntryActionableByDay; break; }
                default: { tableName = MongoTableNames.O365AuditLogEntryActionable; break; }
            }

            // Create the update filter (match on the cleaned key pair)
            List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
            filters.Add(new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(logEntry.PartitionKey), "eq"));
            filters.Add(new DataFactory.Filter("RowKey", Utils.CleanTableKey(logEntry.RowKey), "eq"));
            FilterDefinition<MongoAuditLogEntryFactorsUpdate> filter = Utils.GenerateMongoFilter<MongoAuditLogEntryFactorsUpdate>(filters);

            IMongoCollection<MongoAuditLogEntryFactorsUpdate> collection = Utils.GetMongoCollection<MongoAuditLogEntryFactorsUpdate>(providerConfig, tableName);

            // BUG FIX: this branch previously wrote logEntry.Factors while the
            // Azure branch persisted the "factors" argument; both providers now
            // store the supplied factors value.
            var update = Builders<MongoAuditLogEntryFactorsUpdate>.Update
                .Set("Factors", factors);

            UpdateResult result = collection.UpdateOne(filter, update);
            return string.Empty;

        default:
            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
    }

    //TODO return id of new object if supported
    return string.Empty;
}
/// <summary>
/// HTTP GET: returns reverse named-entity associations matching the supplied
/// (JSON-serialized) NamedEntityFilter, de-duplicated by PartitionKey and
/// sorted ascending. Serialized JSON is returned in the response body.
/// </summary>
public IActionResult GetByFilter([FromQuery] string filter)
{
    string entityAsJson = "";
    List<POCO.RecordAssociationNamedEntityReverse> namedEntities = new List<POCO.RecordAssociationNamedEntityReverse>();
    try
    {
        _logger.LogInformation("CPAPI: Get By Named Entity Filter");

        // Deserialize the named entity filter
        NamedEntityFilter oFilter = new NamedEntityFilter();
        if (filter != null && filter.Length > 0)
        {
            _logger.LogDebug("Deserializing Named Entity filter of length: " + filter.Length);
            oFilter = JsonConvert.DeserializeObject<NamedEntityFilter>(filter);
        }

        // Build a ge/lt partition-key prefix range per requested entity.
        // (The key was previously cleaned twice; CleanTableKey is applied once.)
        List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
        if (oFilter.namedentities.Count > 0)
        {
            foreach (string nefilter in oFilter.namedentities)
            {
                string cleanKey = Utils.CleanTableKey(nefilter);
                DataFactory.Filter pkFilterGE = new DataFactory.Filter("PartitionKey", cleanKey, "ge");
                DataFactory.Filter pkFilterLT = new DataFactory.Filter("PartitionKey", Utils.GetLessThanFilter(cleanKey), "lt");
                filters.Add(pkFilterGE);
                filters.Add(pkFilterLT);
            }
        }

        DataFactory.DataConfig dataConfig = Utils.GetDataConfig();
        List<POCO.RecordAssociationNamedEntityReverse> namedentities =
            DataFactory.NamedEntity.GetReverseNamedEntities(dataConfig, filters);

        // Return only entities with distinct partition keys.
        // HashSet gives O(1) membership checks (was an O(n^2) List.Contains loop).
        HashSet<string> namedEntityKeys = new HashSet<string>();
        foreach (POCO.RecordAssociationNamedEntityReverse ne in namedentities)
        {
            if (namedEntityKeys.Add(ne.PartitionKey))
            {
                namedEntities.Add(ne);
            }
        }
        namedEntities.Sort((x, y) => String.Compare(x.PartitionKey, y.PartitionKey));

        // Serialize
        entityAsJson = JsonConvert.SerializeObject(namedEntities, Formatting.Indented);
    }
    catch (Exception ex)
    {
        string exceptionMsg = "Named Entity GET BY FILTER exception: " + ex.Message;
        if (ex.InnerException != null)
        {
            exceptionMsg = exceptionMsg + "[" + ex.InnerException.Message + "]";
        }
        _logger.LogError(exceptionMsg);
        return StatusCode((int)System.Net.HttpStatusCode.InternalServerError);
    }
    ObjectResult result = new ObjectResult(entityAsJson);
    return result;
}
/// <summary>
/// Builds the data-layer filter list for an actionable-events query. Which
/// physical key column holds the file/date/user value depends on the detected
/// filter type, since the by-day and by-user tables are keyed differently.
/// Returns an empty list when no filter is supplied.
/// </summary>
private List<DataFactory.Filter> GetActionableEventsDataFilter(ActionableEventsFilter eventsFilter)
{
    List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
    if (eventsFilter == null)
    {
        return filters;
    }

    // Map logical fields to physical key columns for this filter type.
    string filterType = GetActionableEventsDataFilterType(eventsFilter);
    string fileFieldName = string.Empty;
    string dateFieldName = string.Empty;
    string userFieldName = string.Empty;
    switch (filterType)
    {
        case "byfile":
            fileFieldName = "PartitionKey";
            dateFieldName = "CreationTime";
            userFieldName = "UserId";
            break;
        case "bydate":
            fileFieldName = "RowKey";
            dateFieldName = "PartitionKey";
            userFieldName = "UserId";
            break;
        case "byuser":
            fileFieldName = "RowKey";
            dateFieldName = "CreationTime";
            userFieldName = "PartitionKey";
            break;
        default:
            break;
    }

    // File path filter: ge/lt prefix range.
    if (eventsFilter.filepath != null && eventsFilter.filepath.Trim() != string.Empty)
    {
        // BUG FIX: the lower bound previously used the raw (uncleaned) path
        // while the upper bound was derived from a cleaned key, which could
        // produce a mismatched or empty range whenever cleaning alters the
        // key. Both bounds now derive from the same cleaned value.
        string cleanPath = Utils.CleanTableKey(eventsFilter.filepath.Trim());
        filters.Add(new DataFactory.Filter(fileFieldName, cleanPath, "ge"));
        filters.Add(new DataFactory.Filter(fileFieldName, Utils.GetLessThanFilter(cleanPath), "lt"));
    }

    // Check if both date range bounds have been provided
    if ((eventsFilter.datefrom != null && eventsFilter.datefrom.Trim() != string.Empty) &&
        (eventsFilter.dateto != null && eventsFilter.dateto.Trim() != string.Empty))
    {
        if (eventsFilter.datefrom.Trim() == eventsFilter.dateto.Trim())
        {
            // Same date on both ends: bound that single value with a ge/lt pair.
            DataFactory.Filter date = new DataFactory.Filter(dateFieldName, Utils.CleanTableKey(eventsFilter.datefrom.Trim()), "ge");
            filters.Add(date);
            DataFactory.Filter toDate = new DataFactory.Filter(dateFieldName, Utils.GetLessThanFilter(Utils.CleanTableKey(eventsFilter.dateto.Trim())), "lt");
            filters.Add(toDate);
        }
        else
        {
            // Distinct dates: inclusive ge/le range.
            DataFactory.Filter fromDate = new DataFactory.Filter(dateFieldName, Utils.CleanTableKey(eventsFilter.datefrom.Trim()), "ge");
            filters.Add(fromDate);
            DataFactory.Filter toDate = new DataFactory.Filter(dateFieldName, Utils.CleanTableKey(eventsFilter.dateto.Trim()), "le");
            filters.Add(toDate);
        }
    }
    else
    {
        // Open-ended range: apply whichever bound was supplied.
        if (eventsFilter.datefrom != null && eventsFilter.datefrom.Trim() != string.Empty)
        {
            DataFactory.Filter fromDate = new DataFactory.Filter(dateFieldName, Utils.CleanTableKey(eventsFilter.datefrom.Trim()), "ge");
            filters.Add(fromDate);
        }
        if (eventsFilter.dateto != null && eventsFilter.dateto.Trim() != string.Empty)
        {
            DataFactory.Filter toDate = new DataFactory.Filter(dateFieldName, Utils.CleanTableKey(eventsFilter.dateto.Trim()), "le");
            filters.Add(toDate);
        }
    }

    // Equality filters for users, operations and workloads.
    foreach (string userid in eventsFilter.userids)
    {
        if (userid.Trim() != string.Empty)
        {
            filters.Add(new DataFactory.Filter(userFieldName, Utils.CleanTableKey(userid.Trim()), "eq"));
        }
    }
    foreach (string operation in eventsFilter.operations)
    {
        if (operation.Trim() != string.Empty)
        {
            filters.Add(new DataFactory.Filter("Operation", operation, "eq"));
        }
    }
    foreach (string workload in eventsFilter.workloads)
    {
        if (workload.Trim() != string.Empty)
        {
            filters.Add(new DataFactory.Filter("Workload", workload, "eq"));
        }
    }
    return filters;
}
/// <summary>
/// HTTP GET: returns capture regex matches for the requested sensitive-data
/// types, sorted by descending confidence score. Serialized JSON is returned
/// in the response body.
/// </summary>
public IActionResult GetBySensitiveFilter([FromQuery] string filter)
{
    string entityAsJson = "";
    try
    {
        _logger.LogInformation("CPAPI: GetBySensitiveFilter");

        // Decode and deserialize the sensitive data filter.
        SensitiveFilter oFilter = new SensitiveFilter();
        if (filter != null && filter.Length > 0)
        {
            string filterDecoded = System.Net.WebUtility.HtmlDecode(filter);
            filterDecoded = System.Net.WebUtility.UrlDecode(filterDecoded);
            _logger.LogDebug("Deserializing filter of length: " + filterDecoded.Length);
            oFilter = JsonConvert.DeserializeObject<SensitiveFilter>(filterDecoded);
        }

        // One equality filter per requested (cleaned, non-empty) capture type.
        List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
        if (oFilter.types.Count > 0)
        {
            foreach (string type in oFilter.types)
            {
                string typeKey = Utils.CleanTableKey(type);
                if (typeKey != "")
                {
                    filters.Add(new DataFactory.Filter("CaptureType", typeKey, "eq"));
                }
            }
        }
        else
        {
            _logger.LogInformation("Sensitive Data query BLANK");
        }

        DataFactory.DataConfig dataConfig = Utils.GetDataConfig();
        List<POCO.CaptureRegexMatch> captureMatches = DataFactory.Ontology.GetRegexMatches(dataConfig, filters);

        // Highest-confidence matches first.
        captureMatches = captureMatches.OrderByDescending(x => x.ConfidenceScore).ToList();
        entityAsJson = JsonConvert.SerializeObject(captureMatches, Formatting.Indented);
    }
    catch (Exception ex)
    {
        string exceptionMsg = "Capture GET exception: " + ex.Message;
        if (ex.InnerException != null)
        {
            exceptionMsg = exceptionMsg + "[" + ex.InnerException.Message + "]";
        }
        _logger.LogError(exceptionMsg);
        return StatusCode((int)System.Net.HttpStatusCode.InternalServerError);
    }
    ObjectResult result = new ObjectResult(entityAsJson);
    return result;
}
/// <summary>
/// HTTP GET: returns FileDeleted audit events, optionally paged via the
/// CPDataPaging header, sorted by descending creation time.
/// </summary>
public IActionResult GetDeleted([FromQuery] string filter, [FromHeader] string CPDataPaging)
{
    string entityAsJson = "";
    try
    {
        _logger.LogInformation("CPAPI: GetDeleted");

        // Get any Paging data requests
        POCO.DataPaging dataPaging = new POCO.DataPaging();
        if (CPDataPaging != null && CPDataPaging != string.Empty)
        {
            _logger.LogDebug("Deserializing datapaging of length: " + CPDataPaging.Length);
            string pagingDecoded = System.Net.WebUtility.HtmlDecode(CPDataPaging);
            pagingDecoded = System.Net.WebUtility.UrlDecode(pagingDecoded);
            dataPaging = JsonConvert.DeserializeObject<POCO.DataPaging>(pagingDecoded);
        }

        DataFactory.DataConfig dataConfig = Utils.GetDataConfig();
        List<POCO.O365.AuditLogEntry> logEntries = new List<POCO.O365.AuditLogEntry>();
        List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
        string nextPageId = string.Empty;

        // BUG FIX: the FileDeleted filter was constructed but never added to
        // the filter list, so this endpoint returned ALL actionable events
        // rather than only deletions.
        DataFactory.Filter deleteFilter = new DataFactory.Filter("Operation", "FileDeleted", "eq");
        filters.Add(deleteFilter);

        logEntries = DataFactory.O365.AuditLog.GetActionableEvents(dataConfig, string.Empty, filters, string.Empty, Utils.GetMaxRows(), out nextPageId);

        // Sort the data by descending date
        logEntries.Sort((x, y) => DateTime.Compare(y.CreationDateTime, x.CreationDateTime));

        // Page the data only when a data page has been specified
        PagedData paged = new PagedData();
        if (logEntries.Count > 0 && dataPaging.page > 0 && dataPaging.perPage > 0)
        {
            List<POCO.O365.AuditLogEntry> pageOfData = new List<POCO.O365.AuditLogEntry>();

            // Check that the requested data is in range
            dataPaging.page--; // pages are zero-based to calculate the correct range
            int startOfPage = (dataPaging.page * dataPaging.perPage);
            if (logEntries.Count > startOfPage + dataPaging.perPage - 1)
            {
                pageOfData = logEntries.GetRange(startOfPage, dataPaging.perPage);
            }
            else
            {
                _logger.LogError("Data paging request out of range");
                return StatusCode((int)System.Net.HttpStatusCode.BadRequest);
            }
            paged.data = pageOfData;
            paged.totalRecords = logEntries.Count;
            entityAsJson = JsonConvert.SerializeObject(paged, Formatting.Indented);
        }
        else
        {
            paged.data = logEntries;
            paged.totalRecords = logEntries.Count;
            entityAsJson = JsonConvert.SerializeObject(paged, Formatting.Indented);
        }
    }
    catch (Exception ex)
    {
        string exceptionMsg = "Record GET exception: " + ex.Message;
        if (ex.InnerException != null)
        {
            exceptionMsg = exceptionMsg + "[" + ex.InnerException.Message + "]";
        }
        _logger.LogError(exceptionMsg);
        return StatusCode((int)System.Net.HttpStatusCode.InternalServerError);
    }
    ObjectResult result = new ObjectResult(entityAsJson);
    return result;
}
/// <summary>
/// HTTP GET: returns FileDeleted audit events for the items named in the
/// supplied RecordFilter, optionally paged via the CPDataPaging header,
/// sorted by descending creation time.
/// </summary>
public IActionResult GetDeletedByItemFilter([FromQuery] string filter, [FromHeader] string CPDataPaging)
{
    string entityAsJson = "";
    try
    {
        _logger.LogInformation("CPAPI: GetDeletedByItemFilter");

        // Deserialize the item filter
        RecordFilter oFilter = new RecordFilter();
        if (filter != null && filter.Length > 0)
        {
            _logger.LogDebug("Deserializing Item filter of length: " + filter.Length);
            oFilter = JsonConvert.DeserializeObject<RecordFilter>(filter);
        }

        // Get any Paging data requests
        POCO.DataPaging dataPaging = new POCO.DataPaging();
        if (CPDataPaging != null && CPDataPaging != string.Empty)
        {
            _logger.LogDebug("Deserializing datapaging of length: " + CPDataPaging.Length);
            string pagingDecoded = System.Net.WebUtility.HtmlDecode(CPDataPaging);
            pagingDecoded = System.Net.WebUtility.UrlDecode(pagingDecoded);
            dataPaging = JsonConvert.DeserializeObject<POCO.DataPaging>(pagingDecoded);
        }

        // Create filters
        List<DataFactory.Filter> filters = new List<DataFactory.Filter>();

        // BUG FIX: the FileDeleted filter was constructed but never added to
        // the filter list, so this endpoint returned all events for the items
        // rather than only deletions.
        DataFactory.Filter deleteFilter = new DataFactory.Filter("Operation", "FileDeleted", "eq");
        filters.Add(deleteFilter);

        if (oFilter.records.Count > 0)
        {
            foreach (RecordItemFilter rif in oFilter.records)
            {
                string cleanFilterPKey = Utils.CleanTableKey(rif.itemuri);
                if (cleanFilterPKey != "")
                {
                    // Prefix range over the item URI.
                    DataFactory.Filter f1 = new DataFactory.Filter("PartitionKey", cleanFilterPKey, "ge");
                    filters.Add(f1);
                    DataFactory.Filter f2 = new DataFactory.Filter("PartitionKey", Utils.GetLessThanFilter(cleanFilterPKey), "lt");
                    filters.Add(f2);
                }
            }
        }
        else
        {
            _logger.LogInformation("RecordFilter query BLANK");
        }

        DataFactory.DataConfig dataConfig = Utils.GetDataConfig();
        List<POCO.O365.AuditLogEntry> logEntries = new List<POCO.O365.AuditLogEntry>();
        string nextPageId = string.Empty;
        logEntries = DataFactory.O365.AuditLog.GetActionableEvents(dataConfig, string.Empty, filters, dataPaging.thisPageId, Utils.GetMaxRows(), out nextPageId);

        // Sort the data by descending date
        logEntries.Sort((x, y) => DateTime.Compare(y.CreationDateTime, x.CreationDateTime));

        // Page the data only when a data page has been specified.
        // BUG FIX: paging previously ran whenever any rows existed, so a
        // request without paging values computed an empty/invalid range; the
        // guard now matches GetDeleted.
        PagedData paged = new PagedData();
        if (logEntries.Count > 0 && dataPaging.page > 0 && dataPaging.perPage > 0)
        {
            List<POCO.O365.AuditLogEntry> pageOfData = new List<POCO.O365.AuditLogEntry>();

            // Check that the requested data is in range
            dataPaging.page--; // pages are zero-based to calculate the correct range
            int startOfPage = (dataPaging.page * dataPaging.perPage);
            if (logEntries.Count > startOfPage + dataPaging.perPage - 1)
            {
                pageOfData = logEntries.GetRange(startOfPage, dataPaging.perPage);
            }
            else
            {
                _logger.LogError("Data paging request out of range");
                return StatusCode((int)System.Net.HttpStatusCode.BadRequest);
            }
            paged.data = pageOfData;
            paged.totalRecords = logEntries.Count;
            entityAsJson = JsonConvert.SerializeObject(paged, Formatting.Indented);
        }
        else
        {
            paged.data = logEntries;
            paged.totalRecords = logEntries.Count;
            entityAsJson = JsonConvert.SerializeObject(paged, Formatting.Indented);
        }
    }
    catch (Exception ex)
    {
        string exceptionMsg = "Record GET exception: " + ex.Message;
        if (ex.InnerException != null)
        {
            exceptionMsg = exceptionMsg + "[" + ex.InnerException.Message + "]";
        }
        _logger.LogError(exceptionMsg);
        return StatusCode((int)System.Net.HttpStatusCode.InternalServerError);
    }
    ObjectResult result = new ObjectResult(entityAsJson);
    return result;
}
/// <summary>
/// HTTP GET: returns audit log entries for the items named in the supplied
/// RecordFilter, querying the current and previous month's log tables,
/// sorted by descending creation time.
/// </summary>
public IActionResult GetByItemFilter([FromQuery] string filter)
{
    string entityAsJson = "";
    try
    {
        _logger.LogInformation("CPAPI: GetByItemFilter");

        // Decode and deserialize the item filter.
        RecordFilter oFilter = new RecordFilter();
        if (filter != null && filter.Length > 0)
        {
            string filterDecoded = System.Net.WebUtility.HtmlDecode(filter);
            filterDecoded = System.Net.WebUtility.UrlDecode(filterDecoded);
            _logger.LogDebug("Deserializing Item filter of length: " + filterDecoded.Length);
            oFilter = JsonConvert.DeserializeObject<RecordFilter>(filterDecoded);
        }

        // One PartitionKey equality filter per requested (non-empty) item URI.
        List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
        if (oFilter.records.Count > 0)
        {
            foreach (RecordItemFilter rif in oFilter.records)
            {
                string cleanFilterPKey = Utils.CleanTableKey(rif.itemuri);
                if (cleanFilterPKey != "")
                {
                    filters.Add(new DataFactory.Filter("PartitionKey", cleanFilterPKey, "eq"));
                }
            }
        }
        else
        {
            _logger.LogInformation("RecordFilter query BLANK");
        }

        DataFactory.DataConfig dataConfig = Utils.GetDataConfig();

        // Query the monthly-suffixed tables for this month and last month.
        List<POCO.O365.AuditLogEntry> logEntries = new List<POCO.O365.AuditLogEntry>();
        DateTime today = DateTime.UtcNow;
        for (int monthOffset = 0; monthOffset < 2; monthOffset++)
        {
            DateTime tableSuffixDate = today.AddMonths(-1 * monthOffset);
            string tableSuffix = tableSuffixDate.ToString(Utils.TableSuffixDateFormatYM);
            List<POCO.O365.AuditLogEntry> monthEntries = DataFactory.O365.AuditLog.GetForItem(dataConfig, filters, tableSuffix);
            logEntries.AddRange(monthEntries);
        }

        // Most recent events first.
        logEntries.Sort((x, y) => DateTime.Compare(y.CreationDateTime, x.CreationDateTime));
        entityAsJson = JsonConvert.SerializeObject(logEntries, Formatting.Indented);
    }
    catch (Exception ex)
    {
        string exceptionMsg = "Record GET exception: " + ex.Message;
        if (ex.InnerException != null)
        {
            exceptionMsg = exceptionMsg + "[" + ex.InnerException.Message + "]";
        }
        _logger.LogError(exceptionMsg);
        return StatusCode((int)System.Net.HttpStatusCode.InternalServerError);
    }
    ObjectResult result = new ObjectResult(entityAsJson);
    return result;
}