Example #1
        public static bool AddRegexMatchs(DataConfig providerConfig, List <CaptureRegexMatch> matches)
        {
            bool isSaved = false;

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":
                List <TableOperation> ops   = new List <TableOperation>();
                CloudTable            table = Utils.GetCloudTable(providerConfig, AzureTableNames.RecordAssociationRegexMatch);

                foreach (POCO.CaptureRegexMatch match in matches)
                {
                    AzureRegexMatch az        = new AzureRegexMatch(match);
                    TableOperation  operation = TableOperation.InsertOrReplace(az);

                    Task tUpdate = table.ExecuteAsync(operation);
                    tUpdate.Wait();
                }

                break;

            case "internal.mongodb":

                IMongoCollection <MongoRegexMatchUpsert> collection = Utils.GetMongoCollection <MongoRegexMatchUpsert>(providerConfig, MongoTableNames.RecordAssociationRegexMatch);

                foreach (POCO.CaptureRegexMatch match in matches)
                {
                    MongoRegexMatchUpsert mongoObject = Utils.ConvertType <MongoRegexMatchUpsert>(match);


                    // Create the update filter
                    List <DataFactory.Filter> filters  = new List <DataFactory.Filter>();
                    DataFactory.Filter        pkFilter = new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(mongoObject.PartitionKey), "eq");
                    DataFactory.Filter        rkFilter = new DataFactory.Filter("RowKey", Utils.CleanTableKey(mongoObject.RowKey), "eq");
                    filters.Add(pkFilter);
                    filters.Add(rkFilter);
                    FilterDefinition <MongoRegexMatchUpsert> filter = Utils.GenerateMongoFilter <MongoRegexMatchUpsert>(filters);

                    // Create the upsert options
                    MongoDB.Driver.ReplaceOptions options = new ReplaceOptions();
                    options.IsUpsert = true;

                    // Upsert
                    collection.ReplaceOne(filter, mongoObject, options);
                }

                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            isSaved = true;
            return(isSaved);
        }
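The MongoDB branch above leans on Utils.GetMongoCollection and Utils.GenerateMongoFilter, which are not shown in this listing. A minimal sketch of the same upsert-by-key pattern using only the stock MongoDB.Driver API; RegexMatchDoc is a hypothetical stand-in for MongoRegexMatchUpsert.

        // Requires: using MongoDB.Driver;
        // Hypothetical document type; the real shape of MongoRegexMatchUpsert is not shown above
        public class RegexMatchDoc
        {
            public string PartitionKey { get; set; }
            public string RowKey { get; set; }
            public string Pattern { get; set; }
        }

        // Upsert one document keyed by PartitionKey + RowKey, mirroring the
        // filter-then-ReplaceOne(IsUpsert = true) pattern used in AddRegexMatchs
        public static void UpsertRegexMatch(IMongoCollection <RegexMatchDoc> collection, RegexMatchDoc doc)
        {
            FilterDefinition <RegexMatchDoc> filter =
                Builders <RegexMatchDoc> .Filter.Eq(d => d.PartitionKey, doc.PartitionKey) &
                Builders <RegexMatchDoc> .Filter.Eq(d => d.RowKey, doc.RowKey);

            collection.ReplaceOne(filter, doc, new ReplaceOptions {
                IsUpsert = true
            });
        }

With IsUpsert set, ReplaceOne inserts the document when nothing matches the key filter, so first writes and rewrites go through the same call.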
Example #2
        public static List <POCO.FileProcessException> GetFileProcessException(DataConfig providerConfig, POCO.System system, string exceptionType)
        {
            string tableName = string.Empty;

            List <POCO.FileProcessException> docs = new List <POCO.FileProcessException>();

            // Create filter
            List <Filter> filters = new List <Filter>();
            Filter        pkfilt  = new Filter("PartitionKey", Utilities.Converters.CleanTableKey(exceptionType), "eq");

            filters.Add(pkfilt);

            switch (providerConfig.ProviderType)
            {
            case ProviderType.Azure:

                // Calculate table name based on System Id and file process result (PartitionKey)
                tableName = "stlpfileexception-" + Utilities.Converters.CleanTableKey(exceptionType) + "-" + system.SystemId.ToString();

                string combinedFilter = Utils.GenerateAzureFilter(filters);

                List <AzureFileProcessException> azdata = new List <AzureFileProcessException>();
                AzureTableAdaptor <AzureFileProcessException> adaptor = new AzureTableAdaptor <AzureFileProcessException>();
                azdata = adaptor.ReadTableData(providerConfig, tableName, combinedFilter);

                foreach (var doc in azdata)
                {
                    docs.Add(doc.Value);
                }

                break;

            case ProviderType.Mongo:

                // Calculate table name based on System Id and file process result (PartitionKey)
                tableName = "fileexception-" + Utilities.Converters.CleanTableKey(exceptionType) + "-" + system.SystemId.ToString();

                var collection = Utils.GetMongoCollection <MongoFileProcessException_read>(providerConfig, tableName);

                FilterDefinition <MongoFileProcessException_read> filter = Utils.GenerateMongoFilter <MongoFileProcessException_read>(filters);

                var documents = collection.Find(filter).ToList();

                foreach (var doc in documents)
                {
                    docs.Add(doc);
                }
                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            return(docs);
        }
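The Azure branch reads through an AzureTableAdaptor helper that is not shown. A rough sketch of what reading one partition boils down to with the classic table SDK, using a placeholder TableEntity type in place of AzureFileProcessException.

        // Requires: using Microsoft.Azure.Cosmos.Table; using System.Collections.Generic; using System.Threading.Tasks;
        // Placeholder entity; the real AzureFileProcessException type is not shown in this listing
        public class FileProcessExceptionEntity : TableEntity
        {
            public string Message { get; set; }
        }

        // Read every row of one partition, following continuation tokens, which is roughly
        // what a helper like AzureTableAdaptor.ReadTableData has to do internally
        public static async Task <List <FileProcessExceptionEntity> > ReadPartitionAsync(CloudTable table, string partitionKey)
        {
            string combinedFilter = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, partitionKey);
            TableQuery <FileProcessExceptionEntity> query = new TableQuery <FileProcessExceptionEntity>().Where(combinedFilter);

            List <FileProcessExceptionEntity> results = new List <FileProcessExceptionEntity>();
            TableContinuationToken            token   = null;

            do
            {
                TableQuerySegment <FileProcessExceptionEntity> segment = await table.ExecuteQuerySegmentedAsync(query, token);
                results.AddRange(segment.Results);
                token = segment.ContinuationToken;
            } while (token != null);

            return(results);
        }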
Example #3
        public static List <POCO.SakaiFile> GetFiles(DataConfig providerConfig, List <Filter> filters, string thisPageId, int rowLimit, out string nextPageId)
        {
            nextPageId = string.Empty;
            List <POCO.SakaiFile> files = new List <POCO.SakaiFile>();

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                throw new NotImplementedException();

            case "internal.mongodb":
                var collection = Utils.GetMongoCollection <MongoSakaiFile>(providerConfig, MongoTableNames.SakaiFiles);

                // Add an _id filter if a page has been requested
                if (thisPageId != null && thisPageId != string.Empty)
                {
                    filters.Insert(0, new Filter("_id", thisPageId, "gt"));
                }

                FilterDefinition <MongoSakaiFile> filter = Utils.GenerateMongoFilter <MongoSakaiFile>(filters);

                //DEBUG output the filter values
                //foreach (Castlepoint.DataFactory.Filter debugFilter in filters)
                //{
                //    // Output the filter field names and values
                //    Console.WriteLine("DEBUG filter: " + debugFilter.FieldName + " : " + debugFilter.FieldValue);
                //}
                var documents = collection.Find(filter).Sort("{\"_id\":1}").Limit(rowLimit).ToList();


                foreach (var sakaifile in documents)
                {
                    files.Add(sakaifile);
                }

                // Get the next page id
                if (documents.Count == rowLimit)
                {
                    // Set the next page id
                    nextPageId = documents[documents.Count - 1]._id.ToString();
                }


                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            return(files);
        }
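A usage sketch for the paging contract above: pass the returned nextPageId back in as thisPageId until it comes back empty, which happens once a page returns fewer than rowLimit rows. providerConfig, systemPartitionKey and ProcessFile are placeholders assumed to exist in the caller; note that GetFiles inserts an "_id" filter into the list it is given, so the filters are rebuilt for each page.

            string thisPageId = string.Empty;
            string nextPageId = string.Empty;

            do
            {
                // Build the business filters fresh each page, because GetFiles mutates the list
                List <Filter> pageFilters = new List <Filter>();
                pageFilters.Add(new Filter("PartitionKey", systemPartitionKey, "eq"));

                List <POCO.SakaiFile> page = GetFiles(providerConfig, pageFilters, thisPageId, 500, out nextPageId);
                foreach (POCO.SakaiFile file in page)
                {
                    ProcessFile(file);    // placeholder for the per-file work
                }

                thisPageId = nextPageId;
            } while (nextPageId != string.Empty);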
        public static List <POCO.BasecampProjectEntity> GetProjects(DataConfig providerConfig, POCO.System system, POCO.BasecampEntity basecampProject)
        {
            List <Filter> filters  = new List <Filter>();
            Filter        pkFilter = new Filter("PartitionKey", Utils.CleanTableKey(system.PartitionKey), "eq");
            Filter        rkFilter = new Filter("RowKey", Utils.CleanTableKey(basecampProject.url), "eq");

            filters.Add(pkFilter);
            filters.Add(rkFilter);

            return(BasecampProject.GetProjects(providerConfig, filters));
        }
Example #5
        public static bool ForceRescan(DataConfig providerConfig, POCO.SakaiFile file)
        {
            POCO.FileBatchStatus batchstatus = new FileBatchStatus(file.PartitionKey, file.RowKey);
            batchstatus.BatchGuid             = Guid.Empty;
            batchstatus.BatchStatus           = string.Empty;
            batchstatus.JsonFileProcessResult = "{}";

            DataFactory.File.UpdateFileBatchStatus(providerConfig, batchstatus, "ntfsfiles");

            return(true);
        }
Example #6
        public static List <POCO.LogClassification> GetLogClassification(DataConfig providerConfig, List <Filter> filters, string tableNameSuffix_yyyymm)
        {
            // Validate the YYYYMM suffix
            if (tableNameSuffix_yyyymm.Length != 6)
            {
                throw new ApplicationException("Invalid size for table suffix, must be YYYYMM");
            }

            string tableName = string.Empty;

            List <POCO.LogClassification> docs = new List <POCO.LogClassification>();

            switch (providerConfig.ProviderType)
            {
            case ProviderType.Azure:

                tableName = AzureTableNames.LogClassification + tableNameSuffix_yyyymm;

                string combinedFilter = Utils.GenerateAzureFilter(filters);

                List <AzureLogClassification> azdata = new List <AzureLogClassification>();
                AzureTableAdaptor <AzureLogClassification> adaptor = new AzureTableAdaptor <AzureLogClassification>();
                azdata = adaptor.ReadTableData(providerConfig, tableName, combinedFilter);

                foreach (var doc in azdata)
                {
                    docs.Add(doc.Value);
                }

                break;

            case ProviderType.Mongo:

                tableName = MongoTableNames.LogClassification + tableNameSuffix_yyyymm;

                var collection = Utils.GetMongoCollection <MongoLogClassification_Read>(providerConfig, tableName);

                FilterDefinition <MongoLogClassification_Read> filter = Utils.GenerateMongoFilter <MongoLogClassification_Read>(filters);

                var documents = collection.Find(filter).ToList();

                foreach (var doc in documents)
                {
                    docs.Add(doc);
                }
                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            return(docs);
        }
Example #7
        public static string AddSite(DataConfig providerConfig, POCO.System system, POCO.SakaiSite sourceSite)
        {
            POCO.SakaiSiteEntity siteEntity = new SakaiSiteEntity(Utils.CleanTableKey(system.PartitionKey), Utils.CleanTableKey(sourceSite.SITE_ID));
            siteEntity.CPStatus          = string.Empty;
            siteEntity.ItemCount         = 0;
            siteEntity.Name              = sourceSite.TITLE;
            siteEntity.ServerRelativeUrl = sourceSite.SITE_ID;
            siteEntity.TimeCreated       = sourceSite.CREATEDON.ToUniversalTime();
            siteEntity.TimeLastModified  = sourceSite.MODIFIEDON.ToUniversalTime();

            return(SakaiSite.AddSite(providerConfig, siteEntity));
        }
Example #8
        public static List <POCO.SakaiSiteEntity> GetSites(DataConfig providerConfig, POCO.System system, POCO.SakaiSite site)
        {
            List <Filter> filters  = new List <Filter>();
            Filter        pkFilter = new Filter("PartitionKey", Utils.CleanTableKey(system.PartitionKey), "eq");

            filters.Add(pkFilter);
            Filter rkFilter = new Filter("RowKey", Utils.CleanTableKey(site.SITE_ID), "eq");

            filters.Add(rkFilter);

            return(SakaiSite.GetSites(providerConfig, filters));
        }
Example #9
        public static List <POCO.RecordAuthorityFilter> GetAuthorityFilters(DataConfig providerConfig, POCO.RecordAuthorityFilter rafilt)
        {
            List <Filter> filters  = new List <Filter>();
            Filter        pkFilter = new Filter("PartitionKey", rafilt.PartitionKey, "eq");

            filters.Add(pkFilter);
            Filter rkFilter = new Filter("RowKey", rafilt.RowKey, "eq");

            filters.Add(rkFilter);

            return(GetAssignedForSystem(providerConfig, filters));
        }
Example #10
        public static string AddProject(DataConfig providerConfig, POCO.System system, POCO.BasecampEntity sourceProject)
        {
            POCO.BasecampProjectEntity projectEntity = new BasecampProjectEntity(Utils.CleanTableKey(system.PartitionKey), Utils.CleanTableKey(sourceProject.url));
            projectEntity.CPStatus          = string.Empty;
            projectEntity.ItemCount         = 0;
            projectEntity.Name              = sourceProject.name;
            projectEntity.ServerRelativeUrl = sourceProject.url;
            projectEntity.TimeCreated       = sourceProject.created_at.ToUniversalTime();
            projectEntity.TimeLastModified  = sourceProject.updated_at.ToUniversalTime();

            return(BasecampProject.AddProject(providerConfig, projectEntity));
        }
Example #11
        public static bool UpdateRecordConfig(DataConfig providerConfig, POCO.System system, string config)
        {
            bool isUpdatedOk = false;

            POCO.RecordConfigUpdate updateRecordCfg = new RecordConfigUpdate();
            updateRecordCfg.PartitionKey = system.PartitionKey;
            updateRecordCfg.RowKey       = system.RowKey;
            updateRecordCfg.JsonRecordIdentificationConfig = config;

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                AzureRecordIdentificationConfig az = new AzureRecordIdentificationConfig(updateRecordCfg);

                CloudTable     table     = Utils.GetCloudTable(providerConfig, System.TableNames.System);
                TableOperation operation = TableOperation.InsertOrMerge(az);
                Task           tUpdate   = table.ExecuteAsync(operation);
                tUpdate.Wait();

                isUpdatedOk = true;

                break;

            case "internal.mongodb":
                IMongoCollection <MongoRecordIdentificationConfig> collection = Utils.GetMongoCollection <MongoRecordIdentificationConfig>(providerConfig, System.TableNames.System);
                //MongoSystemStatUpdate mongoObject = Utils.ConvertType<MongoSystemStatUpdate>(systemStat);

                // Create the update filter
                List <DataFactory.Filter> filters  = new List <DataFactory.Filter>();
                DataFactory.Filter        pkFilter = new DataFactory.Filter("PartitionKey", updateRecordCfg.PartitionKey, "eq");
                DataFactory.Filter        rkFilter = new DataFactory.Filter("RowKey", updateRecordCfg.RowKey, "eq");
                filters.Add(pkFilter);
                filters.Add(rkFilter);
                FilterDefinition <MongoRecordIdentificationConfig> filter = Utils.GenerateMongoFilter <MongoRecordIdentificationConfig>(filters);

                var update = Builders <MongoRecordIdentificationConfig> .Update
                             .Set("JsonRecordIdentificationConfig", updateRecordCfg.JsonRecordIdentificationConfig);

                // Apply the record identification config update
                UpdateResult result = collection.UpdateOne(filter, update);

                isUpdatedOk = true;

                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }
            return(isUpdatedOk);
        }
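The MongoDB branch uses the driver's field-level update (Builders<T>.Update.Set plus UpdateOne) rather than a full document replace, a pattern that recurs in several of the examples below. A stripped-down sketch with a hypothetical SystemDoc type standing in for MongoRecordIdentificationConfig.

        // Requires: using MongoDB.Driver;
        // Hypothetical document type; only the three fields used here are assumed
        public class SystemDoc
        {
            public string PartitionKey { get; set; }
            public string RowKey { get; set; }
            public string JsonRecordIdentificationConfig { get; set; }
        }

        // Set a single field on the document matching PartitionKey + RowKey and report
        // whether the server acknowledged the update and actually matched a document
        public static bool SetRecordConfig(IMongoCollection <SystemDoc> collection, string partitionKey, string rowKey, string configJson)
        {
            FilterDefinition <SystemDoc> filter =
                Builders <SystemDoc> .Filter.Eq(d => d.PartitionKey, partitionKey) &
                Builders <SystemDoc> .Filter.Eq(d => d.RowKey, rowKey);

            UpdateDefinition <SystemDoc> update =
                Builders <SystemDoc> .Update.Set(d => d.JsonRecordIdentificationConfig, configJson);

            UpdateResult result = collection.UpdateOne(filter, update);
            return(result.IsAcknowledged && result.MatchedCount > 0);
        }

Unlike UpdateRecordConfig above, which returns true unconditionally, checking MatchedCount lets the caller notice when no matching system record exists.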
Example #12
        public static string AddFileProcessingException(DataConfig providerConfig, POCO.System system, POCO.FileProcessException fileException)
        {
            string tableName = string.Empty;

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                // Calculate table name based on System Id and file process result (PartitionKey)
                tableName = "stlpfileexception-" + fileException.PartitionKey + "-" + system.SystemId.ToString();

                AzureFileProcessException az = new AzureFileProcessException(fileException);

                CloudTable     table     = Utils.GetCloudTable(providerConfig, tableName);
                TableOperation operation = TableOperation.InsertOrReplace(az);
                Task           tUpdate   = table.ExecuteAsync(operation);
                tUpdate.Wait();

                break;

            case "internal.mongodb":

                // Calculate table name based on System Id and file process result (PartitionKey)
                tableName = "fileexception-" + fileException.PartitionKey + "-" + system.SystemId.ToString();

                IMongoCollection <MongoFileProcessException> collection = Utils.GetMongoCollection <MongoFileProcessException>(providerConfig, tableName);
                MongoFileProcessException mongoObject = Utils.ConvertType <MongoFileProcessException>(fileException);

                // Create the upsert filter
                List <DataFactory.Filter> filters  = new List <DataFactory.Filter>();
                DataFactory.Filter        pkFilter = new DataFactory.Filter("PartitionKey", fileException.PartitionKey, "eq");
                DataFactory.Filter        rkFilter = new DataFactory.Filter("RowKey", fileException.RowKey, "eq");
                filters.Add(pkFilter);
                filters.Add(rkFilter);
                FilterDefinition <MongoFileProcessException> filter = Utils.GenerateMongoFilter <MongoFileProcessException>(filters);

                // Create the upsert options
                MongoDB.Driver.ReplaceOptions options = new ReplaceOptions();
                options.IsUpsert = true;

                // Upsert
                collection.ReplaceOne(filter, mongoObject, options);
                return(string.Empty);

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            //TODO return id of new object if supported
            return(string.Empty);
        }
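Both branches above depend on Utils.GetCloudTable, which is not shown. A plausible sketch with the classic table SDK, assuming the helper is handed a storage connection string; the connectionString parameter is an assumption, since only GetCloudTable(providerConfig, tableName) appears in these examples.

        // Requires: using Microsoft.Azure.Cosmos.Table;
        // Resolve (and lazily create) a table by name
        public static CloudTable GetCloudTable(string connectionString, string tableName)
        {
            CloudStorageAccount account = CloudStorageAccount.Parse(connectionString);
            CloudTableClient    client  = account.CreateCloudTableClient();
            CloudTable          table   = client.GetTableReference(tableName);

            table.CreateIfNotExistsAsync().Wait();
            return(table);
        }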
Example #13
        public static bool SetConfigEnabled(DataConfig providerConfig, string systemPartitionKey, string systemRowKey, bool enableSystem)
        {
            bool isUpdatedOk = false;

            POCO.SystemEnabledUpdate updateEnabled = new POCO.SystemEnabledUpdate();
            updateEnabled.PartitionKey = systemPartitionKey;
            updateEnabled.RowKey       = systemRowKey;
            updateEnabled.Enabled      = enableSystem;

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                AzureSystemEnabledUpdate az = new AzureSystemEnabledUpdate(updateEnabled);

                CloudTable     table     = Utils.GetCloudTable(providerConfig, System.TableNames.System);
                TableOperation operation = TableOperation.InsertOrMerge(az);
                Task           tUpdate   = table.ExecuteAsync(operation);
                tUpdate.Wait();

                isUpdatedOk = true;

                break;

            case "internal.mongodb":
                IMongoCollection <MongoSystemEnabledUpdate> collection = Utils.GetMongoCollection <MongoSystemEnabledUpdate>(providerConfig, System.TableNames.System);

                // Create the update filter
                List <DataFactory.Filter> filters  = new List <DataFactory.Filter>();
                DataFactory.Filter        pkFilter = new DataFactory.Filter("PartitionKey", updateEnabled.PartitionKey, "eq");
                DataFactory.Filter        rkFilter = new DataFactory.Filter("RowKey", updateEnabled.RowKey, "eq");
                filters.Add(pkFilter);
                filters.Add(rkFilter);
                FilterDefinition <MongoSystemEnabledUpdate> filter = Utils.GenerateMongoFilter <MongoSystemEnabledUpdate>(filters);

                var update = Builders <MongoSystemEnabledUpdate> .Update
                             .Set("Enabled", updateEnabled.Enabled);

                // Apply the enabled-flag update
                UpdateResult result = collection.UpdateOne(filter, update);

                isUpdatedOk = true;

                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }
            return(isUpdatedOk);
        }
        public static string AddMessage(DataConfig providerConfig, POCO.System system, POCO.GFIMailbox mailbox, POCO.GFIMessage message)
        {
            string partitionKey = system.PartitionKey;

            POCO.GFIMessageEntity docEntity = new GFIMessageEntity(Utils.CleanTableKey(partitionKey), Utils.CleanTableKey(message.MarcId.ToString()));
            docEntity.CPStatus          = string.Empty;
            docEntity.ItemCount         = 0;
            docEntity.Name              = message.Subject;
            docEntity.ServerRelativeUrl = string.Empty;
            docEntity.TimeCreated       = message.SentDate.ToUniversalTime();
            docEntity.TimeLastModified  = message.ReceivedDate.ToUniversalTime();

            return(GFIArchiverMailbox.AddMessage(providerConfig, docEntity));
        }
Example #15
        public static void UpdateFileBatchStatus(DataConfig providerConfig, POCO.FileBatchStatus fileBatchStatus)
        {
            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":
                DataFactory.File.UpdateFileBatchStatus(providerConfig, fileBatchStatus, AzureTableNames.EOMessage);
                break;

            case "internal.mongodb":
                DataFactory.File.UpdateFileBatchStatus(providerConfig, fileBatchStatus, MongoTableNames.EOMessage);
                break;
            }
            return;
        }
        public static void Delete(DataConfig providerConfig, POCO.ProcessingBatchStatus processingBatchStatus)
        {
            List <Filter> filters = new List <Filter>();
            Filter        pk      = new Filter("PartitionKey", processingBatchStatus.PartitionKey, "eq");

            filters.Add(pk);
            Filter rk = new Filter("RowKey", processingBatchStatus.RowKey, "eq");

            filters.Add(rk);
            Filter typekey = new Filter("ProcessType", processingBatchStatus.ProcessType, "eq");

            filters.Add(typekey);

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                AzureProcessingBatchStatus az = new AzureProcessingBatchStatus(processingBatchStatus);
                az.ETag = "*";

                CloudTable     table     = Utils.GetCloudTable(providerConfig, AzureTableNames.SystemProcessingStatus);
                TableOperation operation = TableOperation.Delete(az);

                Task <TableResult> tDelete = table.ExecuteAsync(operation);
                tDelete.Wait();

                // Check for the 204 (No Content) success code
                if (tDelete.Result.HttpStatusCode != 204)
                {
                    // TODO
                    bool isNotDeleted = true;
                }

                break;

            case "internal.mongodb":
                FilterDefinition <BsonDocument> filter = Utils.GenerateMongoFilter <BsonDocument>(filters);

                // Delete the rows
                IMongoCollection <BsonDocument> collection = Utils.GetMongoCollection <BsonDocument>(providerConfig, MongoTableNames.SystemProcessingStatus);
                DeleteResult result = collection.DeleteMany(filter);

                return;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }
            return;
        }
Example #17
        public static string AddFile(DataConfig providerConfig, POCO.System system, POCO.BasecampEntity sourceProject, POCO.BasecampDocument sourceDocument)
        {
            string partitionKey = GetBasecampBucketUrl(sourceProject.url);

            POCO.BasecampDocumentEntity docEntity = new BasecampDocumentEntity(Utils.CleanTableKey(partitionKey), Utils.CleanTableKey(sourceDocument.url));
            docEntity.CPStatus          = string.Empty;
            docEntity.ItemCount         = 0;
            docEntity.Name              = sourceDocument.title;
            docEntity.ServerRelativeUrl = sourceDocument.url;
            docEntity.TimeCreated       = sourceDocument.created_at.ToUniversalTime();
            docEntity.TimeLastModified  = sourceDocument.updated_at.ToUniversalTime();
            docEntity.Title             = sourceDocument.title;

            return(BasecampProject.AddFile(providerConfig, docEntity));
        }
Example #18
        public static void AddFileBatchStatus(DataConfig providerConfig, POCO.FileBatchStatus fileBatchStatus)
        {
            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":
                DataFactory.File.AddFileBatchStatus(providerConfig, fileBatchStatus, AzureTableNames.EOMessageBatchStatus);
                break;

            case "internal.mongodb":
                DataFactory.File.AddFileBatchStatus(providerConfig, fileBatchStatus, MongoTableNames.EOMessageBatchStatus);
                break;
            }

            //TODO return id of new object if supported
            return;
        }
Example #19
        public static List <POCO.CaptureRegexMatch> GetRegexMatches(DataConfig providerConfig, POCO.System system, string fileUri)
        {
            // Create a filter to match the Ontology provided
            List <Filter> filters  = new List <Filter>();
            Filter        pkFilter = new Filter("PartitionKey", system.PartitionKey, "eq");

            filters.Add(pkFilter);
            string rowkey = fileUri;
            Filter rk     = new Filter("RowKey", rowkey, "ge");

            filters.Add(rk);
            rk = new Filter("RowKey", Utils.GetLessThanFilter(rowkey), "lt");
            filters.Add(rk);

            return(GetRegexMatches(providerConfig, filters));
        }
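Utils.GetLessThanFilter is not shown in this listing. The usual way to turn a string prefix into an exclusive upper bound for a "ge"/"lt" range scan is to increment the last character of the prefix; a hedged sketch of such a helper, assuming that is what the real implementation does.

        // Smallest key greater than every key that starts with 'prefix', obtained by bumping
        // the last character. Assumed behaviour only; the real helper may differ
        public static string GetLessThanFilter(string prefix)
        {
            if (string.IsNullOrEmpty(prefix))
            {
                return(prefix);
            }

            char last = prefix[prefix.Length - 1];
            return(prefix.Substring(0, prefix.Length - 1) + (char)(last + 1));
        }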
Example #20
        public static long PATCH003_UpdateSakaiFileItemUri(DataConfig providerConfig, POCO.SakaiFile file)
        {
            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                throw new NotImplementedException();

            case "internal.mongodb":
                IMongoCollection <MongoSakaiUpdatePatch003> collection = Utils.GetMongoCollection <MongoSakaiUpdatePatch003>(providerConfig, MongoTableNames.SakaiFiles);

                // Create the update filter
                List <DataFactory.Filter> filters  = new List <DataFactory.Filter>();
                DataFactory.Filter        pkFilter = new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(file.PartitionKey), "eq");
                DataFactory.Filter        rkFilter = new DataFactory.Filter("RowKey", Utils.CleanTableKey(file.RowKey), "eq");
                filters.Add(pkFilter);
                filters.Add(rkFilter);
                FilterDefinition <MongoSakaiUpdatePatch003> filter = Utils.GenerateMongoFilter <MongoSakaiUpdatePatch003>(filters);

                //string updateParam = "{$set: {ItemUri: '" + file.SourceRelativeUrl.ToString() + "'}}";
                //BsonDocument updateDoc = BsonDocument.Parse(updateParam);

                var updateDoc = Builders <MongoSakaiUpdatePatch003> .Update
                                .Set("ItemUri", file.SourceRelativeUrl.ToString());

                // Apply the ItemUri update
                UpdateResult result = collection.UpdateOne(filter, updateDoc);

                if (result.IsAcknowledged && result.IsModifiedCountAvailable)
                {
                    return(result.ModifiedCount);
                }
                else
                {
                    return(0);
                }

                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            return(0);
        }
Example #21
        public static List <POCO.OntologyTermMatchResults> GetOntologyMatchSummary(DataConfig providerConfig, POCO.OntologyAssigned ontologyAssigned)
        {
            // Get the ontology terms for this ontology
            List <POCO.OntologyTerm> oTerms = DataFactory.Ontology.GetOntologyTerms(providerConfig, ontologyAssigned);

            // Convert the ontology terms into match results
            List <POCO.OntologyTermMatchResults> results = new List <POCO.OntologyTermMatchResults>();

            foreach (POCO.OntologyTerm term in oTerms)
            {
                // Create a matching result object
                POCO.OntologyTermMatchResults result = new POCO.OntologyTermMatchResults();
                result.PartitionKey = term.PartitionKey;
                result.RowKey       = term.RowKey;
                result.Term         = term.Term;
                result.ParentTerm   = term.ParentTerm;
                results.Add(result);
            }

            // Get all the ontology term matches for this ontology
            List <POCO.OntologyTermMatch> termMatches = DataFactory.Ontology.GetOntologyTermMatches(providerConfig, ontologyAssigned.RowKey);

            foreach (POCO.OntologyTermMatch match in termMatches)
            {
                // Find the Term Match Result for this Term Match
                POCO.OntologyTermMatchResults matchedResult = results.Find(r => r.PartitionKey == match.PartitionKey && r.RowKey == match.TermRowKey);
                if (matchedResult != null)
                {
                    matchedResult.NumRecordAssociationMatches++;
                }
            }

            //TODO use Linq to select all the parent nodes instead
            // Walk the ontology tree and sum child counts to parent counts
            for (int i = 0; i < results.Count; i++)
            {
                POCO.OntologyTermMatchResults result = results[i];
                // Find a root ontology term (one whose ParentTerm equals the OntologyUri, i.e. the PartitionKey)
                if (result.ParentTerm == result.PartitionKey)
                {
                    // Recursively sum the child terms up to its parent
                    SumOntologySummary(ref result, ref results);
                }
            }

            return(results);
        }
Example #22
        public static void UpdateSentenceStat(DataConfig providerConfig, POCO.System system, List <POCO.SentenceStat> sentenceStats)
        {
            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                AzureSentenceStatUpdate az = new AzureSentenceStatUpdate(system, sentenceStats);

                CloudTable     table     = Utils.GetCloudTable(providerConfig, System.TableNames.System);
                TableOperation operation = TableOperation.InsertOrMerge(az);
                Task           tUpdate   = table.ExecuteAsync(operation);
                tUpdate.Wait();

                break;

            case "internal.mongodb":
                IMongoCollection <MongoSystemStatUpdate> collection = Utils.GetMongoCollection <MongoSystemStatUpdate>(providerConfig, "stlpsystems");
                //MongoSystemStatUpdate mongoObject = Utils.ConvertType<MongoSystemStatUpdate>(systemStat);

                // Create the update filter
                List <DataFactory.Filter> filters  = new List <DataFactory.Filter>();
                DataFactory.Filter        pkFilter = new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(system.PartitionKey), "eq");
                //DataFactory.Filter rkFilter = new DataFactory.Filter("RowKey", Utils.CleanTableKey(system.RowKey), "eq");
                filters.Add(pkFilter);
                //filters.Add(rkFilter);
                FilterDefinition <MongoSystemStatUpdate> filter = Utils.GenerateMongoFilter <MongoSystemStatUpdate>(filters);

                // Serialize the stats object
                string jsonStatsSerialized = JsonConvert.SerializeObject(sentenceStats);

                //string updateParam = "{$set: {JsonSentenceStats: '" + jsonStatsSerialized + "'}}";
                //BsonDocument updateDoc = BsonDocument.Parse(updateParam);

                var update = Builders <MongoSystemStatUpdate> .Update
                             .Set("JsonSentenceStats", jsonStatsSerialized);


                // Apply the sentence stats update
                UpdateResult result = collection.UpdateOne(filter, update);

                return;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }
            return;
        }
Example #23
        public static POCO.OntologyMatchStatus GetOntologyMatchStatus(DataConfig providerConfig, RecordToRecordAssociation recassoc, OntologyAssigned ontology)
        {
            POCO.OntologyMatchStatus matchStatus = null;

            // Create a filter to match the Ontology provided
            List <Filter> filters  = new List <Filter>();
            Filter        pkFilter = new Filter("PartitionKey", ontology.RowKey, "eq");

            filters.Add(pkFilter);
            Filter rkFilter = new Filter("RowKey", recassoc.RowKey, "eq");

            filters.Add(rkFilter);

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":
                string combinedFilter = Utils.GenerateAzureFilter(filters);

                List <AzureOntologyMatchStatus> azs             = new List <AzureOntologyMatchStatus>();
                AzureTableAdaptor <AzureOntologyMatchStatus> az = new AzureTableAdaptor <AzureOntologyMatchStatus>();
                azs = az.ReadTableData(providerConfig, AzureTableNames.OntologyMatchStatus, combinedFilter);

                if (azs.Count > 0)
                {
                    matchStatus = azs[0].Value;
                }

                return(matchStatus);

            case "internal.mongodb":
                var collection = Utils.GetMongoCollection <MongoOntologyMatchStatus>(providerConfig, MongoTableNames.OntologyMatchStatus);

                FilterDefinition <MongoOntologyMatchStatus> filter = Utils.GenerateMongoFilter <MongoOntologyMatchStatus>(filters);

                var documents = collection.Find(filter).ToList();
                if (documents.Count > 0)
                {
                    matchStatus = documents[0];
                }
                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            return(matchStatus);
        }
Example #24
        public static List <POCO.SakaiFile> GetFiles(DataConfig providerConfig, List <Filter> filters)
        {
            List <POCO.SakaiFile> files = new List <POCO.SakaiFile>();

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                string combinedFilter = Utils.GenerateAzureFilter(filters);

                List <AzureSakaiFile> azdata = new List <AzureSakaiFile>();
                AzureTableAdaptor <AzureSakaiFile> adaptor = new AzureTableAdaptor <AzureSakaiFile>();
                azdata = adaptor.ReadTableData(providerConfig, SakaiResource.AzureTableNames.SakaiFiles, combinedFilter);

                foreach (var doc in azdata)
                {
                    files.Add(doc.Value);
                }

                break;

            case "internal.mongodb":
                var collection = Utils.GetMongoCollection <MongoSakaiFile>(providerConfig, SakaiResource.MongoTableNames.SakaiFiles);
                FilterDefinition <MongoSakaiFile> filter = Utils.GenerateMongoFilter <MongoSakaiFile>(filters);

                //DEBUG output the filter values
                //foreach (Castlepoint.DataFactory.Filter debugFilter in filters)
                //{
                //    // Output the filter field names and values
                //    Console.WriteLine("DEBUG filter: " + debugFilter.FieldName + " : " + debugFilter.FieldValue);
                //}

                var documents = collection.Find(filter).ToList();

                foreach (var sakaiFile in documents)
                {
                    files.Add(sakaiFile);
                }
                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            return(files);
        }
Example #25
        public static string AddFileProcessingLog(DataConfig providerConfig, POCO.LogFileProcessing log)
        {
            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                // Cycle the tables each month to manage size
                string tableNameSuffix = DateTime.UtcNow.ToString(Utils.TableSuffixDateFormatYM);
                string tableName       = "stlplogfileprocessresult" + tableNameSuffix;

                AzureLogFileProcessing az = new AzureLogFileProcessing(log);

                CloudTable     table     = Utils.GetCloudTable(providerConfig, tableName);
                TableOperation operation = TableOperation.InsertOrReplace(az);
                Task           tUpdate   = table.ExecuteAsync(operation);
                tUpdate.Wait();

                break;

            case "internal.mongodb":
                IMongoCollection <MongoLogFileProcessing> collection = Utils.GetMongoCollection <MongoLogFileProcessing>(providerConfig, "logfileprocessing");
                MongoLogFileProcessing mongoObject = Utils.ConvertType <MongoLogFileProcessing>(log);

                // Create the upsert filter
                List <DataFactory.Filter> filters  = new List <DataFactory.Filter>();
                DataFactory.Filter        pkFilter = new DataFactory.Filter("PartitionKey", log.PartitionKey, "eq");
                DataFactory.Filter        rkFilter = new DataFactory.Filter("RowKey", log.RowKey, "eq");
                filters.Add(pkFilter);
                filters.Add(rkFilter);
                FilterDefinition <MongoLogFileProcessing> filter = Utils.GenerateMongoFilter <MongoLogFileProcessing>(filters);

                // Create the upsert options
                MongoDB.Driver.ReplaceOptions options = new ReplaceOptions();
                options.IsUpsert = true;

                // Upsert
                collection.ReplaceOne(filter, mongoObject, options);
                return(string.Empty);

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            //TODO return id of new object if supported
            return(string.Empty);
        }
Example #26
        public static List <POCO.RecordAuthorityFilter> GetAssignedForRecord(DataConfig providerConfig, POCO.Record record)
        {
            List <POCO.RecordAuthorityFilter> assignedrecordauthoritys = new List <POCO.RecordAuthorityFilter>();

            List <Filter> filters  = new List <Filter>();
            Filter        pkFilter = new Filter("PartitionKey", record.RowKey, "eq");

            filters.Add(pkFilter);

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                string combinedFilter = Utils.GenerateAzureFilter(filters);

                List <AzureRecordAuthorityFilter> azdata = new List <AzureRecordAuthorityFilter>();
                AzureTableAdaptor <AzureRecordAuthorityFilter> adaptor = new AzureTableAdaptor <AzureRecordAuthorityFilter>();
                azdata = adaptor.ReadTableData(providerConfig, AzureTableNames.RecordsAssignedRecordsAuthority, combinedFilter);

                foreach (var doc in azdata)
                {
                    assignedrecordauthoritys.Add(doc.Value);
                }

                break;

            case "internal.mongodb":
                var collection = Utils.GetMongoCollection <MongoRecordAuthorityFilter>(providerConfig, MongoTableNames.RecordsAssignedRecordsAuthority);

                FilterDefinition <MongoRecordAuthorityFilter> filter = Utils.GenerateMongoFilter <MongoRecordAuthorityFilter>(filters);

                var documents = collection.Find(filter).ToList();

                foreach (var assignedra in documents)
                {
                    assignedrecordauthoritys.Add(assignedra);
                }
                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            return(assignedrecordauthoritys);
        }
Example #27
        public static List <POCO.OntologyAssigned> GetRecordAssignedOntology(DataConfig providerConfig, POCO.Record record)
        {
            List <POCO.OntologyAssigned> ontologies = new List <POCO.OntologyAssigned>();

            List <Filter> filters      = new List <Filter>();
            Filter        systemFilter = new Filter("PartitionKey", Utils.CleanTableKey(record.PartitionKey), "eq");

            filters.Add(systemFilter);

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                string combinedFilter = Utils.GenerateAzureFilter(filters);

                List <AzureOntologyAssigned> azs             = new List <AzureOntologyAssigned>();
                AzureTableAdaptor <AzureOntologyAssigned> az = new AzureTableAdaptor <AzureOntologyAssigned>();
                azs = az.ReadTableData(providerConfig, AzureTableNames.RecordsAssignedOntology, combinedFilter);

                foreach (var doc in azs)
                {
                    ontologies.Add(doc.Value);
                }

                return(ontologies);

            case "internal.mongodb":
                var collection = Utils.GetMongoCollection <MongoOntologyAssigned>(providerConfig, MongoTableNames.RecordsAssignedOntology);

                FilterDefinition <MongoOntologyAssigned> filter = Utils.GenerateMongoFilter <MongoOntologyAssigned>(filters);

                var documents = collection.Find(filter).ToList();

                foreach (var ontology in documents)
                {
                    ontologies.Add(ontology);
                }
                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            return(ontologies);
        }
Example #28
        public static List <POCO.OntologyTerm> GetOntologyTerms(DataConfig providerConfig, string ontologyUri)
        {
            List <POCO.OntologyTerm> ontologyTerms = new List <POCO.OntologyTerm>();

            // Create a filter to match the Ontology provided
            List <Filter> filters  = new List <Filter>();
            Filter        pkFilter = new Filter("PartitionKey", ontologyUri, "eq");

            filters.Add(pkFilter);

            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":
                string combinedFilter = Utils.GenerateAzureFilter(filters);

                List <AzureOntologyTerm> azs             = new List <AzureOntologyTerm>();
                AzureTableAdaptor <AzureOntologyTerm> az = new AzureTableAdaptor <AzureOntologyTerm>();
                azs = az.ReadTableData(providerConfig, AzureTableNames.OntologyTerms, combinedFilter);

                foreach (var doc in azs)
                {
                    ontologyTerms.Add(doc.Value);
                }

                return(ontologyTerms);

            case "internal.mongodb":
                var collection = Utils.GetMongoCollection <MongoOntologyTerm>(providerConfig, MongoTableNames.OntologyTerms);

                FilterDefinition <MongoOntologyTerm> filter = Utils.GenerateMongoFilter <MongoOntologyTerm>(filters);

                var documents = collection.Find(filter).ToList();

                foreach (var ontology in documents)
                {
                    ontologyTerms.Add(ontology);
                }
                break;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            return(ontologyTerms);
        }
Example #29
        //public static List<POCO.O365.SPFolder> GetFolders(DataConfig providerConfig, List<Filter> filters)
        //{
        //    List<POCO.O365.SPFolder> webInfo = new List<POCO.O365.SPFolder>();

        //    switch (providerConfig.ProviderType)
        //    {
        //        case "azure.tableservice":

        //            string combinedFilter = Utils.GenerateAzureFilter(filters);

        //            List<AzureSPFolder> azdata = new List<AzureSPFolder>();
        //            AzureTableAdaptor<AzureSPFolder> adaptor = new AzureTableAdaptor<AzureSPFolder>();
        //            azdata = adaptor.ReadTableData(providerConfig, AzureTableNames.SPFolder, combinedFilter);

        //            foreach (var doc in azdata)
        //            {
        //                webInfo.Add(doc.Value);
        //            }

        //            break;
        //        case "internal.mongodb":
        //            var collection = Utils.GetMongoCollection<MongoSPFolder>(providerConfig, MongoTableNames.SPFolder);

        //            FilterDefinition<MongoSPFolder> filter = Utils.GenerateMongoFilter<MongoSPFolder>(filters);

        //            //TODO paging
        //            var documents = collection.Find(filter).Sort("{\"_id\":1}").Limit(1000).ToList();

        //            foreach (var doc in documents)
        //            {
        //                webInfo.Add(doc);
        //            }
        //            break;
        //        default:
        //            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
        //    }

        //    return webInfo;
        //}

        //public static void UpdateSPOWebInfoLastProcessed(DataConfig providerConfig, POCO.O365.SPOWebInfoEntity webInfo)
        //{
        //    switch (providerConfig.ProviderType)
        //    {
        //        case "azure.tableservice":
        //            AzureSPOWebInfoEntity az = new AzureSPOWebInfoEntity(webInfo);

        //            CloudTable table = Utils.GetCloudTable(providerConfig, AzureTableNames.SPOTracking);
        //            TableOperation operation = TableOperation.InsertOrMerge(az);
        //            Task tUpdate = table.ExecuteAsync(operation);
        //            tUpdate.Wait();

        //            break;

        //        case "internal.mongodb":
        //            IMongoCollection<MongoSPOWebInfoEntity> collection = Utils.GetMongoCollection<MongoSPOWebInfoEntity>(providerConfig, MongoTableNames.SPOTracking);
        //            MongoSPOWebInfoEntity mongoObject = Utils.ConvertType<MongoSPOWebInfoEntity>(webInfo);

        //            // Create the update filter
        //            List<DataFactory.Filter> filters = new List<DataFactory.Filter>();
        //            DataFactory.Filter pkFilter = new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(mongoObject.PartitionKey), "eq");
        //            DataFactory.Filter rkFilter = new DataFactory.Filter("RowKey", Utils.CleanTableKey(mongoObject.RowKey), "eq");
        //            filters.Add(pkFilter);
        //            filters.Add(rkFilter);
        //            FilterDefinition<MongoSPOWebInfoEntity> filter = Utils.GenerateMongoFilter<MongoSPOWebInfoEntity>(filters);

        //            var update = Builders<MongoSPOWebInfoEntity>.Update
        //                .Set("LastItemModifiedDate", webInfo.LastItemModifiedDate)
        //                .Set("LastItemUserModifiedDate", webInfo.LastItemUserModifiedDate);

        //            // Update the batch status
        //            UpdateResult result = collection.UpdateOne(filter, update);

        //            return;

        //        default:
        //            throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
        //    }
        //    return;
        //}

        public static void UpdateEOFolder(DataConfig providerConfig, POCO.O365.EOFolderUpdate folderUpdate)
        {
            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":
                AzureEOFolderUpdate az = new AzureEOFolderUpdate(folderUpdate);

                CloudTable     table     = Utils.GetCloudTable(providerConfig, AzureTableNames.EOFolder);
                TableOperation operation = TableOperation.InsertOrMerge(az);
                Task           tUpdate   = table.ExecuteAsync(operation);
                tUpdate.Wait();

                break;

            case "internal.mongodb":
                IMongoCollection <MongoEOFolderUpdate> collection = Utils.GetMongoCollection <MongoEOFolderUpdate>(providerConfig, MongoTableNames.EOFolder);
                MongoEOFolderUpdate mongoObject = Utils.ConvertType <MongoEOFolderUpdate>(folderUpdate);

                // Create the update filter
                List <DataFactory.Filter> filters  = new List <DataFactory.Filter>();
                DataFactory.Filter        pkFilter = new DataFactory.Filter("PartitionKey", Utils.CleanTableKey(mongoObject.PartitionKey), "eq");
                DataFactory.Filter        rkFilter = new DataFactory.Filter("RowKey", Utils.CleanTableKey(mongoObject.RowKey), "eq");
                filters.Add(pkFilter);
                filters.Add(rkFilter);
                FilterDefinition <MongoEOFolderUpdate> filter = Utils.GenerateMongoFilter <MongoEOFolderUpdate>(filters);

                var update = Builders <MongoEOFolderUpdate> .Update
                             .Set("TimeCreated", folderUpdate.TimeCreated)
                             .Set("TimeLastModified", folderUpdate.TimeLastModified)
                             .Set("ItemCount", folderUpdate.ItemCount)
                             .Set("Name", folderUpdate.Name)
                             .Set("CPFolderStatus", folderUpdate.CPFolderStatus);


                // Apply the folder property updates
                UpdateResult result = collection.UpdateOne(filter, update);

                return;

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }
            return;
        }
Example #30
        public static string AddLastAction(DataConfig providerConfig, POCO.LogLastAction lastAction)
        {
            switch (providerConfig.ProviderType)
            {
            case "azure.tableservice":

                AzureLastAction az = new AzureLastAction(lastAction);

                CloudTable     table     = Utils.GetCloudTable(providerConfig, AzureTableNames.LogLastAction);
                TableOperation operation = TableOperation.InsertOrReplace(az);
                Task           tUpdate   = table.ExecuteAsync(operation);
                tUpdate.Wait();

                break;

            case "internal.mongodb":
                IMongoCollection <MongoLastAction> collection = Utils.GetMongoCollection <MongoLastAction>(providerConfig, MongoTableNames.LogLastAction);
                MongoLastAction mongoObject = Utils.ConvertType <MongoLastAction>(lastAction);

                // Create the upsert filter
                List <DataFactory.Filter> filters  = new List <DataFactory.Filter>();
                DataFactory.Filter        pkFilter = new DataFactory.Filter("PartitionKey", Utils.CleanTableKey("LastAction"), "eq");
                DataFactory.Filter        rkFilter = new DataFactory.Filter("RowKey", Utils.CleanTableKey(lastAction.EventName), "eq");
                filters.Add(pkFilter);
                filters.Add(rkFilter);
                FilterDefinition <MongoLastAction> filter = Utils.GenerateMongoFilter <MongoLastAction>(filters);

                // Create the upsert options
                MongoDB.Driver.ReplaceOptions options = new ReplaceOptions();
                options.IsUpsert = true;

                // Upsert
                string debugfilter = Newtonsoft.Json.JsonConvert.SerializeObject(filters);
                string debugobject = Newtonsoft.Json.JsonConvert.SerializeObject(mongoObject);
                collection.ReplaceOne(filter, mongoObject, options);
                return(string.Empty);

            default:
                throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType);
            }

            //TODO return id of new object if supported
            return(string.Empty);
        }