/// <summary>
/// Clears the entire MemCacheD cache and reports success to the caller.
/// </summary>
/// <returns>Always true</returns>
protected override bool Execute()
{
    // Drop every cached entry, then mark the JSON-RPC response as successful.
    MemCacheD.FlushAll();
    Response.data = JSONRPC.success;
    return true;
}
/// <summary>
/// Reads the metadata for a cube and stores the serialized JSON-stat result in the cache.
/// </summary>
/// <param name="theAdo">Open ADO connection</param>
/// <param name="theCubeDTO">Cube read request (language, format, search criteria)</param>
/// <param name="theReleaseDto">Release the metadata belongs to</param>
/// <param name="theResponse">Response object to populate</param>
/// <returns>Always true; theResponse.data is null when no matrix was found</returns>
static internal bool ExecuteReadMetadata(ADO theAdo, Cube_DTO_Read theCubeDTO, Release_DTO theReleaseDto, JSONRPC_Output theResponse)
{
    // The matrix constructor will load all the metadata from the db when instances specification
    var theMatrix = new Matrix(theAdo, theReleaseDto, theCubeDTO.language).ApplySearchCriteria(theCubeDTO);

    // Fix: the null check must run BEFORE the matrix is dereferenced. The original code
    // assigned FormatType/FormatVersion first, which made this branch unreachable and
    // caused a NullReferenceException instead of the documented null response.
    // (An unused local `var ado = new Cube_ADO(theAdo);` was also removed.)
    if (theMatrix == null)
    {
        theResponse.data = null;
        return true;
    }

    theMatrix.FormatType = theCubeDTO.Format.FrmType;
    theMatrix.FormatVersion = theCubeDTO.Format.FrmVersion;

    var jsonStat = theMatrix.GetJsonStatObject();
    theResponse.data = new JRaw(Serialize.ToJson(jsonStat));

    // Cache until the release goes off-live; with no end date, store with a default timestamp.
    if (theReleaseDto.RlsLiveDatetimeTo != null)
    {
        MemCacheD.Store_BSO<dynamic>("PxStat.Data", "Cube_API", "ReadMetadata", theCubeDTO, theResponse.data, theReleaseDto.RlsLiveDatetimeTo, Resources.Constants.C_CAS_DATA_CUBE_READ_METADATA + theMatrix.Code);
    }
    else
    {
        MemCacheD.Store_BSO<dynamic>("PxStat.Data", "Cube_API", "ReadMetadata", theCubeDTO, theResponse.data, new DateTime(), Resources.Constants.C_CAS_DATA_CUBE_READ_METADATA + theMatrix.Code);
    }
    return true;
}
/// <summary>
/// Reads pre-release metadata for a requested release, serving from cache when possible.
/// Only DOWNLOAD-direction format requests are accepted.
/// </summary>
/// <returns>false if the format direction is not DOWNLOAD; otherwise true (Response.data is null when no release was found)</returns>
protected override bool Execute()
{
    // Reject anything that is not a download-direction format request
    if (DTO.Format.FrmDirection != FormatDirection.DOWNLOAD.ToString())
    {
        return (false);
    }

    ////See if this request has cached data
    MemCachedD_Value cache = MemCacheD.Get_BSO <dynamic>("PxStat.Data", "Cube_API", "ReadPreMetadata", DTO);
    if (cache.hasData)
    {
        Response.data = cache.data;
        return (true);
    }

    // Read the matrix for the requested release/language on behalf of the current user
    var item = new Matrix_ADO(Ado).Read(DTO.release, DTO.language, SamAccountName);
    var result = Release_ADO.GetReleaseDTO(item);
    if (result == null)
    {
        // No such release - report an empty (null) result rather than an error
        Response.data = null;
        return (true);
    }

    // The language of the stored data may differ from the request, so align the DTO with the matrix language
    DTO.language = item.LngIsoCode;

    // Delegate the actual metadata read (final argument false - semantics defined by the callee)
    return (Cube_BSO_ReadMetadata.ExecuteReadMetadata(Ado, DTO, result, Response, false));
}
/// <summary>
/// Reads a cube collection, serving from cache when available.
/// </summary>
/// <returns>true on completion (Response.error is set if the request was throttled)</returns>
protected override bool Execute()
{
    //so that caches don't get mixed up..
    DTO.meta = _meta;

    //See if this request has cached data
    MemCachedD_Value cache = MemCacheD.Get_BSO <dynamic>("PxStat.Data", "Cube_API", "ReadCollection", DTO);
    if (cache.hasData)
    {
        Response.data = cache.data;
        return (true);
    }

    // NOTE(review): when throttled this only sets Response.error and then continues to
    // run the read anyway - confirm whether a return(false) was intended here.
    if (Throttle_BSO.IsThrottled(Ado, HttpContext.Current.Request, Request, SamAccountName))
    {
        Log.Instance.Debug("Request throttled");
        Response.error = Label.Get("error.throttled");
    }

    Cube_BSO cBso = new Cube_BSO();

    // cache store is done in the following function
    Response.data = cBso.ExecuteReadCollection(Ado, DTO, _meta);
    return (true);
}
/// <summary>
/// If an update has taken place, we must flush the caches for all associated matrixes
/// (because the contact details are part of the matrix)
/// </summary>
/// <param name="Ado">Open ADO connection</param>
/// <param name="dto">Group update details; GrpCodeOld identifies the group whose matrixes are flushed</param>
private void FlushAssociatedMatrixes(ADO Ado, Group_DTO_Update dto)
{
    Matrix_ADO matrixAdo = new Matrix_ADO(Ado);

    // Fetch every matrix attached to the (old) group code, in the default configured language
    var groupMatrixes = matrixAdo.ReadByGroup(dto.GrpCodeOld, Configuration_BSO.GetCustomConfig(ConfigType.global, "language.iso.code"));

    if (!groupMatrixes.hasData)
    {
        return;
    }

    // Invalidate both the dataset and metadata cache entries for each matrix
    foreach (var matrix in groupMatrixes.data)
    {
        MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_DATASET + matrix.MtrCode);
        MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_METADATA + matrix.MtrCode);
    }
}
/// <summary>
/// Reads a cube collection, trusting the cache only when no scheduled release goes live before the cache expires.
/// </summary>
/// <returns>true on completion</returns>
protected override bool Execute()
{
    //See if this request has cached data
    MemCachedD_Value cache = MemCacheD.Get_BSO <dynamic>("PxStat.Data", "Cube_API", "ReadCollection", DTO);

    Navigation_ADO adoNav = new Navigation_ADO(Ado);

    //If we're cacheing, we only want the cache to live until the next scheduled release goes live
    //Also, if there is a next release before the scheduled cache expiry time, then we won't trust the cache
    var nextRelease = adoNav.ReadNextLiveDate(DateTime.Now);
    DateTime nextReleaseDate = default;
    if (nextRelease.hasData)
    {
        if (!nextRelease.data[0].NextRelease.Equals(DBNull.Value))
        {
            nextReleaseDate = Convert.ToDateTime(nextRelease.data[0].NextRelease);
        }
    }

    // NOTE(review): when there is no upcoming release, nextReleaseDate stays at
    // default (DateTime.MinValue), so the cached value is never served - confirm
    // whether the cache should be trusted in that case.
    if (cache.hasData && nextReleaseDate >= cache.expiresAt)
    {
        Response.data = cache.data;
        return (true);
    }

    Cube_BSO cBso = new Cube_BSO();

    // cache store is done in the following function
    Response.data = cBso.ExecuteReadCollection(Ado, DTO);
    return (true);
}
/// <summary>
/// Reads the current access details for the authenticated user, serving from cache when possible.
/// </summary>
/// <returns>true with the access data (or cached data); false when no account data exists</returns>
protected override bool Execute()
{
    DTO.CcnUsername = SamAccountName;
    if (!IsUserAuthenticated())
    {
        return (true);
    }

    // Fix: removed an unused local `JSONRPC_Output response` that was never read or returned.

    //The cache key is created to be unique to a given CcnUsername.
    // (Key spelling "ReadCurrentAccesss" intentionally matches the store side in Account_BSO.)
    MemCachedD_Value cache = MemCacheD.Get_BSO <dynamic>("PxStat.Security", "Account_API", "ReadCurrentAccesss", DTO.CcnUsername);
    if (cache.hasData)
    {
        Response.data = cache.data;
        return (true);
    }

    Account_BSO bso = new Account_BSO();
    ADO_readerOutput output = bso.ReadCurrentAccess(Ado, DTO.CcnUsername);
    if (!output.hasData)
    {
        Log.Instance.Debug("No Account data found");
        return (false);
    }

    Response.data = output.data;
    return (true);
}
/// <summary>
/// Creates or updates the comment on a release, after flushing the matrix caches.
/// </summary>
/// <returns>true on success; false if no comment row was written</returns>
protected override bool Execute()
{
    Release_ADO releaseAdo = new Release_ADO(Ado);
    Release_DTO releaseDto = Release_ADO.GetReleaseDTO(releaseAdo.Read(DTO.RlsCode, SamAccountName));
    DTO.MtrCode = releaseDto.MtrCode;

    //We can do this now because the MtrCode is available to us
    MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_DATASET + DTO.MtrCode);
    MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_METADATA + DTO.MtrCode);

    // A zero comment code means no comment exists yet - create; otherwise update
    releaseDto.CmmValue = DTO.CmmValue;
    int rowsAffected = releaseDto.CmmCode == 0
        ? releaseAdo.CreateComment(releaseDto, SamAccountName)
        : releaseAdo.UpdateComment(releaseDto, SamAccountName);

    if (rowsAffected == 0)
    {
        Log.Instance.Debug("Failed to update Release Comment");
        Response.error = Label.Get("error.update");
        return (false);
    }

    Response.data = JSONRPC.success;
    return (true);
}
/// <summary>
/// Reads the metadata of a currently live matrix, serving from cache when available.
/// Only DOWNLOAD-direction format requests are accepted.
/// </summary>
/// <returns>false if the format direction is not DOWNLOAD; otherwise true (Response.data is null when no live release was found)</returns>
protected override bool Execute()
{
    // Reject anything that is not a download-direction format request
    if (DTO.Format.FrmDirection != FormatDirection.DOWNLOAD.ToString())
    {
        return (false);
    }

    ////See if this request has cached data
    MemCachedD_Value cache = MemCacheD.Get_BSO <dynamic>("PxStat.Data", "Cube_API", "ReadMetadata", DTO);
    if (cache.hasData)
    {
        Response.data = cache.data;
        return (true);
    }

    // NOTE(review): when throttled this only sets Response.error and then continues to
    // run the read anyway - confirm whether a return(false) was intended here.
    if (Throttle_BSO.IsThrottled(Ado, HttpContext.Current.Request, Request, SamAccountName))
    {
        Log.Instance.Debug("Request throttled");
        Response.error = Label.Get("error.throttled");
    }

    // Find the currently live release for the requested matrix/language
    var items = new Release_ADO(Ado).ReadLiveNow(DTO.matrix, DTO.language);
    var result = Release_ADO.GetReleaseDTO(items);
    if (result == null)
    {
        // No live release - report an empty (null) result rather than an error
        Response.data = null;
        return (true);
    }

    //The Language of the received data may be different from the request - so we make sure it corresponds to the language of the metadata
    DTO.language = items.LngIsoCode;

    return (ExecuteReadMetadata(Ado, DTO, result, Response));
}
/// <summary>
/// Success handler - stores the read result in the cache when a cache directive (cDTO) is present.
/// </summary>
protected override void OnExecutionSuccess()
{
    Log.Instance.Debug("Record Read");

    // No cache directive means there is nothing to store
    if (cDTO == null)
    {
        return;
    }

    MemCacheD.Store_BSO<dynamic>(cDTO.Namespace, cDTO.ApiName, cDTO.Method, DTO, Response.data, cDTO.TimeLimit, cDTO.Cas + cDTO.Domain);
}
/// <summary>
/// Reloads the server and global JSON configuration files from disk and
/// stores both in the cache with a default (non-expiring) timestamp.
/// </summary>
/// <returns>Always true</returns>
internal static bool UpdateConfigFromFiles()
{
    // Resolve the physical directory that holds the configuration resources
    string resourcesRoot = HttpContext.Current.Server.MapPath(Utility.GetCustomConfig("APP_CONFIG_RESOURCES_MAP_PATH"));

    dynamic serverConfig = Utility.JsonDeserialize_IgnoreLoopingReference(File.ReadAllText(resourcesRoot + Utility.GetCustomConfig("APP_CONFIG_SERVER_JSON_PATH")));
    dynamic globalConfig = Utility.JsonDeserialize_IgnoreLoopingReference(File.ReadAllText(resourcesRoot + Utility.GetCustomConfig("APP_CONFIG_GLOBAL_JSON_PATH")));

    // Cache each configuration under its ConfigType key
    MemCacheD.Store_BSO("PxStat.Security", "Configuration", "Read", ConfigType.global.ToString(), globalConfig, default(DateTime));
    MemCacheD.Store_BSO("PxStat.Security", "Configuration", "Read", ConfigType.server.ToString(), serverConfig, default(DateTime));

    return true;
}
/// <summary>
/// Updates a Subject (or its language-specific variant), recreates its keywords
/// and flushes the navigation caches.
/// </summary>
/// <returns>true on success; false on duplicate name or failed update</returns>
protected override bool Execute()
{
    var adoSubject = new Subject_ADO(Ado);
    int nUpdatedSubjectId = 0;

    //We can't allow duplicate named Subjects, so we must check first
    if (adoSubject.UpdateExists(DTO))
    {
        Response.error = Label.Get("error.duplicate");
        return (false);
    }

    // Non-default languages are routed to the SubjectLanguage entity rather than the Subject itself
    if (DTO.LngIsoCode != Configuration_BSO.GetCustomConfig(ConfigType.global, "language.iso.code"))
    {
        SubjectLanguage_BSO subjectLanguageBso = new SubjectLanguage_BSO();
        nUpdatedSubjectId = subjectLanguageBso.CreateOrUpdate(DTO, Ado);
        if (nUpdatedSubjectId == 0)
        {
            Log.Instance.Debug("Update of SubjectLanguage failed");
            Response.error = Label.Get("error.update");
            return (false);
        }
    }
    else
    {
        nUpdatedSubjectId = adoSubject.Update(DTO, SamAccountName);
    }

    if (nUpdatedSubjectId == 0)
    {
        Log.Instance.Debug("Update of Subject failed");
        Response.error = Label.Get("error.update");
        return (false);
    }

    //We must now delete all of the keywords for the subject
    Keyword_Subject_BSO_Mandatory kbBso = new Keyword_Subject_BSO_Mandatory();
    int nchanged = kbBso.Delete(Ado, DTO, true);
    if (nchanged == 0)
    {
        // Not fatal - the subject may simply have had no keywords yet
        Log.Instance.Debug("No keywords deleted");
    }

    //We can now recreate the keywords for the subject
    kbBso.Create(Ado, DTO, nUpdatedSubjectId);

    //Reset the relevant caches
    MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_NAVIGATION_SEARCH);
    MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_NAVIGATION_READ);

    Response.data = JSONRPC.success;
    return (true);
}
/// <summary>
/// Reads the navigation tree, serving from cache only when no scheduled release
/// goes live before the cached entry expires.
/// </summary>
/// <returns>true when data was found (and cached); false otherwise</returns>
protected override bool Execute()
{
    Navigation_ADO adoNav = new Navigation_ADO(Ado);

    //If we're cacheing, we only want the cache to live until the next scheduled release goes live
    //Also, if there is a next release before the scheduled cache expiry time, then we won't trust the cache
    var nextRelease = adoNav.ReadNextLiveDate(DateTime.Now);
    DateTime nextReleaseDate = default;
    if (nextRelease.hasData)
    {
        if (!nextRelease.data[0].NextRelease.Equals(DBNull.Value))
        {
            nextReleaseDate = Convert.ToDateTime(nextRelease.data[0].NextRelease);
        }
    }

    //Read the cached value for this if it's available but only if there isn't a next release before the cache expiry time
    MemCachedD_Value cache = MemCacheD.Get_BSO <dynamic>("PxStat.System.Navigation", "Navigation_API", "Read", DTO);
    if (cache.hasData && nextReleaseDate >= cache.expiresAt)
    {
        Response.data = cache.data;
        return (true);
    }

    //No cache available, so read from the database and cache that result
    ADO_readerOutput result = adoNav.Read(DTO);
    if (result.hasData)
    {
        List <dynamic> formattedOutput = formatOutput(result.data);

        // Fix: the original recomputed nextReleaseDate here from the same nextRelease
        // object (identical logic to the block above, yielding the same value), guarded
        // by a dead `nextRelease != null` check; the redundant recomputation was removed.
        Response.data = formattedOutput;
        MemCacheD.Store_BSO <dynamic>("PxStat.System.Navigation", "Navigation_API", "Read", DTO, Response.data, nextReleaseDate, Resources.Constants.C_CAS_NAVIGATION_READ);
        return (true);
    }
    return (false);
}
/// <summary>
/// Runs a navigation search (word search when a search string is supplied, entity
/// search otherwise) and caches the result until the next scheduled release.
/// </summary>
/// <returns>true on completion</returns>
protected override bool Execute()
{
    // Fix: removed an unused local `Navigation_ADO adoNav` that was constructed but never used.
    Navigation_BSO nBso = new Navigation_BSO(Ado, DTO);

    bool wordSearch = false;
    if (!String.IsNullOrEmpty(DTO.Search))
    {
        DTO.SearchTerms = nBso.PrepareSearchData();
        wordSearch = true;
    }

    //DTO.search has too much variation to use as a cache key - the search terms are formatted in the table so we don't need it any more
    DTO.Search = "";

    MemCachedD_Value cache = MemCacheD.Get_BSO("PxStat.System.Navigation", "Navigation_API", "Search", DTO);
    if (cache.hasData)
    {
        Response.data = cache.data;
        return (true);
    }

    Response.data = wordSearch ? nBso.RunWordSearch() : nBso.RunEntitySearch();

    // The cache may only live until the next scheduled release goes live; with no
    // upcoming release the expiry stays at default(DateTime).
    // (A redundant `else { minDateItem = default; }` branch was removed - the local
    // is already default-initialized.)
    DateTime minDateItem = default;
    Release_ADO rAdo = new Release_ADO(Ado);
    dynamic dateQuery = rAdo.ReadNextReleaseDate();
    if (dateQuery != null && !dateQuery.RlsDatetimeNext.Equals(DBNull.Value))
    {
        minDateItem = dateQuery.RlsDatetimeNext;
    }

    MemCacheD.Store_BSO("PxStat.System.Navigation", "Navigation_API", "Search", DTO, Response.data, minDateItem, Resources.Constants.C_CAS_NAVIGATION_SEARCH);
    return (true);
}
/// <summary>
/// Success handler - flushes every CAS repository declared by the executed
/// method's cache-flush attribute.
/// </summary>
protected override void OnExecutionSuccess()
{
    Log.Instance.Debug("Record updated");

    //See if there's a cache in the process. If so then we need to flush the cache.
    // Consistency fix: use the configured attribute name (as the sibling success
    // handlers do via Utility.GetCustomConfig) instead of the hard-coded
    // "CacheFlush" literal.
    if (MethodReader.MethodHasAttribute(Request.method, Utility.GetCustomConfig("APP_CACHE_FLUSH_ATTRIBUTE")))
    {
        cDTO = new CacheMetadata(Utility.GetCustomConfig("APP_CACHE_FLUSH_ATTRIBUTE"), Request.method, DTO);
        foreach (Cas cas in cDTO.CasList)
        {
            MemCacheD.CasRepositoryFlush(cas.CasRepository + cas.Domain);
        }
    }
}
/// <summary>
/// Success handler - if the executed method carries the cache-flush attribute,
/// flush every CAS repository it declares.
/// </summary>
protected override void OnExecutionSuccess()
{
    Log.Instance.Debug("Record deleted");

    string flushAttribute = Utility.GetCustomConfig("APP_CACHE_FLUSH_ATTRIBUTE");

    // Nothing to do unless the method is flagged for cache flushing
    if (!MethodReader.MethodHasAttribute(Request.method, flushAttribute))
    {
        return;
    }

    cDTO = new CacheMetadata(flushAttribute, Request.method, DTO);
    foreach (Cas casEntry in cDTO.CasList)
    {
        MemCacheD.CasRepositoryFlush(casEntry.CasRepository + casEntry.Domain);
    }
}
/// <summary>
/// Deletes a GeoMap and evicts its cached entry.
/// </summary>
/// <returns>true on success; false if the delete affected no rows</returns>
protected override bool Execute()
{
    GeoMap_ADO gAdo = new GeoMap_ADO(Ado);
    if (gAdo.Delete(DTO, SamAccountName) == 0)
    {
        // Fix: this is a delete operation, so report the delete error label.
        // The original reported "error.create", which is inconsistent with the
        // other delete handlers in this codebase.
        Response.error = Label.Get("error.delete");
        return (false);
    }

    // Evict the cached map so subsequent reads don't serve the deleted entry
    MemCacheD.Remove_BSO <dynamic>("PxStat.Data", "GeoMap_BSO_Read", "Read", DTO.GmpCode);

    Response.data = JSONRPC.success;
    return (true);
}
/// <summary>
/// Serves a GeoMap as a static JSON response, reading from cache when possible and
/// populating the cache on a database read.
/// </summary>
/// <param name="staticRequest">Static request; parameters[1] is the GeoMap code</param>
/// <returns>Static_Output with status OK, NotFound or InternalServerError</returns>
internal Static_Output Read(Static_API staticRequest)
{
    Static_Output output = new Static_Output() { };
    output.mimeType = "application/json";

    ////See if this request has cached data
    MemCachedD_Value cache = MemCacheD.Get_BSO <dynamic>("PxStat.Data", "GeoMap_BSO_Read", "Read", staticRequest.parameters[1]);
    if (cache.hasData)
    {
        output.response = cache.data;
        output.statusCode = HttpStatusCode.OK;
        return (output);
    }

    using (GeoMap_BSO bso = new GeoMap_BSO(new ADO("defaultConnection")))
    {
        try
        {
            var dataRead = bso.Read(staticRequest.parameters[1]);
            if (dataRead.hasData)
            {
                output.statusCode = HttpStatusCode.OK;

                // Fix: deserialize the GeoJSON once and reuse it for both the cache
                // entry and the response (the original deserialized the same string twice).
                GeoJson readGeoJson = JsonConvert.DeserializeObject <GeoJson>(dataRead.data[0].GmpGeoJson);
                MemCacheD.Store_BSO("PxStat.Data", "GeoMap_BSO_Read", "Read", staticRequest.parameters[1], readGeoJson, default(DateTime));
                output.response = Utility.JsonSerialize_IgnoreLoopingReference(readGeoJson);
            }
            else
            {
                output.statusCode = HttpStatusCode.NotFound;
            }
        }
        catch (Exception ex)
        {
            Log.Instance.Error(ex.Message);
            Log.Instance.Error(ex.StackTrace);
            output.statusCode = HttpStatusCode.InternalServerError;
        }
    };
    return (output);
}
/// <summary>
/// The Subscriber Key Cache is used for throttling traffic.
/// This method rebuilds the Subscriber Key Cache from the database.
/// </summary>
/// <param name="ado">Open ADO connection</param>
internal void RefreshSubscriberKeyCache(ADO ado)
{
    Subscriber_ADO subscriberAdo = new Subscriber_ADO(ado);
    var subscribers = subscriberAdo.ReadSubscriberKeys();

    // Nothing to cache when no subscriber keys exist
    if (subscribers.Count == 0)
    {
        return;
    }

    // Collect the keys into a plain string list for storage
    List <string> keys = new List <string>();
    foreach (var subscriber in subscribers)
    {
        keys.Add(subscriber.SbrKey);
    }

    MemCacheD.Store_BSO("PxStat.Subscription", "Subscriber_BSO", "RefreshSubscriberKeyCache", "RefreshSubscriberKeyCache", keys, default(DateTime));
}
/// <summary>
/// Reads a dataset (cube) for download, serving from cache when available.
/// Pivot is only honoured for CSV/XLSX output formats.
/// </summary>
/// <returns>true when served from cache or no live release exists; otherwise the result of ExecuteReadDataset</returns>
protected override bool Execute()
{
    // Pivoting only applies to tabular outputs - silently drop it for anything else
    if (DTO.jStatQueryExtension.extension.Pivot != null)
    {
        if (DTO.jStatQueryExtension.extension.Format.Type != "CSV" && DTO.jStatQueryExtension.extension.Format.Type != "XLSX")
        {
            DTO.jStatQueryExtension.extension.Pivot = null;
        }
    }

    //if the role details haven't been supplied then look it up from the metadata in the database
    if (DTO.Role == null)
    {
        DTO.Role = new Cube_BSO().UpdateRoleFromMetadata(Ado, DTO);
    }

    //The Language of the received data may be different from the request - so we make sure it corresponds to the language of the dataset (???)
    var items = new Release_ADO(Ado).ReadLiveNow(DTO.jStatQueryExtension.extension.Matrix, DTO.jStatQueryExtension.extension.Language.Code);

    ////See if this request has cached data
    MemCachedD_Value cache = MemCacheD.Get_BSO <dynamic>("PxStat.Data", "Cube_API", "ReadDataset", DTO);
    if (cache.hasData)
    {
        Response.data = cache.data;
        return (true);
    }

    // NOTE(review): when throttled this only sets Response.error and then continues to
    // run the read anyway - confirm whether a return(false) was intended here.
    if (Throttle_BSO.IsThrottled(Ado, HttpContext.Current.Request, Request, SamAccountName))
    {
        Log.Instance.Debug("Request throttled");
        Response.error = Label.Get("error.throttled");
    }

    var result = Release_ADO.GetReleaseDTO(items);
    if (result == null)
    {
        // No live release - report an empty (null) result rather than an error
        Response.data = null;
        return (true);
    }

    var data = ExecuteReadDataset(Ado, DTO, result, Response, DTO.jStatQueryExtension.extension.Language.Code, DTO.jStatQueryExtension.extension.Language.Culture, defaultPivot);
    return (data);
}
/// <summary>
/// Reads the current access details for a user and, when data exists,
/// primes the cache with the result.
/// </summary>
/// <param name="Ado">Open ADO connection</param>
/// <param name="ccnUsername">Username whose access is being read</param>
/// <returns>The reader output (may be empty)</returns>
internal ADO_readerOutput ReadCurrentAccess(ADO Ado, string ccnUsername)
{
    //Validation of parameters and user have been successful. We may now proceed to read from the database
    ADO_readerOutput accessData = new Account_ADO().Read(Ado, ccnUsername);

    if (!accessData.hasData)
    {
        Log.Instance.Debug("No Account data found");
        return accessData;
    }

    // Set the cache based on the data returned.
    // NB: the key spelling "ReadCurrentAccesss" (sic) must match the reader side.
    MemCacheD.Store_BSO <dynamic>("PxStat.Security", "Account_API", "ReadCurrentAccesss", ccnUsername, accessData.data, new DateTime());
    return accessData;
}
/// <summary>
/// Reads a dataset (cube), serving from cache when available.
/// </summary>
/// <returns>true when served from cache or no live release exists; otherwise the result of ExecuteReadDataset</returns>
protected override bool Execute()
{
    //if the role details haven't been supplied then look it up from the metadata in the database
    if (DTO.Role == null)
    {
        DTO.Role = new Cube_BSO().UpdateRoleFromMetadata(Ado, DTO);
    }

    //The Language of the received data may be different from the request - so we make sure it corresponds to the language of the dataset (???)
    var items = new Release_ADO(Ado).ReadLiveNow(DTO.jStatQueryExtension.extension.Matrix, DTO.jStatQueryExtension.extension.Language.Code);

    ////See if this request has cached data
    MemCachedD_Value cache = MemCacheD.Get_BSO <dynamic>("PxStat.Data", "Cube_API", "ReadDataset", DTO);
    if (cache.hasData)
    {
        Response.data = cache.data;
        return (true);
    }

    var result = Release_ADO.GetReleaseDTO(items);
    if (result == null)
    {
        // No live release - report an empty (null) result rather than an error
        Response.data = null;
        return (true);
    }

    var data = ExecuteReadDataset(Ado, DTO, result, Response, DTO.jStatQueryExtension.extension.Language.Code, DTO.jStatQueryExtension.extension.Language.Culture);
    return (data);
}
/// <summary>
/// Reads the raw statistics of the first configured memcached server.
/// The server address/port are read from the enyim.com section of Web.config
/// (they are not part of appsettings).
/// </summary>
/// <returns>true with a stat-name/value dictionary in Response.data; false when stats or server configuration are unavailable</returns>
protected override bool Execute()
{
    string webconfig = AppDomain.CurrentDomain.BaseDirectory + "Web.config";

    // To get the memcached server data from web.config because not part of appsettings
    XDocument config = XDocument.Load(webconfig);
    var port = config.Descendants("enyim.com")?.Descendants("servers")?.Descendants("add")?.Attributes("port");
    var address = config.Descendants("enyim.com")?.Descendants("servers")?.Descendants("add")?.Attributes("address");

    ServerStats stats = MemCacheD.GetStats();
    if (stats == null)
    {
        return (false);
    }

    Dictionary <string, string> result = new Dictionary <string, string>();
    if (!(port == null || address == null))//if port or address are null return false otherwise get the stats of the first server
    {
        // Query every StatItem in turn against the first configured endpoint
        for (int i = 0; i < Enum.GetNames(typeof(StatItem)).Length; i++)
        {
            // takes first item from each of address and port
            var value = stats.GetRaw(new IPEndPoint(IPAddress.Parse(address.First().Value.ToString()), Int32.Parse(port.First().Value)), (StatItem)i);
            string key = ((StatItem)i).ToString();
            result.Add(key, value);
        }
        Response.data = result;
        return (true);
    }
    else
    {
        return (false);
    }
}
/// <summary>
/// Deletes the comment on a release (unless the release is historical),
/// after flushing the matrix caches.
/// </summary>
/// <returns>true on success; false when no comment row was deleted</returns>
protected override bool Execute()
{
    Release_ADO releaseAdo = new Release_ADO(Ado);
    Release_DTO releaseDto = Release_ADO.GetReleaseDTO(releaseAdo.Read(DTO.RlsCode, SamAccountName));
    DTO.MtrCode = releaseDto.MtrCode;

    //We can do this now because the MtrCode is available to us
    MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_DATASET + DTO.MtrCode);
    MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_METADATA + DTO.MtrCode);

    // Historical releases keep their comments, so deletion is only attempted otherwise
    int rowsDeleted = releaseAdo.IsHistorical(DTO.RlsCode)
        ? 0
        : releaseAdo.DeleteComment(releaseDto, SamAccountName);

    if (rowsDeleted == 0)
    {
        //Can't create a comment so we can't proceed
        Log.Instance.Debug("Can't delete comment - request refused");
        Response.error = Label.Get("error.delete");
        return (false);
    }

    Response.data = JSONRPC.success;
    return (true);
}
/// <summary>
/// Get the collection with metadata
/// </summary>
/// <param name="theAdo">Open ADO connection</param>
/// <param name="DTO">Collection read request (language, date-from, product)</param>
/// <returns>A JRaw JSON-stat collection of all matching releases (also stored in the cache)</returns>
internal dynamic ExecuteReadCollection(ADO theAdo, Cube_DTO_ReadCollection DTO)
{
    var ado = new Cube_ADO(theAdo);
    var dbData = ado.ReadCollectionMetadata(DTO.language, DTO.datefrom, DTO.product);
    List <dynamic> jsonStatCollection = new List <dynamic>();

    //Get a list of individual matrix data entities
    List <dynamic> releases = getReleases(dbData);

    var theJsonStatCollection = new JsonStatCollection();
    theJsonStatCollection.Link = new JsonStatCollectionLink();
    theJsonStatCollection.Link.Item = new List <Item>();

    // Load the available download formats once for reuse across all releases
    List <Format_DTO_Read> formats = new List <Format_DTO_Read>();
    using (Format_BSO format = new Format_BSO(new ADO("defaultConnection")))
    {
        formats = format.Read(new Format_DTO_Read() { FrmDirection = Utility.GetCustomConfig("APP_FORMAT_DOWNLOAD_NAME") });
    };

    //For each of these, get a list of statistics and a list of classifications
    //Then get the JSON-stat for that metadata and add to jsonStatCollection
    foreach (var rls in releases)
    {
        // Restrict the metadata rows to this release/language combination
        List <dynamic> thisReleaseMetadata = dbData.Where(x => x.RlsCode == rls.RlsCode).Where(x => x.LngIsoCode == rls.LngIsoCode).ToList <dynamic>();
        List <dynamic> stats = getStatistics(thisReleaseMetadata);
        List <dynamic> classifications = getClassifications(thisReleaseMetadata);
        List <dynamic> periods = getPeriods(thisReleaseMetadata);
        theJsonStatCollection.Link.Item.Add(GetJsonStatRelease(thisReleaseMetadata, stats, classifications, periods, formats));
    }

    //Get the minimum next release date. The cache can only live until then.
    //If there's no next release date then the cache will live for the maximum configured amount.
    DateTime minDateItem = default;
    dynamic minimum = null;
    if (dbData != null)
    {
        // NOTE(review): Min over an empty filtered sequence throws for non-nullable
        // element types - confirm RlsLiveDatetimeFrom is nullable here.
        minimum = dbData.Where(x => x.RlsLiveDatetimeFrom > DateTime.Now).Min(x => x.RlsLiveDatetimeFrom);
        minDateItem = minimum ?? default(DateTime);
    }
    if (minDateItem < DateTime.Now)
    {
        minDateItem = default(DateTime);
    }

    var result = new JRaw(Serialize.ToJson(theJsonStatCollection));
    MemCacheD.Store_BSO <dynamic>("PxStat.Data", "Cube_API", "ReadCollection", DTO, result, minDateItem, Constants.C_CAS_DATA_CUBE_READ_COLLECTION);

    // return the formatted data. This is an array of JSON-stat objects.
    return (result);
}
internal JSONRPC_Output WorkflowSignoffCreate(ADO Ado, WorkflowSignoff_DTO DTO, string SamAccountName) { JSONRPC_Output response = new JSONRPC_Output(); ADO_readerOutput moderators = new ADO_readerOutput(); ADO_readerOutput powerUsers = new ADO_readerOutput(); var adoWorkflowRequest = new WorkflowRequest_ADO(); var adoWorkflowResponse = new WorkflowResponse_ADO(); Release_DTO dtoWip = null; if (!adoWorkflowResponse.IsInUse(Ado, DTO)) // is current workflow -- this should be the response!! { //No Live workflow found so we can't proceed Log.Instance.Debug("No Current workflow response found for this Release Code"); response.error = Label.Get("error.create"); return(response); } //Is this awaiting signoff? var adoWorkflow = new Workflow_ADO(); ADO_readerOutput resultStatus = adoWorkflow.ReadAwaitingSignoff(Ado, SamAccountName, DTO.RlsCode, Configuration_BSO.GetCustomConfig(ConfigType.global, "language.iso.code")); if (!resultStatus.hasData) { //Release not awaiting signoff so we can't proceed Log.Instance.Debug("Release not in status Awaiting Signoff"); response.error = Label.Get("error.update"); return(response); } Security.ActiveDirectory_DTO signoffUser = new Security.ActiveDirectory_DTO() { CcnUsername = SamAccountName }; Security.ActiveDirectory_ADO accAdo = new Security.ActiveDirectory_ADO(); Security.Account_DTO_Read accDto = new Security.Account_DTO_Read() { CcnUsername = signoffUser.CcnUsername }; DTO.SignoffAccount = accAdo.GetUser(Ado, accDto); var adoSignoff = new WorkflowSignoff_ADO(); //Create a comment var adoComment = new Comment_ADO(); int commentCode = adoComment.Create(Ado, DTO, SamAccountName); if (commentCode == 0) { // Can't create a comment so we can't proceed Log.Instance.Debug("Can't create a comment "); response.error = Label.Get("error.create"); return(response); } DTO.CmmCode = commentCode; //We must read the Request and in order to see how we are going to proceed WorkflowRequest_ADO adoWrq = new WorkflowRequest_ADO(); List 
<WorkflowRequest_DTO> dtoWrqList = adoWrq.Read(Ado, DTO.RlsCode, true); if (dtoWrqList.Count > 1) { //Multiple requests found for this release Log.Instance.Debug("More than one request found for this release "); response.error = Label.Get("error.create"); return(response); } //there must be exactly one live Workflow request at this point WorkflowRequest_DTO dtoWrq = dtoWrqList.Find(x => x.RqsCode != null); //Get the current Release Release_ADO adoRelease = new Release_ADO(Ado); Release_DTO dtoRelease = Release_ADO.GetReleaseDTO(adoRelease.Read(DTO.RlsCode, SamAccountName)); if (dtoRelease == null) { Log.Instance.Debug("Release not found"); response.error = Label.Get("error.create"); return(response); } Account_BSO aBso = new Account_BSO(Ado); moderators = aBso.getReleaseUsers(DTO.RlsCode, null); powerUsers = aBso.getUsersOfPrivilege(Constants.C_SECURITY_PRIVILEGE_POWER_USER); //Is this a Reject? if (DTO.SgnCode.Equals(Constants.C_WORKFLOW_STATUS_REJECT)) { int res = adoSignoff.Create(Ado, DTO, SamAccountName); if (res == 0) { //Can't create a Workflow Signoff so we can't proceed Log.Instance.Debug("Can't create a Workflow Signoff "); response.error = Label.Get("error.create"); return(response); } WorkflowRequest_DTO_Update dtoReq = new WorkflowRequest_DTO_Update(DTO.RlsCode); dtoReq.WrqCurrentFlag = false; //update the request int reqUpdate = adoWorkflowRequest.Update(Ado, dtoReq, SamAccountName); if (reqUpdate == 0) { //Can't save the Request so we can't proceed Log.Instance.Debug("Can't save the Workflow Request"); response.error = Label.Get("error.update"); return(response); } DTO.MtrCode = dtoRelease.MtrCode; // we need this to see which cache we must flush response.data = JSONRPC.success; Email_BSO_NotifyWorkflow notifyReject = new Email_BSO_NotifyWorkflow(); try { notifyReject.EmailSignoff(dtoWrq, DTO, dtoRelease, moderators, powerUsers); } catch { } return(response); } //Not a Reject so we proceed... 
switch (dtoWrq.RqsCode) { case Constants.C_WORKFLOW_REQUEST_PUBLISH: if (String.IsNullOrEmpty(dtoRelease.PrcCode)) { //There must be a valid product for this release Log.Instance.Debug("No product found for the release "); response.error = Label.Get("error.publish"); return(response); } //Update the current release LiveDatetimeTo to the request Date time dtoRelease.RlsLiveDatetimeFrom = dtoWrq.WrqDatetime; //set the release live flag //update the release version and set the current revision to 0 DateTime switchDate; switchDate = DateTime.Now > dtoWrq.WrqDatetime ? DateTime.Now : dtoWrq.WrqDatetime; dtoRelease.RlsVersion++; dtoRelease.RlsRevision = 0; dtoRelease.RlsLiveFlag = true; dtoRelease.RlsExceptionalFlag = dtoWrq.WrqExceptionalFlag != null ? dtoWrq.WrqExceptionalFlag.Value : false; dtoRelease.RlsReservationFlag = dtoWrq.WrqReservationFlag != null ? dtoWrq.WrqReservationFlag.Value : false; dtoRelease.RlsArchiveFlag = dtoWrq.WrqArchiveFlag != null ? dtoWrq.WrqArchiveFlag.Value : false; dtoRelease.RlsExperimentalFlag = dtoWrq.WrqExperimentalFlag != null ? 
dtoWrq.WrqExperimentalFlag.Value : false; dtoRelease.RlsLiveDatetimeFrom = switchDate; //get the current live release Release_DTO releaseDTONow = Release_ADO.GetReleaseDTO(adoRelease.ReadLiveNow(DTO.RlsCode)); //Save the changes for the release we're changing int update = adoRelease.Update(dtoRelease, SamAccountName); if (update == 0) { Log.Instance.Debug("Can't update the Release, RlsCode:" + dtoRelease.RlsCode); response.error = Label.Get("error.update"); return(response); } if (releaseDTONow != null) { //...if there is a previous release if (releaseDTONow.RlsCode != 0) { //Update the Live LiveDatetimeTo to the request Datetime releaseDTONow.RlsLiveDatetimeTo = switchDate; //Save the changes for the previous release adoRelease.Update(releaseDTONow, SamAccountName); } } break; case Constants.C_WORKFLOW_REQUEST_PROPERTY: //update release to transfer all flag values from the request to the release dtoRelease.RlsReservationFlag = dtoWrq.WrqReservationFlag != null ? dtoWrq.WrqReservationFlag.Value : false; dtoRelease.RlsArchiveFlag = dtoWrq.WrqArchiveFlag != null ? dtoWrq.WrqArchiveFlag.Value : false; dtoRelease.RlsExperimentalFlag = dtoWrq.WrqExperimentalFlag != null ? 
dtoWrq.WrqExperimentalFlag.Value : false; //Save the release int updateCount = adoRelease.Update(dtoRelease, SamAccountName); if (updateCount == 0) { //Update of Release failed Log.Instance.Debug("Can't update the Release, RlsCode:" + DTO.RlsCode); response.error = Label.Get("error.update"); return(response); } //if there is a WIP or a pending live associated with this matrix then we need to update the WIP/Pending Live as well: Release_BSO rBso = new Release_BSO(Ado); dynamic wipForLive = rBso.GetWipForLive(dtoRelease.RlsCode, SamAccountName); if (wipForLive == null) { wipForLive = rBso.GetPendingLiveForLive(dtoRelease.RlsCode, SamAccountName); } if (wipForLive != null) { //if a workflow exists for wipForLive, then we must update the flags on that workflow as well var wfForLive = adoWorkflowRequest.Read(Ado, wipForLive.RlsCode, true); if (wfForLive != null) { if (wfForLive.Count > 0) { adoWorkflowRequest.Update(Ado, new WorkflowRequest_DTO_Update() { RlsCode = wipForLive.RlsCode, WrqArchiveFlag = dtoWrq.WrqArchiveFlag, WrqCurrentFlag = true, WrqExperimentalFlag = dtoWrq.WrqExperimentalFlag, WrqReservationFlag = dtoWrq.WrqReservationFlag }, SamAccountName); } } dtoWip = Release_ADO.GetReleaseDTO(adoRelease.Read(wipForLive.RlsCode, SamAccountName)); dtoWip.RlsReservationFlag = dtoRelease.RlsReservationFlag; dtoWip.RlsArchiveFlag = dtoRelease.RlsArchiveFlag; dtoWip.RlsExperimentalFlag = dtoRelease.RlsExperimentalFlag; if (adoRelease.Update(dtoWip, SamAccountName) == 0) { Log.Instance.Debug("Failed to update associated WIP " + dtoWip.MtrCode + " " + dtoWip.RlsVersion + '.' 
+ dtoWip.RlsRevision); } //if this wip has a workflow request, then the workflow request details must also be updated List <WorkflowRequest_DTO> wfList = adoWrq.Read(Ado, dtoWip.RlsCode, true); if (wfList.Count > 0) { foreach (var wf in wfList) { wf.WrqReservationFlag = dtoWrq.WrqReservationFlag; wf.WrqArchiveFlag = dtoWrq.WrqArchiveFlag; wf.WrqExperimentalFlag = dtoWrq.WrqExperimentalFlag; adoWrq.Update(Ado, new WorkflowRequest_DTO_Update() { RlsCode = wf.RlsCode, WrqCurrentFlag = dtoWrq.WrqCurrentFlag, WrqArchiveFlag = dtoWrq.WrqArchiveFlag, WrqExperimentalFlag = dtoWrq.WrqExperimentalFlag, WrqReservationFlag = dtoWrq.WrqReservationFlag }, SamAccountName); } } } break; case Constants.C_WORKFLOW_REQUEST_DELETE: //We can't soft delete the release just yet. We need it to be live until the Request is updated. break; case Constants.C_WORKFLOW_REQUEST_ROLLBACK: //Delete the future release if it exists and set the current to_date to null //Otherwise delete the current release and make the previous release current by setting its to_date to null if (adoRelease.IsLiveNext(dtoRelease.RlsCode)) //this is a future release so get the previous release to roll back to (even if that previous is now historical) { Compare_ADO cAdo = new Compare_ADO(Ado); Release_DTO dtoNowRelease = Release_ADO.GetReleaseDTO(adoRelease.Read(cAdo.ReadPreviousRelease(DTO.RlsCode), SamAccountName)); dtoNowRelease.RlsLiveDatetimeTo = default(DateTime); int rows = adoRelease.Update(dtoNowRelease, SamAccountName); if (rows == 0) { Log.Instance.Debug("Can't update the Release, RlsCode:" + dtoNowRelease.RlsCode); response.error = Label.Get("error.update"); return(response); } //As things stand, dtoRelease is the requested Release (which is a Live Next). 
This will be deleted in the Delete section below } else { //This isn't a future release - it had better be a Live Now (with a previous) if (!adoRelease.IsLiveNow(dtoRelease.RlsCode)) { //If the request is neither a Live Now release then there's a problem Log.Instance.Debug("Can't delete the Release, RlsCode:" + dtoRelease.RlsCode + ". Release is not current live"); response.error = Label.Get("error.delete"); return(response); } //Find the release that we're trying to rollback to Release_DTO dtoPrevious = Release_ADO.GetReleaseDTO(adoRelease.ReadLivePrevious(dtoRelease.RlsCode)); if (dtoPrevious.RlsCode == 0) { //Previous release not found //You can't roll back unless there's something to roll back to, so... Log.Instance.Debug("Can't delete the Release, RlsCode:" + dtoRelease.RlsCode + ". Release is not current live"); response.error = Label.Get("error.delete"); return(response); } //We set the DatetimeTo to null in the previous release dtoPrevious.RlsLiveDatetimeTo = default(DateTime); int rows = adoRelease.Update(dtoPrevious, SamAccountName); if (rows == 0) { Log.Instance.Debug("Can't update the Release, RlsCode:" + dtoPrevious.RlsCode); response.error = Label.Get("error.update"); return(response); } //Do the rollback of the current release dtoRelease.RlsVersion = dtoPrevious.RlsVersion; dtoRelease.RlsLiveDatetimeFrom = default(DateTime); rows = adoRelease.Update(dtoRelease, SamAccountName); if (rows == 0) { Log.Instance.Debug("Can't update the Release, RlsCode:" + dtoRelease.RlsCode); response.error = Label.Get("error.update"); return(response); } adoRelease.IncrementRevision(dtoRelease.RlsCode, SamAccountName); } break; default: response.error = Label.Get("error.update"); return(response); } int signoffID = adoSignoff.Create(Ado, DTO, SamAccountName); if (signoffID == 0) { //Can't create a Workflow Signoff so we can't proceed Log.Instance.Debug("Can't create a Workflow Signoff "); response.error = Label.Get("error.create"); return(response); } //In all cases, if 
we have reached this stage, we must update the request to make it non-current WorkflowRequest_DTO_Update dtoRequest = new WorkflowRequest_DTO_Update(DTO.RlsCode); dtoRequest.WrqCurrentFlag = false; //save the request int updated = adoWorkflowRequest.Update(Ado, dtoRequest, SamAccountName); if (updated == 0) { //Can't save the Request so we can't proceed Log.Instance.Debug("Can't save the Workflow Signoff"); response.error = Label.Get("error.update"); return(response); } // We may now proceed with the soft delete Release_BSO_Delete bsoDelete = new Release_BSO_Delete(); System.Navigation.Keyword_Release_ADO krbAdo = new System.Navigation.Keyword_Release_ADO(); switch (dtoWrq.RqsCode) { case Constants.C_WORKFLOW_REQUEST_DELETE: //Soft delete the Release. We had to hold this over to last because the Request updates wouldn't work without a live Release dtoRelease.RlsCode = DTO.RlsCode; Request_ADO adoRequest = new Request_ADO(); if (adoRelease.IsLiveNow(dtoRelease.RlsCode)) { Release_DTO dtoNowRelease = Release_ADO.GetReleaseDTO(adoRelease.ReadLiveNow(dtoRequest.RlsCode)); //Set the toDate to now, thus setting the release to historical if (dtoNowRelease != null) { dtoNowRelease.RlsLiveDatetimeTo = DateTime.Now; int updateCount = adoRelease.Update(dtoNowRelease, SamAccountName); if (updateCount == 0) { Log.Instance.Debug("Can't update the Release, RlsCode:" + dtoNowRelease.RlsCode); response.error = Label.Get("error.update"); return(response); } } //Delete the search keywords for this release krbAdo.Delete(Ado, DTO.RlsCode, null, true); } else if (adoRelease.IsLiveNext(dtoRelease.RlsCode) || adoRelease.IsWip(dtoRelease.RlsCode)) { //Find the previous list if it exists Compare_ADO cAdo = new Compare_ADO(Ado); Release_DTO dtoPreviousRelease = Release_ADO.GetReleaseDTO(adoRelease.Read(cAdo.ReadPreviousRelease(DTO.RlsCode), SamAccountName)); //if there is a previous live set it to historical, but not if we're deleting a WIP if (dtoPreviousRelease != null && 
!adoRelease.IsWip(dtoRelease.RlsCode)) { //Delete the search keywords for the previous release krbAdo.Delete(Ado, dtoPreviousRelease.RlsCode, null, true); dtoPreviousRelease.RlsLiveDatetimeTo = DateTime.Now; int updateCount = adoRelease.Update(dtoPreviousRelease, SamAccountName); if (updateCount == 0) { Log.Instance.Debug("Can't update the Release, RlsCode:" + dtoPreviousRelease.RlsCode); response.error = Label.Get("error.update"); return(response); } } //Delete the search keywords for this release krbAdo.Delete(Ado, DTO.RlsCode, null, true); // We may now proceed with the soft delete if (bsoDelete.Delete(Ado, DTO.RlsCode, SamAccountName, true) == 0) { Log.Instance.Debug("Can't delete the Release, RlsCode:" + DTO.RlsCode); response.error = Label.Get("error.delete"); return(response); } } else { //Only LiveNow, LiveNext and WIP releases can be deleted. Anything else means there's a problem. Log.Instance.Debug("Can't delete the Release - invalid release status, RlsCode:" + DTO.RlsCode); response.error = Label.Get("error.delete"); return(response); } break; case Constants.C_WORKFLOW_REQUEST_ROLLBACK: //First, if there is a WIP ahead of this live release then that WIP must be deleted Release_ADO releaseAdo = new Release_ADO(Ado); var releaseDTORead = new Release_DTO_Read() { MtrCode = dtoRelease.MtrCode }; var latestRelease = releaseAdo.ReadLatest(releaseDTORead); if (latestRelease != null) { if (dtoRelease.RlsCode != latestRelease.RlsCode) { if (bsoDelete.Delete(Ado, latestRelease.RlsCode, SamAccountName, true) == 0) { Log.Instance.Debug("Can't delete the Release, RlsCode:" + latestRelease.RlsCode); response.error = Label.Get("error.delete"); return(response); } } } // Only Live Next gets soft deleted, while Live Now is turned historical above if (adoRelease.IsLiveNext(dtoRelease.RlsCode)) { if (bsoDelete.Delete(Ado, DTO.RlsCode, SamAccountName, true) == 0) { Log.Instance.Debug("Can't delete the Release, RlsCode:" + DTO.RlsCode); response.error = 
Label.Get("error.delete"); return(response); } //Delete the search keywords for this release krbAdo.Delete(Ado, DTO.RlsCode, null, true); } // Delete the requested release (it may have been live but may also have been demoted to WIP by now) if (adoRelease.IsWip(dtoRelease.RlsCode)) { if (bsoDelete.Delete(Ado, dtoRelease.RlsCode, SamAccountName, true) == 0) { Log.Instance.Debug("Can't delete the Release, RlsCode:" + dtoRelease.RlsCode); response.error = Label.Get("error.delete"); return(response); } //Delete the search keywords for this release krbAdo.Delete(Ado, DTO.RlsCode, null, true); } break; } DTO.MtrCode = dtoRelease.MtrCode; // we need this to see which cache we must flush response.data = JSONRPC.success; Email_BSO_NotifyWorkflow notify = new Email_BSO_NotifyWorkflow(); var sendMailThread = new Thread(() => { //If an email error occurs, just ignore it and continue as before try { notify.EmailSignoff(dtoWrq, DTO, dtoRelease, moderators, powerUsers); } catch { } }); sendMailThread.Start(); // Clean up caching MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_COMPARE_READ_ADDITION + DTO.RlsCode); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_COMPARE_READ_DELETION + DTO.RlsCode); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_COMPARE_READ_AMENDMENT + DTO.RlsCode); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_PRE_DATASET + DTO.RlsCode); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_PRE_METADATA + DTO.RlsCode); if (dtoWip != null) { MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_COMPARE_READ_ADDITION + dtoWip.RlsCode); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_COMPARE_READ_DELETION + dtoWip.RlsCode); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_COMPARE_READ_AMENDMENT + dtoWip.RlsCode); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_PRE_DATASET + dtoWip.RlsCode); 
MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_PRE_METADATA + dtoWip.RlsCode); } MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_NAVIGATION_SEARCH); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_NAVIGATION_READ); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_COLLECTION); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_DATASET + DTO.MtrCode); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_DATASET + DTO.MtrCode); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_METADATA + DTO.MtrCode); MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_COLLECTION_PXAPI); return(response); }
/// <summary>
/// Imports a PX file: checks the caller's build permission and the request signature,
/// parses the PX input into a Matrix, guards against duplicate WIP releases, pending
/// workflow requests, pending live releases and dataset locks, then creates (or clones /
/// increments) the Release and bulk-loads the matrix data into the database.
/// </summary>
/// <returns>true on success (including the "duplicate WIP" case, which is reported via Response.data); false on any permission/validation/lock error</returns>
protected override bool Execute()
{
    Build_BSO buildBso = new Build_BSO();

    // Caller must hold the "import" build permission
    if (!buildBso.HasBuildPermission(Ado, SamAccountName, "import"))
    {
        Response.error = Label.Get("error.privilege");
        return (false);
    }

    Stopwatch swMatrix = new Stopwatch();
    swMatrix.Start();

    // The request must carry an MD5 of (salt + signature DTO); mismatch means the payload was tampered with
    var signature = Utility.GetMD5(Utility.GetCustomConfig("APP_SALSA") + Utility.JsonSerialize_IgnoreLoopingReference(DTO.GetSignatureDTO()));
    if (signature != DTO.Signature)
    {
        Response.error = Label.Get("error.validation");
        return (false);
    }

    // Parse the PX input and build the Matrix from it
    Matrix theMatrixData;
    PxDoc = PxStatEngine.ParsePxInput(DTO.MtrInput);
    theMatrixData = new Matrix(PxDoc, DTO);

    Matrix_BSO mBso = new Matrix_BSO(Ado);
    int releaseId;

    // Check if a WIP Release already exists for the Matrix to upload
    var latestRelease = mBso.GetLatestRelease(theMatrixData);
    if (latestRelease != null && !DTO.Overwrite && releaseAdo.IsWip(latestRelease.RlsCode))
    {
        // Duplicate WIP and no overwrite requested: report the duplicate (in Response.data)
        // and return success — this is a soft failure, not an error
        Group_DTO_Create dtoGroup = this.GetGroup(DTO.GrpCode);
        if (latestRelease.GrpCode != DTO.GrpCode)
        {
            Response.data = String.Format(Label.Get("px.duplicate-different-group"), theMatrixData.Code, latestRelease.GrpName + " (" + latestRelease.GrpCode + ")", dtoGroup.GrpName + " (" + DTO.GrpCode + ")");
        }
        else
        {
            Response.data = String.Format(Label.Get("px.duplicate"), theMatrixData.Code);
        }
        return (true);
    }

    // Check if this Release already has a pending WorkflowRequest
    if (latestRelease != null && new WorkflowRequest_ADO().IsCurrent(Ado, latestRelease.RlsCode))
    {
        Response.error = String.Format(Label.Get("error.workflow"), theMatrixData.Code);
        return (false);
    }

    // Check if this Release has another pending live release
    if (latestRelease != null && new Release_ADO(Ado).IsLiveNext(latestRelease.RlsCode))
    {
        Response.error = String.Format(Label.Get("px.pendinglive"), theMatrixData.Code);
        return (false);
    }

    // Check if the matrix code is locked in the dataset table; a lock still inside the
    // configured window blocks the import, otherwise we take (refresh) the lock ourselves
    using (DatasetAdo dAdo = new DatasetAdo(new ADO("defaultConnection")))
    {
        ADO_readerOutput dResult = dAdo.ReadDatasetLocked(theMatrixData.Code);
        if (dResult.hasData)
        {
            DateTime lockedTime = dResult.data[0].DttDatetimeLocked.Equals(DBNull.Value) ? default : (DateTime)dResult.data[0].DttDatetimeLocked;
            if (lockedTime.AddMinutes(Configuration_BSO.GetCustomConfig(ConfigType.server, "release.lockTimeMinutes")) > DateTime.Now)
            {
                Response.error = Label.Get("error.release.locked");
                return (false);
            }
        }
        dAdo.DatasetLockUpdate(theMatrixData.Code, DateTime.Now);
    }

    if (latestRelease != null)
    {
        if (latestRelease.RlsLiveFlag)
        {
            // Latest release is live: clone it (and its comments) into a new release
            releaseId = mBso.CloneRelease(latestRelease.RlsCode, DTO.GrpCode, SamAccountName);
            mBso.CloneComment(latestRelease.RlsCode, releaseId, SamAccountName);
        }
        else
        {
            // Latest release is not live: bump its revision (moving it to the new group
            // if one was requested) and delete the old matrix data
            if (latestRelease.GrpCode != DTO.GrpCode)
            {
                releaseId = releaseAdo.IncrementRevision(latestRelease.RlsCode, SamAccountName, DTO.GrpCode);
            }
            else
            {
                releaseId = releaseAdo.IncrementRevision(latestRelease.RlsCode, SamAccountName);
            }
            matrixAdo.Delete(latestRelease.RlsCode, SamAccountName);
        }

        // Clean up caching for the superseded release
        MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_COMPARE_READ_ADDITION + latestRelease.RlsCode);
        MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_COMPARE_READ_DELETION + latestRelease.RlsCode);
        MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_COMPARE_READ_AMENDMENT + latestRelease.RlsCode);
        MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_PRE_DATASET + latestRelease.RlsCode);
        MemCacheD.CasRepositoryFlush(Resources.Constants.C_CAS_DATA_CUBE_READ_PRE_METADATA + latestRelease.RlsCode);
    }
    else
    {
        // No prior release for this matrix: create a brand new one
        // (the 0, 1 arguments are presumably the initial version/revision — TODO confirm against CreateRelease)
        releaseId = mBso.CreateRelease(theMatrixData, 0, 1, DTO.GrpCode, SamAccountName);
    }

    mBso.CreateMatrix(theMatrixData, releaseId, SamAccountName, DTO);

    swMatrix.Stop();
    Log.Instance.Info(string.Format("Matrix object created in {0} ms", Math.Round((double)swMatrix.ElapsedMilliseconds)));

    Stopwatch swLoad = new Stopwatch();
    swLoad.Start();

    // Do a Cartesian join to correctly label each data point with its dimensions
    // Create bulk tables from this and load them to the database
    var asyncTask = buildBso.CreateAndLoadDataTables(Ado, theMatrixData, true);

    // We must specifically retrieve any exceptions from the Task and then throw them. Otherwise they will be silent.
    // NOTE(review): Task.Exception is only non-null once the task has faulted; if the task
    // can still be running at this point, late failures would be missed — confirm that
    // CreateAndLoadDataTables completes before returning.
    if (asyncTask.Exception != null)
    {
        throw asyncTask.Exception;
    }

    matrixAdo.MarkMatrixAsContainingData(theMatrixData.MainSpec.MatrixId, true);

    // Attach the mandatory search keywords to the new release
    Keyword_Release_BSO_CreateMandatory krBSO = new Keyword_Release_BSO_CreateMandatory();
    krBSO.Create(Ado, releaseId, SamAccountName, theMatrixData);

    swLoad.Stop();
    Log.Instance.Info(string.Format("Matrix loaded in DB in {0} ms", Math.Round((double)swLoad.ElapsedMilliseconds)));

    // Release the dataset lock by resetting the lock timestamp to its default value
    using (DatasetAdo dAdo = new DatasetAdo(new ADO("defaultConnection")))
    {
        dAdo.DatasetLockUpdate(theMatrixData.Code, default);
    }

    Response.data = JSONRPC.success;
    return (true);
}
//Should we allow FirebaseId to be set by a DTO parameter? -would make things easier here....
//or maybe better idea, set it at the template level
//also, maybe see about reading subscriptions from the cache rather than from a db read request

/// <summary>
/// Decides whether the current API request must be throttled (rate limited).
/// Subscribed callers — a valid SubscriberKey header, or an authenticated account — get
/// the "subscribed" window/limit; anonymous callers are keyed by userAgent + ipAddress
/// and get the "nonSubscribed" limits. Per-user request timestamps live in MemCacheD.
/// </summary>
/// <param name="Ado">Data-access object, used to rebuild the subscriber-key cache if it is missing</param>
/// <param name="hRequest">Raw HTTP request, read for the optional SubscriberKey header</param>
/// <param name="request">Parsed JSON-RPC request; supplies userAgent/ipAddress for anonymous callers</param>
/// <param name="samAccountName">Authenticated account name, if any; such users are always treated as subscribed</param>
/// <returns>true if the caller exceeded its call limit inside the window; false otherwise</returns>
internal static bool IsThrottled(ADO Ado, HttpRequest hRequest, JSONRPC_API request, string samAccountName = null)
{
    //We need MemcacheD to use this — with caching disabled no throttling can be applied
    if (!Convert.ToBoolean(ConfigurationManager.AppSettings["API_MEMCACHED_ENABLED"]))
    {
        return (false);
    }

    int window;            // throttling window, in seconds
    int cutoff;            // call limit inside the window
    string user = null;    // cache key identifying the caller
    bool subscribed = false;

    //Did the user send a SubscriberKey in the header of the request?
    if (hRequest.Headers.AllKeys.Contains("SubscriberKey"))
    {
        //They sent a SubscriberKey, but is it in our list of valid tokens?
        var keyListCache = MemCacheD.Get_BSO("PxStat.Subscription", "Subscriber_BSO", "RefreshSubscriberKeyCache", "RefreshSubscriberKeyCache");
        if (!keyListCache.hasData)
        {
            //No cache - try creating one, then re-read it
            new Subscriber_BSO().RefreshSubscriberKeyCache(Ado);
            keyListCache = MemCacheD.Get_BSO("PxStat.Subscription", "Subscriber_BSO", "RefreshSubscriberKeyCache", "RefreshSubscriberKeyCache");
        }
        if (keyListCache.hasData)
        {
            //Does the request contain a valid subscription token?
            var keyValues = keyListCache.data.ToObject<List<string>>();
            if (keyValues.Contains(hRequest.Headers.GetValues("SubscriberKey").FirstOrDefault()))
            {
                user = hRequest.Headers.GetValues("SubscriberKey").FirstOrDefault();
                subscribed = true;
            }
        }
    }

    //An AD or Local user is deemed to be already subscribed
    //(this deliberately overrides any SubscriberKey identity set above)
    if (samAccountName != null)
    {
        user = samAccountName;
        subscribed = true;
    }

    //Different limits apply depending on whether the user is subscribed or not
    if (subscribed)
    {
        window = Configuration_BSO.GetCustomConfig(ConfigType.global, "throttle.subscribedWindowSeconds");
        cutoff = Configuration_BSO.GetCustomConfig(ConfigType.global, "throttle.subscribedCallLimit");
    }
    else
    {
        window = Configuration_BSO.GetCustomConfig(ConfigType.global, "throttle.nonSubscribedWindowSeconds");
        cutoff = Configuration_BSO.GetCustomConfig(ConfigType.global, "throttle.nonSubscribedCallLimit");
        // Anonymous callers are identified by user agent + IP address
        user = request.userAgent + request.ipAddress;
    }

    //Now we check the usage for the current requester
    List<DateTime> workingList = new List<DateTime>();
    var cache = MemCacheD.Get_BSO("PxStat.Security", "Throttle", "Read", user);
    if (cache.hasData)
    {
        List<DateTime> userHistory = JsonConvert.DeserializeObject<List<DateTime>>(cache.data.ToString());

        //We only count the entries inside the current window
        workingList = userHistory.Where(x => x > DateTime.Now.AddSeconds(window * -1)).ToList();

        // NOTE(review): "> cutoff + 1" means the caller is throttled only once more than
        // cutoff + 1 requests have accumulated in the window — confirm whether this
        // off-by-one slack is intended or the condition should be "> cutoff"
        if (workingList.Count() > cutoff + 1)
        {
            Log.Instance.Info(String.Format("Throttle event for user {0}, {1} requests in {2} seconds", user, workingList.Count, window));
            return (true);
        }
    }

    // Record this request and persist the pruned history with no expiry
    // (throttled requests return above and are therefore not recorded)
    workingList.Add(DateTime.Now);
    MemCacheD.Store_BSO("PxStat.Security", "Throttle", "Read", user, workingList, default(DateTime));
    return (false);
}
/// <summary>
/// Template entry point for Read requests: authenticates/authorizes the caller,
/// cleanses, sanitizes and validates the request parameters, serves cached data for
/// methods tagged with the "CacheRead" attribute, and otherwise delegates to the
/// concrete Execute() implementation. Always disposes resources before returning.
/// </summary>
/// <returns>this (fluent style), with Response populated with either data or an error</returns>
public BaseTemplate_Read<T, V> Read()
{
    try
    {
        // first of all, we check if user has the right to perform this operation!
        if (HasUserToBeAuthenticated())
        {
            if (!IsUserAuthenticated() || !HasUserPrivilege())
            {
                return (this);
            }
        }

        //Run the parameters through the cleanse process
        dynamic cleansedParams = Cleanser.Cleanse(Request.parameters);

        // NOTE(review): the original exception is discarded here — any failure to build
        // the DTO is reported uniformly as an input-format problem
        try
        {
            DTO = GetDTO(cleansedParams);
        }
        catch
        {
            throw new InputFormatException();
        }

        DTO = Sanitizer.Sanitize(DTO);

        DTOValidationResult = Validator.Validate(DTO);
        if (!DTOValidationResult.IsValid)
        {
            OnDTOValidationError();
            return (this);
        }

        //Create the analytic data if required
        Security.Analytic_BSO_Create.Create(Ado, DTO, HttpContext.Current.Request, Request);

        //See if there's a cache in the process: methods tagged "CacheRead" can be
        //answered straight from MemCacheD without calling Execute()
        if (MethodReader.MethodHasAttribute(Request.method, "CacheRead"))
        {
            cDTO = new CacheMetadata("CacheRead", Request.method, DTO);
            MemCachedD_Value cache = MemCacheD.Get_BSO<dynamic>(cDTO.Namespace, cDTO.ApiName, cDTO.Method, DTO);
            if (cache.hasData)
            {
                Response.data = cache.data;
                return (this);
            }
        }

        // The Actual Read should happen here by the specific class!
        if (!Execute())
        {
            OnExecutionError();
            return (this);
        }
        OnExecutionSuccess();
        return (this);
    }
    catch (UnmatchedParametersException unmatchException)
    {
        // Parameters did not match what the method expects: handled as a validation error
        Log.Instance.Debug(unmatchException);
        OnDTOValidationError();
        return (this);
    }
    catch (FormatException formatException)
    {
        //A FormatException error has been caught, log the error and return a message to the caller
        Log.Instance.Error(formatException);
        Response.error = Label.Get("error.schema");
        return (this);
    }
    catch (Exception ex)
    {
        //An error has been caught, log the error and return a message to the caller
        Log.Instance.Error(ex);
        Response.error = Label.Get("error.exception");
        return (this);
    }
    finally
    {
        // Always release resources, whatever the outcome
        Dispose();
    }
}
/// <summary>
/// Updates an account, enforcing privilege rules: a power user may neither promote
/// anyone to Administrator nor downgrade an existing Administrator; the system must
/// always retain at least one Administrator; and accounts promoted to Administrator or
/// Power User lose all their group memberships. Evicts the stale account cache entry.
/// </summary>
/// <returns>true on success; false with Response.error populated on any failure</returns>
protected override bool Execute()
{
    //A power user may not update a user to become an Administrator
    if (IsPowerUser() && DTO.PrvCode.Equals(Resources.Constants.C_SECURITY_PRIVILEGE_ADMINISTRATOR))
    {
        Log.Instance.Debug("A power user may not update a user to become an Administrator");
        Response.error = Label.Get("error.privilege");
        return false;
    }

    //A power user may not downgrade an administrator
    if (IsPowerUser() && IsAdministrator(DTO.CcnUsername) && !DTO.PrvCode.Equals(Resources.Constants.C_SECURITY_PRIVILEGE_ADMINISTRATOR))
    {
        Log.Instance.Debug("A power user may not downgrade an administrator");
        Response.error = Label.Get("error.privilege");
        return false;
    }

    Account_ADO accountAdo = new Account_ADO();

    //There must always be at least one administrator in the system. If this update would
    //leave no administrator then the request must be refused.
    if (IsAdministrator(DTO.CcnUsername)
        && !accountAdo.EnoughPrivilegesInAccounts(Ado, Resources.Constants.C_SECURITY_PRIVILEGE_ADMINISTRATOR))
    {
        Log.Instance.Debug("There are insufficient Administrators in the Account table to proceed with this update.");
        Response.error = Label.Get("error.update");
        return false;
    }

    //Apply the update; zero affected rows means it failed
    if (accountAdo.Update(Ado, DTO, SamAccountName) == 0)
    {
        Log.Instance.Debug("Failed to update Account");
        Response.error = Label.Get("error.update");
        return false;
    }

    //An administrator or power user may not be a member of a group, so remove any group
    //memberships for the updated user. We run the check based on the proposed PrvCode,
    //not on the existing privilege.
    bool becomesElevated = DTO.PrvCode.Equals(Resources.Constants.C_SECURITY_PRIVILEGE_ADMINISTRATOR)
        || DTO.PrvCode.Equals(Resources.Constants.C_SECURITY_PRIVILEGE_POWER_USER);
    if (becomesElevated)
    {
        foreach (GroupAccount_DTO membership in getGroupMembership(DTO.CcnUsername))
        {
            GroupAccount_ADO groupAccountAdo = new GroupAccount_ADO();
            GroupAccount_DTO_Delete deleteDto = new GroupAccount_DTO_Delete
            {
                CcnUsername = membership.CcnUsername,
                GrpCode = membership.GrpCode
            };
            if (groupAccountAdo.Delete(Ado, deleteDto, SamAccountName) == 0)
            {
                Log.Instance.Debug("Failed to delete account group membership");
                Response.error = Label.Get("error.update");
                return false;
            }
        }
    }

    //If this user is cached then we must remove it because the data is now out of date
    MemCacheD.Remove_BSO<dynamic>("PxStat.Security", "Account_API", "ReadCurrentAccesss", DTO.CcnUsername);

    Response.data = JSONRPC.success;
    return true;
}