// GET endpoint: returns the cached data set for this controller's Type,
// optionally filtered to entries edited after the supplied Unix-epoch date.
// Responds 429 when the caller is throttled, otherwise 200 with either the
// cache payload or null when nothing (newer) exists.
public async Task<IActionResult> Get(long? date = null)
{
    // NOTE(review): the access check uses ThrottlingService but the message
    // comes from ThrottleService — confirm these really are two distinct types.
    if (Throttled && !ThrottlingService.CanAccess(User, Request, Request.Path, Timeout))
    {
        return StatusCode(429, ThrottleService.Message);
    }

    var query = DbContext.CacheSets.Where(x => x.Type == Type);

    // Caller supplied a "modified since" stamp: only match newer entries.
    if (date.HasValue)
    {
        var since = date.Value.FromUnixEpochDate();
        query = query.Where(x => x.EditedDate > since);
    }

    var entry = await query.SingleOrDefaultAsync();
    if (entry == null)
    {
        return Ok(null);
    }

    return Ok(new CacheResults { Timestamp = entry.EditedDate, Items = entry.JSON });
}
// Writes the solver's cache assignment to "output_<input>.txt" and echoes
// per-cache and total scores to the console.
// Fix: the StreamWriter was only closed on the happy path — any exception
// while writing leaked the file handle. A using statement now guarantees
// disposal (flush + close) on every path.
private void Write(CacheResults results, Reader reader)
{
    using (StreamWriter output = File.CreateText("output_" + reader.input + ".txt"))
    {
        // First line: number of cache servers that received an assignment.
        output.WriteLine(results.res.Count());

        int score = 0;
        foreach (CacheFormation cacheResults in results.res)
        {
            score = score + cacheResults.score;
            Console.WriteLine("Score: " + cacheResults.score.ToString());
            // One line per cache: "<cacheId> <videoId> <videoId> ...".
            output.WriteLine(cacheResults.ca.id + " " + string.Join(" ", cacheResults.videos.ToArray()));
        }
        Console.WriteLine("Final Score: " + score); // score is 390455
    }
}
// Solves the video-caching assignment problem: builds a cost table of how
// much latency each (request, cache) pairing would save, then greedily packs
// each cache with the highest value-per-size videos, and writes the result.
//
// Fix: the original allocated ONE CacheFormation before the cache loop and
// added that same reference to results.res on every iteration, so every list
// entry aliased the final cache's data and result.score accumulated across
// caches. A fresh CacheFormation is now created per cache.
public void Solve()
{
    var reader = new Reader();
    reader.Read();

    List<RequestCosting> RequestCostings = new List<RequestCosting>();

    // NOTE(review): GroupBy followed by SelectMany flattens straight back out,
    // so requests sharing a video id are NOT actually merged here — confirm
    // whether a real aggregation was intended.
    var CombinedRequestsGroup = reader.requests.RequestList.GroupBy(f => f.vId);
    var CombinedRequests = CombinedRequestsGroup.SelectMany(req => req);

    CacheResults results = new CacheResults();

    // Build the per-(request, cache) cost table: latency saved versus serving
    // the request straight from the data centre.
    foreach (var req in CombinedRequests)
    {
        // Latency from this request's endpoint to the data centre.
        int lD = reader.endpoints.EndpointList
            .Where(en => req.sourceEndPoint == en.id)
            .Select(an => an.latencyDataCenter)
            .First();

        // All caches connected to the request's endpoint.
        var Endpoints = reader.endpoints.EndpointList.Where(en => en.id == req.sourceEndPoint).ToArray();
        for (int e = 0; e < Endpoints.Length; e++)
        {
            var endpointCacheLatency = Endpoints[e].cacheLatency.ToArray();
            for (int ecl = 0; ecl < endpointCacheLatency.Length; ecl++)
            {
                int latencySave = lD - endpointCacheLatency[ecl].latency;
                RequestCostings.Add(new RequestCosting
                {
                    id = req.id,
                    cacheId = endpointCacheLatency[ecl].cacheid,
                    latency = endpointCacheLatency[ecl].latency,
                    latencySave = latencySave,
                    noOfReqs = req.requests,
                    video = reader.vids.getVideoById(req.vId)
                });
            }
        }
    }

    // Greedily fill each cache with the best videos for it.
    foreach (Cache c in reader.caches.CacheList)
    {
        Console.WriteLine("Starting Cache: " + c.id);

        // Rank candidates by LatencySave * NoOfRequests / VideoSize.
        var bestVidsForCache = RequestCostings
            .Where(z => z.cacheId == c.id)
            .OrderByDescending(a => (a.latencySave * a.noOfReqs) / a.video.size)
            .ToArray();

        if (results.res == null)
        {
            results.res = new List<CacheFormation>();
        }

        // Fresh formation per cache (see fix note in the header comment).
        CacheFormation result = new CacheFormation();
        result.videos = new List<int>();
        result.ca = reader.caches.getCacheById(c.id);

        for (int i = 0; i < bestVidsForCache.Length; i++)
        {
            var rq = bestVidsForCache[i];
            // Check we're not over capacity, and the next video won't push us over either.
            if (c.capacity > 0 && (c.capacity - rq.video.size > 0))
            {
                // Skip videos already placed in this cache.
                if (result.videos.IndexOf(rq.video.id) == -1)
                {
                    result.videos.Add(rq.video.id);
                    int totalReq = bestVidsForCache.Sum(a => a.noOfReqs);
                    result.score = result.score + ((rq.latencySave * rq.noOfReqs) / totalReq);
                    c.capacity = c.capacity - rq.video.size;
                }
            }
        }

        results.res.Add(result);
    }

    Write(results, reader);
}
// Returns the cached dimension/refinement data for the given repository,
// (re)loading it from SQL when the cache is cold or when the repository's
// dimension stamp has changed since the entry was cached. All cache access
// happens under the _cache lock.
//
// Fix: removed the pointless "try { ... } catch (Exception ex) { throw; }"
// wrapper — it only rethrew (preserving behavior is therefore trivial) and
// left an unused-variable warning. Also deleted commented-out code.
private CacheResults GetCache(DatastoreEntities context, int id, RepositorySchema schema)
{
    var dimensionValueTableName = SqlHelper.GetDimensionValueTableName(schema.ID);
    var dimensionValueTableNameParent = string.Empty;
    lock (_cache)
    {
        var dimensionStamp = RepositoryManager.GetDimensionChanged(context, id);
        var retval = _cache.FirstOrDefault(x => x.RepositoryId == id);

        // Check repository DimensionStamp and, if changed, reload dimensions.
        if (retval != null && retval.DimensionStamp != dimensionStamp)
        {
            Clear(id);
            retval = null;
        }

        if (retval == null)
        {
            #region Parent table stuff
            if (schema.ParentID != null)
            {
                if (!_parentSchemaCache.ContainsKey(schema.ID))
                {
                    var parentSchema = RepositoryManager.GetSchema(schema.ParentID.Value);
                    _parentSchemaCache.Add(schema.ID, schema.Subtract(parentSchema));
                }
                dimensionValueTableNameParent = SqlHelper.GetDimensionValueTableName(schema.ParentID.Value);
            }
            #endregion

            // NOTE(review): the entry is added to _cache before Results is
            // populated; if the SQL load below throws, a partially initialized
            // entry stays cached until the next stamp change — confirm this
            // is acceptable.
            retval = new CacheResults()
            {
                RepositoryId = id,
                ParentId = RepositoryManager.GetSchemaParentId(id)
            };
            _cache.Add(retval);

            var sb = new StringBuilder();
            sb.AppendLine($"select v.DIdx, v.DVIdx, v.Value from [{dimensionValueTableName}] v");
            // If there is a parent schema then UNION its dimension tables.
            if (schema.ParentID != null)
            {
                sb.AppendLine($"union select v.DIdx, v.DVIdx, v.Value from [{dimensionValueTableNameParent}] v");
            }
            sb.AppendLine("order by DIdx, DVIdx");
            var ds = SqlHelper.GetDataset(ConfigHelper.ConnectionString, sb.ToString(), null);

            retval.Results = new List<DimensionItem>();
            // Seed every schema dimension so dimensions with no refinement
            // rows still appear in the results.
            foreach (var dimension in schema.DimensionList)
            {
                retval.Results.Add(new DimensionItem
                {
                    DIdx = dimension.DIdx,
                    Name = dimension.Name,
                });
            }

            // Fold each (DIdx, DVIdx, Value) row into its dimension's
            // refinement list; DVIdx == DBNull means a dimension with no value.
            foreach (DataRow dr in ds.Tables[0].Rows)
            {
                var didx = (long)dr["DIdx"];
                long dvidx = 0;
                string v = null;
                if (dr["DVIdx"] != System.DBNull.Value)
                {
                    dvidx = (long)dr["DVIdx"];
                    v = (string)dr["Value"];
                }
                var d = retval.Results.FirstOrDefault(x => x.DIdx == didx);
                if (d == null)
                {
                    d = new DimensionItem
                    {
                        DIdx = (int)didx,
                        Name = schema.DimensionList.Where(x => x.DIdx == didx).Select(x => x.Name).FirstOrDefault()
                    };
                    retval.Results.Add(d);
                }
                if (dvidx != 0)
                {
                    d.RefinementList.Add(new RefinementItem { DVIdx = dvidx, FieldValue = v, DIdx = didx });
                }
            }
        }

        retval.DimensionStamp = dimensionStamp;
        retval.Timestamp = DateTime.Now; // Accessed
        return retval;
    }
}