// Reads the next request from a cache-UNfiltered trace line.
// Expected line format is 6 space-separated tokens where token[0] is the
// cumulative instruction count, token[2] the virtual address, and token[5]
// the request type (0 = read, non-zero = write). The meaning of the remaining
// tokens is not visible here -- TODO confirm against the trace generator.
private void get_req_cache_unfiltered(ref int cpu_inst_cnt, ref string line, Char[] delim, ref string[] tokens, out Req rd_req, out Req wb_req)
{
    Dbg.AssertPrint(tokens.Length == 6, "trace line = " + line);
    ReqType req_type = (int.Parse(tokens[5]) == 0) ? ReqType.READ : ReqType.WRITE;

    // Read-only requests: when writebacks are disabled, skip WRITE lines
    // until the next READ line is found.
    while (Config.proc.wb == false && req_type == ReqType.WRITE)
    {
        line = read_trace();
        tokens = line.Split(delim);
        req_type = (int.Parse(tokens[5]) == 0) ? ReqType.READ : ReqType.WRITE;
    }

    // Set instruction count b/w requests: the trace carries a cumulative
    // count, so the per-request count is the delta from the previous line.
    ulong icount = ulong.Parse(tokens[0]);
    if (cur_inst_count == 0)
    {
        // First request of the trace: no previous counter to diff against.
        cpu_inst_cnt = 0;
    }
    else
    {
        cpu_inst_cnt = (int)(icount - cur_inst_count);
        Dbg.AssertPrint(cpu_inst_cnt >= 0, "Negative instruction count");
    }
    cur_inst_count = icount;

    // Parse virtual address and tag the process id into the upper bits
    // (bit 48 and above) so per-process address spaces do not collide.
    ulong vaddr = ulong.Parse(tokens[2]);
    vaddr = vaddr + (((ulong)pid) << 48);
    rd_req = RequestPool.Depool();
    rd_req.Set(pid, req_type, vaddr);
    // Unfiltered-trace mode never produces a separate writeback request here.
    wb_req = null;
}
// Enqueues a per-bank refresh request for (rankIdx, bankIdx) and advances
// the bank-level and subarray-level refresh counters, wrapping each at its
// configured maximum.
protected void RefreshBank(uint rankIdx, uint bankIdx)
{
    // Build the refresh request stamped with the current cycle.
    Req refreshReq = RequestPool.Depool();
    refreshReq.Type = ReqType.REFRESH_BANK;
    refreshReq.TsArrival = Cycles;

    // Target address: this channel/rank/bank, at the subarray the
    // per-bank counter currently points to.
    MemAddr target = new MemAddr();
    target.Reset();
    target.cid = Mctrl.cid;
    target.rid = rankIdx;
    target.bid = bankIdx;
    target.said = SaCountersPerbank[rankIdx, bankIdx];
    refreshReq.Addr = target;

    // Hand the request to this bank's refresh queue.
    Mctrl.Refbankqs[rankIdx, bankIdx].Add(refreshReq);

    // Advance both counters, wrapping to zero at their maxima.
    if (++BankCounters[rankIdx] == BankCounterMax)
    {
        BankCounters[rankIdx] = 0;
    }
    if (++SaCountersPerbank[rankIdx, bankIdx] == SaCounterMax)
    {
        SaCountersPerbank[rankIdx, bankIdx] = 0;
    }
}
// Completion callback for a serviced writeback: record statistics for this
// process, then return the request object to the shared pool.
public void recv_wb_req(Req req)
{
    var procStat = Stat.procs[pid];
    procStat.write_req_served.collect();
    procStat.write_avg_latency.collect(req.Latency);

    // Recycle the request object.
    RequestPool.Enpool(req);
}
// Callback function when a memory request is complete. This retires instructions or inserts data back into caches. public void recv_req(Req req) { // Install the rest of the words in the cacheline bool cw_contains_write = false; //stats if (!req.CpyGenReq) { Stat.procs[pid].read_req_served.collect(); Stat.procs[pid].read_avg_latency.collect(req.Latency); } // Handles the read write request if (req.RdWr) { Dbg.Assert(read_write_q.Contains(req.BlockAddr)); read_write_q.Remove(req.BlockAddr); } //free up instruction window and mshr bool contains_write = inst_wnd.set_ready(req.BlockAddr); contains_write |= cw_contains_write; mshr.RemoveAll(x => x == req.BlockAddr); Req wb_req = null; // Install cachelines and handle dirty block evictions if (Config.proc.cache_enabled) { cache_handler(req, contains_write); } else { Dbg.AssertPrint(!contains_write, "Inst window contains write reqeusts."); // Writeback based on the cache filtered traces wb_req = req.WbReq; if (wb_req != null) { bool wb_merge = wb_q.Exists(x => x.BlockAddr == wb_req.BlockAddr); if (!wb_merge) { addWB(wb_req); } else { RequestPool.Enpool(wb_req); } } } //destory req RequestPool.Enpool(req); out_read_req--; }
// This mode is used when we use the L2-cache filtered trace files with shared LLC enabled
// A trace line carrying a writeback is split across two calls: this call
// returns the read, buffers the writeback address, and the NEXT call returns
// the buffered writeback as a WRITE request (with cpu_inst_cnt = 0).
private void get_llc_req(ref int cpu_inst_cnt, out Req rd_req, out Req wb_req)
{
    // No buffered WB request from the previous line in the trace
    if (buffered_wb_addr == -1)
    {
        string line = read_trace();
        Char[] delim = new Char[] { ' ' };
        string[] tokens = line.Split(delim);

        // == Trace files that contain "clone" instructions ==
        if (copy_trace_file)
        {
            // Skip all the other instructions until a read ("R") or clone ("C").
            while (tokens[0] != "R" && tokens[0] != "C")
            {
                line = read_trace();
                tokens = line.Split(delim);
            }
            get_req_clone(ref cpu_inst_cnt, ref line, delim, ref tokens, out rd_req, out wb_req);
        }
        // L2 cache filtered traces: "<inst_cnt> <rd_addr> [wb_addr]"
        else
        {
            cpu_inst_cnt = int.Parse(tokens[0]);
            ulong rd_addr = ulong.Parse(tokens[1]);
            // Tag the process id into the upper address bits (bit 56+).
            rd_addr = rd_addr | (((ulong)pid) << 56);
            rd_req = RequestPool.Depool();
            rd_req.Set(pid, ReqType.READ, rd_addr);
            wb_req = null;

            // No writeback token on this line, or writebacks disabled.
            if (!Config.proc.wb || tokens.Length == 2)
            {
                return;
            }

            // Buffer the writeback address for the next call.
            Dbg.Assert(tokens.Length == 3);
            ulong wb_addr = ulong.Parse(tokens[2]);
            buffered_wb_addr = wb_addr | (((ulong)pid) << 56);
        }
    }
    else
    {
        // Use the buffered WB request: emit it as a WRITE with no CPU
        // instructions in between.
        cpu_inst_cnt = 0;
        rd_req = RequestPool.Depool();
        rd_req.Set(pid, ReqType.WRITE, (ulong)buffered_wb_addr);
        wb_req = null;
        // Clear the buffered wb
        buffered_wb_addr = -1;
    }
}
// Starting a pool should bring up its initial clients; stopping it
// asynchronously should tear every client down.
public void NewRequestPool_WhenStarted_ShouldTearDownWhenStopped()
{
    // Arrange: a request pool over a test transport stack.
    var networkLayer = new ClientTestNetworkLinkLayer();
    var transport = new ClientTestTransportLayer(networkLayer);
    var pool = new RequestPool<ClientControlFrame>(transport);

    // Act / Assert: start brings up the configured initial clients...
    pool.Start();
    Assert.AreEqual(pool.InitialClientSize, pool.ActiveClients);

    // ...and an async stop leaves none active.
    pool.StopAsync().Wait();
    Assert.AreEqual(0, pool.ActiveClients);
}
// A pool constructed with an explicit client count should report that many
// active clients once started.
public void NewRequestPool_WhenStarted_ShouldStartUpDefinedNumberOfClients()
{
    // Arrange: pool configured for 4 clients.
    var networkLayer = new ClientTestNetworkLinkLayer();
    var transport = new ClientTestTransportLayer(networkLayer);
    var pool = new RequestPool<ClientControlFrame>(transport, 4);

    // Act.
    pool.Start();

    // Assert: active client count matches the configured initial size.
    Assert.AreEqual(pool.InitialClientSize, pool.ActiveClients);

    pool.Stop();
}
// Null upper_c means c is a L1 cache, otherwise L2
// Drains this process's hit queue of cache c: each request whose modeled hit
// latency has elapsed is retired (instruction window slot freed, Req
// recycled). For an L2 hit, the line is first installed into L1, which may
// evict a dirty L1 line into L2 and, in turn, a dirty L2 line to memory.
public void service_cache_hit_queue(Cache c, Cache upper_c = null)
{
    LinkedList<Req> hit_queue = c.get_hit_queue(pid);
    while (hit_queue.Count != 0)
    {
        Req req = hit_queue.First.Value;
        int hit_pid = req.Pid;
        Dbg.Assert(hit_pid == pid);
        // Serve the head only once its departure time has been reached;
        // otherwise stop for this cycle (presumably entries are ordered by
        // TsDeparture -- TODO confirm against add_cache_hit_queue).
        if ((ulong)req.TsDeparture <= cycles)
        {
            // Hit in L2 and move L2 $line to L1
            if (upper_c != null)
            {
                Cache l1c = upper_c;
                Dbg.AssertPrint(!l1c.in_cache(req.BlockAddr), "$line from an L2 hit shouldn't be in L1.");
                ulong l1c_wb_addr = l1c.cache_add(req.BlockAddr, req.Type, hit_pid);
                // Dirty $line eviction from L1, check L2 first.
                if (l1c_wb_addr != NULL_ADDRESS)
                {
                    // Miss in L2
                    if (!c.is_cache_hit(l1c_wb_addr, ReqType.WRITE))
                    {
                        // Another potential wb from L2
                        ulong l2c_wb_addr = c.cache_add(l1c_wb_addr, ReqType.WRITE, hit_pid);
                        if (l2c_wb_addr != NULL_ADDRESS)
                        {
                            // Dirty L2 eviction goes to memory as a writeback.
                            gen_cache_wb_req(l2c_wb_addr);
                        }
                    }
                }
                Stat.procs[pid].l2_cache_hit_avg_latency.collect((int)(cycles - (ulong)req.TsArrival));
            }
            else
            {
                Stat.procs[pid].l1_cache_hit_avg_latency.collect((int)(cycles - (ulong)req.TsArrival));
            }
            // Simply hit in L1
            hit_queue.RemoveFirst();
            inst_wnd.set_ready(req.BlockAddr);
            RequestPool.Enpool(req);
        }
        else
        {
            return;
        }
    }
}
// Running a pool through a start/stop cycle should emit at least one
// control frame on the underlying network layer.
public void NewRequestPool_WhenStarted_ShouldSendControlMessages()
{
    // Arrange.
    var networkLayer = new ClientTestNetworkLinkLayer();
    var transport = new ClientTestTransportLayer(networkLayer);
    var pool = new RequestPool<ClientControlFrame>(transport);

    // Act: full lifecycle.
    pool.Start();
    pool.Stop();

    // Assert: decode everything that went over the wire and check that at
    // least one frame -- specifically a control frame -- was sent.
    var decodedFrames = networkLayer.SentBytes
        .Select(this.clientFrameEncoder.Decode)
        .ToList();
    Assert.IsTrue(decodedFrames.Any());
    Assert.IsTrue(decodedFrames.OfType<ClientControlFrame>().Any());
}
// Generate a new writeback request to memory from an L2 dirty block
// eviction. If a writeback to the same block is already queued, the new
// request is redundant and is recycled instead of being enqueued.
public void gen_cache_wb_req(ulong wb_addr)
{
    Req wb_req = RequestPool.Depool();
    wb_req.Set(pid, ReqType.WRITE, wb_addr);

    bool alreadyQueued = wb_q.Exists(x => x.BlockAddr == wb_req.BlockAddr);
    if (alreadyQueued)
    {
        // Merge: drop the duplicate back into the pool.
        RequestPool.Enpool(wb_req);
    }
    else
    {
        addWB(wb_req);
    }
}
// Completion callback for a serviced COPY request: collect statistics, free
// the instruction-window slot and MSHR entry, and recycle the request.
public void recv_copy_req(Req req)
{
    // Stats for the completed copy.
    var procStat = Stat.procs[pid];
    procStat.copy_req_served.collect();
    procStat.copy_avg_latency.collect(req.Latency);

    // Release the instruction-window slot (copy variant) and the MSHR entry.
    bool contains_write = inst_wnd.set_ready(req.BlockAddr, true);
    mshr.RemoveAll(x => x == req.BlockAddr);
    Dbg.AssertPrint(!contains_write, "Inst window contains write reqeusts. COPY is not supported in cache mode.");
    Dbg.Assert(req.WbReq == null);

    // Recycle the request object.
    RequestPool.Enpool(req);
}
// Enqueues a rank-level refresh request for rankIdx and advances the rank's
// subarray counter, wrapping at SaCounterMax.
protected void RefreshRank(uint rankIdx)
{
    // Build the refresh request stamped with the current cycle.
    Req refreshReq = RequestPool.Depool();
    refreshReq.Type = ReqType.REFRESH;
    refreshReq.TsArrival = Cycles;

    // Target address: this channel/rank, at the subarray the per-rank
    // counter currently points to.
    MemAddr target = new MemAddr();
    target.Reset();
    target.cid = Mctrl.cid;
    target.rid = rankIdx;
    target.said = SaCounters[rankIdx];
    refreshReq.Addr = target;

    // Hand the request to this rank's refresh queue.
    Mctrl.Refrankqs[rankIdx].Add(refreshReq);

    // Advance the subarray counter, wrapping to zero at the maximum.
    if (++SaCounters[rankIdx] == SaCounterMax)
    {
        SaCounters[rankIdx] = 0;
    }
}
// Maps the numeric route id onto a JobType, dispatches the job to the
// request pool, and returns 200 once execution completes.
// NOTE(review): the (JobType)id cast is unvalidated -- an out-of-range id
// produces an undefined enum value; confirm the factory handles that.
public async Task<ActionResult> Get(int id)
{
    var job = new JobFactory().SetJob((JobType)id);
    await RequestPool.ExecuteAsync(job);
    return StatusCode(200, "OK");
}
// Releases pooled request resources before delegating to the base class's
// disposal logic.
public override void Dispose()
{
    RequestPool.Release();
    base.Dispose();
}
// Parses one line of a clone ("RC") trace:
//   "R <rd_addr_hex> <wb_addr_hex> <inst_cnt>" -- a read, with an optional
//       writeback (wb_addr of 0 means no writeback);
//   "C <dst_addr_hex> <src_addr_hex>"          -- a cacheline copy.
// Addresses are tagged with the process id in bits 60+ so per-process
// address spaces stay disjoint.
private void get_req_clone(ref int cpu_inst_cnt, ref string line, Char[] delim, ref string[] tokens, out Req rd_req, out Req wb_req)
{
    string inst_type = tokens[0];
    // Any pending copy-generated addresses must have been drained already.
    Dbg.Assert(copy_to_req_addr_q.Count == 0);
    if (inst_type == "R")
    {
        cpu_inst_cnt = int.Parse(tokens[3]);
        ulong rd_addr = ulong.Parse(tokens[1], System.Globalization.NumberStyles.HexNumber);
        //ulong rd_add_dup = rd_addr;
        rd_addr = rd_addr | (((ulong)pid) << 60);
        rd_req = RequestPool.Depool();
        rd_req.Set(pid, ReqType.READ, rd_addr);

        ulong wb_addr = ulong.Parse(tokens[2], System.Globalization.NumberStyles.HexNumber);
        wb_req = null;
        // Zero writeback address means this line carries no writeback.
        if (wb_addr != 0)
        {
            wb_addr = wb_addr | (((ulong)pid) << 60);
            if (Config.proc.llc_shared_cache_only)
            {
                // Shared-LLC mode: defer the writeback; get_llc_req emits it
                // as a WRITE request on the next call.
                buffered_wb_addr = wb_addr;
            }
            else if (Config.proc.wb)
            {
                wb_req = RequestPool.Depool();
                wb_req.Set(pid, ReqType.WRITE, wb_addr);
            }
        }
    }
    else if (inst_type == "C")
    {
        cpu_inst_cnt = 1;
        ulong dst_addr = ulong.Parse(tokens[1], System.Globalization.NumberStyles.HexNumber);
        ulong src_addr = ulong.Parse(tokens[2], System.Globalization.NumberStyles.HexNumber);
        dst_addr = dst_addr | (((ulong)pid) << 60);
        src_addr = src_addr | (((ulong)pid) << 60);

        // Convert the copy request into a list of RD/WR memory requests
        if (Config.mctrl.copy_method == COPY.MEMCPY)
        {
            // Simply convert every memcopy to multiple read and write requests
            Dbg.Assert(copy_to_req_addr_q.Count == 0);
            // SRC and DST address: enqueue alternating source/destination
            // addresses, advancing one cacheline (64 B) per iteration, for
            // copy_gran cachelines.
            for (int i = 0; i < Config.mctrl.copy_gran; i++)
            {
                copy_to_req_addr_q.Enqueue(src_addr);
                copy_to_req_addr_q.Enqueue(dst_addr);
                // Increment by one cacheline
                src_addr += 64;
                dst_addr += 64;
            }
            cpu_inst_cnt = 1;
            // Issue the first queued address now; get_req drains the rest on
            // subsequent calls.
            ulong rd_addr = copy_to_req_addr_q.Dequeue();
            rd_req = RequestPool.Depool();
            rd_req.Set(pid, ReqType.READ, rd_addr);
            // For the destination addr, we need to mark it as dirty when the data is inserted back into the LLC.
            // dirty_insert toggles each request: src reads stay clean,
            // dst "reads" are inserted dirty.
            rd_req.DirtyInsert = dirty_insert;
            dirty_insert = !dirty_insert;
            rd_req.CpyGenReq = Config.proc.stats_exclude_cpy;
            wb_req = null;
            Stat.banks[rd_req.Addr.cid, rd_req.Addr.rid, rd_req.Addr.bid].cmd_base_inter_sa.collect();
            // Each copy-generated pair costs two IPC slots in accounting.
            copy_to_req_ipc_deduction += 2;
        }
        else
        {
            // Native COPY command: a single request carrying the source address.
            rd_req = RequestPool.Depool();
            rd_req.Set(pid, ReqType.COPY, src_addr);
            wb_req = null;
        }
    }
    else
    {
        // Unknown instruction token: fail loudly.
        rd_req = null;
        wb_req = null;
        Dbg.AssertPrint(inst_type == "C" || inst_type == "R", "Unable to fetch valid instruction.");
    }
}
// Fetches the next read request (and possibly a writeback) from the trace,
// dispatching to the mode-specific parser chosen by the configuration.
public void get_req(ref int cpu_inst_cnt, out Req rd_req, out Req wb_req)
{
    // Drain pending copy-generated addresses before reading new trace lines.
    if (copy_to_req_addr_q.Count > 0)
    {
        cpu_inst_cnt = 1;
        ulong rd_addr = copy_to_req_addr_q.Dequeue();
        rd_req = RequestPool.Depool();
        rd_req.Set(pid, ReqType.READ, rd_addr);
        // For the destination addr, we need to mark it as dirty when the data is inserted back into the LLC.
        rd_req.DirtyInsert = dirty_insert;
        dirty_insert = !dirty_insert;
        rd_req.CpyGenReq = Config.proc.stats_exclude_cpy;
        wb_req = null;
        copy_to_req_ipc_deduction += 2;
        return;
    }

    // Shared LLC mode on cache filtered traces
    if (Config.proc.llc_shared_cache_only)
    {
        get_llc_req(ref cpu_inst_cnt, out rd_req, out wb_req);
        return;
    }

    string line = read_trace();
    Char[] delim = new Char[] { ' ' };
    string[] tokens = line.Split(delim);

    // The format of cache unfiltered traces is different
    if (Config.proc.cache_enabled)
    {
        get_req_cache_unfiltered(ref cpu_inst_cnt, ref line, delim, ref tokens, out rd_req, out wb_req);
    }
    // traces with clones and sets
    else if (Config.proc.b_read_rc_traces && copy_trace_file)
    {
        // Skip all the other instructions until a read ("R") or clone ("C").
        while (tokens[0] != "R" && tokens[0] != "C")
        {
            line = read_trace();
            tokens = line.Split(delim);
        }
        get_req_clone(ref cpu_inst_cnt, ref line, delim, ref tokens, out rd_req, out wb_req);
    }
    // Cache filtered: "<inst_cnt> <rd_addr> [wb_addr]"; the process id is
    // tagged into the upper address bits (bit 56+).
    else
    {
        cpu_inst_cnt = int.Parse(tokens[0]);
        ulong rd_addr = ulong.Parse(tokens[1]);
        rd_addr = rd_addr | (((ulong)pid) << 56);
        rd_req = RequestPool.Depool();
        rd_req.Set(pid, ReqType.READ, rd_addr);
        // No writeback token, or writebacks disabled.
        if (!Config.proc.wb || tokens.Length == 2)
        {
            wb_req = null;
            return;
        }
        Dbg.Assert(tokens.Length == 3);
        ulong wb_addr = ulong.Parse(tokens[2]);
        wb_addr = wb_addr | (((ulong)pid) << 56);
        wb_req = RequestPool.Depool();
        wb_req.Set(pid, ReqType.WRITE, wb_addr);
    }
}
// Issues up to Config.proc.ipc instructions for this cycle. At most one
// memory request is issued per cycle; issued_rd_req records whether one has
// already gone out. NOTE(review): issued_rd_req is passed by value, so the
// caller never observes the updates made here -- confirm that is intended.
public void issue_insts(bool issued_rd_req)
{
    //issue instructions
    for (int i = 0; i < Config.proc.ipc; i++)
    {
        // NOTE(review): the assert and the null check below are redundant
        // with each other; in a non-debug build only the early return runs.
        Dbg.Assert(curr_rd_req != null);
        if (curr_rd_req == null)
        {
            return;
        }
        // Stats: count a stall only once per cycle (i == 0).
        if (inst_wnd.is_full())
        {
            if (i == 0)
            {
                Stat.procs[pid].stall_inst_wnd.collect();
                consec_stalled++;
            }
            return;
        }
        //cpu instructions: consume non-memory instructions first
        if (curr_cpu_inst_cnt > 0)
        {
            curr_cpu_inst_cnt--;
            inst_wnd.add(0, false, true, 0); // word oblivious
            continue;
        }
        //only one memory instruction can be issued per cycle
        if (issued_rd_req)
        {
            return;
        }
        // Ideal memory: retire the request immediately without touching DRAM.
        if (Config.proc.ideal_memory)
        {
            Dbg.AssertPrint(!Config.proc.cache_enabled, "Cache is not supported in ideal memory mode.");
            if (curr_rd_req.WbReq != null)
            {
                RequestPool.Enpool(curr_rd_req.WbReq);
            }
            RequestPool.Enpool(curr_rd_req);
            curr_rd_req = get_req();
            return;
        }
        // Need to mark if an instruction is a write on cache mode or COPY for a copy instruction
        inst_wnd.add(curr_rd_req.BlockAddr, true, false, curr_rd_req.WordOffset, (curr_rd_req.Type == ReqType.WRITE) && Config.proc.cache_enabled, curr_rd_req.Type == ReqType.COPY);

        // check if true miss -- a duplicate block address in the window means
        // an in-flight request already covers this block.
        bool false_miss = inst_wnd.is_duplicate(curr_rd_req.BlockAddr);
        // COPY is a special instruction, so we don't care about if its address is a duplicate of other instructions
        if (false_miss && Config.proc.issue_on_dup_req && curr_rd_req.Type != ReqType.COPY)
        {
            Dbg.Assert(curr_rd_req.WbReq == null);
            RequestPool.Enpool(curr_rd_req);
            curr_rd_req = get_req();
            continue;
        }
        // STATS
        collect_inst_stats();
        // Caches
        if (Config.proc.cache_enabled && curr_rd_req.Type != ReqType.COPY)
        {
            // Check for in-flight rd_wr_q.
            // Since write is duplicate, drop it....
            bool in_rd_wr_q = read_write_q.Contains(curr_rd_req.BlockAddr);
            // L1
            if (l1c.is_cache_hit(curr_rd_req.BlockAddr, curr_rd_req.Type))
            {
                Dbg.AssertPrint(!in_rd_wr_q, "Both in rd_wr_q and L1 cache baddr=" + curr_rd_req.BlockAddr);
                // HIT: Add to l1 cache hit queue to model the latency
                add_cache_hit_queue(l1c, curr_rd_req);
                curr_rd_req = get_req();
                issued_rd_req = true;
                continue;
            }
            // L2
            if (l2c.is_cache_hit(curr_rd_req.BlockAddr, curr_rd_req.Type))
            {
                Dbg.Assert(!in_rd_wr_q);
                // HIT: Add to l2 cache hit queue to model the latency,
                // add to l1 cache after it is served from the hit queue
                add_cache_hit_queue(l2c, curr_rd_req);
                curr_rd_req = get_req();
                issued_rd_req = true;
                continue;
            }
            // An in-flight write-allocate read already covers this block:
            // drop this request (marking writes ready in the window).
            if (in_rd_wr_q)
            {
                if (curr_rd_req.Type == ReqType.WRITE)
                {
                    inst_wnd.set_ready(curr_rd_req.BlockAddr);
                }
                RequestPool.Enpool(curr_rd_req);
                curr_rd_req = get_req();
                issued_rd_req = true;
                continue;
            }
            // If write allocate -- 1. need to make sure the following read request
            // detects this reading request generated from write
            // 2. don't stall the instruction window
            // Make it into a read request, then on receving the
            // request, put them into the cache and mark them dirty.
            if (curr_rd_req.Type == ReqType.WRITE)
            {
                convert_to_read_write(ref curr_rd_req);
            }
        }
        // **** GO TO MEMORY ****
        //try mshr; on failure, retry next cycle
        bool mshr_ok = insert_mshr(curr_rd_req);
        if (!mshr_ok)
        {
            mshr_retry = true;
            return;
        }
        //try memory controller; on failure, retry next cycle
        bool mctrl_ok = insert_mctrl(curr_rd_req);
        if (!mctrl_ok)
        {
            mctrl_retry = true;
            return;
        }
        //issued memory request
        issued_rd_req = true;
        //get new read request
        curr_rd_req = get_req();
    }
}
/// <summary>
/// Class that handles back pressure by delegating to a bounded request pool.
/// </summary>
/// <param name="numberThreads">Number of threads to be used.</param>
/// <param name="maxLatency">Maximum allowed latency.</param>
public BasicBackPressure(int numberThreads, double maxLatency)
{
    provider = new RequestPool(numberThreads, maxLatency);
}