public bool is_row_hit(Req req) { MemCtrl mctrl = get_mctrl(req); Bank bank = mctrl.chan.ranks[req.addr.rid].banks[req.addr.bid]; return bank.curr_rowid == (long)req.addr.rowid; }
public bool is_row_hit(Req req) { Dbg.Assert(mctrl.cid == req.addr.cid); Bank bank = mctrl.chan.ranks[req.addr.rid].banks[req.addr.bid]; return bank.curr_rowid == (long)req.addr.rowid; }
public override Req better_req(Req req1, Req req2) { bool marked1 = req1.marked; bool marked2 = req2.marked; if (marked1 ^ marked2) { if (marked1) return req1; else return req2; } int rank1 = rank[req1.pid]; int rank2 = rank[req2.pid]; if (rank1 != rank2) { if (rank1 > rank2) return req1; else return req2; } bool hit1 = is_row_hit(req1); bool hit2 = is_row_hit(req2); if (hit1 ^ hit2) { if (hit1) return req1; else return req2; } if (req1.ts_arrival <= req2.ts_arrival) return req1; else return req2; }
public override Req better_req(Req req1, Req req2) { if ((mark[req1.pid] != 1) ^ (mark[req2.pid] != 1)) { if (mark[req1.pid] != 1) { return req1; } else { return req2; } } bool hit1 = is_row_hit(req1); bool hit2 = is_row_hit(req2); if (hit1 ^ hit2) { if (hit1) return req1; else return req2; } if (req1.ts_arrival <= req2.ts_arrival) return req1; else return req2; }
public override void dequeue_req(Req req) { if (!req.marked) return; Dbg.Assert(marked_load > 0); marked_load--; }
//constructor public Cmd(TypeEnum type, MemAddr addr, int pid, Req req, List<Cmd> cmd_q) { valid = true; this.pid = pid; this.addr = addr; this.type = type; this.req = req; this.cmd_q = cmd_q; }
public override Req better_req(Req req1, Req req2) { bool hit1 = is_row_hit(req1); bool hit2 = is_row_hit(req2); if (hit1 ^ hit2) { if (hit1) return req1; else return req2; } if (req1.ts_arrival <= req2.ts_arrival) return req1; else return req2; }
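// Usage sketch (an assumption, not part of the original scheduler classes): better_req is a pairwise
// comparator, so a scheduler picks its winner by folding the comparator over a request queue. The
// helper below is hypothetical; the simulator's own find_best_rd_req/find_best_wb_req may differ.
using System;
using System.Collections.Generic;

static class SchedSelectSketch {
    // Returns the most preferred request in q under better_req, or null if q is empty.
    public static Req find_best(List<Req> q, Func<Req, Req, Req> better_req) {
        Req best = null;
        foreach (Req req in q) {
            best = (best == null) ? req : better_req(best, req);
        }
        return best;
    }
}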
public override void issue_req(Req req) { if (req != null) { uint bid = meta_mctrl.get_bid(req); if (is_row_hit(req)) { streak[bid] += 1; } else { streak[bid] = 1; } } }
public virtual void count_streaks(Req req) { if (pid_last_req != req.pid) { if (!proc_done[pid_last_req]) { if (last_streak_length < 16) streak_length[pid_last_req, last_streak_length] ++; else streak_length[pid_last_req, 16] ++; } last_streak_length = 1; pid_last_req = req.pid; } else last_streak_length ++; }
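// Bucketing sketch (assumption about the array shape): streak_length above appears to be indexed
// [pid, bucket] with buckets 0..16, where bucket 16 collects every streak of length >= 16. The
// standalone helper below just restates that capping rule.
static class StreakHistogramSketch {
    public static int bucket_for(int streak_length) {
        return streak_length < 16 ? streak_length : 16;  // lengths >= 16 share the overflow bucket
    }
}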
public void get_req(ref int cpu_inst_cnt, out Req rd_req, out Req wb_req) { try { ulong rd_addr = binary_reader.ReadUInt64(); cpu_inst_cnt = binary_reader.ReadInt32(); ReqType req_type, proc_req_type; req_type = ReqType.RD; if (rd_addr >> 63 != 1) proc_req_type = ReqType.RD; else proc_req_type = ReqType.WR; rd_addr = rd_addr | (((ulong)pid) << 56); rd_req = RequestPool.depool(); rd_req.set(pid, req_type, proc_req_type, rd_addr); wb_req = null; } catch (EndOfStreamException) { gzip_reader = new GZipInputStream(File.OpenRead(trace_fname)); binary_reader = new BinaryReader (gzip_reader); ulong rd_addr = binary_reader.ReadUInt64(); cpu_inst_cnt = binary_reader.ReadInt32(); ReqType req_type, proc_req_type; req_type = ReqType.RD; if (rd_addr >> 63 != 1) proc_req_type = ReqType.RD; else proc_req_type = ReqType.WR; rd_addr = rd_addr | (((ulong)pid) << 56); rd_req = RequestPool.depool(); rd_req.set(pid, req_type, proc_req_type, rd_addr); wb_req = null; } }
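// Decoding sketch for the trace record read above (assumptions: bit 63 of the raw address marks a
// processor-side write, and the pid is stamped into the top byte after that flag has been read,
// exactly as get_req does). The method name decode_trace_word is hypothetical.
static class TraceRecordSketch {
    public static void decode_trace_word(ulong raw_addr, int pid,
                                         out bool is_proc_write, out ulong tagged_addr) {
        is_proc_write = (raw_addr >> 63) == 1;          // top bit: RD (0) vs WR (1) intent
        tagged_addr = raw_addr | (((ulong)pid) << 56);  // pid packed into bits 56..63
    }
}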
public override Req better_req(Req req1, Req req2) { bool hit1 = is_row_hit(req1); bool hit2 = is_row_hit(req2); uint bid1 = meta_mctrl.get_bid(req1); uint bid2 = meta_mctrl.get_bid(req2); bool capped1 = streak[bid1] >= Config.sched.row_hit_cap; bool capped2 = streak[bid2] >= Config.sched.row_hit_cap; hit1 = hit1 && (!capped1); hit2 = hit2 && (!capped2); if (hit1 ^ hit2) { if (hit1) return req1; else return req2; } if (req1.ts_arrival <= req2.ts_arrival) return req1; else return req2; }
public uint get_bid(Req req) { uint cid = req.addr.cid; uint rid = req.addr.rid; uint bid = req.addr.bid; uint global_bid = 0; if (is_omniscient) { global_bid += cid * mctrls[0].rmax * mctrls[0].bmax; } global_bid += rid * mctrls[0].bmax; global_bid += bid; return global_bid; }
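// Worked example of the flattening above (assuming cid, rid and bid are zero-based and rmax/bmax
// are the per-channel rank and bank counts): with rmax = 2 and bmax = 8, a request at cid = 1,
// rid = 1, bid = 3 maps to 1*2*8 + 1*8 + 3 = 27, so every (channel, rank, bank) triple gets a
// distinct global bank id.
static class GlobalBankIdSketch {
    public static uint flatten(uint cid, uint rid, uint bid, uint rmax, uint bmax) {
        return cid * rmax * bmax + rid * bmax + bid;
    }
}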
public bool issue_wb_req(Req wb_req) { bool mctrl_ok = insert_mctrl(wb_req); return(mctrl_ok); }
public void process_request(int pid, ulong block_addr, ulong input_pc) { if (!pc_present(input_pc)) { StrideEntry new_stride_entry = new StrideEntry(); new_stride_entry.block_addr = block_addr; new_stride_entry.trained = false; new_stride_entry.stride = 0; new_stride_entry.train_hits = 0; insert_entry(input_pc, new_stride_entry); return; } int hit_position = entry_position(input_pc); int current_stride = (int)(block_addr - stride_entries[hit_position].block_addr); if (current_stride != stride_entries[hit_position].stride) { stride_entries[hit_position].stride = current_stride; stride_entries[hit_position].trained = false; stride_entries[hit_position].train_hits = 0; } stride_entries[hit_position].block_addr = block_addr; if (!stride_entries[hit_position].trained) { stride_entries[hit_position].train_hits++; stride_entries[hit_position].prefetch_block_addr = block_addr; } if (stride_entries[hit_position].train_hits >= num_trains) { stride_entries[hit_position].trained = true; } if (stride_entries[hit_position].stride == 0) { return; } if (stride_entries[hit_position].trained == true) { ulong max_block_address = stride_entries[hit_position].block_addr + (ulong)((distance + 1) * stride_entries[hit_position].stride); int max_prefetches = (int)(((int)max_block_address - (int)stride_entries[hit_position].prefetch_block_addr) / stride_entries[hit_position].stride); int num_prefetches = (max_prefetches > degree) ? degree : max_prefetches; for (int i = 0; i < num_prefetches; i++) { stride_entries[hit_position].prefetch_block_addr += (ulong)stride_entries[hit_position].stride; Req new_prefetch = create_new_prefetch_req(pid, stride_entries[hit_position].prefetch_block_addr); bool mctrl_ok = insert_mctrl(new_prefetch); if (!mctrl_ok) { RequestPool.enpool(new_prefetch); } else { Stat.procs[new_prefetch.pid].num_prefetches.Collect(); }; } } return; }
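// Minimal stride-training sketch (assumption: this mirrors only the training half of
// process_request above; table lookup, degree, distance and prefetch issue are omitted). The type
// name StrideSketchEntry is hypothetical.
class StrideSketchEntry {
    public ulong last_block_addr;
    public int stride;
    public int train_hits;
    public bool trained;

    // Observe one demand access; returns true once the same nonzero stride has repeated num_trains times.
    public bool observe(ulong block_addr, int num_trains) {
        int curr_stride = (int)(block_addr - last_block_addr);
        if (curr_stride != stride) {   // stride changed: restart training
            stride = curr_stride;
            trained = false;
            train_hits = 0;
        }
        last_block_addr = block_addr;
        if (!trained && ++train_hits >= num_trains) trained = true;
        return trained && stride != 0;
    }
}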
public void dequeue_req(Req req) { sched.dequeue_req(req); wbsched.dequeue_req(req); }
public void __enqueue_req(Req req, List<Req> q) { //timestamp req.ts_arrival = cycles; //add to queue q.Add(req); if (req.type == ReqType.WR) { Dbg.Assert(mctrl_writeq.Count < mctrl_writeq.Capacity); mctrl_writeq.Add(req); } //sched meta_mctrl.enqueue_req(req); //does nothing for now //stats if (req.type == ReqType.RD) { rload++; rload_per_proc[req.pid]++; rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]++; } else { wload++; wload_per_proc[req.pid]++; wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]++; } }
//scheduler-specific overridden method public abstract Req better_req(Req req1, Req req2);
public static void enpool(Req req) { req.reset(); req_pool.AddLast(req); }
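// Pool sketch (assumption): RequestPool appears to recycle Req objects through a linked list, with
// enpool resetting a retired request and depool handing one back out. This standalone version takes
// a hypothetical factory delegate instead of calling Req's real constructor.
using System;
using System.Collections.Generic;

static class ReqPoolSketch {
    static readonly LinkedList<Req> pool = new LinkedList<Req>();

    public static void enpool(Req req) { req.reset(); pool.AddLast(req); }

    public static Req depool(Func<Req> make_req) {
        if (pool.Count == 0) return make_req();   // grow the pool on demand
        Req req = pool.First.Value;
        pool.RemoveFirst();
        return req;
    }
}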
public void enqueue_req(Req req) { //check if writeback hit List<Req> q = get_q(req); MemAddr addr = req.addr; if (req.type == ReqType.RD) { List<Req> wq = writeqs[addr.rid, addr.bid]; int idx = wq.FindIndex(delegate(Req w) { return w.block_addr == req.block_addr; }); if (idx != -1) { //writeback hit Sim.xbar.enqueue(req); Stat.procs[req.pid].wb_hit.Collect(); return; } } //writeback dumpster if (req.type == ReqType.WR && Config.mctrl.wb_dump) { req.addr.rowid = 0; } //enqueue proper Dbg.Assert(q.Count < q.Capacity); __enqueue_req(req, q); }
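// Forwarding sketch (assumption, restating the writeback-hit check in enqueue_req above): a read
// whose block address matches a queued write can be answered from the write queue and sent straight
// to the crossbar, with no DRAM access. The helper name hits_pending_write is hypothetical.
using System.Collections.Generic;

static class WbHitSketch {
    public static bool hits_pending_write(Req rd_req, List<Req> per_bank_writeq) {
        return per_bank_writeq.FindIndex(w => w.block_addr == rd_req.block_addr) != -1;
    }
}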
new public void __dequeue_req(Req req) { req.ts_departure = cycles; Dbg.Assert(req.ts_departure - req.ts_arrival > 0); if ((!req.migrated_request) && (Config.proc.cache_insertion_policy == "PFA")) { RowStat.UpdateMLP(RowStat.DramDict, req); Measurement.mem_num_dec(req); // Measurement.DramServiceTimeUpdate (req); // Measurement.DramCoreReqNumDec (req); } if (Config.proc.cache_insertion_policy == "PFA") { Measurement.DramCoreReqNumDec(req); } /* if (Config.proc.cache_insertion_policy == "RBLAMLP" || Config.proc.cache_insertion_policy == "PFA") * Measurement.DramSetCorePrevRowid (req); */ //sched meta_mctrl.dequeue_req(req); //load stat management if (!req.migrated_request) { if (req.type == ReqType.RD) { rload--; rload_per_proc[req.pid]--; rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]--; Dbg.Assert(rload >= 0); Dbg.Assert(rload_per_proc[req.pid] >= 0); Dbg.Assert(rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid] >= 0); } else { wload--; wload_per_proc[req.pid]--; wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]--; Dbg.Assert(wload >= 0); Dbg.Assert(wload_per_proc[req.pid] >= 0); Dbg.Assert(wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid] >= 0); // RequestPool.CacheWrite--; } } else { if (req.type == ReqType.RD) { rload--; } else { wload--; } } /* //dequeue proper * if (req.type == ReqType.RD) { * //traverse crossbar * //Sim.xbar.enqueue(req); * * Callback cb = req.cache_callback; * cb(req); * * * } * else { * bool removeok = mctrl_writeq.Remove(req); * Dbg.Assert(removeok); * req.latency = (int)(req.ts_departure - req.ts_arrival); * * Callback cb = req.cache_callback; * cb(req); * * * RequestPool.enpool(req); * }*/ //yang: //dequeue proper if (req.type == ReqType.RD) { Callback cb = req.cache_callback; cb(req); } else { bool removeok = mctrl_writeq.Remove(req); Dbg.Assert(removeok); req.latency = (int)(req.ts_departure - req.ts_arrival); Callback cb = req.cache_callback; cb(req); /* Callback cb1 = req.callback; * if (cb1!=null) * { * Console.WriteLine("Position3"); * RequestPool.CacheWrite--; * }*/ } }
new public void __enqueue_req(Req req, List <Req> q) { //timestamp // req.ts_arrival = cycles; /* // do any analysis * if (Config.collect_reuse == true) { * if (Sim.reuse[req.pid].ContainsKey(req.block_addr)) * Sim.reuse[req.pid][req.block_addr] = Sim.reuse[req.pid][req.block_addr] + 1; * else * Sim.reuse[req.pid].Add(req.block_addr, 1); * } * */ if (Config.proc.cache_insertion_policy == "PFA") { Measurement.DramCoreReqNumInc(req); } // check if cache hit bool cache_serviced = false; /* * // TODO: add support for DRAM caching * // don't allow cache writeback requests to be re-cached * if (Config.proc.cache && Sim.caches[Sim.get_cache(req.pid)].is_cached(req) && !req.cache_wb) { * Sim.caches[Sim.get_cache(req.pid)].promote(req); * //stats * if (req.type == ReqType.RD) { * Stat.procs[req.pid].cache_read.Collect(); * Stat.procs[req.pid].cache_hit_rate_read.Collect(1); * Sim.caches[Sim.get_cache(req.pid)].service(req); * cache_serviced = true; * } * else { * switch (Config.proc.cache_write_policy) { * case "WriteThrough": * // displace entry * Sim.caches[Sim.get_cache(req.pid)].displace(req); * break; * case "WriteBack": * Stat.procs[req.pid].cache_write.Collect(); * Stat.procs[req.pid].cache_hit_rate_write.Collect(1); * Sim.caches[Sim.get_cache(req.pid)].service(req); * cache_serviced = true; * break; * } * } * } */ if (!cache_serviced) { /* if (Sim.in_hot_region(req)) * Sim.thread_criticality[req.pid]++; * * //add to queue */ if (!req.migrated_request) { Sim.Dram_req_num = Sim.Dram_req_num + 1; } q.Add(req); if (req.type == ReqType.WR) { Dbg.Assert(mctrl_writeq.Count < mctrl_writeq.Capacity); mctrl_writeq.Add(req); } //sched meta_mctrl.enqueue_req(req); //does nothing for now //stats if (!req.migrated_request) { if (req.type == ReqType.RD) { rload++; rload_per_proc[req.pid]++; rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]++; Stat.procs[req.pid].cache_hit_rate_read.Collect(0); } else { wload++; wload_per_proc[req.pid]++; wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]++; Stat.procs[req.pid].cache_hit_rate_write.Collect(0); } } else { if (req.type == ReqType.RD) { rload++; } else { wload++; } } } }
new public void tick() { //must be the very first thing that's done cycles++; meta_mctrl.tick(cid); wbthrottle.tick(); mwbmode.tick(cid); //load stats for (int p = 0; p < Config.N; p++) { //read load if (rload_per_proc[p] > 0) { Stat.mctrls2[cid].rbinaryloadtick_per_proc[p].Collect(); } Stat.mctrls2[cid].rloadtick_per_proc[p].Collect(rload_per_proc[p]); //write load if (wload_per_proc[p] > 0) { Stat.mctrls2[cid].wbinaryloadtick_per_proc[p].Collect(); } Stat.mctrls2[cid].wloadtick_per_proc[p].Collect(wload_per_proc[p]); } //busy/idle stats if (rload > 0) { read_loaded_time++; if (read_unloaded_time > 0) { //Stat.mctrls2[cid].read_unloaded_time.Collect(read_unloaded_time); } read_unloaded_time = 0; } else { read_unloaded_time++; if (read_loaded_time > 0) { //Stat.mctrls2[cid].read_loaded_time.Collect(read_loaded_time); } read_loaded_time = 0; } /*** writeback mode ***/ update_wb_mode(); /* * if (wb_mode && cid == 0) { * Console.WriteLine("==={0}==============================================", cycles); * Console.WriteLine("Reads to Drain: {0}", reads_to_drain); * Console.WriteLine("Writes Serviced: {0}", ((DecoupledWBFullServeN) mwbmode).serve_cnt[0]); * uint r = 0; * for (uint b = 0; b < bmax; b++) { * Console.Write("{0}\t", b); * foreach (Cmd cmd in cmdqs[r, b]) { * Console.Write("{0} {1}\t", cmd.type.ToString(), can_schedule_cmd(cmd)); * } * Console.WriteLine(); * } * } */ /*** clock factor ***/ if (cycles % Config.mem.clock_factor != 0) { return; } if ((Config.proc.cache_insertion_policy == "PFA") && (cycles % (6 * Config.mem.clock_factor) == 0)) { int indexi, indexj; for (indexi = 0; indexi < rmax; indexi++) { for (indexj = 0; indexj < bmax; indexj++) { Measurement.read_MLP_cal(ref readqs[indexi, indexj]); Measurement.write_MLP_cal(ref writeqs[indexi, indexj]); Measurement.MLP_cal(ref inflightqs[indexi, indexj]); } } } /*** serve completed request ***/ if (bus_q.Count > 0 && bus_q[0].ts <= cycles) { MemAddr addr = bus_q[0].addr; bus_q.RemoveAt(0); List <Req> inflight_q = inflightqs[addr.rid, addr.bid]; Dbg.Assert(inflight_q.Count > 0); Dbg.Assert(addr == inflight_q[0].addr); Req req = inflight_q[0]; inflight_q.RemoveAt(0); if (Config.proc.cache_insertion_policy == "PFA") { Measurement.DramBankPidDeUpdate(req); } dequeue_req(req); } Cmd best_cmd = find_best_cmd(); Req best_req = find_best_req(); //nothing to issue if (best_cmd == null && best_req == null) { if (Config.proc.cache_insertion_policy == "PFA") { CheckBusConflict(); } return; } //arbitrate between command and request bool is_issue_req = false; if (best_req != null && best_cmd == null) { is_issue_req = true; } else if (best_req == null && best_cmd != null) { is_issue_req = false; } else { if (best_req == __better_req(best_cmd.req, best_req)) { is_issue_req = true; } else { is_issue_req = false; } } //issue command or request if (is_issue_req) { if (!best_req.migrated_request) { if (Config.proc.cache_insertion_policy == "RBLA") { RowStat.UpdateDict(RowStat.DramDict, best_req, this); } else if (Config.proc.cache_insertion_policy == "PFA") { RowStat.UpdateDict(RowStat.DramDict, best_req, this); // Measurement.DramBankPidEnUpdate(best_req); } // if (Config.proc.cache_insertion_policy == "PFA") // Measurement.DramBankPidEnUpdate(best_req); } if (Config.proc.cache_insertion_policy == "PFA") { Measurement.DramBankPidEnUpdate(best_req); } issue_req(best_req); } else { issue_cmd(best_cmd); } if (Config.proc.cache_insertion_policy == "PFA") { CheckBusConflict(); } }
private void issue_req(Req req) { //remove request from waiting queue List <Req> q = get_q(req); Dbg.Assert(q.Contains(req)); q.Remove(req); //add to inflight queue MemAddr addr = req.addr; List <Req> inflight_q = inflightqs[addr.rid, addr.bid]; Dbg.Assert(inflight_q.Count < inflight_q.Capacity); inflight_q.Add(req); //add to command queue List <Cmd> cmd_q = cmdqs[addr.rid, addr.bid]; Dbg.Assert(cmd_q.Count == 0); List <Cmd> new_cmd_q = decode_req(req); Dbg.Assert(new_cmd_q.Count > 0); cmd_q.AddRange(new_cmd_q); Cmd cmd = cmd_q[0]; //meta_mctrl meta_mctrl.issue_req(req); Dbg.Assert(cmd.req.addr.rowid == req.addr.rowid); //stats BankStat bstat = Stat.banks2[addr.cid, addr.rid, addr.bid]; bstat.access.Collect(); if (cmd.type == Cmd.TypeEnum.PRECHARGE || cmd.type == Cmd.TypeEnum.ACTIVATE) { //bank stat bstat.row_miss.Collect(); bstat.row_miss_perproc[req.pid].Collect(); //proc stat if (cmd.req.type == ReqType.RD) { Stat.procs[req.pid].row_hit_rate_read.Collect(0); Stat.procs[req.pid].row_miss_read.Collect(); } else { Stat.procs[req.pid].row_hit_rate_write.Collect(0); Stat.procs[req.pid].row_miss_write.Collect(); } req.hit = 2; // Power Measurement: Sim.DRAM_power_statistics(req.pid, req.migrated_request, req.type, false); // if (Config.proc.cache_insertion_policy == "PFA") { // if ((!req.migrated_request) && (req.type == ReqType.RD)) Measurement.DramMissSetRowBufferChange(req); } } else { //bank stat bstat.row_hit.Collect(); bstat.row_hit_perproc[req.pid].Collect(); //proc stat if (cmd.req.type == ReqType.RD) { Stat.procs[req.pid].row_hit_rate_read.Collect(1); Stat.procs[req.pid].row_hit_read.Collect(); } else { Stat.procs[req.pid].row_hit_rate_write.Collect(1); Stat.procs[req.pid].row_hit_write.Collect(); } req.hit = 1; // Power Measurement: Sim.DRAM_power_statistics(req.pid, req.migrated_request, req.type, true); // if (Config.proc.cache_insertion_policy == "PFA") { Measurement.DramHitSetRowBufferChange(req); } } if (Config.proc.cache_insertion_policy == "PFA") { Measurement.DramSetCorePrevRowid(req); } //issue command issue_cmd(cmd); if (cmd.addr != req.addr) { Console.Write("big error!"); } }
public void enqueue(Req req) { reqs.Add(req); }
public virtual void service_counter(Req req) { return; }
public override void dequeue_req(Req req) { }
public override void issue_req(Req req) { if (req == null) return; count_streaks(req); uint bid; if (Config.sched.channel_level) { if (req.pid == last_req_pid && oldest_streak_global < Config.sched.row_hit_cap) { oldest_streak_global += 1; } else if (req.pid == last_req_pid && oldest_streak_global == Config.sched.row_hit_cap) { mark[req.pid] = 1; oldest_streak_global = 1; } else { oldest_streak_global = 1; } last_req_pid = req.pid; } else { bid = meta_mctrl.get_bid(req); if (meta_mctrl.is_req_to_cur_proc(req) && oldest_streak[bid] < Config.sched.row_hit_cap) { oldest_streak[bid] += 1; } else if (meta_mctrl.is_req_to_cur_proc(req) && oldest_streak[bid] == Config.sched.row_hit_cap) { mark[req.pid] = 1; oldest_streak[bid] = 1; // Console.Write(" OLDEST: Marking processor " + req.pid + "\n"); } else { oldest_streak[bid] = 1; } } }
public override void issue_req(Req req) { }
public List<Req> get_q(Req req) { List<Req>[,] rw_qs = (req.type == ReqType.RD ? readqs : writeqs); List<Req> q = rw_qs[req.addr.rid, req.addr.bid]; return q; }
public void __enqueue_req(Req req, List <Req> q) { //timestamp req.ts_arrival = cycles; /* // do any analysis * if (Config.collect_reuse == true) { * if (Sim.reuse[req.pid].ContainsKey(req.block_addr)) * Sim.reuse[req.pid][req.block_addr] = Sim.reuse[req.pid][req.block_addr] + 1; * else * Sim.reuse[req.pid].Add(req.block_addr, 1); * } */ // check if cache hit bool cache_serviced = false; // don't allow cache writeback requests to be re-cached if (Config.proc.cache && Sim.caches[Sim.get_cache(req.pid)].is_cached(req) && (!req.migrated_request)) { if (Config.proc.cache_insertion_policy == "PFA") { Measurement.mem_num_inc(req); } Sim.caches[Sim.get_cache(req.pid)].promote(req); //stats if (req.type == ReqType.RD) { Stat.procs[req.pid].cache_read.Collect(); Stat.procs[req.pid].cache_hit_rate_read.Collect(1); Sim.caches[Sim.get_cache(req.pid)].service(req); cache_serviced = true; } else { switch (Config.proc.cache_write_policy) { case "WriteThrough": // displace entry Sim.caches[Sim.get_cache(req.pid)].displace(req); break; case "WriteBack": Stat.procs[req.pid].cache_write.Collect(); Stat.procs[req.pid].cache_hit_rate_write.Collect(1); Sim.caches[Sim.get_cache(req.pid)].service(req); cache_serviced = true; break; } } } if (!cache_serviced) { if (!req.migrated_request) { Sim.NVM_req_num = Sim.NVM_req_num + 1; } if ((!req.migrated_request) && (Config.proc.cache_insertion_policy == "PFA")) { Measurement.mem_num_inc(req); } if (Config.proc.cache_insertion_policy == "PFA") { Measurement.NVMCoreReqNumInc(req); } q.Add(req); if (req.type == ReqType.WR) { Dbg.Assert(mctrl_writeq.Count < mctrl_writeq.Capacity); mctrl_writeq.Add(req); } //sched meta_mctrl.enqueue_req(req); //does nothing for now //stats if (!req.cache_wb) { if (req.type == ReqType.RD) { rload++; rload_per_proc[req.pid]++; rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]++; Stat.procs[req.pid].cache_hit_rate_read.Collect(0); } else { wload++; wload_per_proc[req.pid]++; wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]++; Stat.procs[req.pid].cache_hit_rate_write.Collect(0); } } else { if (req.type == ReqType.RD) { rload++; } else { wload++; } } } }
private void issue_req(Req req) { //remove request from waiting queue List<Req> q = get_q(req); Dbg.Assert(q.Contains(req)); q.Remove(req); req.queueing_latency = (int) (cycles - req.ts_arrival); total_queueing_latency[req.pid] += (ulong)req.queueing_latency; if (Sim.highest_rank_proc == req.pid) Sim.procs[req.pid].queueing_latency += (ulong)req.queueing_latency; Stat.mctrls[cid].queueing_latency_per_proc[req.pid].Collect(req.queueing_latency); //add to inflight queue MemAddr addr = req.addr; List<Req> inflight_q = inflightqs[addr.rid, addr.bid]; Dbg.Assert(inflight_q.Count < inflight_q.Capacity); inflight_q.Add(req); //add to command queue List<Cmd> cmd_q = cmdqs[addr.rid, addr.bid]; Dbg.Assert(cmd_q.Count == 0); List<Cmd> new_cmd_q = decode_req(req); Dbg.Assert(new_cmd_q.Count > 0); cmd_q.AddRange(new_cmd_q); Cmd cmd = cmd_q[0]; //meta_mctrl meta_mctrl.issue_req(req); req.ts_issue = cycles; //stats BankStat bstat = Stat.banks[addr.cid, addr.rid, addr.bid]; bstat.access.Collect(); if (cmd.type == Cmd.TypeEnum.PRECHARGE || cmd.type == Cmd.TypeEnum.ACTIVATE) { //bank stat bstat.row_miss.Collect(); bstat.row_miss_perproc[req.pid].Collect(); //proc stat if (cmd.req.type == ReqType.RD) { Stat.procs[req.pid].row_hit_rate_read.Collect(0); Stat.procs[req.pid].row_miss_read.Collect(); } else { Stat.procs[req.pid].row_hit_rate_write.Collect(0); Stat.procs[req.pid].row_miss_write.Collect(); } } else { //bank stat bstat.row_hit.Collect(); bstat.row_hit_perproc[req.pid].Collect(); //proc stat if (cmd.req.type == ReqType.RD) { Stat.procs[req.pid].row_hit_rate_read.Collect(1); Stat.procs[req.pid].row_hit_read.Collect(); } else { Stat.procs[req.pid].row_hit_rate_write.Collect(1); Stat.procs[req.pid].row_hit_write.Collect(); } } //issue command issue_cmd(cmd); }
public bool is_req_to_cur_proc(Req req) { MemCtrl mctrl = get_mctrl(req); Bank bank = mctrl.chan.ranks[req.addr.rid].banks[req.addr.bid]; Req curr_req = get_curr_req(bank); return curr_req != null && curr_req.pid == req.pid; }
public void tick() { /*** Preamble ***/ cycles++; Stat.procs[pid].cycle.Collect(); ulong inst_cnt = Stat.procs[pid].ipc.Count; if (inst_cnt != 0 && inst_cnt % 1000000 == 0) { ulong quantum = inst_cnt / 1000000; if (quantum > curr_quantum) { curr_quantum = quantum; ulong read_req = Stat.procs[pid].read_req.Count; Stat.procs[pid].read_quantum.EndQuantum(read_req - prev_read_req); prev_read_req = read_req; ulong write_req = Stat.procs[pid].write_req.Count; Stat.procs[pid].write_quantum.EndQuantum(write_req - prev_write_req); prev_write_req = write_req; } } /*** Throttle ***/ if (throttle_fraction > 0) { if (rand.NextDouble() < throttle_fraction) { return; } } /*** Retire ***/ int retired = inst_wnd.retire(Config.proc.ipc); Stat.procs[pid].ipc.Collect(retired); if (retired < 0.5 * Config.proc.ipc) { Measurement.core_stall_cycles[pid] += 1; } /*** Issue writeback request ***/ if (Config.proc.wb && wb_q.Count > 0) { bool wb_ok = issue_wb_req(wb_q[0]); // Console.WriteLine("Issue Write {0}",wb_ok); if (wb_ok) { wb_q.RemoveAt(0); } //writeback stall bool stalled_wb = wb_q.Count > Config.proc.wb_q_max; if (stalled_wb) { return; } } /*** Reissue previous read request ***/ bool issued_rd_req = false; if (mshr_retry || mctrl_retry) { Dbg.Assert(curr_rd_req != null && curr_cpu_inst_cnt == 0); //mshr/mctrl stall bool reissue_ok = reissue_rd_req(); // Console.Write("Reissue read {0}",reissue_ok); if (!reissue_ok) { return; } //reissue success Dbg.Assert(!mshr_retry && !mctrl_retry); issued_rd_req = true; curr_rd_req = get_req(); } /*** Issue instructions ***/ Dbg.Assert(curr_rd_req != null); issue_insts(issued_rd_req); }
private void send_req(Req req) { Sim.procs[req.pid].inflight_mem_requests++; req.callback = new Callback(Sim.procs[req.pid].recv_req); Sim.mctrls[req.addr.cid].enqueue_req(req); }
public void issue_req(Req req) { sched.issue_req(req); wbsched.issue_req(req); }
public List <Req> get_inflight_q(Req req) { List <Req> q = inflightqs[req.addr.rid, req.addr.bid]; return(q); }
public Req better_wb_req(Req req1, Req req2) { return wbsched.better_req(req1, req2); }
public void tick() { //must be the very first thing that's done cycles++; meta_mctrl.tick(cid); wbthrottle.tick(); mwbmode.tick(cid); //load stats for (int p = 0; p < Config.N; p++) { //read load if (rload_per_proc[p] > 0) { Stat.mctrls[cid].rbinaryloadtick_per_proc[p].Collect(); } Stat.mctrls[cid].rloadtick_per_proc[p].Collect(rload_per_proc[p]); //write load if (wload_per_proc[p] > 0) { Stat.mctrls[cid].wbinaryloadtick_per_proc[p].Collect(); } Stat.mctrls[cid].wloadtick_per_proc[p].Collect(wload_per_proc[p]); } //busy/idle stats if (rload > 0) { read_loaded_time++; if (read_unloaded_time > 0) { //Stat.mctrls[cid].read_unloaded_time.Collect(read_unloaded_time); } read_unloaded_time = 0; } else { read_unloaded_time++; if (read_loaded_time > 0) { //Stat.mctrls[cid].read_loaded_time.Collect(read_loaded_time); } read_loaded_time = 0; } /*** writeback mode ***/ update_wb_mode(); /* * if (wb_mode && cid == 0) { * Console.WriteLine("==={0}==============================================", cycles); * Console.WriteLine("Reads to Drain: {0}", reads_to_drain); * Console.WriteLine("Writes Serviced: {0}", ((DecoupledWBFullServeN) mwbmode).serve_cnt[0]); * uint r = 0; * for (uint b = 0; b < bmax; b++) { * Console.Write("{0}\t", b); * foreach (Cmd cmd in cmdqs[r, b]) { * Console.Write("{0} {1}\t", cmd.type.ToString(), can_schedule_cmd(cmd)); * } * Console.WriteLine(); * } * } */ /*** clock factor ***/ if (cycles % Config.mem.clock_factor != 0) { return; } /*** serve completed request ***/ if (bus_q.Count > 0 && bus_q[0].ts <= cycles) { MemAddr addr = bus_q[0].addr; bus_q.RemoveAt(0); List <Req> inflight_q = inflightqs[addr.rid, addr.bid]; Dbg.Assert(inflight_q.Count > 0); Dbg.Assert(addr == inflight_q[0].addr); Req req = inflight_q[0]; inflight_q.RemoveAt(0); dequeue_req(req); } Cmd best_cmd = find_best_cmd(); Req best_req = find_best_req(); //nothing to issue if (best_cmd == null && best_req == null) { return; } //arbitrate between command and request bool is_issue_req = false; if (best_req != null && best_cmd == null) { is_issue_req = true; } else if (best_req == null && best_cmd != null) { is_issue_req = false; } else { if (best_req == __better_req(best_cmd.req, best_req)) { is_issue_req = true; } else { is_issue_req = false; } } //issue command or request if (is_issue_req) { // Console.Write(" Cycles " + meta_mctrl.get_cycles() + "\n"); issue_req(best_req); meta_mctrl.prev_req_pid = best_req.pid; } else { issue_cmd(best_cmd); meta_mctrl.prev_req_pid = best_cmd.req.pid; } }
public void enqueue_req(Req req) { sched.enqueue_req(req); wbsched.enqueue_req(req); }
public void update_wb_mode() { bool prev_wb_mode = wb_mode; wb_mode = mwbmode.is_wb_mode(cid); if (wb_mode) { Stat.mctrls[cid].wbmode_fraction.Collect(); } if (prev_wb_mode == false && wb_mode == true) { //stats ts_start_wbmode = cycles; if (ts_end_wbmode != -1) { Stat.mctrls[cid].wbmode_distance.Collect((int)(ts_start_wbmode - ts_end_wbmode)); } /* * if (cid == 0) { * Console.WriteLine("=====Start: {0,8}======================================", cycles); * Console.Write("\t"); * for (uint b = 0; b < bmax; b++) { * Console.Write("{0,4}", readqs[0, b].Count); * } * Console.WriteLine(); * * Console.Write("\t"); * for (uint b = 0; b < bmax; b++) { * Console.Write("{0,4}", writeqs[0, b].Count); * } * Console.WriteLine(); * } */ //stats: longest write transaction int longest_transaction = 0; for (uint r = 0; r < rmax; r++) { for (uint b = 0; b < bmax; b++) { List <Req> q = writeqs[r, b]; Dictionary <ulong, int> dict = new Dictionary <ulong, int>(); foreach (Req req in q) { if (!dict.ContainsKey(req.addr.rowid)) { dict.Add(req.addr.rowid, 0); } dict[req.addr.rowid] += 1; } foreach (int transaction in dict.Values) { if (transaction > longest_transaction) { longest_transaction = transaction; } } } } Stat.mctrls[cid].wbmode_longest_transaction.Collect(longest_transaction); /* * if (cid == 0) * Console.WriteLine("Longest Transaction: {0}", longest_transaction); */ //flush/drain reads reads_to_drain = 0; for (uint r = 0; r < rmax; r++) { for (uint b = 0; b < bmax; b++) { List <Cmd> cmdq = cmdqs[r, b]; if (cmdq.Count == 0) { continue; } //only column command if (cmdq.Count == 1) { //increment the number of reads to drain during the first part of the writeback mode Dbg.Assert(cmdq[0].type == Cmd.TypeEnum.READ || cmdq[0].type == Cmd.TypeEnum.WRITE); if (cmdq[0].type == Cmd.TypeEnum.READ) { reads_to_drain++; cmdq[0].is_drain = true; } continue; } //activate+column command Dbg.Assert(cmdq.Count == 2); Dbg.Assert(cmdq[0].type == Cmd.TypeEnum.ACTIVATE); Dbg.Assert(cmdq[1].type == Cmd.TypeEnum.READ || cmdq[1].type == Cmd.TypeEnum.WRITE); //write requests don't matter if (cmdq[1].type == Cmd.TypeEnum.WRITE) { continue; } //don't flush read request if (Config.mctrl.read_bypass) { if (writeqs[r, b].Count == 0) { continue; } } //flush read request Req req = cmdq[1].req; List <Req> inflightq = get_inflight_q(req); Req last_req = inflightq[inflightq.Count - 1]; Dbg.Assert(last_req.block_addr == req.block_addr); inflightq.RemoveAt(inflightq.Count - 1); List <Req> q = get_q(req); Dbg.Assert(q.Count < q.Capacity); q.Add(req); //flush read command cmdq.RemoveRange(0, 2); } } } else if (prev_wb_mode == true && wb_mode == false) { //stats ts_end_wbmode = cycles; Stat.mctrls[cid].wbmode_length.Collect((int)(ts_end_wbmode - ts_start_wbmode)); /* * if (cid == 0) { * Console.WriteLine("Length: {0}", cycles-ts_start_wbmode); * Console.WriteLine("Rds: {0}", rds_per_wb_mode); * Console.WriteLine("Wrs: {0}", wbs_per_wb_mode); * Console.WriteLine("=====End: {0,8}======================================", cycles); * } */ Stat.mctrls[cid].rds_per_wb_mode.Collect(rds_per_wb_mode); Stat.mctrls[cid].wbs_per_wb_mode.Collect(wbs_per_wb_mode); rds_per_wb_mode = 0; wbs_per_wb_mode = 0; //flush/drain writes writes_to_drain = 0; foreach (List <Cmd> cmdq in cmdqs) { if (cmdq.Count == 0) { continue; } //only column command if (cmdq.Count == 1) { //increment the number of reads to drain during the first part of the writeback mode Dbg.Assert(cmdq[0].type == Cmd.TypeEnum.READ || cmdq[0].type == Cmd.TypeEnum.WRITE); if (cmdq[0].type == 
Cmd.TypeEnum.WRITE) { writes_to_drain++; cmdq[0].is_drain = true; } continue; } //activate+column command Dbg.Assert(cmdq.Count == 2); Dbg.Assert(cmdq[0].type == Cmd.TypeEnum.ACTIVATE); Dbg.Assert(cmdq[1].type == Cmd.TypeEnum.READ || cmdq[1].type == Cmd.TypeEnum.WRITE); if (cmdq[1].type == Cmd.TypeEnum.READ) { continue; } //flush write request Req req = cmdq[1].req; List <Req> inflightq = get_inflight_q(req); Req last_req = inflightq[inflightq.Count - 1]; Dbg.Assert(last_req.block_addr == req.block_addr); inflightq.RemoveAt(inflightq.Count - 1); List <Req> q = get_q(req); Dbg.Assert(q.Count < q.Capacity); q.Add(req); //flush write command cmdq.RemoveRange(0, 2); } } }
public void issue_insts(bool issued_rd_req) { //issue instructions for (int i = 0; i < Config.proc.ipc; i++) { if (inst_wnd.is_full()) { if (i == 0) { Stat.procs[pid].stall_inst_wnd.Collect(); // Measurement.core_stall_cycles[pid] += 1; } return; } //cpu instructions if (curr_cpu_inst_cnt > 0) { curr_cpu_inst_cnt--; inst_wnd.add(0, false, true); continue; } //only one memory instruction can be issued per cycle if (issued_rd_req) { return; } //memory instruction (only AFTER checking for one memory instruction per cycle) inst_wnd.add(curr_rd_req.block_addr, true, false); //check if true miss bool false_miss = inst_wnd.is_duplicate(curr_rd_req.block_addr); if (false_miss) { Dbg.Assert(curr_rd_req.wb_req == null); RequestPool.enpool(curr_rd_req); curr_rd_req = get_req(); continue; } //try mshr bool mshr_ok = insert_mshr(curr_rd_req); if (!mshr_ok) { mshr_retry = true; return; } //try memory controller bool mctrl_ok = insert_mctrl(curr_rd_req); if (!mctrl_ok) { mctrl_retry = true; return; } //issued memory request issued_rd_req = true; //get new read request curr_rd_req = get_req(); } }
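// Back-pressure sketch (assumption: condenses the retry handling in issue_insts and the processor
// tick above). A read that fails MSHR insertion sets mshr_retry; one that fails memory-controller
// insertion sets mctrl_retry; either flag makes the next cycle retry the same request before a new
// one is fetched from the trace. The enum and delegate parameters here are hypothetical.
using System;

static class IssueRetrySketch {
    public enum Outcome { Issued, MshrFull, MctrlFull }

    public static Outcome try_issue(Req req, Func<Req, bool> insert_mshr, Func<Req, bool> insert_mctrl) {
        if (!insert_mshr(req)) return Outcome.MshrFull;    // corresponds to mshr_retry = true
        if (!insert_mctrl(req)) return Outcome.MctrlFull;  // corresponds to mctrl_retry = true
        return Outcome.Issued;
    }
}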
private Req __find_best_req(int r, int b) { //no need to search for request, already outstanding commands if (cmdqs[r, b].Count > 0) { return(null); } /*** find best request ***/ List <Req> rq = readqs[r, b]; List <Req> wq = writeqs[r, b]; if (rq.Count == 0 && wq.Count == 0) { return(null); } Req best_req = null; Cmd cmd = null; //find best writeback request if (wb_mode) { best_req = meta_mctrl.find_best_wb_req(wq); if (best_req != null) { //check if best writeback request is schedulable cmd = decode_req(best_req)[0]; if (!can_schedule_cmd(cmd)) { return(null); } return(best_req); } //writeq is empty: should we let reads bypass? if (!Config.mctrl.read_bypass) { return(null); } } //find best read request best_req = meta_mctrl.find_best_rd_req(rq); /*** row-hit bypass ***/ if (Config.mctrl.row_hit_bypass) { Req hit_req = rh_finder.find_best_req(rq); if (!meta_mctrl.is_row_hit(best_req) && hit_req != null) { Bank bank = chan.ranks[r].banks[b]; Dbg.Assert(bank.ts_act != -1); long ts_pre = bank.ts_act + timing.tRAS; long speculative_ts_pre = cycles + timing.tRTP; if (speculative_ts_pre <= ts_pre) { best_req = hit_req; } } } if (best_req == null) { return(null); } //check if best request is schedulable cmd = decode_req(best_req)[0]; if (!can_schedule_cmd(cmd)) { return(null); } return(best_req); }
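// Timing sketch for the row-hit bypass above (assumption about intent: a row-hit read may jump ahead
// of the chosen row-miss request only if it would not delay the bank's earliest legal precharge).
// With hypothetical timings tRAS = 28 and tRTP = 6, a bank activated at cycle 100 cannot precharge
// before cycle 128; a row-hit read issued at cycle 110 implies a precharge no earlier than 116,
// and 116 <= 128, so the bypass is allowed.
static class RowHitBypassSketch {
    public static bool bypass_ok(long cycles, long ts_act, long tRAS, long tRTP) {
        long ts_pre = ts_act + tRAS;              // earliest precharge imposed by the activate
        long speculative_ts_pre = cycles + tRTP;  // earliest precharge if the row-hit read issues now
        return speculative_ts_pre <= ts_pre;
    }
}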
public void dequeue_req(Req req) { __dequeue_req(req); }
private void issue_req(Req req) { //remove request from waiting queue List <Req> q = get_q(req); Dbg.Assert(q.Contains(req)); q.Remove(req); req.queueing_latency = (int)(cycles - req.ts_arrival); total_queueing_latency[req.pid] += (ulong)req.queueing_latency; //add to inflight queue MemAddr addr = req.addr; List <Req> inflight_q = inflightqs[addr.rid, addr.bid]; Dbg.Assert(inflight_q.Count < inflight_q.Capacity); inflight_q.Add(req); //add to command queue List <Cmd> cmd_q = cmdqs[addr.rid, addr.bid]; Dbg.Assert(cmd_q.Count == 0); List <Cmd> new_cmd_q = decode_req(req); Dbg.Assert(new_cmd_q.Count > 0); cmd_q.AddRange(new_cmd_q); Cmd cmd = cmd_q[0]; //meta_mctrl meta_mctrl.issue_req(req); req.ts_issue = cycles; //stats BankStat bstat = Stat.banks[addr.cid, addr.rid, addr.bid]; bstat.access.Collect(); if (cmd.type == Cmd.TypeEnum.PRECHARGE || cmd.type == Cmd.TypeEnum.ACTIVATE) { //bank stat bstat.row_miss.Collect(); bstat.row_miss_perproc[req.pid].Collect(); //proc stat if (cmd.req.type == ReqType.RD) { Stat.procs[req.pid].row_hit_rate_read.Collect(0); Stat.procs[req.pid].row_miss_read.Collect(); } else { Stat.procs[req.pid].row_hit_rate_write.Collect(0); Stat.procs[req.pid].row_miss_write.Collect(); } } else { //bank stat bstat.row_hit.Collect(); bstat.row_hit_perproc[req.pid].Collect(); //proc stat if (cmd.req.type == ReqType.RD) { Stat.procs[req.pid].row_hit_rate_read.Collect(1); Stat.procs[req.pid].row_hit_read.Collect(); } else { Stat.procs[req.pid].row_hit_rate_write.Collect(1); Stat.procs[req.pid].row_hit_write.Collect(); } } //issue command issue_cmd(cmd); }
protected bool is_row_hit(Req req) { return(meta_mctrl.is_row_hit(req)); }
public static void DramSetCorePrevRowid(Req req) { core_prev_rowid[req.pid, NVM_bank_num + (req.addr.cid * Config.mem2.rank_max + req.addr.rid) * Dram_BANK_MAX + req.addr.bid] = req.addr.rowid; }
public static void NVMSetCorePrevRowid(Req req) { core_prev_rowid[req.pid, (req.addr.cid * Config.mem.rank_max + req.addr.rid) * NVM_BANK_MAX + req.addr.bid] = req.addr.rowid; }
public List<Req> get_inflight_q(Req req) { List<Req> q = inflightqs[req.addr.rid, req.addr.bid]; return q; }
public abstract void enqueue_req(Req req);
public void __dequeue_req(Req req) { req.ts_departure = cycles; Dbg.Assert(req.ts_departure - req.ts_arrival > 0); //sched meta_mctrl.dequeue_req(req); //load stat management if (req.type == ReqType.RD) { rload--; rload_per_proc[req.pid]--; rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]--; Dbg.Assert(rload >= 0); Dbg.Assert(rload_per_proc[req.pid] >= 0); Dbg.Assert(rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid] >= 0); } else { wload--; wload_per_proc[req.pid]--; wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]--; Dbg.Assert(wload >= 0); Dbg.Assert(wload_per_proc[req.pid] >= 0); Dbg.Assert(wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid] >= 0); } meta_mctrl.sched.service_counter(req); //dequeue proper if (req.type == ReqType.RD) { //traverse crossbar Sim.xbar.enqueue(req); } else { bool removeok = mctrl_writeq.Remove(req); Dbg.Assert(removeok); req.latency = (int)(req.ts_departure - req.ts_arrival); Callback cb = req.callback; cb(req); } }
public static void NVMResetRowBufferChange(Req req) { core_prev_rowid[req.pid, (req.addr.cid * Config.mem.rank_max + req.addr.rid) * NVM_BANK_MAX + req.addr.bid] = 0; }
private List<Cmd> decode_req(Req req) { MemAddr addr = req.addr; List<Cmd> cmd_q = cmdqs[addr.rid, addr.bid]; int pid = req.pid; Bank b = chan.ranks[addr.rid].banks[addr.bid]; List<Cmd> decode_cmd_q = new List<Cmd>(CMDQ_MAX); if (b.curr_rowid == -1) { //row-closed req.row_hit = false; decode_cmd_q.Add(new Cmd(Cmd.TypeEnum.ACTIVATE, addr, pid, req, cmd_q)); } else if (b.curr_rowid != (long)addr.rowid) { //row-conflict req.row_hit = false; decode_cmd_q.Add(new Cmd(Cmd.TypeEnum.PRECHARGE, addr, pid, req, cmd_q)); decode_cmd_q.Add(new Cmd(Cmd.TypeEnum.ACTIVATE, addr, pid, req, cmd_q)); } Cmd.TypeEnum RW = (req.type == ReqType.WR ? Cmd.TypeEnum.WRITE : Cmd.TypeEnum.READ); decode_cmd_q.Add(new Cmd(RW, addr, pid, req, cmd_q)); return decode_cmd_q; }
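// Decode summary sketch (restates the three cases handled in decode_req above):
//   closed row   (curr_rowid == -1)          -> ACTIVATE, then READ/WRITE
//   row conflict (curr_rowid != addr.rowid)  -> PRECHARGE, ACTIVATE, then READ/WRITE
//   row hit      (curr_rowid == addr.rowid)  -> READ/WRITE only
// The helper below is a hypothetical standalone restatement, not part of the original controller.
static class DecodeSketch {
    // Number of DRAM commands a request expands to, given the bank's currently open row.
    public static int cmd_count(long curr_rowid, ulong rowid) {
        if (curr_rowid == -1) return 2;            // activate + column access
        if (curr_rowid != (long)rowid) return 3;   // precharge + activate + column access
        return 1;                                  // column access only (row hit)
    }
}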
public static void DramBankPidDeUpdate(Req req) { bank_req_pid[NVM_bank_num + (req.addr.cid * Config.mem2.rank_max + req.addr.rid) * Dram_BANK_MAX + req.addr.bid] = Config.N; }
private Req __better_req(Req req1, Req req2) { bool is_wr1 = req1.type == ReqType.WR; bool is_wr2 = req2.type == ReqType.WR; if (is_wr1 && is_wr2) { return meta_mctrl.better_wb_req(req1, req2); } if (is_wr1 ^ is_wr2) { if (is_wr1) return req1; else return req2; } //two reads return meta_mctrl.better_req(req1, req2); }
public static void NVMBankPidDeUpdate(Req req) { bank_req_pid[(req.addr.cid * Config.mem.rank_max + req.addr.rid) * NVM_BANK_MAX + req.addr.bid] = Config.N; }
public MemCtrl get_mctrl(Req req) { if (!is_omniscient) { Dbg.Assert(mctrl.cid == req.addr.cid); return mctrl; } return mctrls[req.addr.cid]; }
public static void DramResetRowBufferChange(Req req) { core_prev_rowid[req.pid, NVM_bank_num + (req.addr.cid * Config.mem2.rank_max + req.addr.rid) * Dram_BANK_MAX + req.addr.bid] = 0; }
public void __dequeue_req(Req req) { req.ts_departure = cycles; Dbg.Assert(req.ts_departure - req.ts_arrival > 0); if (!req.migrated_request) { if (Config.proc.cache_insertion_policy == "PFA") { RowStat.UpdateMLP(RowStat.NVMDict, req); Measurement.mem_num_dec(req); // Measurement.NVMServiceTimeUpdate (req); // Measurement.NVMCoreReqNumDec (req); Row_Migration_Policies.target = true; Row_Migration_Policies.target_req = req; } else if (Config.proc.cache_insertion_policy == "RBLA") { Row_Migration_Policies.target = true; Row_Migration_Policies.target_req = req; } } if (Config.proc.cache_insertion_policy == "PFA") { Measurement.NVMCoreReqNumDec(req); } /* if (Config.proc.cache_insertion_policy == "PFA") * { * Measurement.NVMSetCorePrevRowid (req); * } */ //sched meta_mctrl.dequeue_req(req); //load stat management if (!req.cache_wb) { if (req.type == ReqType.RD) { rload--; rload_per_proc[req.pid]--; rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]--; Dbg.Assert(rload >= 0); Dbg.Assert(rload_per_proc[req.pid] >= 0); Dbg.Assert(rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid] >= 0); } else { wload--; wload_per_proc[req.pid]--; wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]--; Dbg.Assert(wload >= 0); Dbg.Assert(wload_per_proc[req.pid] >= 0); Dbg.Assert(wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid] >= 0); } } else { if (req.type == ReqType.RD) { rload--; } else { wload--; } } /* //dequeue proper * if (req.type == ReqType.RD) { * //traverse crossbar * Sim.xbar.enqueue(req); * * //cache * Sim.caches[Sim.get_cache(req.pid)].meta_insert(req); * } * else { * bool removeok = mctrl_writeq.Remove(req); * Dbg.Assert(removeok); * req.latency = (int)(req.ts_departure - req.ts_arrival); * * Callback cb = req.callback; * cb(req); * * if (!req.cache_wb) { * //cache * switch (Config.proc.cache_write_policy) { * case "WriteThrough": * // do nothing * break; * case "WriteBack": * Sim.caches[Sim.get_cache(req.pid)].meta_insert(req); * break; * } * } * else * RequestPool.enpool(req); * } */ if (req.type == ReqType.RD) { if (!Sim.caches[Sim.get_cache(req.pid)].is_cached(req)) { Sim.caches[Sim.get_cache(req.pid)].meta_insert(req); } // if (req.callback != null) Sim.xbar.enqueue(req); // else // RequestPool.enpool(req); } else { bool removeok = mctrl_writeq.Remove(req); Dbg.Assert(removeok); req.latency = (int)(req.ts_departure - req.ts_arrival); Callback cb = req.callback; if (!req.cache_wb) { switch (Config.proc.cache_write_policy) { case "WriteThrough": break; case "WriteBack": if (!Sim.caches[Sim.get_cache(req.pid)].is_cached(req)) { Sim.caches[Sim.get_cache(req.pid)].meta_insert(req); } break; } if (cb != null) { cb(req); } // else // RequestPool.enpool(req); } else { RequestPool.enpool(req); } } }