// Drains the cache-hit queue: retires each queued hit whose modelled departure
// time has been reached, installing the block into the L1 and waking any
// instructions waiting on it.
// NOTE(review): the hit-latency counters below are incremented once per
// while-iteration, i.e. also on the pass that finds the head request not yet
// ready and returns — presumably this accrues latency per simulated cycle
// while a hit is outstanding; confirm against the intended stat semantics.
public void service_cache_queue()
{
    while (cache_hit_queue.Count != 0) {
        if (pid == Sim.highest_rank_proc) {
            high_priority_total_hit_latency++;
        }
        total_hit_latency++;
        Req first_request = cache_hit_queue.First.Value;
        if ((ulong)first_request.ts_departure <= cycles) {
            // Install the block into L1 on first service.
            if (!l1_cache.has_addr(first_request.block_addr, ReqType.RD)) {
                l1_cache.cache_add(first_request.block_addr, first_request.type, (ulong)pid);
            }
            cache_hit_queue.RemoveFirst();
            RequestPool.enpool(first_request);
            // Release all MSHR entries for this block and unblock dependents.
            mshr.RemoveAll(x => x.block_addr == first_request.block_addr);
            inst_wnd.set_ready(first_request.block_addr);
        }
        else {
            // Queue is ordered by departure time: head not ready => nothing is.
            return;
        }
    }
}
// Fetches the next record from the textual trace and materialises the
// corresponding read request plus, when enabled and present on the line,
// its writeback request. A trace line is "<cpu_inst_cnt> <rd_addr> [wb_addr]".
public void get_req(ref int cpu_inst_cnt, out Req rd_req, out Req wb_req)
{
    string trace_line = read_trace();
    string[] fields = trace_line.Split(new Char[] { ' ' });

    // Field 0: number of non-memory instructions preceding this access.
    cpu_inst_cnt = int.Parse(fields[0]);
    total_cpu_inst_count += (ulong)cpu_inst_cnt;

    // Field 1: read address; tag the top bits with the process id.
    ulong read_addr = ulong.Parse(fields[1]) | (((ulong)pid) << 56);
    rd_req = RequestPool.depool();
    rd_req.set(pid, ReqType.RD, read_addr);

    // No writeback when disabled in config or absent from the trace line.
    if (!Config.proc.wb || fields.Length == 2) {
        wb_req = null;
        return;
    }

    Dbg.Assert(fields.Length == 3);

    // Field 2: writeback address, tagged the same way.
    ulong write_addr = ulong.Parse(fields[2]) | (((ulong)pid) << 56);
    wb_req = RequestPool.depool();
    wb_req.set(pid, ReqType.WR, write_addr);
}
// Completion callback for a read request returned by the memory system.
// Records stats, unblocks dependents, queues any piggybacked writeback,
// and recycles the request object.
public void recv_req(Req req)
{
    // Bookkeeping for served reads.
    Stat.procs[pid].read_req_served.Collect();
    Stat.procs[pid].read_avg_latency.Collect(req.latency);

    // Unblock dependent instructions and release the MSHR entry.
    inst_wnd.set_ready(req.block_addr);
    mshr.RemoveAll(x => x == req.block_addr);

    // Queue the piggybacked writeback unless one for the same block is pending.
    Req writeback = req.wb_req;
    if (writeback != null) {
        if (wb_q.Exists(x => x.block_addr == writeback.block_addr)) {
            RequestPool.enpool(writeback);  // merged with the pending one
        }
        else {
            wb_q.Add(writeback);
        }
    }

    // Destroy (recycle) the read request itself.
    RequestPool.enpool(req);
    out_read_req--;
}
// Completion callback for a writeback request: collect write stats,
// then recycle the request object back into the pool.
public void recv_wb_req(Req req)
{
    var proc_stats = Stat.procs[pid];
    proc_stats.write_req_served.Collect();
    proc_stats.write_avg_latency.Collect(req.latency);

    RequestPool.enpool(req);
}
// Builds a read-type prefetch request for the given block address
// (the block number is converted to a byte address before the set call).
public Req create_new_prefetch_req(int pid, ulong prefetch_block_addr)
{
    ulong byte_addr = prefetch_block_addr << Config.proc.block_size_bits;
    Req prefetch = RequestPool.depool();
    prefetch.set_prefetch(pid, ReqType.RD, ReqType.RD, byte_addr);
    return prefetch;
}
// Evicts the page containing req.block_addr from this page-granularity cache.
// For every dirty block of the page it enqueues a migration read (new DRAM
// mapping) and a writeback to the block's home address.
// Returns true when the page was found and displaced, false otherwise.
public override bool displace(Req req)
{
    // get the set
    int temp = set_hash(req.block_addr);         // set index of the page
    int temp1 = set_hash_block(req.block_addr);  // block index within the page
    // search for the entry
    for (int n = 0; n < ways; n++) {
        if (data[temp][n].valid && data[temp][n].block_valid[temp1] && data[temp][n].addr == (req.block_addr >> Config.proc.page_block_diff_bits)) {
            // displace and write back if necessary
            data[temp][n].valid = false;
            // NOTE(review): utilization drops by a full page's worth of blocks
            // regardless of how many block_valid bits were actually set —
            // confirm this matches how the counter is incremented elsewhere.
            Sim.Dram_Utilization_size = Sim.Dram_Utilization_size - (ulong)Config.proc.page_block_diff;
            for (int block_id = 0; block_id < Config.proc.page_block_diff; block_id++) {
                data[temp][n].block_valid[block_id] = false;
            }
            for (int block_id = 0; block_id < Config.proc.page_block_diff; block_id++) {
                if (data[temp][n].block_dirty[block_id]) {
                    // Migration read addressed by the physical frame (way/set),
                    // i.e. the "new dram mapping" scheme.
                    Req req_insert2 = new Req();
                    // req_insert2.set(data[temp][n].pid, ReqType.RD, (data[temp][n].addr << Config.proc.page_size_bits) + (ulong)(block_id * Config.proc.block_size), true);
                    //new dram mapping
                    req_insert2.set(data[temp][n].pid, ReqType.RD, (ulong)((n * sets + temp) << Config.proc.page_size_bits) + (ulong)(block_id << Config.proc.block_size_bits), true);
                    //end new dram mapping
                    req_insert2.ts_arrival = cycles;
                    req_insert2.migrated_request = true;
                    reqs.Enqueue(req_insert2);
                    // write data back to the block's home (tag-derived) address
                    Req wb_req = RequestPool.depool();
                    // wb_req.set(way.req.pid, ReqType.WR, way.req.paddr);
                    // wb_req.set(way.req.pid, ReqType.WR, way.req.paddr,true);
                    // wb_req.set(data[temp][n].pid, ReqType.WR, data[temp][n].addr << Config.proc.block_size_bits, true);
                    wb_req.set(data[temp][n].pid, ReqType.WR, (data[temp][n].addr << Config.proc.page_size_bits) + (ulong)(block_id * Config.proc.block_size), true);
                    wb_req.cache_wb = true;
                    wb_req.migrated_request = true;
                    wbs.Enqueue(wb_req);
                }
            }
            return(true);
        }
    }
    // Page not resident in this set.
    return(false);
}
// Reads the next (address, cpu-instruction-count) record from the gzip'd
// binary trace and builds the corresponding read request. On end-of-stream
// the trace file is reopened and one record is read, so the trace wraps
// around. This trace format carries no writeback requests (wb_req is null).
public void get_req(ref int cpu_inst_cnt, out Req rd_req, out Req wb_req)
{
    try {
        read_trace_record(ref cpu_inst_cnt, out rd_req);
    }
    catch (EndOfStreamException) {
        // Wrap: reopen the compressed trace from the start and retry once.
        gzip_reader = new GZipInputStream(File.OpenRead(trace_fname));
        binary_reader = new BinaryReader(gzip_reader);
        read_trace_record(ref cpu_inst_cnt, out rd_req);
    }
    wb_req = null;
}

// Parses one binary trace record: an 8-byte address (bit 63 flags a
// processor-side write) followed by a 4-byte instruction count, and turns
// it into a pooled read request tagged with this core's pid.
private void read_trace_record(ref int cpu_inst_cnt, out Req rd_req)
{
    ulong rd_addr = binary_reader.ReadUInt64();
    cpu_inst_cnt = binary_reader.ReadInt32();

    // Memory-side type is always a read; bit 63 of the raw address records
    // whether the processor-side access was a read or a write.
    ReqType req_type = ReqType.RD;
    ReqType proc_req_type = (rd_addr >> 63 != 1) ? ReqType.RD : ReqType.WR;

    // Tag the upper address bits with the process id.
    rd_addr = rd_addr | (((ulong)pid) << 56);
    rd_req = RequestPool.depool();
    rd_req.set(pid, req_type, proc_req_type, rd_addr);
}
// Retires memory requests whose modelled latency has elapsed: fills the L2
// (possibly evicting a victim), mirrors the fill into the L1, queues a
// writeback for a dirty victim, and wakes dependent instructions.
public void service_mem_queue()
{
    while (mem_queue.Count != 0) {
        Req first_request = mem_queue.First.Value;
        if ((ulong)first_request.ts_departure <= cycles) {
            Stat.procs[pid].read_req_served.Collect();
            Stat.procs[pid].read_avg_latency.Collect(first_request.latency);
            ulong wb_addr = Proc.NULL_ADDRESS;
            if (!cache.has_addr(first_request.block_addr, first_request.proc_req_type)) {
                // Fill L2; cache_add returns the evicted block (or NULL_ADDRESS).
                wb_addr = cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
                if (!l1_cache.has_addr(first_request.block_addr, first_request.type)) {
                    l1_cache.cache_add(first_request.block_addr, first_request.type, (ulong)pid);
                }
                // Keep L1 inclusive: drop the block evicted from L2.
                l1_cache.cache_remove(wb_addr, ReqType.RD);
            }
            // With writebacks disabled, discard the victim silently.
            if (Config.proc.wb == false) {
                wb_addr = Proc.NULL_ADDRESS;
            }
            if (wb_addr != Proc.NULL_ADDRESS) {
                Req wb_req = RequestPool.depool();
                wb_req.set(pid, ReqType.WR, ReqType.NULL, wb_addr);
                // Merge with an already-pending writeback to the same block.
                bool wb_merge = wb_q.Exists(x => x.block_addr == wb_req.block_addr);
                if (!wb_merge) {
                    wb_q.Add(wb_req);
                }
                else {
                    RequestPool.enpool(wb_req);
                }
            }
            // Retire the head request and wake dependents.
            mem_queue.RemoveFirst();
            RequestPool.enpool(first_request);
            inst_wnd.set_ready(first_request.block_addr);
        }
        else {
            // Queue is in departure order: head not ready => stop.
            return;
        }
    }
}
// Completion callback for a read returned by memory: records stats, frees
// the instruction window / MSHR entry, fills L2 (and mirrors into L1), and
// queues a writeback for any dirty victim evicted by the fill.
public void recv_req(Req req)
{
    //stats
    Stat.procs[pid].read_req_served.Collect();
    Stat.procs[pid].read_avg_latency.Collect(req.latency);
    total_read_latency += (ulong)req.latency;
    Req first_request = req;
    ulong wb_addr = Proc.NULL_ADDRESS;
    //free up instruction window and mshr
    inst_wnd.set_ready(req.block_addr);
    mshr.RemoveAll(x => x == req.block_addr);
    if (!cache.has_addr(first_request.block_addr, ReqType.RD)) {
        // Fill L2; returns the evicted victim block (or NULL_ADDRESS).
        wb_addr = cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
        // NOTE(review): the L1 presence check uses first_request.type while the
        // L1 fill uses proc_req_type; the equivalent path in service_mem_queue
        // passes .type to cache_add — confirm which is intended.
        if (!l1_cache.has_addr(first_request.block_addr, first_request.type)) {
            l1_cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
        }
        // Keep L1 inclusive: drop the block evicted from L2.
        l1_cache.cache_remove(wb_addr, ReqType.RD);
    }
    //add to cache; returns the address of evicted block; returns null if empty block has been populated
    //if there is an evicted block, writeback; another memory request is generated
    if (Config.proc.wb == false) {
        wb_addr = Proc.NULL_ADDRESS;
    }
    if (wb_addr != Proc.NULL_ADDRESS) {
        Req wb_req = RequestPool.depool();
        wb_req.set(pid, ReqType.WR, ReqType.NULL, wb_addr);
        // Merge with an already-pending writeback to the same block.
        bool wb_merge = wb_q.Exists(x => x.block_addr == wb_req.block_addr);
        if (!wb_merge) {
            wb_q.Add(wb_req);
        }
        else {
            RequestPool.enpool(wb_req);
        }
    }
    //destroy (recycle) req
    RequestPool.enpool(req);
    out_read_req--;
}
// Retires cache hits whose modelled latency has elapsed: installs each block
// into the L1 and wakes the instructions waiting on it. Stops at the first
// not-yet-ready request (the queue is ordered by departure time).
public void service_cache_queue()
{
    while (cache_hit_queue.Count != 0) {
        Req head = cache_hit_queue.First.Value;

        // Head not ready yet => nothing further can be ready either.
        if ((ulong)head.ts_departure > cycles) {
            return;
        }

        // Install into L1 if absent, then retire the hit.
        if (!l1_cache.has_addr(head.block_addr, ReqType.RD)) {
            l1_cache.cache_add(head.block_addr, head.type, (ulong)pid);
        }
        cache_hit_queue.RemoveFirst();
        RequestPool.enpool(head);
        inst_wnd.set_ready(head.block_addr);
    }
}
// Issues up to Config.proc.ipc instructions this cycle. Non-memory
// instructions drain first; at most one memory request is issued per cycle.
// A memory request is checked against the L1, the (sampled) L2 tag store and
// optionally the aux tag store / pollution filter (FST interference
// tracking), then sent to the cache-hit queue, the memory controller, or the
// modelled memory queue.
public void issue_insts(bool issued_rd_req)
{
    //issue instructions
    for (int i = 0; i < Config.proc.ipc; i++) {
        if (inst_wnd.is_full()) {
            // Count the stall only once per cycle.
            if (i == 0) {
                Stat.procs[pid].stall_inst_wnd.Collect();
            }
            return;
        }
        //cpu (non-memory) instructions drain first
        if (curr_cpu_inst_cnt > 0) {
            curr_cpu_inst_cnt--;
            inst_wnd.add(0, false, true, 0);
            continue;
        }
        //only one memory instruction can be issued per cycle
        if (issued_rd_req) {
            return;
        }
        //check if true miss: a duplicate window entry means an earlier
        //instruction already requested this block
        bool false_miss = inst_wnd.is_duplicate(curr_rd_req.block_addr);
        if (false_miss) {
            // Inherit ready/alone-hit status from the existing entry.
            bool get_ready = inst_wnd.get_ready_status(curr_rd_req.block_addr);
            bool get_alone_hit = inst_wnd.get_alone_hit_status(curr_rd_req.block_addr);
            inst_wnd.add(curr_rd_req.block_addr, true, get_ready, get_alone_hit, curr_rd_req.pc);
            Dbg.Assert(curr_rd_req.wb_req == null);
            RequestPool.enpool(curr_rd_req);
            curr_rd_req = get_req();
            continue;
        }
        if (!Config.is_cache_filtered) {
            //L1 lookup
            bool is_in_l1_cache = false;
            is_in_l1_cache = l1_cache.has_addr(curr_rd_req.block_addr, curr_rd_req.proc_req_type);
            if (is_in_l1_cache) {
                if (pid == Sim.highest_rank_proc) {
                    Sim.cache_controller.count_l1_hits(pid, true);
                }
                Sim.cache_controller.count_l1_hits(pid, false);
                Stat.procs[pid].l1_cache_hit_count.Collect();
                // L1 hit retires immediately: enter the window already ready.
                inst_wnd.add(curr_rd_req.block_addr, true, true, true, curr_rd_req.pc);
                RequestPool.enpool(curr_rd_req);
                curr_rd_req = get_req();
                continue;
            }
            //L1 miss: reserve an MSHR before going further
            bool mshr_ok = insert_mshr(curr_rd_req);
            if (!mshr_ok) {
                mshr_retry = true;
                return;
            }
            Stat.procs[pid].l1_cache_miss_count.Collect();
            if (pid == Sim.highest_rank_proc) {
                Sim.cache_controller.count_l1_misses(pid, true);
            }
            Sim.cache_controller.count_l1_misses(pid, false);
        }
        is_in_cache = false;
        is_alone_hit = false;
        if (!Config.is_cache_filtered) {
            //L2 lookup (sampled-set lookup feeds utility monitoring)
            int way_sampled_set = cache.has_addr_sampled_set(curr_rd_req.block_addr, curr_rd_req.proc_req_type);
            is_in_cache = cache.has_addr(curr_rd_req.block_addr, curr_rd_req.proc_req_type);
            //Count high-priority and non-high-priority hits and misses in the sampled sets
            if (!is_in_cache) {
                Sim.cache_controller.count_misses(pid, way_sampled_set, false);
            }
            if (!is_in_cache && (pid == Sim.highest_rank_proc)) {
                Sim.cache_controller.count_misses(pid, way_sampled_set, true);
            }
            if (is_in_cache && (pid == Sim.highest_rank_proc)) {
                Sim.cache_controller.count_current_hits(pid, way_sampled_set, true);
            }
            if (is_in_cache) {
                Sim.cache_controller.count_current_hits(pid, way_sampled_set, false);
            }
            //Count high-priority and non-high-priority hits and misses in all sets
            if (!is_in_cache) {
                Sim.cache_controller.count_all_misses(pid, false);
            }
            if (!is_in_cache && (pid == Sim.highest_rank_proc)) {
                Sim.cache_controller.count_all_misses(pid, true);
            }
            if (is_in_cache && (pid == Sim.highest_rank_proc)) {
                Sim.cache_controller.count_all_hits(pid, true);
            }
            if (is_in_cache) {
                Sim.cache_controller.count_all_hits(pid, false);
            }
            if (Config.aux_cache) {
                //aux tag store lookup ("alone" behavior estimate)
                way_sampled_set = aux_cache.has_addr_sampled_set(curr_rd_req.block_addr, curr_rd_req.proc_req_type);
                is_alone_hit = aux_cache.has_addr(curr_rd_req.block_addr, curr_rd_req.proc_req_type);
                //count aux tag store hits in sampled sets
                if (pid == Sim.highest_rank_proc) {
                    Sim.cache_controller.count_aux_hits_alone(pid, way_sampled_set);
                }
                Sim.cache_controller.count_aux_hits(pid, way_sampled_set);
            }
            if (Config.fst) {
                // FST: pollution filter takes precedence over the aux tags.
                if (Config.poll_filter) {
                    is_alone_hit = pollution_vector.check_filter(curr_rd_req.block_addr);
                }
                else if (Config.aux_cache) {
                    is_alone_hit = aux_cache.has_addr(curr_rd_req.block_addr, curr_rd_req.proc_req_type);
                }
            }
        }
        if (Config.fst) {
            if (is_alone_hit) {
                // Would have hit running alone: start the interference tracker.
                curr_rd_req.alone_counter_tracker = (int)Config.mem_latency;
                setting_counter++;
                interference_bit = true;
                if (Config.poll_filter) {
                    interference_bit_core = pollution_vector.get_interfering_core(curr_rd_req.block_addr);
                }
                interference_bit_set_addr = curr_rd_req.block_addr;
            }
        }
        if (is_alone_hit) {
            curr_rd_req.is_alone_hit = true;
        }
        // if (Config.stride_prefetcher_on) stride_prefetcher.process_request(pid, curr_rd_req.block_addr, curr_rd_req.pc);
        //check if already in cache
        if (!Config.is_cache_filtered) {
            if (is_in_cache) {
                //L2 hit: service through the cache-hit queue
                Stat.procs[pid].l2_cache_hit_count.Collect();
                add_to_cache_queue(curr_rd_req);
                // RequestPool.enpool(curr_rd_req);
                curr_rd_req = get_req();
                continue;
            }
        }
        //L2 miss: train the stride prefetcher on the demand stream
        if (Config.stride_prefetcher_on) {
            stride_prefetcher.process_request(pid, curr_rd_req.block_addr, curr_rd_req.pc);
        }
        inst_wnd.add(curr_rd_req.block_addr, true, false, curr_rd_req.is_alone_hit, curr_rd_req.pc);
        if (Config.model_memory) {
            //try memory controller
            bool mctrl_ok = insert_mctrl(curr_rd_req);
            if (!mctrl_ok) {
                mctrl_retry = true;
                return;
            }
        }
        else {
            add_to_mem_queue(curr_rd_req);
        }
        Stat.procs[pid].l2_cache_miss_count.Collect();
        //issued memory request
        issued_rd_req = true;
        //get new read request
        curr_rd_req = get_req();
    }
}
// Issues up to Config.proc.ipc instructions this cycle. Non-memory
// instructions drain first; at most one memory request is issued per cycle.
// Memory requests are checked against the L1 and L2 and then sent either to
// the cache-hit queue, the memory controller, or the modelled memory queue.
public void issue_insts(bool issued_rd_req)
{
    //issue instructions
    for (int i = 0; i < Config.proc.ipc; i++) {
        if (inst_wnd.is_full()) {
            // Count the stall only once per cycle.
            if (i == 0) {
                Stat.procs[pid].stall_inst_wnd.Collect();
            }
            return;
        }
        //cpu (non-memory) instructions drain first
        if (curr_cpu_inst_cnt > 0) {
            curr_cpu_inst_cnt--;
            inst_wnd.add(0, false, true, 0);
            continue;
        }
        //only one memory instruction can be issued per cycle
        if (issued_rd_req) {
            return;
        }
        //check if true miss: a duplicate window entry means an earlier
        //instruction already requested this block
        bool false_miss = inst_wnd.is_duplicate(curr_rd_req.block_addr);
        if (false_miss) {
            Dbg.Assert(curr_rd_req.wb_req == null);
            RequestPool.enpool(curr_rd_req);
            curr_rd_req = get_req();
            continue;
        }
        if (!Config.is_cache_filtered) {
            bool is_in_l1_cache = false;
            is_in_l1_cache = l1_cache.has_addr(curr_rd_req.block_addr, curr_rd_req.proc_req_type);
            if (is_in_l1_cache) {
                Stat.procs[pid].l1_cache_hit_count.Collect();
                // BUG FIX: record the hitting request in the instruction window
                // *before* recycling it and fetching the next one. The original
                // called inst_wnd.add after "curr_rd_req = get_req()", so the
                // window recorded the *next* request's address (pre-marked
                // ready) and the L1-hit instruction itself was never added.
                // This also matches the ordering used by the other
                // issue_insts variant in this file.
                inst_wnd.add(curr_rd_req.block_addr, true, true, curr_rd_req.pc);
                RequestPool.enpool(curr_rd_req);
                curr_rd_req = get_req();
                continue;
            }
            Stat.procs[pid].l1_cache_miss_count.Collect();
        }
        is_in_cache = false;
        is_cache_hit = false;
        if (!Config.is_cache_filtered) {
            is_in_cache = cache.has_addr(curr_rd_req.block_addr, curr_rd_req.proc_req_type);
        }
        //check if already in cache (L2 hit is serviced via the hit queue)
        if (!Config.is_cache_filtered) {
            if (is_in_cache) {
                Stat.procs[pid].l2_cache_hit_count.Collect();
                add_to_cache_queue(curr_rd_req);
                curr_rd_req = get_req();
                continue;
            }
        }
        inst_wnd.add(curr_rd_req.block_addr, true, false, false, curr_rd_req.pc);
        if (Config.model_memory) {
            //try mshr
            bool mshr_ok = insert_mshr(curr_rd_req);
            if (!mshr_ok) {
                mshr_retry = true;
                return;
            }
            //try memory controller
            bool mctrl_ok = insert_mctrl(curr_rd_req);
            if (!mctrl_ok) {
                mctrl_retry = true;
                return;
            }
        }
        else {
            add_to_mem_queue(curr_rd_req);
        }
        Stat.procs[pid].l2_cache_miss_count.Collect();
        misses++;
        //issued memory request
        issued_rd_req = true;
        //get new read request
        curr_rd_req = get_req();
    }
}
// Inserts the block referenced by req into this page-granularity cache.
// Fast path: the page is already resident and the block slot is free —
// migrate just that block in. Slow path: pick a victim way (first invalid
// way, else LRU by access time), write back its dirty blocks, and allocate
// the page for the new block. Migration traffic is addressed with the
// "new dram mapping" scheme (physical frame = way * sets + set).
// Returns false if the block is already cached, true after insertion.
public override bool insert(Req req)
{
    if (is_cached(req)) {
        return(false);
    }
    // get the set
    int temp = set_hash(req.block_addr);         // set index of the page
    int temp1 = set_hash_block(req.block_addr);  // block index within the page
    // Fast path: page resident, block slot free — migrate only this block.
    for (int n = 0; n < ways; n++) {
        if (data[temp][n].valid && (!data[temp][n].block_valid[temp1]) && (data[temp][n].addr == (req.block_addr >> Config.proc.page_block_diff_bits))) {
            Req req_insert1 = new Req();
            // req_insert1.set(req.pid, ReqType.WR, req.paddr, true);
            //new dram mapping
            req_insert1.set(req.pid, ReqType.WR, (ulong)((n * sets + temp) << Config.proc.page_size_bits) + (ulong)(temp1 << Config.proc.block_size_bits), true);
            //end new dram mapping
            req_insert1.ts_arrival = cycles;
            req_insert1.migrated_request = true;
            Sim.Dram_Utilization_size = Sim.Dram_Utilization_size + 1;
            reqs.Enqueue(req_insert1);
            data[temp][n].access = cycles;
            data[temp][n].block_valid[temp1] = true;
            data[temp][n].block_dirty[temp1] = false;
            return(true);
        }
    }
    // find a candidate for replacement: prefer an invalid way, else LRU.
    int victim = 0;
    bool victim_status = false;
    for (int n = 0; n < ways; n++) {
        if (!data[temp][n].valid) {
            victim = n;
            victim_status = true;
            break;
        }
    }
    if (!victim_status) {
        for (int n = 0; n < ways; n++) {
            if (data[temp][n].access < data[temp][victim].access) {
                victim = n;
            }
        }
    }
    // BUG FIX: the original asserted "victim != null" on an int, which is
    // always true (CS0472) and checked nothing. Assert the selected way is
    // actually in range instead.
    Dbg.Assert(victim >= 0 && victim < ways);
    if (data[temp][victim].valid == true) {
        Sim.Dram_Utilization_size = Sim.Dram_Utilization_size - (ulong)Config.proc.page_block_diff;
    }
    for (int block_id = 0; block_id < Config.proc.page_block_diff; block_id++) {
        data[temp][victim].block_valid[block_id] = false;
    }
    // do writeback of the victim's dirty blocks
    switch (Config.proc.cache_write_policy) {
    case "WriteThrough":
        throw new System.Exception("Cache: Dirty data in a write-through cache.");

    case "WriteBack":
        // write data back
        for (int block_id = 0; block_id < Config.proc.page_block_diff; block_id++) {
            if (data[temp][victim].block_dirty[block_id]) {
                // Migration read addressed by the victim's physical frame.
                Req req_insert2 = new Req();
                // req_insert2.set(data[temp][victim].pid, ReqType.RD, (data[temp][victim].addr << Config.proc.page_size_bits) + (ulong)(block_id * Config.proc.block_size), true);
                //new dram mapping
                req_insert2.set(data[temp][victim].pid, ReqType.RD, (ulong)((victim * sets + temp) << Config.proc.page_size_bits) + (ulong)(block_id << Config.proc.block_size_bits), true);
                //end new dram mapping
                req_insert2.ts_arrival = cycles;
                req_insert2.migrated_request = true;
                reqs.Enqueue(req_insert2);
                // Writeback to the victim block's home (tag-derived) address.
                Req wb_req = RequestPool.depool();
                // wb_req.set(victim.req.pid, ReqType.WR, victim.req.paddr);
                // wb_req.set(victim.req.pid, ReqType.WR, victim.req.paddr, true);
                // wb_req.set(data[temp][victim].pid, ReqType.WR, data[temp][victim].addr << Config.proc.block_size_bits, true);
                wb_req.set(data[temp][victim].pid, ReqType.WR, (data[temp][victim].addr << Config.proc.page_size_bits) + (ulong)(block_id * Config.proc.block_size), true);
                wb_req.cache_wb = true;
                wb_req.migrated_request = true;
                wbs.Enqueue(wb_req);
            }
        }
        break;
    }
    // Migrate the requested block into the (now clean) victim frame.
    Req req_insert = new Req();
    // req_insert.set(req.pid, ReqType.WR, req.paddr, true);
    //new dram mapping
    req_insert.set(req.pid, ReqType.WR, (ulong)((victim * sets + temp) << Config.proc.page_size_bits) + (ulong)(temp1 << Config.proc.block_size_bits), true);
    //end new dram mapping
    req_insert.ts_arrival = cycles;
    req_insert.migrated_request = true;
    Sim.Dram_Utilization_size = Sim.Dram_Utilization_size + 1;
    reqs.Enqueue(req_insert);
    // Update the victim way's metadata for the new page/block.
    data[temp][victim].valid = true;
    // data[temp][victim].addr = req_insert.block_addr >> Config.proc.page_block_diff_bits;
    //new dram mapping
    data[temp][victim].addr = req.block_addr >> Config.proc.page_block_diff_bits;
    //end new dram mapping
    data[temp][victim].access = cycles;
    // data[temp][victim].dirty = false;
    data[temp][victim].block_dirty[temp1] = false;
    // victim.req = req_insert;
    data[temp][victim].pid = req_insert.pid;
    data[temp][victim].block_valid[temp1] = true;
    return(true);
}
// Completion callback for a read returned by memory (FST/aux-cache variant):
// records stats, clears the matching interference bit, frees the window and
// MSHR entry, fills L2 (mirroring into L1 for demand requests), updates the
// aux tag store, and queues a writeback for any dirty victim.
public void recv_req(Req req)
{
    //stats
    Stat.procs[pid].read_req_served.Collect();
    Stat.procs[pid].read_avg_latency.Collect(req.latency);
    total_read_latency += (ulong)req.latency;
    if (pid == Sim.highest_rank_proc) {
        high_priority_total_misses += 1;
    }
    inflight_mem_requests--;
    Req first_request = req;
    ulong wb_addr = Proc.NULL_ADDRESS;
    //free up instruction window and mshr
    inst_wnd.set_ready(req.block_addr);
    mshr.RemoveAll(x => x.block_addr == req.block_addr);
    if (Config.fst) {
        // The tracked interfered-with block has now been serviced.
        if ((req.block_addr == interference_bit_set_addr) && (interference_bit == true)) {
            interference_bit = false;
            interference_bit_core = Config.N;
        }
    }
    if (!cache.has_addr(first_request.block_addr, ReqType.RD)) {
        // Fill L2; returns the evicted victim block (or NULL_ADDRESS).
        wb_addr = cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
        // Mirror into L1 only for demand (non-prefetch) requests.
        if (!l1_cache.has_addr(first_request.block_addr, ReqType.RD) && !first_request.is_prefetch) {
            l1_cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
        }
        // Keep L1 inclusive: drop the block evicted from L2.
        l1_cache.cache_remove(wb_addr, ReqType.RD);
    }
    if (Config.aux_cache) {
        // Keep the aux ("alone") tag store in sync.
        if (!aux_cache.has_addr(first_request.block_addr, ReqType.RD)) {
            aux_cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
        }
    }
    if (Config.proc.wb == false) {
        wb_addr = Proc.NULL_ADDRESS;
    }
    if (wb_addr != Proc.NULL_ADDRESS) {
        Req wb_req = RequestPool.depool();
        wb_req.set(pid, ReqType.WR, ReqType.NULL, wb_addr);
        // Merge with an already-pending writeback to the same block.
        bool wb_merge = wb_q.Exists(x => x.block_addr == wb_req.block_addr);
        if (!wb_merge) {
            wb_q.Add(wb_req);
        }
        else {
            RequestPool.enpool(wb_req);
        }
    }
    //destroy (recycle) req
    RequestPool.enpool(req);
    out_read_req--;
}
// Baseline (cache-less) issue loop: up to Config.proc.ipc instructions per
// cycle, at most one memory request per cycle, every memory request goes to
// the MSHR and then the memory controller.
public void issue_insts(bool issued_rd_req)
{
    //issue instructions
    for (int i = 0; i < Config.proc.ipc; i++) {
        if (inst_wnd.is_full()) {
            // Count the stall only once per cycle.
            if (i == 0) {
                Stat.procs[pid].stall_inst_wnd.Collect();
                // Measurement.core_stall_cycles[pid] += 1;
            }
            return;
        }
        //cpu (non-memory) instructions drain first
        if (curr_cpu_inst_cnt > 0) {
            curr_cpu_inst_cnt--;
            inst_wnd.add(0, false, true);
            continue;
        }
        //only one memory instruction can be issued per cycle
        if (issued_rd_req) {
            return;
        }
        //memory instruction (only AFTER checking for one memory instruction per cycle)
        // NOTE(review): the request is added to the window *before* the
        // duplicate/MSHR/mctrl checks below. On an mshr/mctrl retry the same
        // request would be added again next cycle, and is_duplicate is
        // evaluated after this add — presumably is_duplicate means "appears
        // more than once"; confirm against InstWindow's semantics.
        inst_wnd.add(curr_rd_req.block_addr, true, false);
        //check if true miss
        bool false_miss = inst_wnd.is_duplicate(curr_rd_req.block_addr);
        if (false_miss) {
            Dbg.Assert(curr_rd_req.wb_req == null);
            RequestPool.enpool(curr_rd_req);
            curr_rd_req = get_req();
            continue;
        }
        //try mshr
        bool mshr_ok = insert_mshr(curr_rd_req);
        if (!mshr_ok) {
            mshr_retry = true;
            return;
        }
        //try memory controller
        bool mctrl_ok = insert_mctrl(curr_rd_req);
        if (!mctrl_ok) {
            mctrl_retry = true;
            return;
        }
        //issued memory request
        issued_rd_req = true;
        //get new read request
        curr_rd_req = get_req();
    }
}
// Stride-prefetcher training and issue. Looks up the stride-table entry for
// this PC, retrains whenever the observed stride changes, and once trained
// issues up to `degree` prefetches, keeping the prefetch frontier within
// (distance + 1) strides of the demand stream.
public void process_request(int pid, ulong block_addr, ulong input_pc)
{
    // First access from this PC: allocate an untrained entry and stop.
    if (!pc_present(input_pc)) {
        StrideEntry fresh = new StrideEntry();
        fresh.block_addr = block_addr;
        fresh.trained = false;
        fresh.stride = 0;
        fresh.train_hits = 0;
        insert_entry(input_pc, fresh);
        return;
    }

    int slot = entry_position(input_pc);

    // A stride change invalidates any training so far.
    int observed_stride = (int)(block_addr - stride_entries[slot].block_addr);
    if (observed_stride != stride_entries[slot].stride) {
        stride_entries[slot].stride = observed_stride;
        stride_entries[slot].trained = false;
        stride_entries[slot].train_hits = 0;
    }
    stride_entries[slot].block_addr = block_addr;

    // While untrained, keep the prefetch frontier pinned to the demand stream.
    if (!stride_entries[slot].trained) {
        stride_entries[slot].train_hits++;
        stride_entries[slot].prefetch_block_addr = block_addr;
    }
    if (stride_entries[slot].train_hits >= num_trains) {
        stride_entries[slot].trained = true;
    }
    // A zero stride never prefetches.
    if (stride_entries[slot].stride == 0) {
        return;
    }

    if (stride_entries[slot].trained == true) {
        // Frontier limit: (distance + 1) strides ahead of the current access.
        ulong frontier_limit = stride_entries[slot].block_addr + (ulong)((distance + 1) * stride_entries[slot].stride);
        int pending = (int)(((int)frontier_limit - (int)stride_entries[slot].prefetch_block_addr) / stride_entries[slot].stride);
        int to_issue = (pending > degree) ? degree : pending;
        for (int k = 0; k < to_issue; k++) {
            stride_entries[slot].prefetch_block_addr += (ulong)stride_entries[slot].stride;
            Req pf = create_new_prefetch_req(pid, stride_entries[slot].prefetch_block_addr);
            if (!insert_mctrl(pf)) {
                // Controller full: drop the prefetch and recycle the request.
                RequestPool.enpool(pf);
            }
            else {
                Stat.procs[pf.pid].num_prefetches.Collect();
            }
        }
    }
    return;
}
// Removes a serviced request from this memory controller: notifies the
// migration-policy machinery, updates per-proc/per-bank load counters, and
// routes the finished request — reads go through the crossbar (after a
// meta-cache insert), writes are removed from the write queue and complete
// via their callback (or are recycled if they were cache writebacks).
public void __dequeue_req(Req req)
{
    req.ts_departure = cycles;
    Dbg.Assert(req.ts_departure - req.ts_arrival > 0);
    // Notify the row-migration policy about serviced (non-migration) requests.
    if (!req.migrated_request) {
        if (Config.proc.cache_insertion_policy == "PFA") {
            RowStat.UpdateMLP(RowStat.NVMDict, req);
            Measurement.mem_num_dec(req);
            // Measurement.NVMServiceTimeUpdate (req);
            // Measurement.NVMCoreReqNumDec (req);
            Row_Migration_Policies.target = true;
            Row_Migration_Policies.target_req = req;
        }
        else if (Config.proc.cache_insertion_policy == "RBLA") {
            Row_Migration_Policies.target = true;
            Row_Migration_Policies.target_req = req;
        }
    }
    if (Config.proc.cache_insertion_policy == "PFA") {
        Measurement.NVMCoreReqNumDec(req);
    }
    //sched
    meta_mctrl.dequeue_req(req);
    //load stat management: demand requests update per-proc and per-rank/bank
    //counters; cache writebacks only update the global read/write loads.
    if (!req.cache_wb) {
        if (req.type == ReqType.RD) {
            rload--;
            rload_per_proc[req.pid]--;
            rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]--;
            Dbg.Assert(rload >= 0);
            Dbg.Assert(rload_per_proc[req.pid] >= 0);
            Dbg.Assert(rload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid] >= 0);
        }
        else {
            wload--;
            wload_per_proc[req.pid]--;
            wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid]--;
            Dbg.Assert(wload >= 0);
            Dbg.Assert(wload_per_proc[req.pid] >= 0);
            Dbg.Assert(wload_per_procrankbank[req.pid, req.addr.rid, req.addr.bid] >= 0);
        }
    }
    else {
        if (req.type == ReqType.RD) {
            rload--;
        }
        else {
            wload--;
        }
    }
    // (An older dequeue implementation that lived here has been removed;
    // the live read/write paths below supersede it.)
    if (req.type == ReqType.RD) {
        // Insert into the meta cache (if absent) and traverse the crossbar.
        if (!Sim.caches[Sim.get_cache(req.pid)].is_cached(req)) {
            Sim.caches[Sim.get_cache(req.pid)].meta_insert(req);
        }
        // if (req.callback != null)
        Sim.xbar.enqueue(req);
        // else
        //     RequestPool.enpool(req);
    }
    else {
        bool removeok = mctrl_writeq.Remove(req);
        Dbg.Assert(removeok);
        req.latency = (int)(req.ts_departure - req.ts_arrival);
        Callback cb = req.callback;
        if (!req.cache_wb) {
            switch (Config.proc.cache_write_policy) {
            case "WriteThrough":
                // write-through: nothing to insert into the meta cache
                break;

            case "WriteBack":
                if (!Sim.caches[Sim.get_cache(req.pid)].is_cached(req)) {
                    Sim.caches[Sim.get_cache(req.pid)].meta_insert(req);
                }
                break;
            }
            // Complete the demand write via its callback.
            if (cb != null) {
                cb(req);
            }
            // else
            //     RequestPool.enpool(req);
        }
        else {
            // Cache writebacks have no callback; just recycle the request.
            RequestPool.enpool(req);
        }
    }
}