/// <summary>
/// Inserts the page containing <paramref name="req"/>'s block into the DRAM
/// cache. If the page is already resident but the block is not yet valid,
/// only that block is filled. Otherwise a victim way is chosen (first
/// invalid way, else least-recently-accessed), its dirty blocks are written
/// back, and the new page takes its place. Migration traffic is modeled by
/// enqueueing synthetic requests on <c>reqs</c>/<c>wbs</c>.
/// </summary>
/// <param name="req">The demand request that triggered the insertion.</param>
/// <returns>False if the block is already cached; true once inserted.</returns>
public override bool insert(Req req)
{
    if (is_cached(req)) {
        return false;
    }

    // Locate the set and the block offset within its page.
    int set_idx = set_hash(req.block_addr);
    int block_idx = set_hash_block(req.block_addr);

    // Case 1: the page is already resident in some way of this set, but this
    // particular block of it is not yet valid -- just fill in the block.
    for (int n = 0; n < ways; n++) {
        if (data[set_idx][n].valid
            && !data[set_idx][n].block_valid[block_idx]
            && data[set_idx][n].addr == (req.block_addr >> Config.proc.page_block_diff_bits)) {
            Req fill_req = new Req();
            // New DRAM mapping: physical address derived from the
            // (way, set, block) position inside the DRAM cache.
            // BUGFIX: promote to ulong BEFORE shifting -- the original
            // shifted in 32-bit int arithmetic and cast afterwards, which
            // overflows for large cache geometries.
            fill_req.set(req.pid, ReqType.WR,
                         ((ulong)(n * sets + set_idx) << Config.proc.page_size_bits)
                         + ((ulong)block_idx << Config.proc.block_size_bits), true);
            fill_req.ts_arrival = cycles;
            fill_req.migrated_request = true;
            Sim.Dram_Utilization_size = Sim.Dram_Utilization_size + 1;
            reqs.Enqueue(fill_req);
            data[set_idx][n].access = cycles;
            data[set_idx][n].block_valid[block_idx] = true;
            data[set_idx][n].block_dirty[block_idx] = false;
            return true;
        }
    }

    // Case 2: choose a victim way -- prefer an invalid way; otherwise evict
    // the least-recently-accessed valid way.
    int victim = 0;
    bool found_invalid = false;
    for (int n = 0; n < ways; n++) {
        if (!data[set_idx][n].valid) {
            victim = n;
            found_invalid = true;
            break;
        }
    }
    if (!found_invalid) {
        for (int n = 0; n < ways; n++) {
            if (data[set_idx][n].access < data[set_idx][victim].access) {
                victim = n;
            }
        }
    }
    // BUGFIX: the original asserted 'victim != null' on an int, which is
    // always true (compiler warning CS0472); assert a meaningful bound.
    Dbg.Assert(victim >= 0 && victim < ways);

    if (data[set_idx][victim].valid == true) {
        Sim.Dram_Utilization_size = Sim.Dram_Utilization_size - (ulong)Config.proc.page_block_diff;
    }
    // Invalidate every block of the victim page (dirty bits are still
    // consulted below for the writeback pass).
    for (int block_id = 0; block_id < Config.proc.page_block_diff; block_id++) {
        data[set_idx][victim].block_valid[block_id] = false;
    }

    // Write back each dirty block of the victim page.
    switch (Config.proc.cache_write_policy) {
    case "WriteThrough":
        throw new System.Exception("Cache: Dirty data in a write-through cache.");

    case "WriteBack":
        for (int block_id = 0; block_id < Config.proc.page_block_diff; block_id++) {
            if (data[set_idx][victim].block_dirty[block_id]) {
                // Model the read of the dirty block out of its DRAM-cache
                // frame (same overflow fix as above).
                Req rd_req = new Req();
                rd_req.set(data[set_idx][victim].pid, ReqType.RD,
                           ((ulong)(victim * sets + set_idx) << Config.proc.page_size_bits)
                           + ((ulong)block_id << Config.proc.block_size_bits), true);
                rd_req.ts_arrival = cycles;
                rd_req.migrated_request = true;
                reqs.Enqueue(rd_req);

                // ...and the write back to the block's home address.
                Req wb_req = RequestPool.depool();
                wb_req.set(data[set_idx][victim].pid, ReqType.WR,
                           (data[set_idx][victim].addr << Config.proc.page_size_bits)
                           + (ulong)(block_id * Config.proc.block_size), true);
                wb_req.cache_wb = true;
                wb_req.migrated_request = true;
                wbs.Enqueue(wb_req);
            }
        }
        break;
    }

    // Install the new page into the victim way and fill the requested block.
    Req req_insert = new Req();
    req_insert.set(req.pid, ReqType.WR,
                   ((ulong)(victim * sets + set_idx) << Config.proc.page_size_bits)
                   + ((ulong)block_idx << Config.proc.block_size_bits), true);
    req_insert.ts_arrival = cycles;
    req_insert.migrated_request = true;
    Sim.Dram_Utilization_size = Sim.Dram_Utilization_size + 1;
    reqs.Enqueue(req_insert);

    data[set_idx][victim].valid = true;
    data[set_idx][victim].addr = req.block_addr >> Config.proc.page_block_diff_bits;
    data[set_idx][victim].access = cycles;
    data[set_idx][victim].block_dirty[block_idx] = false;
    data[set_idx][victim].pid = req_insert.pid;
    data[set_idx][victim].block_valid[block_idx] = true;
    return true;
}
/// <summary>
/// Advances this processor core by one cycle: collects cycle statistics,
/// periodically dumps IPC, services the cache/memory queues, retires
/// instructions, updates the PC-criticality tables, issues writebacks,
/// reissues a previously stalled read if needed, and finally issues new
/// instructions.
/// </summary>
public void tick()
{
    /*** Preamble ***/
    cycles++;
    Stat.procs[pid].cycle.Collect();
    inst_cnt = Stat.procs[pid].ipc.Count;

    // Progress heartbeat once every million cycles.
    if (cycles % 1000000 == 0) {
        Console.Write(" Processor " + pid + " Cycles " + cycles + " Instructions " + inst_cnt + "\n");
    }

    // Periodic IPC dump: windowed IPC over the cycles since the last dump.
    // if ((inst_cnt/(ulong)Config.periodicDumpWindow) > (ulong)prev_dump)
    if (cycles % (ulong)Config.periodicDumpWindow == 0) {
        prev_dump++;
        Sim.periodic_writer_ipc.WriteLine(" Proc " + pid + " Cycles " + cycles + " Instructions " + inst_cnt + " " + (double)(inst_cnt - prev_inst_cnt) / (double)(cycles - prev_cycle));
        // Sim.periodic_writer_ipc.WriteLine(" Proc " + pid + " Cycles " + cycles + " Instructions " + ((ulong)prev_dump * (ulong)Config.periodicDumpWindow) + " " + (double)(inst_cnt - prev_inst_cnt) / (double)(cycles - prev_cycle));
        prev_inst_cnt = inst_cnt;
        prev_cycle = cycles;
        Sim.periodic_writer_ipc.Flush();
    }

    // When memory is not modeled, drain the fixed-latency memory queue here.
    if (!Config.model_memory) {
        service_mem_queue();
    }
    service_cache_queue();

    // Per-quantum (1M retired instructions) read/write request statistics.
    if (inst_cnt != 0 && inst_cnt % 1000000 == 0) {
        ulong quantum = inst_cnt / 1000000;
        if (quantum > curr_quantum) {
            curr_quantum = quantum;
            ulong read_req = Stat.procs[pid].read_req.Count;
            Stat.procs[pid].read_quantum.EndQuantum(read_req - prev_read_req);
            prev_read_req = read_req;
            ulong write_req = Stat.procs[pid].write_req.Count;
            Stat.procs[pid].write_quantum.EndQuantum(write_req - prev_write_req);
            prev_write_req = write_req;
        }
    }

    /*** Retire ***/
    int retired = inst_wnd.retire(Config.proc.ipc);
    Stat.procs[pid].ipc.Collect(retired);

    // PC-criticality tracking: while the instruction window is full, charge
    // stall cycles to (a hash of) the PC of the oldest instruction; once the
    // window drains, fold the running count into the criticality table
    // (max-of-episodes or running sum, depending on config).
    if (Config.pc_trace && inst_wnd.is_full()) {
        just_set_full = true;
        full_address = (inst_wnd.pc_oldest() >> 32) & (ulong)1023;
        criticality_running_table[full_address]++;
    } else {
        if (just_set_full) {
            set_full = false;
        }
        if (set_full == false) {
            set_full = true;
            just_set_full = false;
            if (Config.sched.max_stall_crit) {
                // Keep only the largest single stall episode per PC.
                // NOTE(review): when the running count does NOT exceed the
                // stored max it is not reset here -- confirm that carrying it
                // into the next episode is intended.
                if (criticality_running_table[full_address] > criticality_table[full_address]) {
                    criticality_table[full_address] = criticality_running_table[full_address];
                    criticality_running_table[full_address] = 0;
                }
            } else {
                // Accumulate total stall cycles per PC.
                criticality_table[full_address] += criticality_running_table[full_address];
                criticality_running_table[full_address] = 0;
            }
        }
    }

    if (is_req_outstanding()) {
        Stat.procs[pid].memory_cycle.Collect();
    }

    /*** Issue writeback request ***/
    if (Config.proc.wb && wb_q.Count > 0) {
        bool wb_ok = issue_wb_req(wb_q[0]);
        if (wb_ok) {
            wb_q.RemoveAt(0);
        }
        //writeback stall: stop issuing anything while the WB queue is over limit
        bool stalled_wb = wb_q.Count > Config.proc.wb_q_max;
        if (stalled_wb) {
            return;
        }
    }

    /*** Reissue previous read request ***/
    bool issued_rd_req = false;
    if (mshr_retry || mctrl_retry) {
        Dbg.Assert(curr_rd_req != null && curr_cpu_inst_cnt == 0);
        //mshr/mctrl stall: retry the blocked read; bail if still blocked
        bool reissue_ok = reissue_rd_req();
        if (!reissue_ok) {
            return;
        }
        //reissue success
        Dbg.Assert(!mshr_retry && !mctrl_retry);
        issued_rd_req = true;
        curr_rd_req = get_req();
    }

    /*** Issue instructions ***/
    Dbg.Assert(curr_rd_req != null);
    issue_insts(issued_rd_req);
}
/// <summary>
/// Advances the DRAM cache by one cycle: ticks every owned memory
/// controller, drains serviced migration requests into those controllers,
/// forwards pending writebacks to the main controllers, and (under the
/// "All" insertion policy) runs the migration engine.
/// </summary>
public override void tick()
{
    cycles++;

    // Tick every channel of every second-tier memory controller.
    for (int ctrl = 0; ctrl < Config.mem2.mctrl_num; ctrl++) {
        for (int chan = 0; chan < Config.mem2.channel_max; chan++) {
            mctrls2[ctrl][chan].tick();
        }
    }

    // Drain migration requests whose cache latency has elapsed, oldest
    // first; stop as soon as the destination controller queue is full.
    while (reqs.Count > 0) {
        Req head = reqs.Peek();
        Dbg.Assert(head != null);
        if (cycles - head.ts_arrival < latency) {
            break;      // head not yet serviced by the cache
        }
        if (mctrls2[Sim.get_mctrl(head.pid)][head.addr.cid].is_q_full(head.pid, head.type, head.addr.rid, head.addr.bid)) {
            break;      // controller queue full; retry next cycle
        }
        Req ready = reqs.Dequeue();
        Dbg.Assert(ready != null);
        // Reads always go to the controller; writes only under a write-back
        // policy (under write-through they are dropped here).
        if (ready.type == ReqType.RD || Config.proc.cache_write_policy == "WriteBack") {
            insert_mctrl(ready);
        }
    }

    // Forward pending writebacks to the main (first-tier) controllers,
    // again stopping when a destination queue is full.
    while (wbs.Count > 0) {
        Req wb = wbs.Peek();
        Dbg.Assert(wb != null);
        MemAddr a = wb.addr;
        var dest = Sim.mctrls[Sim.get_mctrl(wb.pid)][a.cid];
        if (dest.is_q_full(wb.pid, wb.type, a.rid, a.bid)) {
            break;
        }
        wbs.Dequeue();
        dest.enqueue_req(wb);
        Stat.procs[wb.pid].cache_wb_req.Collect();
    }

    // The migration engine runs every cycle under the "All" policy.
    if (Config.proc.cache_insertion_policy == "All") {
        Migration.tick();
    }
}
/// <summary>
/// Issues up to <c>Config.proc.ipc</c> instructions this cycle. CPU (non-
/// memory) instructions issue freely; at most one memory instruction may be
/// issued per cycle. Memory instructions are filtered through duplicate
/// detection, the L1 cache, and the L2 cache before being sent to the
/// MSHR/memory controller (or the simple memory queue when memory is not
/// modeled).
/// </summary>
/// <param name="issued_rd_req">True if a read request was already
/// (re)issued earlier this cycle; enforces the one-memory-issue rule.</param>
public void issue_insts(bool issued_rd_req)
{
    for (int i = 0; i < Config.proc.ipc; i++) {
        if (inst_wnd.is_full()) {
            // Count the stall only once per cycle.
            if (i == 0) {
                Stat.procs[pid].stall_inst_wnd.Collect();
            }
            return;
        }

        // CPU (non-memory) instructions issue without restriction.
        if (curr_cpu_inst_cnt > 0) {
            curr_cpu_inst_cnt--;
            inst_wnd.add(0, false, true, 0);
            continue;
        }

        // Only one memory instruction can be issued per cycle.
        if (issued_rd_req) {
            return;
        }

        // False miss: an in-flight instruction already covers this block;
        // recycle the request and move on.
        bool false_miss = inst_wnd.is_duplicate(curr_rd_req.block_addr);
        if (false_miss) {
            Dbg.Assert(curr_rd_req.wb_req == null);
            RequestPool.enpool(curr_rd_req);
            curr_rd_req = get_req();
            continue;
        }

        if (!Config.is_cache_filtered) {
            // L1 lookup.
            bool is_in_l1_cache = l1_cache.has_addr(curr_rd_req.block_addr, curr_rd_req.proc_req_type);
            if (is_in_l1_cache) {
                Stat.procs[pid].l1_cache_hit_count.Collect();
                // BUGFIX: record the HITTING instruction in the window
                // before recycling its request. The original enpooled the
                // request and fetched the next one first, so the freshly
                // fetched request's address/pc were added instead of the
                // hit's.
                inst_wnd.add(curr_rd_req.block_addr, true, true, curr_rd_req.pc);
                RequestPool.enpool(curr_rd_req);
                curr_rd_req = get_req();
                continue;
            }
            Stat.procs[pid].l1_cache_miss_count.Collect();
        }

        is_in_cache = false;
        is_cache_hit = false;
        if (!Config.is_cache_filtered) {
            is_in_cache = cache.has_addr(curr_rd_req.block_addr, curr_rd_req.proc_req_type);
            // L2 hit: service the request through the cache queue.
            if (is_in_cache) {
                Stat.procs[pid].l2_cache_hit_count.Collect();
                add_to_cache_queue(curr_rd_req);
                curr_rd_req = get_req();
                continue;
            }
        }

        // L2 miss: the instruction enters the window as an outstanding miss.
        // NOTE(review): this add() overload takes one more flag than the hit
        // paths above -- confirm its meaning against the instruction-window
        // implementation.
        inst_wnd.add(curr_rd_req.block_addr, true, false, false, curr_rd_req.pc);

        if (Config.model_memory) {
            // Try the MSHR; on failure, retry next cycle.
            bool mshr_ok = insert_mshr(curr_rd_req);
            if (!mshr_ok) {
                mshr_retry = true;
                return;
            }
            // Try the memory controller; on failure, retry next cycle.
            bool mctrl_ok = insert_mctrl(curr_rd_req);
            if (!mctrl_ok) {
                mctrl_retry = true;
                return;
            }
        } else {
            add_to_mem_queue(curr_rd_req);
        }
        Stat.procs[pid].l2_cache_miss_count.Collect();
        misses++;

        // A memory request was issued; later iterations this cycle must not
        // issue another one.
        issued_rd_req = true;

        // Fetch the next read request to consider.
        curr_rd_req = get_req();
    }
}