/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * RANK_FR_FCFS policy: per ready bank, select the best request, then
 * arbitrate among the per-bank winners with the same comparator.
 * Returns null when no ready bank has a pending request.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest next_req = null;

    //search in global/local banks: a shared MC owns a contiguous slice of
    //the global bank array, otherwise start at bank 0.
    int bank_index;
    if (Config.memory.is_shared_MC)
        bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
    else
        bank_index = 0;

    //search for next request
    for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        MemoryRequest cur_req = buf[b, 0];
        int cur_proc = cur_req.request.requesterID;
        for (int j = 1; j < buf_load[b]; j++)
        {
            if (!helper.is_RANK_FR_FCFS(cur_req, buf[b, j],
                                        service_rank[cur_proc],
                                        service_rank[buf[b, j].request.requesterID]))
            {
                cur_req = buf[b, j];
                // BUGFIX: refresh the cached requester id; previously the
                // rank of buf[b,0]'s requester was reused for all later
                // comparisons even after cur_req changed.
                cur_proc = cur_req.request.requesterID;
            }
        }

        // Compare the per-bank winner against the overall winner.
        if (next_req == null ||
            !helper.is_RANK_FR_FCFS(next_req, cur_req,
                                    service_rank[next_req.request.requesterID],
                                    service_rank[cur_req.request.requesterID]))
        {
            next_req = cur_req;
        }
    }
    return next_req;
}
/**
 * Constructor. Assigns this bank the next globally unique id and resets
 * all per-bank state to the closed-row initial condition.
 */
public Bank(MemSched sched, MemCtlr MC)
{
    // bank_max doubles as a running counter of constructed banks.
    bank_id = bank_max++;
    Console.WriteLine(string.Format("bank\t{0}\t{1}", bank_id, MC.mem_id));

    // Back-references to the scheduler and owning memory controller.
    this.sched = sched;
    this.MC = MC;

    // Per-bank statistics and outstanding-request accounting.
    stat = new BankStat(this);
    outstandingReqs_perapp = new ulong[Config.N];
    outstandingReqs = 0;

    // Initial DRAM bank state: no row open, nothing marked, no wait.
    state = RowState.Closed;
    cur_row = ulong.MaxValue;
    is_cur_marked = false;
    wait_left = 0;

    lastOpen.Clear();
}
/**
 * Node constructor: instantiates the CPU and/or memory controller mapped
 * to this coordinate, then wires up the injection pool, queues and the
 * naive receive buffer.
 */
public Node(NodeMapping mapping, Coord c)
{
    m_coord = c;
    m_mapping = mapping;

    if (mapping.hasCPU(c.ID))
        m_cpu = new CPU(this);

    if (mapping.hasMem(c.ID))
    {
        Console.WriteLine("Proc/Node.cs : MC locations:{0}", c.ID);
        m_mem = new MemCtlr(this);
    }

    // Register this node's priority packet pool with the controller.
    m_inj_pool = Simulator.controller.newPrioPktPool(m_coord.ID);
    Simulator.controller.setInjPool(m_coord.ID, m_inj_pool);

    m_injQueue_flit = new Queue<Flit>();
    m_injQueue_evict = new Queue<Flit>();
    m_local = new Queue<Packet>();

    // Evicted flits are queued for re-injection; complete packets are
    // delivered to this node.
    m_rxbuf_naive = new RxBufNaive(this,
        f => m_injQueue_evict.Enqueue(f),
        p => receivePacket(p));
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * Plain FCFS: returns the oldest request among all ready banks of this
 * controller, or null if none is pending.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    // First bank owned by this memory controller.
    int firstBank = Config.memory.is_shared_MC
        ? mem.mem_id * Config.memory.bank_max_per_mem
        : 0;

    MemoryRequest oldest = null;
    for (int b = firstBank; b < firstBank + mem.bank_max; b++) // for each bank
    {
        if (bank[b].is_ready() && buf_load[b] > 0)
        {
            if (oldest == null)
                oldest = buf[b, 0];

            for (int j = 0; j < buf_load[b]; j++)
            {
                if (!helper.is_FCFS(oldest, buf[b, j]))
                    oldest = buf[b, j];
            }
        }
    }
    return oldest;
}
/**
 * FR-FCFS within each ready bank, then plain FCFS to arbitrate among the
 * per-bank winners. Returns null if no ready bank has a pending request.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    int firstBank = Config.memory.is_shared_MC
        ? mem.mem_id * Config.memory.bank_max_per_mem
        : 0;

    MemoryRequest winner = null;
    for (int b = firstBank; b < firstBank + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        // FR-FCFS winner within this bank.
        MemoryRequest bankWinner = buf[b, 0];
        for (int j = 1; j < buf_load[b]; j++)
        {
            if (!helper.is_FR_FCFS(bankWinner, buf[b, j]))
                bankWinner = buf[b, j];
        }

        // Oldest-first across banks.
        if (winner == null || !helper.is_FCFS(winner, bankWinner))
            winner = bankWinner;
    }
    return winner;
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * FCFS scheduler that is writeback-aware: while the writeback buffer
 * fraction is at or below wb_full_ratio the NONWB comparator is used,
 * otherwise plain FCFS.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    // Schedule writebacks only once the writeback buffer is full enough.
    bool scheduleWB = this.get_wb_fraction() > Config.memory.wb_full_ratio;

    int firstBank = Config.memory.is_shared_MC
        ? mem.mem_id * Config.memory.bank_max_per_mem
        : 0;

    MemoryRequest best = null;
    for (int b = firstBank; b < firstBank + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        if (best == null)
            best = buf[b, 0];

        for (int j = 0; j < buf_load[b]; j++)
        {
            MemoryRequest cand = buf[b, j];
            bool candWins = scheduleWB
                ? !helper.is_FCFS(best, cand)
                : !helper.is_NONWB_FCFS(best, cand);
            if (candWins)
                best = cand;
        }
    }
    return best;
}
/**
 * Builds a node at coordinate c: optional CPU and memory controller per
 * the mapping, plus injection pool/queues and the receive buffer.
 */
public Node(NodeMapping mapping, Coord c)
{
    m_coord = c;
    m_mapping = mapping;

    // Instantiate only the components mapped to this coordinate.
    if (mapping.hasCPU(c.ID))
        m_cpu = new CPU(this);
    if (mapping.hasMem(c.ID))
    {
        Console.WriteLine("Proc/Node.cs : MC locations:{0}", c.ID);
        m_mem = new MemCtlr(this);
    }

    m_inj_pool = Simulator.controller.newPrioPktPool(m_coord.ID);
    Simulator.controller.setInjPool(m_coord.ID, m_inj_pool);

    m_injQueue_flit  = new Queue<Flit>();
    m_injQueue_evict = new Queue<Flit>();
    m_local          = new Queue<Packet>();

    // Evicted flits go back into the eviction injection queue; whole
    // packets are handed to receivePacket.
    m_rxbuf_naive = new RxBufNaive(this,
        f => m_injQueue_evict.Enqueue(f),
        p => receivePacket(p));
}
/**
 * Writeback-aware scheduler with priority-inversion capping: a bank whose
 * timer has not been reset for at least prio_inv_thresh cycles ("cap")
 * falls back from row-hit-first (FR-FCFS) to oldest-first (FCFS) ordering
 * within the bank. Cross-bank arbitration is always FCFS. The bank timer
 * is reset whenever the chosen request targets a different row than the
 * bank's currently open row.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest nextRequest = null;
    // Schedule writebacks only when the writeback buffer is full enough.
    bool ScheduleWB = (this.get_wb_fraction() > Config.memory.wb_full_ratio);

    // Get the highest priority request.
    int bank_index;
    if (Config.memory.is_shared_MC)
    {
        bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
    }
    else
    {
        bank_index = 0;
    }
    for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
    {
        // "cap": this bank's timer is at least prio_inv_thresh cycles old,
        // so row hits no longer get priority in this bank.
        bool cap = ((MemCtlr.cycle - bankTimers[b]) >= Config.memory.prio_inv_thresh);
        // Sanity check: with a zero threshold the cap must always hold.
        if (Config.memory.prio_inv_thresh == 0)
        {
            if (cap == false)
            {
                Environment.Exit(1);
            }
        }
        if (bank[b].is_ready() && buf_load[b] > 0)
        {
            // Find the highest priority request for this bank
            MemoryRequest nextBankRequest = buf[b, 0];
            for (int j = 1; j < buf_load[b]; j++)
            {
                if (cap)
                {
                    // Capped: oldest-first (FCFS), writeback-aware.
                    if (ScheduleWB && !helper.is_FCFS(nextBankRequest, buf[b, j])
                        || (!ScheduleWB && !helper.is_NONWB_FCFS(nextBankRequest, buf[b, j])))
                    {
                        nextBankRequest = buf[b, j];
                    }
                }
                else
                {
                    // Not capped: row hits first (FR-FCFS), writeback-aware.
                    if (ScheduleWB && !helper.is_FR_FCFS(nextBankRequest, buf[b, j])
                        || (!ScheduleWB && !helper.is_NONWB_FR_FCFS(nextBankRequest, buf[b, j])))
                    {
                        nextBankRequest = buf[b, j];
                    }
                }
            }
            // Compare between highest priority between different banks
            if (nextRequest == null || !helper.is_FCFS(nextRequest, nextBankRequest))
            {
                nextRequest = nextBankRequest;
            }
        }
    }
    if (nextRequest != null)
    {
        // Update the bank timers if the row has changed!
        if (bank[nextRequest.glob_b_index].get_cur_row() != nextRequest.r_index)
            bankTimers[nextRequest.glob_b_index] = MemCtlr.cycle;
    }
    return nextRequest;
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * Selects a request by a composite priority and then performs per-thread
 * bookkeeping (bank-rank consumption and utility counters) for the winner.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest next_req = null;

    //search in global/local banks
    int bank_index;
    // Bank index the winning request came from; needed for the per-bank
    // bookkeeping after selection.
    int next_req_bank = -1;
    if (Config.memory.is_shared_MC)
    {
        bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
    }
    else
    {
        bank_index = 0;
    }

    //Select a memory request according to priorities:
    //Order: Ready > Timeout > priority > rowHit > rank > totAU > BankRank > time
    for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
    {
        //Rule 1: Ready
        if (!bank[b].is_ready() || buf_load[b] == 0)
        {
            continue;
        }
        for (int j = 0; j < buf_load[b]; j++)
        {
            //Mark, priority, row hit, rank, totAU, bank rank, bank rank per thread, FCFS
            if (next_req == null)
            {
                next_req = buf[b, j];
                next_req_bank = b;
                continue;
            }
            if (!helper.is_MARK_PRIO_FR_RANK_TOTAU_BR_BRT_FCFS(next_req, buf[b, j],
                    rank[next_req.memoryRequesterID], rank[buf[b, j].memoryRequesterID],
                    priority[getGroup(next_req.memoryRequesterID)], priority[getGroup(buf[b, j].memoryRequesterID)],
                    bankRank[next_req.memoryRequesterID], bankRank[buf[b, j].memoryRequesterID],
                    bankRankPerThread[next_req.glob_b_index, next_req.memoryRequesterID], bankRankPerThread[b, buf[b, j].memoryRequesterID],
                    totAU[next_req.memoryRequesterID], totAU[buf[b, j].memoryRequesterID]))
            {
                next_req = buf[b, j];
                next_req_bank = b;
            }
        }
    }

    //Note: All requests (in this simulator) are either load, store, or writeback
    if (next_req != null)
    {
        // Bookkeeping for the winner: decrement its thread's bank-rank
        // counters (global and for the winning bank) and bump its
        // utility counters.
        bankRank[next_req.memoryRequesterID]--;
        bankRankPerThread[next_req_bank, next_req.memoryRequesterID]--;
        utility[next_req.memoryRequesterID]++;
        utilityMinFrame[next_req.memoryRequesterID]++;
    }
    return(next_req);
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * MARK_RANK_FR_FCFS selection per ready bank, then the same comparator
 * across banks. Periodically prints the thread ranking for controller 0.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest next_req = null;

    //search in global/local banks
    int bank_index;
    if (Config.memory.is_shared_MC)
    {
        bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
    }
    else
    {
        bank_index = 0;
    }

    // Debug dump of the current service ranking (controller 0 only,
    // every 10000 cycles).
    if (mem.mem_id == 0 && MemCtlr.cycle % 10000 == 0)
    {
        Console.Write("Ranking: ");
        for (int p = 0; p < Config.N; p++)
        {
            Console.Write(service_rank[p].ToString() + '\t');
        }
        Console.WriteLine("");
    }

    //search for next request
    for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
        {
            continue;
        }

        MemoryRequest cur_req = buf[b, 0];
        int cur_proc = cur_req.request.requesterID;
        for (int j = 1; j < buf_load[b]; j++)
        {
            if (!helper.is_MARK_RANK_FR_FCFS(cur_req, buf[b, j],
                                             service_rank[cur_proc],
                                             service_rank[buf[b, j].request.requesterID]))
            {
                cur_req = buf[b, j];
                // BUGFIX: refresh the cached requester id so later
                // comparisons use the current front-runner's rank rather
                // than buf[b,0]'s.
                cur_proc = cur_req.request.requesterID;
            }
        }

        if (next_req == null ||
            !helper.is_MARK_RANK_FR_FCFS(next_req, cur_req,
                                         service_rank[next_req.request.requesterID],
                                         service_rank[cur_req.request.requesterID]))
        {
            next_req = cur_req;
        }
    }
    return(next_req);
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * RANK_FCFS selection per ready bank and across banks; records the
 * service time of the winning request's thread.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest next_req = null;

    //search in global/local banks
    int bank_index;
    if (Config.memory.is_shared_MC)
    {
        bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
    }
    else
    {
        bank_index = 0;
    }

    //search for next request
    for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
        {
            continue;
        }

        MemoryRequest cur_req = buf[b, 0];
        int cur_proc = cur_req.request.requesterID;
        for (int j = 1; j < buf_load[b]; j++)
        {
            if (!helper.is_RANK_FCFS(cur_req, buf[b, j],
                                     service_rank[cur_proc],
                                     service_rank[buf[b, j].request.requesterID]))
            {
                cur_req = buf[b, j];
                // BUGFIX: keep the cached requester id in sync with the new
                // front-runner; previously buf[b,0]'s requester rank was
                // used for every later comparison.
                cur_proc = cur_req.request.requesterID;
            }
        }

        if (next_req == null ||
            !helper.is_RANK_FCFS(next_req, cur_req,
                                 service_rank[next_req.request.requesterID],
                                 service_rank[cur_req.request.requesterID]))
        {
            next_req = cur_req;
        }
    }

    // Record when the winning thread was last serviced.
    if (next_req != null)
    {
        last_service_time[next_req.request.requesterID] = Simulator.CurrentRound;
    }
    return(next_req);
}
/**
 * Writeback-aware FR-FCFS: FR-FCFS winner within each ready bank, then
 * oldest-first across banks; NONWB comparators deprioritize writebacks
 * while the writeback buffer is below wb_full_ratio.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    bool drainWB = this.get_wb_fraction() > Config.memory.wb_full_ratio;

    int lo = Config.memory.is_shared_MC
        ? mem.mem_id * Config.memory.bank_max_per_mem
        : 0;

    MemoryRequest best = null;
    for (int b = lo; b < lo + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        // Per-bank FR-FCFS winner.
        MemoryRequest bankBest = buf[b, 0];
        for (int j = 1; j < buf_load[b]; j++)
        {
            bool replace = drainWB
                ? !helper.is_FR_FCFS(bankBest, buf[b, j])
                : !helper.is_NONWB_FR_FCFS(bankBest, buf[b, j]);
            if (replace)
                bankBest = buf[b, j];
        }

        // Cross-bank arbitration: oldest first.
        if (best == null)
        {
            best = bankBest;
        }
        else
        {
            bool replace = drainWB
                ? !helper.is_FCFS(best, bankBest)
                : !helper.is_NONWB_FCFS(best, bankBest);
            if (replace)
                best = bankBest;
        }
    }
    return(best);
}
/**
 * MARK_PRIO_FR_RANK_FCFS scheduler: compares candidates across all ready
 * banks of this controller using thread priority and rank tables.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    int lo = Config.memory.is_shared_MC
        ? mem.mem_id * Config.memory.bank_max_per_mem
        : 0;

    MemoryRequest best = null;
    for (int b = lo; b < lo + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        if (best == null)
            best = buf[b, 0];

        for (int j = 0; j < buf_load[b]; j++)
        {
            MemoryRequest cand = buf[b, j];
            if (!helper.is_MARK_PRIO_FR_RANK_FCFS(best, cand,
                                                  threadPriority[best.request.requesterID],
                                                  threadPriority[cand.request.requesterID],
                                                  Rank[best.request.requesterID],
                                                  Rank[cand.request.requesterID]))
            {
                best = cand;
            }
        }
    }
    return(best);
}
/**
 * FR-FCFS per bank, FCFS across banks: each ready bank nominates its
 * row-hit-first winner and the oldest nominee is returned.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    int lo = Config.memory.is_shared_MC
        ? mem.mem_id * Config.memory.bank_max_per_mem
        : 0;

    MemoryRequest overall = null;
    for (int b = lo; b < lo + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        // This bank's FR-FCFS winner.
        MemoryRequest local = buf[b, 0];
        for (int j = 1; j < buf_load[b]; j++)
        {
            if (!helper.is_FR_FCFS(local, buf[b, j]))
                local = buf[b, j];
        }

        // Oldest winner across banks.
        if (overall == null || !helper.is_FCFS(overall, local))
            overall = local;
    }
    return(overall);
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * Plain FCFS over every ready bank of this controller; null when nothing
 * is pending.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    int lo = Config.memory.is_shared_MC
        ? mem.mem_id * Config.memory.bank_max_per_mem
        : 0;

    MemoryRequest oldest = null;
    for (int b = lo; b < lo + mem.bank_max; b++) // for each bank
    {
        if (bank[b].is_ready() && buf_load[b] > 0)
        {
            if (oldest == null)
                oldest = buf[b, 0];

            for (int j = 0; j < buf_load[b]; j++)
            {
                if (!helper.is_FCFS(oldest, buf[b, j]))
                    oldest = buf[b, j];
            }
        }
    }
    return(oldest);
}
/**
 * This method returns the next request to be scheduled (sent over the bus),
 * or null if no request can be scheduled this cycle.
 * Each memory request scheduler needs to implement its own selection
 * policy by overriding this function.
 *
 * @param mem the memory controller requesting work
 */
abstract public MemoryRequest get_next_req(MemCtlr mem);
/**
 * Batch scheduler: picks the winner under the configured within-batch
 * policy, then — when batching is active and the winner is unmarked —
 * may withhold it based on the current round's completion time.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest next_req = null;

    // Get the highest priority request.
    int bindex_low;
    if (Config.memory.is_shared_MC)
        bindex_low = mem.mem_id * Config.memory.bank_max_per_mem;
    else
        bindex_low = 0;

    for (int b = bindex_low; b < bindex_low + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        if (next_req == null)
            next_req = buf[b, 0];

        for (int j = 0; j < buf_load[b]; j++)
        {
            MemoryRequest req = buf[b, j];
            // Dispatch on the configured within-batch priority policy.
            switch (Config.memory.batch_sched_algo)
            {
                case BatchSchedAlgo.MARKED_FR_RANK_FCFS:
                    if (!helper.is_MARK_FR_RANK_FCFS(next_req, req,
                                                     proc_to_rank[next_req.request.requesterID],
                                                     proc_to_rank[req.request.requesterID]))
                        next_req = req;
                    break;
                case BatchSchedAlgo.MARKED_RANK_FR_FCFS:
                    if (!helper.is_MARK_RANK_FR_FCFS(next_req, req,
                                                     proc_to_rank[next_req.request.requesterID],
                                                     proc_to_rank[req.request.requesterID]))
                        next_req = req;
                    break;
                case BatchSchedAlgo.MARKED_RANK_FCFS:
                    if (!helper.is_MARK_RANK_FCFS(next_req, req,
                                                  proc_to_rank[next_req.request.requesterID],
                                                  proc_to_rank[req.request.requesterID]))
                        next_req = req;
                    break;
                case BatchSchedAlgo.MARKED_FR_FCFS:
                    if (!helper.is_MARK_FR_FCFS(next_req, req))
                        next_req = req;
                    break;
                case BatchSchedAlgo.MARKED_FCFS:
                    if (!helper.is_MARK_FCFS(next_req, req))
                        next_req = req;
                    break;
                default:
                    Debug.Assert(false);
                    break;
            }//switch
        }//for over buffer
    }//for over banks

    //Debug.Assert(next_req == null);
    if (next_req == null)
        return null;

    // No batching, or the winner is marked: no gating needed.
    if (is_batch == false)
        return next_req;
    if (next_req.isMarked)
        return next_req;

    //which round are we in (within a batch)?
    int cur_proc = rank_to_proc[cur_rank];
    int cur_ct = bhelper.completion_time[cur_proc];
    if ((MemCtlr.cycle - batch_begin_time) >= (ulong)cur_ct)
        cur_rank--;

    //test for completion time: withhold an unmarked request that would
    //exceed the current completion time while still in the top round.
    int test_ct = bhelper.completion_time[next_req.request.requesterID];
    if (test_ct > cur_ct && cur_rank == Config.N - 1)
        return null;

    return next_req;
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * MARK_RANK_FR_FCFS selection with a row-hit override: if the overall
 * winner is not a row hit, a row-hit request from a low-ranked thread
 * (rank >= N/2) is scheduled instead.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest next_req = null;
    MemoryRequest fr_req = null; // best (highest service_rank) row-hit request seen

    //search in global/local banks
    int bank_index;
    if (Config.memory.is_shared_MC)
    {
        bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
    }
    else
    {
        bank_index = 0;
    }

    //search for next request
    for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
        {
            continue;
        }

        MemoryRequest buf_req = buf[b, 0];
        int buf_proc = buf_req.request.requesterID;
        for (int j = 1; j < buf_load[b]; j++)
        {
            MemoryRequest cur_req = buf[b, j];
            if (!helper.is_MARK_RANK_FR_FCFS(buf_req, cur_req,
                                             service_rank[buf_proc],
                                             service_rank[cur_req.request.requesterID]))
            {
                buf_req = cur_req;
                // BUGFIX: track the current winner's requester; previously
                // buf[b,0]'s rank was reused for all later comparisons.
                buf_proc = buf_req.request.requesterID;
            }

            //row-hit (fr)
            // NOTE(review): buf[b,0] itself is never considered as a
            // row-hit candidate here (loop starts at j=1) — TODO confirm.
            if (cur_req.r_index != bank[b].get_cur_row())
            {
                continue;
            }
            // BUGFIX: was `fr_req != null && ...`, which could never assign
            // fr_req (it starts null) and left the row-hit override dead.
            if (fr_req == null ||
                service_rank[fr_req.request.requesterID] < service_rank[cur_req.request.requesterID])
            {
                fr_req = cur_req;
            }
        }

        if (next_req == null ||
            !helper.is_MARK_RANK_FR_FCFS(next_req, buf_req,
                                         service_rank[next_req.request.requesterID],
                                         service_rank[buf_req.request.requesterID]))
        {
            next_req = buf_req;
        }
    }

    //row-hit; return it: take the overall winner if it is itself a row hit
    //or there is no row-hit candidate at all.
    if (fr_req == null || next_req.r_index == bank[next_req.glob_b_index].get_cur_row())
    {
        return(next_req);
    }

    //no row-hit; see if we can do better: prefer the row-hit request, but
    //only when it comes from a low-ranked (rank >= N/2) thread.
    if (service_rank[fr_req.request.requesterID] >= Config.N / 2)
    {
        return(fr_req);
    }
    else
    {
        return(next_req);
    }
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * MARK_RANK_FR_FCFS selection per ready bank and across banks, with a
 * periodic ranking dump for controller 0.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest next_req = null;

    //search in global/local banks
    int bank_index;
    if (Config.memory.is_shared_MC)
    {
        bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
    }
    else
    {
        bank_index = 0;
    }

    // Debug dump of the current service ranking (controller 0 only,
    // every 10000 cycles).
    if (mem.mem_id == 0 && MemCtlr.cycle % 10000 == 0)
    {
        Console.Write("Ranking: ");
        for (int p = 0; p < Config.N; p++)
            Console.Write(service_rank[p].ToString() + '\t');
        Console.WriteLine("");
    }

    //search for next request
    for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        MemoryRequest cur_req = buf[b, 0];
        int cur_proc = cur_req.request.requesterID;
        for (int j = 1; j < buf_load[b]; j++)
        {
            if (!helper.is_MARK_RANK_FR_FCFS(cur_req, buf[b, j],
                                             service_rank[cur_proc],
                                             service_rank[buf[b, j].request.requesterID]))
            {
                cur_req = buf[b, j];
                // BUGFIX: refresh the cached requester id so subsequent
                // comparisons use the current front-runner's rank instead
                // of buf[b,0]'s.
                cur_proc = cur_req.request.requesterID;
            }
        }

        if (next_req == null ||
            !helper.is_MARK_RANK_FR_FCFS(next_req, cur_req,
                                         service_rank[next_req.request.requesterID],
                                         service_rank[cur_req.request.requesterID]))
        {
            next_req = cur_req;
        }
    }
    return next_req;
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * Returns the BATCH-priority winner over all ready banks of this
 * controller, or null when nothing is pending.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    int lo = Config.memory.is_shared_MC
        ? mem.mem_id * Config.memory.bank_max_per_mem
        : 0;

    MemoryRequest winner = null;
    for (int b = lo; b < lo + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        if (winner == null)
            winner = buf[b, 0];

        for (int j = 0; j < buf_load[b]; j++)
        {
            if (!helper.is_BATCH(winner, buf[b, j]))
                winner = buf[b, j];
        }
    }
    return winner;
}
/**
 * Batch scheduler using per-(thread, bank) ranks: each candidate is
 * compared under the configured within-batch policy; an unknown policy
 * raises an exception.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    int lo = Config.memory.is_shared_MC
        ? mem.mem_id * Config.memory.bank_max_per_mem
        : 0;

    MemoryRequest best = null;
    for (int b = lo; b < lo + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        if (best == null)
            best = buf[b, 0];

        for (int j = 0; j < buf_load[b]; j++)
        {
            MemoryRequest cand = buf[b, j];
            bool candWins;
            switch (Config.memory.batch_sched_algo)
            {
                case BatchSchedAlgo.MARKED_FR_RANK_FCFS:
                    // Ranks are kept per (thread, bank).
                    candWins = !helper.is_MARK_FR_RANK_FCFS(best, cand,
                        perBankRank[best.request.requesterID, b],
                        perBankRank[cand.request.requesterID, b]);
                    break;
                case BatchSchedAlgo.MARKED_RANK_FR_FCFS:
                    candWins = !helper.is_MARK_RANK_FR_FCFS(best, cand,
                        perBankRank[best.request.requesterID, b],
                        perBankRank[cand.request.requesterID, b]);
                    break;
                case BatchSchedAlgo.MARKED_FR_FCFS:
                    candWins = !helper.is_MARK_FR_FCFS(best, cand);
                    break;
                case BatchSchedAlgo.MARKED_FCFS:
                    candWins = !helper.is_MARK_FCFS(best, cand);
                    break;
                default:
                    throw new Exception("Unknown WithinBatchPriority");
            }
            if (candWins)
                best = cand;
        }
    }
    return best;
}
/**
 * In the Nesbit scheme, this function has to do three things:
 * 1) Get the next request to be scheduled
 * 2) If there is a next request, update the data structures
 * 3) Return the next request
 * In parallel it tracks, for statistics only, what the FVD comparator
 * alone would have picked, to count wins/losses due to row hits.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    // Schedule writebacks only when the writeback buffer is full enough.
    bool ScheduleWB = (this.get_wb_fraction() > Config.memory.wb_full_ratio);
    MemoryRequest nextRequest = null;
    MemoryRequest statsNextRequest = null; // for statistics, also keep the VFT highest request
    ulong nextVFT = 0;
    ulong statsNextVFT = 0; // for statistics

    // 1. Get the highest priority request, if there is one.
    int bank_index;
    if (Config.memory.is_shared_MC)
    {
        bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
    }
    else
    {
        bank_index = 0;
    }
    for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
    {
        if (bank[b].is_ready() && buf_load[b] > 0)
        {
            // check whether the timer for this bank has already exceeded
            bool cap = ((MemCtlr.cycle - bankTimers[b]) >= Config.memory.prio_inv_thresh);

            // Find the highest priority request for this bank
            MemoryRequest nextBankRequest = buf[b, 0];
            MemoryRequest statsNextBankRequest = buf[b, 0]; // for statistics
            ulong nextBankVFT = computeVirtualFinishTime(nextBankRequest);
            ulong statsNextBankVFT = nextBankVFT; // for statistics
            for (int j = 1; j < buf_load[b]; j++)
            {
                ulong curBankVFT = computeVirtualFinishTime(buf[b, j]);

                // statistics: what the FVD comparator alone would pick
                if (ScheduleWB && !higherFVDPriority(statsNextBankRequest, buf[b, j], statsNextBankVFT, curBankVFT)
                    || (!ScheduleWB && !higherFVDPriorityWB(statsNextBankRequest, buf[b, j], statsNextBankVFT, curBankVFT)))
                {
                    statsNextBankRequest = buf[b, j];
                    statsNextBankVFT = curBankVFT;
                }

                // for real!
                if (cap)
                {
                    // Bank timer expired: use the FVD comparator.
                    if (ScheduleWB && !higherFVDPriority(nextBankRequest, buf[b, j], nextBankVFT, curBankVFT)
                        || (!ScheduleWB && !higherFVDPriorityWB(nextBankRequest, buf[b, j], nextBankVFT, curBankVFT)))
                    {
                        nextBankRequest = buf[b, j];
                        nextBankVFT = curBankVFT;
                    }
                }
                else
                {
                    // Normal case: use the Nesbit comparator.
                    if (ScheduleWB && !higherNesbitPriority(nextBankRequest, buf[b, j], nextBankVFT, curBankVFT)
                        || (!ScheduleWB && !higherNesbitPriorityWB(nextBankRequest, buf[b, j], nextBankVFT, curBankVFT)))
                    {
                        nextBankRequest = buf[b, j];
                        nextBankVFT = curBankVFT;
                    }
                }
            }

            // TODO: Correct this here for stats
            if (statsNextRequest == null || !higherFVDPriority(statsNextRequest, statsNextBankRequest, statsNextVFT, statsNextBankVFT))
            {
                statsNextRequest = statsNextBankRequest;
                statsNextVFT = statsNextBankVFT;
            }

            // Compare between highest priority between different banks
            if (nextRequest == null
                || (ScheduleWB && !higherNesbitPriority(nextRequest, nextBankRequest, nextVFT, nextBankVFT))
                || (!ScheduleWB && !higherNesbitPriorityWB(nextRequest, nextBankRequest, nextVFT, nextBankVFT)))
            // if (nextRequest == null || !higherFCFSPriority(nextRequest, nextBankRequest))
            {
                nextRequest = nextBankRequest;
                nextVFT = nextBankVFT;
            }
        }
    }

    // 2. Update the data structures
    if (nextRequest != null)
    {
        // update statistics // TODO!!!
        totalScheduled[nextRequest.request.requesterID, nextRequest.glob_b_index]++;
        // If the real pick differs from the stats pick, credit/charge the
        // respective threads for a row-hit-induced win/loss.
        if (nextRequest.request.requesterID != statsNextRequest.request.requesterID)
        {
            wonDueToFR[nextRequest.request.requesterID, nextRequest.glob_b_index]++;
            lostDueToFR[statsNextRequest.request.requesterID, statsNextRequest.glob_b_index]++;
        }

        // update VTMSRegisters
        updateVMTSRegisters(nextRequest);

        // Update the bank timers if the row has changed!
        if (bank[nextRequest.glob_b_index].get_cur_row() != nextRequest.r_index)
            bankTimers[nextRequest.glob_b_index] = MemCtlr.cycle;
    }

    // delete VFT cache
    for (int i = 0; i < Config.N; i++)
        for (int j = 0; j < Config.memory.bank_max_per_mem; j++)
        {
            VFTHitCache[i, j] = 0;
            VFTMissCache[i, j] = 0;
        }

    // 3. Return the request.
    return nextRequest;
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * Nesbit scheme, three steps:
 * 1) pick the highest-priority request using virtual finish times,
 * 2) update the VTMS registers for the pick,
 * 3) return it.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    bool drainWB = this.get_wb_fraction() > Config.memory.wb_full_ratio;

    int lo = Config.memory.is_shared_MC
        ? mem.mem_id * Config.memory.bank_max_per_mem
        : 0;

    // 1. Highest-priority request across all ready banks.
    MemoryRequest chosen = null;
    ulong chosenVFT = 0;

    for (int b = lo; b < lo + mem.bank_max; b++) // for each bank
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        // Per-bank winner under the Nesbit comparator.
        MemoryRequest bankBest = buf[b, 0];
        ulong bankBestVFT = computeVirtualFinishTime(bankBest);
        for (int j = 1; j < buf_load[b]; j++)
        {
            ulong candVFT = computeVirtualFinishTime(buf[b, j]);
            bool replace = drainWB
                ? !higherNesbitPriority(bankBest, buf[b, j], bankBestVFT, candVFT)
                : !higherNesbitPriorityWB(bankBest, buf[b, j], bankBestVFT, candVFT);
            if (replace)
            {
                bankBest = buf[b, j];
                bankBestVFT = candVFT;
            }
        }

        // Cross-bank arbitration with the same comparator.
        bool take = chosen == null
            || (drainWB
                ? !higherNesbitPriority(chosen, bankBest, chosenVFT, bankBestVFT)
                : !higherNesbitPriorityWB(chosen, bankBest, chosenVFT, bankBestVFT));
        if (take)
        {
            chosen = bankBest;
            chosenVFT = bankBestVFT;
        }
    }

    // 2. Update the data structures.
    if (chosen != null)
        updateVMTSRegisters(chosen);

    // 3. Return the request.
    return chosen;
}
/**
 * Returns the highest priority request, but does not do any updates.
 *
 * Computes a slowdown estimate chi_i = stallShared_i / stallAlone_i per
 * thread; when the ratio of the largest to the smallest chi among
 * schedulable threads exceeds alpha, the fairness rule restricts
 * selection to the most-slowed-down thread (maxProc), otherwise a
 * writeback-aware FR-FCFS selection is used.
 */
protected override MemoryRequest selectHighestPriorityRequest(MemCtlr mem)
{
    MemoryRequest nextRequest = null;
    bool ScheduleWB = (this.get_wb_fraction() > Config.memory.wb_full_ratio);

    // Accumulate per-thread shared-mode stall deltas since the last call.
    for (int n = 0; n < Config.N; n++)
    {
        stallShared[n] += Simulator.network.nodes[n].cpu.get_stalledSharedDelta();
    }

    // Compute current values of chi_i and get largest and smallest!
    bool applyFairnessRule = false;
    double minchi = double.MaxValue;
    double maxchi = double.MinValue;
    int maxProc = -1;
    int minProc = -2;
    for (int i = 0; i < Config.N; i++)
    {
        // chi_i = stallShared_i / stallAlone_i, clamped to >= 1.
        stallAlone[i] = stallShared[i] - stallDelta[i];
        chi[i] = (double)stallShared[i] / (double)stallAlone[i];
        if (chi[i] < 1)
        {
            //throw new Exception("X is less than 1!");
            //Console.WriteLine("X is less than 1!!!!!!");
            chi[i] = 1;
        }

        // Priorities: optionally scale the excess slowdown by the
        // configured per-thread weight.
        if (Config.memory.use_weight > 0)
        {
            chi[i] = 1 + ((chi[i] - 1) * Config.memory.weight[i]);
        }

        // just checking whether currentLoadPerProc > 0 is not enough,
        // because all the banks may be busy!!!!
        bool considerProc = false;

        // Get the highest priority request.
        int bank_index;
        if (Config.memory.is_shared_MC)
        {
            bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
        }
        else
        {
            bank_index = 0;
        }
        for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
        {
            if (bank[b].is_ready() && cur_nonwb_per_procbank[i, b] > 0)
            {
                considerProc = true;
                break;
            }
        }

        // Track min/max chi (and all-time extremes) over schedulable threads.
        if (considerProc)
        {
            if (chi[i] < minchi)
            {
                minchi = chi[i];
                minProc = i;
                if (chi[i] < minChiEver)
                {
                    minChiEver = chi[i];
                    minChiEverProc = i;
                }
            }
            if (chi[i] > maxchi)
            {
                maxchi = chi[i];
                maxProc = i;
                if (chi[i] > maxChiEver)
                {
                    maxChiEver = chi[i];
                    maxChiEverProc = i;
                }
            }
        }
    }

    // Unfairness exceeds alpha: restrict selection to maxProc below.
    if (minProc != -2 && maxProc != -1 && maxchi / minchi > Config.memory.alpha)
        applyFairnessRule = true;

    if (applyFairnessRule)
    {
        // Get the highest priority request according to fairness index!
        int bank_index;
        if (Config.memory.is_shared_MC)
        {
            bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
        }
        else
        {
            bank_index = 0;
        }
        for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
        {
            if (bank[b].is_ready() && cur_load_per_procbank[maxProc, b] > 0) // now I have do check nonetheless!
            {
                // Find the highest priority request for this bank,
                // restricted to requests from maxProc.
                MemoryRequest nextBankRequest = null;
                for (int j = 0; j < buf_load[b]; j++)
                {
                    if (buf[b, j].request.requesterID == maxProc)
                    {
                        if (nextBankRequest == null
                            || (ScheduleWB && !helper.is_FR_FCFS(nextBankRequest, buf[b, j]))
                            || (!ScheduleWB && !helper.is_NONWB_FR_FCFS(nextBankRequest, buf[b, j])))
                        {
                            nextBankRequest = buf[b, j];
                        }
                    }
                }
                if (nextBankRequest == null)
                    throw new Exception("Bank Load is 0");

                // Compare between highest priority between different banks
                if (nextRequest == null
                    || (ScheduleWB && !helper.is_X_FCFS(nextRequest, nextBankRequest, chi[nextRequest.request.requesterID], chi[nextBankRequest.request.requesterID]))
                    || (!ScheduleWB && !helper.is_NONWB_X_FCFS(nextRequest, nextBankRequest, chi[nextRequest.request.requesterID], chi[nextBankRequest.request.requesterID])))
                {
                    nextRequest = nextBankRequest;
                }
            }
        }
        if (nextRequest == null)
            throw new Exception("No Request from MaxProc");
    }
    else
    {
        // Get the highest priority request according to FR-FCFS.
        int bank_index;
        if (Config.memory.is_shared_MC)
        {
            bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
        }
        else
        {
            bank_index = 0;
        }
        for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
        {
            if (bank[b].is_ready() && buf_load[b] > 0) // now I have do check nonetheless!
            {
                // Find the highest priority request for this bank
                MemoryRequest nextBankRequest = buf[b, 0];
                for (int j = 1; j < buf_load[b]; j++)
                {
                    if ((ScheduleWB && !helper.is_FR_FCFS(nextBankRequest, buf[b, j]))
                        || (!ScheduleWB && !helper.is_NONWB_FR_FCFS(nextBankRequest, buf[b, j])))
                    {
                        nextBankRequest = buf[b, j];
                    }
                }
                // Compare between highest priority between different banks
                if (nextRequest == null
                    || (ScheduleWB && !helper.is_FCFS(nextRequest, nextBankRequest))
                    || (!ScheduleWB && !helper.is_NONWB_FCFS(nextRequest, nextBankRequest)))
                {
                    nextRequest = nextBankRequest;
                }
            }
        }
    }

    // for stats!
    if (nextRequest != null)
    {
        if (applyFairnessRule)
            fairnessRule++;
        else if (minchi == double.MaxValue || maxProc == -1 || minProc == maxProc)
            frfcfsRule_unsampled++;
        else
            frfcfsRule++;
    }

    // assert! A null result with a ready, loaded bank is inconsistent.
    if (nextRequest == null)
    {
        for (int b = 0; b < Config.memory.bank_max_per_mem; b++) // for each bank
        {
            if (bank[b].is_ready() && buf_load[b] > 0)
                throw new Exception("No Request from MaxProc");
        }
    }

    return nextRequest;
}
/* Node constructor: wires up the per-node components (an optional CPU and/or
 * memory controller according to the mapping), registers this node's
 * injection pool with the global controller, and allocates all
 * injection-side and local queues plus the naive receive buffer. */
public Node(NodeMapping mapping, Coord c)
{
    m_coord = c;
    m_mapping = mapping;

    // Instantiate a CPU and/or a memory controller only where the mapping places one.
    if (mapping.hasCPU(c.ID))
        m_cpu = new CPU(this);
    if (mapping.hasMem(c.ID))
    {
        Console.WriteLine("Proc/Node.cs : MC locations:{0}", c.ID);
        m_mem = new MemCtlr(this);
    }

    // Register this node's prioritized packet pool with the controller.
    m_inj_pool = Simulator.controller.newPrioPktPool(m_coord.ID);
    Simulator.controller.setInjPool(m_coord.ID, m_inj_pool);

    // Injection-side queues: single-flit, eviction, and one queue per subnet.
    m_injQueue_flit = new Queue<Flit>();
    m_injQueue_evict = new Queue<Flit>();
    m_injQueue_multi_flit = new Queue<Flit>[Config.sub_net];
    for (int subnet = 0; subnet < Config.sub_net; subnet++)
        m_injQueue_multi_flit[subnet] = new Queue<Flit>();

    m_local = new Queue<Packet>();
    //m_inheritance_table = new ArrayList();
    m_inheritance_dict = new Dictionary<string, int>();

    // Naive receive buffer: evicted flits are re-injected, completed packets delivered.
    m_rxbuf_naive = new RxBufNaive(this,
            delegate(Flit f) { m_injQueue_evict.Enqueue(f); },
            delegate(Packet p) { receivePacket(p); });
}
/**
 * Batch scheduler: returns the highest-priority request among all ready,
 * non-empty banks owned by this controller, with ties broken by the
 * configured batch scheduling algorithm (marked requests, per-thread ranks,
 * row hits, arrival order). Returns null when nothing is schedulable.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest next_req = null;

    // Get the highest priority request.
    // For a shared MC this controller owns a contiguous slice of the global bank array.
    int bindex_low;
    if (Config.memory.is_shared_MC)
        bindex_low = mem.mem_id * Config.memory.bank_max_per_mem;
    else
        bindex_low = 0;

    for (int b = bindex_low; b < bindex_low + mem.bank_max; b++) // for each bank
    {
        // Skip banks that are busy or hold no buffered requests.
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        // Seed the comparison with the first request of the first usable bank.
        if (next_req == null)
            next_req = buf[b, 0];

        for (int j = 0; j < buf_load[b]; j++)
        {
            MemoryRequest req = buf[b, j];
            // Replace the current pick whenever it does NOT have priority over
            // the candidate under the configured tie-break chain.
            switch (Config.memory.batch_sched_algo)
            {
                case BatchSchedAlgo.MARKED_FR_RANK_FCFS:
                    if (!helper.is_MARK_FR_RANK_FCFS(next_req, req, proc_to_rank[next_req.request.requesterID], proc_to_rank[req.request.requesterID]))
                        next_req = req;
                    break;
                case BatchSchedAlgo.MARKED_RANK_FR_FCFS:
                    if (!helper.is_MARK_RANK_FR_FCFS(next_req, req, proc_to_rank[next_req.request.requesterID], proc_to_rank[req.request.requesterID]))
                        next_req = req;
                    break;
                case BatchSchedAlgo.MARKED_RANK_FCFS:
                    if (!helper.is_MARK_RANK_FCFS(next_req, req, proc_to_rank[next_req.request.requesterID], proc_to_rank[req.request.requesterID]))
                        next_req = req;
                    break;
                case BatchSchedAlgo.MARKED_FR_FCFS:
                    if (!helper.is_MARK_FR_FCFS(next_req, req))
                        next_req = req;
                    break;
                case BatchSchedAlgo.MARKED_FCFS:
                    if (!helper.is_MARK_FCFS(next_req, req))
                        next_req = req;
                    break;
                default:
                    // Unknown batch_sched_algo: should be unreachable.
                    Debug.Assert(false);
                    break;
            }//switch
        }//for over buffer
    }//for over banks
    //Debug.Assert(next_req == null);
    return next_req;
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * Picks the next request to service across this controller's banks using the
 * priority chain encoded by the helper: mark > priority group > row hit >
 * rank > totAU > bank rank > bank rank per thread > FCFS. On a successful
 * pick, the requester's bank-rank counters are decremented and its utility
 * counters incremented.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest next_req = null;

    //search in global/local banks
    int bank_index;
    int next_req_bank = -1;  // bank holding the current best pick; -1 until seeded
    if (Config.memory.is_shared_MC)
    {
        bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
    }
    else
    {
        bank_index = 0;
    }

    //Select a memory request according to priorities:
    //Order: Ready > Timeout > priority > rowHit > rank > totAU > BankRank > time
    for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
    {
        //Rule 1: Ready — skip busy or empty banks
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        for (int j = 0; j < buf_load[b]; j++)
        {
            //Mark, priority, row hit, rank, totAU, bank rank, bank rank per thread, FCFS
            if(next_req == null)
            {
                // First candidate seen: seed the comparison.
                next_req = buf[b,j];
                next_req_bank = b;
                continue;
            }
            // Replace the pick whenever it does NOT outrank the candidate.
            if(!helper.is_MARK_PRIO_FR_RANK_TOTAU_BR_BRT_FCFS(next_req, buf[b,j],
                    rank[next_req.memoryRequesterID], rank[buf[b,j].memoryRequesterID],
                    priority[getGroup(next_req.memoryRequesterID)], priority[getGroup(buf[b,j].memoryRequesterID)],
                    bankRank[next_req.memoryRequesterID], bankRank[buf[b,j].memoryRequesterID],
                    bankRankPerThread[next_req.glob_b_index,next_req.memoryRequesterID],bankRankPerThread[b,buf[b,j].memoryRequesterID],
                    totAU[next_req.memoryRequesterID],totAU[buf[b,j].memoryRequesterID]))
            {
                next_req = buf[b,j];
                next_req_bank = b;
            }
        }
    }

    //Note: All requests (in this simulator) are either load, store, or writeback
    if(next_req != null)
    {
        // Account the pick: consume one bank-rank token for the requester
        // (globally and for the chosen bank) and credit its utility counters.
        bankRank[next_req.memoryRequesterID]--;
        bankRankPerThread[next_req_bank,next_req.memoryRequesterID]--;
        utility[next_req.memoryRequesterID]++;
        utilityMinFrame[next_req.memoryRequesterID]++;
    }
    return next_req;
}
/**
 * Batch scheduler with per-round completion-time gating: first finds the
 * highest-priority ready request under the configured batch algorithm, then
 * — when a batch is active and the pick is unmarked — only releases it if it
 * does not exceed the completion-time budget of the current (top-ranked)
 * round. Returns null when nothing may be scheduled this cycle.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest next_req = null;

    // Get the highest priority request.
    // For a shared MC this controller owns a slice of the global bank array.
    int bindex_low;
    if (Config.memory.is_shared_MC)
    {
        bindex_low = mem.mem_id * Config.memory.bank_max_per_mem;
    }
    else
    {
        bindex_low = 0;
    }
    for (int b = bindex_low; b < bindex_low + mem.bank_max; b++) // for each bank
    {
        // Skip banks that are busy or hold no buffered requests.
        if (!bank[b].is_ready() || buf_load[b] == 0)
        {
            continue;
        }
        // Seed the comparison with the first request of the first usable bank.
        if (next_req == null)
        {
            next_req = buf[b, 0];
        }
        for (int j = 0; j < buf_load[b]; j++)
        {
            MemoryRequest req = buf[b, j];
            // Replace the pick whenever it does NOT outrank the candidate
            // under the configured tie-break chain.
            switch (Config.memory.batch_sched_algo)
            {
                case BatchSchedAlgo.MARKED_FR_RANK_FCFS:
                    if (!helper.is_MARK_FR_RANK_FCFS(next_req, req, proc_to_rank[next_req.request.requesterID], proc_to_rank[req.request.requesterID]))
                    {
                        next_req = req;
                    }
                    break;
                case BatchSchedAlgo.MARKED_RANK_FR_FCFS:
                    if (!helper.is_MARK_RANK_FR_FCFS(next_req, req, proc_to_rank[next_req.request.requesterID], proc_to_rank[req.request.requesterID]))
                    {
                        next_req = req;
                    }
                    break;
                case BatchSchedAlgo.MARKED_RANK_FCFS:
                    if (!helper.is_MARK_RANK_FCFS(next_req, req, proc_to_rank[next_req.request.requesterID], proc_to_rank[req.request.requesterID]))
                    {
                        next_req = req;
                    }
                    break;
                case BatchSchedAlgo.MARKED_FR_FCFS:
                    if (!helper.is_MARK_FR_FCFS(next_req, req))
                    {
                        next_req = req;
                    }
                    break;
                case BatchSchedAlgo.MARKED_FCFS:
                    if (!helper.is_MARK_FCFS(next_req, req))
                    {
                        next_req = req;
                    }
                    break;
                default:
                    // Unknown batch_sched_algo: should be unreachable.
                    Debug.Assert(false);
                    break;
            } //switch
        } //for over buffer
    } //for over banks
    //Debug.Assert(next_req == null);

    if (next_req == null)
    {
        return(null);
    }
    // Outside an active batch there is no gating: release immediately.
    if (is_batch == false)
    {
        return(next_req);
    }
    // Marked requests belong to the current batch and are always released.
    if (next_req.isMarked)
    {
        return(next_req);
    }

    //which round are we in (within a batch)?
    int cur_proc = rank_to_proc[cur_rank];
    int cur_ct = bhelper.completion_time[cur_proc];
    // Advance to the next round once the current round's budget has elapsed.
    if ((MemCtlr.cycle - batch_begin_time) >= (ulong)cur_ct)
    {
        cur_rank--;
    }

    //test for completion time
    int test_ct = bhelper.completion_time[next_req.request.requesterID];

    // Hold back the unmarked request if it would exceed the budget while the
    // top-ranked round is still in progress.
    if (test_ct > cur_ct && cur_rank == Config.N - 1)
    {
        return(null);
    }
    return(next_req);

    /*
     * //which round are we in (within a batch)?
     * int cur_proc = rank_to_proc[cur_rank];
     * int cur_ct = bhelper.completion_time[cur_proc];
     * if ((Mem.cycle - batch_begin_time) >= (ulong) cur_ct)
     * cur_rank--;
     *
     * //test for completion time
     * int test_ct = bhelper.completion_time[next_req.threadID];
     *
     * if (test_ct > cur_ct)
     * return null;
     *
     * return next_req;
     */
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * Fairness-aware scheduler: estimates each processor's slowdown
 * chi_i = realLatency / idealLatency. When the ratio between the largest and
 * smallest sampled chi exceeds Config.memory.alpha, the "fairness rule"
 * services only the most-slowed-down processor (maxProc); otherwise a
 * chi-weighted FR-FCFS pick is made across all banks.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest nextRequest = null;

    // Compute current values of chi_i and get largest and smallest!
    bool applyFairnessRule = false;
    double minchi = double.MaxValue;
    double maxchi = double.MinValue;
    int maxProc = -1;
    int minProc = -2;
    for (int i = 0; i < Config.N; i++)
    {
        // MENTAL NOTE: Can this be very inaccurate at the beginning,
        // when idealLatency and realLatency are very small???
        chi[i] = (double)realLatency[i] / (double)idealLatency[i];
        if (chi[i] < 1)
        {
            //throw new Exception("X is less than 1!");
            //Console.WriteLine("X is less than 1!!!!!!");
            chi[i] = 1;  // clamp: a processor cannot be "faster than ideal"
        }

        // Priorities: optionally scale slowdown by the per-processor weight.
        if (Config.memory.use_weight > 0)
        {
            chi[i] = 1 + ((chi[i] - 1) * Config.memory.weight[i]);
        }

        // check whether processor i has at least one outstanding ready request
        bool considerProc = false;

        // Get the highest priority request.
        int bank_index;
        if (Config.memory.is_shared_MC)
        {
            bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
        }
        else
        {
            bank_index = 0;
        }
        for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
        {
            if (bank[b].is_ready() && cur_load_per_procbank[i, b] > 0)
            {
                considerProc = true;
                break;
            }
        }

        // Track min/max chi only over processors with enough samples and a
        // ready request; also record all-time extremes for statistics.
        if (considerProc && nrOfSamples[i] >= Config.memory.sample_min)
        {
            if (chi[i] < minchi)
            {
                minchi = chi[i];
                minProc = i;
                if (chi[i] < minChiEver)
                {
                    minChiEver = chi[i];
                    minChiEverProc = i;
                }
            }
            if (chi[i] > maxchi)
            {
                maxchi = chi[i];
                maxProc = i;
                if (chi[i] > maxChiEver)
                {
                    maxChiEver = chi[i];
                    maxChiEverProc = i;
                }
            }
        }
    }

    // Unfairness threshold: engage the fairness rule when max/min chi > alpha.
    if (minchi != double.MaxValue && maxProc != -1 && maxchi / minchi > Config.memory.alpha)
        applyFairnessRule = true;

    // TODO BUGFIX:
    // 1) Use different tie-breaker rule (within and especially across bank)
    // 2) Check whether MaxProc has at least one outstanding request.
    // --> Check also MICRO implementation!!!
    // --> DONE! It should work now!
    if (applyFairnessRule)
    {
        // Get the highest priority request according to fairness index!
        // Only requests from the most-slowed-down processor are eligible.
        int bank_index;
        if (Config.memory.is_shared_MC)
        {
            bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
        }
        else
        {
            bank_index = 0;
        }
        for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
        {
            if (bank[b].is_ready() && cur_load_per_procbank[maxProc, b] > 0) // now I have do check nonetheless!
            {
                // Find the highest priority request for this bank (FR-FCFS),
                // restricted to requests from maxProc.
                MemoryRequest nextBankRequest = null;
                for (int j = 0; j < buf_load[b]; j++)
                {
                    if (buf[b, j].request.requesterID == maxProc)
                    {
                        if (nextBankRequest == null || !helper.is_FR_FCFS(nextBankRequest, buf[b, j]))
                        {
                            nextBankRequest = buf[b, j];
                        }
                    }
                }
                // cur_load_per_procbank promised at least one maxProc request here.
                if (nextBankRequest == null)
                    throw new Exception("Bank Load is 0");

                // Compare between highest priority between different banks
                if (nextRequest == null || !helper.is_X_FCFS(nextRequest, nextBankRequest, chi[nextRequest.request.requesterID], chi[nextBankRequest.request.requesterID]))
                {
                    nextRequest = nextBankRequest;
                }
            }
        }
        if (nextRequest == null)
            throw new Exception("No Request from MaxProc");
    }
    else
    {
        // Get the highest priority request according to FR-FCFS.
        int bank_index;
        if (Config.memory.is_shared_MC)
        {
            bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
        }
        else
        {
            bank_index = 0;
        }
        for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
        {
            if (bank[b].is_ready() && buf_load[b] > 0) // now I have do check nonetheless!
            {
                // Find the highest priority request for this bank
                // (chi-weighted FR-FCFS within the bank).
                MemoryRequest nextBankRequest = buf[b, 0];
                for (int j = 1; j < buf_load[b]; j++)
                {
                    if (!helper.is_FR_X_FCFS(nextBankRequest, buf[b, j], chi[nextBankRequest.request.requesterID], chi[buf[b, j].request.requesterID]))
                    {
                        nextBankRequest = buf[b, j];
                    }
                }

                // Compare between highest priority between different banks
                if (nextRequest == null || !helper.is_X_FCFS(nextRequest, nextBankRequest, chi[nextRequest.request.requesterID], chi[nextBankRequest.request.requesterID]))
                {
                    nextRequest = nextBankRequest;
                }
            }
        }
    }

    // for stats!
    if (nextRequest != null)
    {
        if (applyFairnessRule)
            fairnessRule++;
        else if (minchi == double.MaxValue || maxProc == -1 || minProc == maxProc)
            frfcfsRule_unsampled++;
        else
            frfcfsRule++;
    }
    return nextRequest;
}
/**
 * Constructor. Builds this controller's bank array and scheduler.
 * In the private (per-MC) configuration each controller allocates its own
 * banks and scheduler. In the shared-MC configuration the first controller
 * constructed (mem_id == 0) allocates ONE global bank array and ONE global
 * scheduler; every controller then aliases its contiguous slice of the
 * global banks and shares the global scheduler.
 */
public MemCtlr(Node node)
{
    this.node = node;

    /*
     * //locally visible bank
     * bank = new Bank[Config.memory.bank_max_per_mem];
     *
     * //allocate scheduler (sees local banks)
     * sched = MemCtlr.alloc_sched(bank, Config.memory.buf_size_per_bank, Config.memory.mem_sched_algo, Config.memory.wb_special_sched);
     *
     * //allocate individual banks
     * for (int b = 0; b < Config.memory.bank_max_per_mem; b++)
     * bank[b] = new Bank(sched, this);
     *
     * //memory id
     * mem_id = index++;
     *
     * //size
     * this.bank_max = bank.Length;
     */

    //Yoongu: giant hack to support shared MCs
    if (Config.memory.is_shared_MC == false)
    {
        bank = new Bank[Config.memory.bank_max_per_mem];

        //allocate scheduler (sees local banks)
        sched = MemCtlr.alloc_sched(bank, Config.memory.buf_size_per_mem, Config.memory.mem_sched_algo, Config.memory.wb_special_sched);

        //allocate individual banks
        for (int b = 0; b < Config.memory.bank_max_per_mem; b++)
        {
            bank[b] = new Bank(sched, this);
        }

        //memory id (monotonically increasing across all controllers)
        mem_id = index++;

        //size
        this.bank_max = bank.Length;
    }
    else
    {
        //memory id (monotonically increasing across all controllers)
        mem_id = index++;

        if (mem_id == 0)
        {
            //only the first memory allocates the global banks
            bank_global = new Bank[Config.memory.bank_max_per_mem * Config.memory.mem_max];

            //allocate scheduler (sees all global banks)
            sched_global = MemCtlr.alloc_sched(bank_global, Config.memory.buf_size_per_mem * Config.memory.mem_max, Config.memory.mem_sched_algo, Config.memory.wb_special_sched);

            //allocate individual banks
            for (int b = 0; b < bank_global.Length; b++)
            {
                bank_global[b] = new Bank(sched_global, this);
            }
        }

        // All controllers share the single global scheduler...
        sched = sched_global;

        // ...but each exposes only its own slice of the global bank array.
        bank = new Bank[Config.memory.bank_max_per_mem];
        for (int b = 0; b < bank.Length; b++)
        {
            bank[b] = bank_global[mem_id * Config.memory.bank_max_per_mem + b];
        }

        //size
        this.bank_max = bank.Length;
    }
}
/* Scheduler: scans every ready, non-empty bank owned by this controller and
 * returns the request favored by the MARK > PRIO > row-hit > RANK > FCFS
 * priority chain, or null when nothing is schedulable. */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    // For a shared MC this controller owns a slice of the global bank array.
    int firstBank = Config.memory.is_shared_MC ?
        mem.mem_id * Config.memory.bank_max_per_mem : 0;

    MemoryRequest nextRequest = null;
    for (int bankIdx = firstBank; bankIdx < firstBank + mem.bank_max; bankIdx++)
    {
        // Only consider banks that are ready and hold buffered requests.
        if (!bank[bankIdx].is_ready() || buf_load[bankIdx] == 0)
            continue;

        // Seed the comparison with the first request of the first usable bank.
        if (nextRequest == null)
            nextRequest = buf[bankIdx, 0];

        for (int slot = 0; slot < buf_load[bankIdx]; slot++)
        {
            MemoryRequest candidate = buf[bankIdx, slot];
            // Replace the pick whenever it does NOT outrank the candidate.
            if (!helper.is_MARK_PRIO_FR_RANK_FCFS(nextRequest, candidate,
                    threadPriority[nextRequest.request.requesterID],
                    threadPriority[candidate.request.requesterID],
                    Rank[nextRequest.request.requesterID],
                    Rank[candidate.request.requesterID]))
            {
                nextRequest = candidate;
            }
        }
    }
    return nextRequest;
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * Writeback-aware fairness scheduler: estimates each processor's slowdown
 * chi_i = realLatency / idealLatency. When max/min sampled chi exceeds
 * Config.memory.alpha, only the most-slowed-down processor (maxProc) is
 * serviced (fairness rule); otherwise a plain FR-FCFS pick is made.
 * Writebacks are eligible only once the writeback-buffer fraction exceeds
 * Config.memory.wb_full_ratio (ScheduleWB), via the _NONWB_ helper variants.
 *
 * BUGFIX: the FR-FCFS (else) branch previously iterated banks 0 ..
 * bank_max_per_mem, ignoring this controller's offset into the global bank
 * array under is_shared_MC. It now computes bank_index exactly like the
 * fairness branch (and like every other scheduler in this file), so a shared
 * MC scans its own banks instead of the first controller's.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest nextRequest = null;

    // Allow writebacks only when the writeback buffer is sufficiently full.
    bool ScheduleWB = (this.get_wb_fraction() > Config.memory.wb_full_ratio);

    // Compute current values of chi_i and get largest and smallest!
    bool applyFairnessRule = false;
    double minchi = double.MaxValue;
    double maxchi = double.MinValue;
    int maxProc = -1;
    int minProc = -2;
    for (int i = 0; i < Config.N; i++)
    {
        chi[i] = (double)realLatency[i] / (double)idealLatency[i];
        if (chi[i] < 1)
        {
            //throw new Exception("X is less than 1!");
            //Console.WriteLine("X is less than 1!!!!!!");
            chi[i] = 1;  // clamp: a processor cannot be "faster than ideal"
        }

        // Priorities: optionally scale slowdown by the per-processor weight.
        if (Config.memory.use_weight > 0)
        {
            chi[i] = 1 + ((chi[i] - 1) * Config.memory.weight[i]);
        }

        // check whether processor i has at least one outstanding ready
        // (non-writeback) request
        bool considerProc = false;

        int bank_index;
        if (Config.memory.is_shared_MC)
        {
            bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
        }
        else
        {
            bank_index = 0;
        }
        for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
        {
            if (bank[b].is_ready() && cur_nonwb_per_procbank[i, b] > 0)
            {
                considerProc = true;
                break;
            }
        }

        // Track min/max chi only over processors with enough samples and a
        // ready request; also record all-time extremes for statistics.
        if (considerProc && nrOfSamples[i] >= Config.memory.sample_min)
        {
            if (chi[i] < minchi)
            {
                minchi = chi[i];
                minProc = i;
                if (chi[i] < minChiEver)
                {
                    minChiEver = chi[i];
                    minChiEverProc = i;
                }
            }
            if (chi[i] > maxchi)
            {
                maxchi = chi[i];
                maxProc = i;
                if (chi[i] > maxChiEver)
                {
                    maxChiEver = chi[i];
                    maxChiEverProc = i;
                }
            }
        }
    }

    // Unfairness threshold: engage the fairness rule when max/min chi > alpha.
    if (minchi != double.MaxValue && maxProc != -1 && maxchi / minchi > Config.memory.alpha)
        applyFairnessRule = true;

    if (applyFairnessRule)
    {
        // Get the highest priority request according to fairness index!
        // Only requests from the most-slowed-down processor are eligible.
        int bank_index;
        if (Config.memory.is_shared_MC)
        {
            bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
        }
        else
        {
            bank_index = 0;
        }
        for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
        {
            if (bank[b].is_ready() && cur_load_per_procbank[maxProc, b] > 0)
            {
                // Find the highest priority request for this bank (FR-FCFS,
                // writeback-aware), restricted to requests from maxProc.
                MemoryRequest nextBankRequest = null;
                for (int j = 0; j < buf_load[b]; j++)
                {
                    if (buf[b, j].request.requesterID == maxProc)
                    {
                        if (nextBankRequest == null ||
                            (ScheduleWB && !helper.is_FR_FCFS(nextBankRequest, buf[b, j])) ||
                            (!ScheduleWB && !helper.is_NONWB_FR_FCFS(nextBankRequest, buf[b, j])))
                        {
                            nextBankRequest = buf[b, j];
                        }
                    }
                }
                // cur_load_per_procbank promised at least one maxProc request here.
                if (nextBankRequest == null)
                    throw new Exception("Bank Load is 0");

                // Compare between highest priority between different banks
                if (nextRequest == null ||
                    (ScheduleWB && !helper.is_X_FCFS(nextRequest, nextBankRequest, chi[nextRequest.request.requesterID], chi[nextBankRequest.request.requesterID])) ||
                    (!ScheduleWB && !helper.is_NONWB_X_FCFS(nextRequest, nextBankRequest, chi[nextRequest.request.requesterID], chi[nextBankRequest.request.requesterID])))
                {
                    nextRequest = nextBankRequest;
                }
            }
        }
        if (nextRequest == null)
            throw new Exception("No Request from MaxProc");
    }
    else
    {
        // Get the highest priority request according to FR-FCFS.
        // FIX: honor the shared-MC bank offset (was: banks 0..bank_max_per_mem).
        int bank_index;
        if (Config.memory.is_shared_MC)
        {
            bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
        }
        else
        {
            bank_index = 0;
        }
        for (int b = bank_index; b < bank_index + mem.bank_max; b++) // for each bank
        {
            if (bank[b].is_ready() && buf_load[b] > 0)
            {
                // Find the highest priority request for this bank
                // (writeback-aware FR-FCFS within the bank).
                MemoryRequest nextBankRequest = buf[b, 0];
                for (int j = 1; j < buf_load[b]; j++)
                {
                    if ((ScheduleWB && !helper.is_FR_FCFS(nextBankRequest, buf[b, j])) ||
                        (!ScheduleWB && !helper.is_NONWB_FR_FCFS(nextBankRequest, buf[b, j])))
                    {
                        nextBankRequest = buf[b, j];
                    }
                }

                // Compare between highest priority between different banks
                if (nextRequest == null ||
                    (ScheduleWB && !helper.is_FCFS(nextRequest, nextBankRequest)) ||
                    (!ScheduleWB && !helper.is_NONWB_FCFS(nextRequest, nextBankRequest)))
                {
                    nextRequest = nextBankRequest;
                }
            }
        }
    }

    // for stats!
    if (nextRequest != null)
    {
        if (applyFairnessRule)
            fairnessRule++;
        else if (minchi == double.MaxValue || maxProc == -1 || minProc == maxProc)
            frfcfsRule_unsampled++;
        else
            frfcfsRule++;
    }

    // assert! Nothing picked even though some bank is ready and loaded would
    // indicate a scheduling hole.
    // NOTE(review): this check scans local indices 0..bank_max_per_mem, which
    // under is_shared_MC inspects the first controller's slice — same as the
    // sibling implementation in this file; kept as-is for consistency.
    if (nextRequest == null)
    {
        for (int b = 0; b < Config.memory.bank_max_per_mem; b++) // for each bank
        {
            if (bank[b].is_ready() && buf_load[b] > 0)
                throw new Exception("No Request from MaxProc");
        }
    }
    return nextRequest;
}
/**
 * This is the main function of interest. Another memory scheduler needs to
 * implement this function in a different way!
 *
 * Delegates the actual pick to selectHighestPriorityRequest(), then — if a
 * request was chosen — performs the stall-time bookkeeping for all threads.
 */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    MemoryRequest picked = selectHighestPriorityRequest(mem);
    if (picked == null)
        return picked;  // nothing schedulable this cycle; no accounting needed

    int proc = picked.request.requesterID;

    // 1. add bus latency to all threads that aren't scheduled
    updateOthersBusStallDelta(proc);
    // 2. add bank latency to all threads in the bank that aren't scheduled
    updateOthersStallDelta(proc, picked.glob_b_index, picked.r_index);
    // 3. update the scheduled thread's own stall delta (e.g. a conflict that
    //    would have been a hit) and refresh the tracked current row buffers.
    updateOwnStallDelta(proc, picked.glob_b_index, picked.r_index);

    return picked;
}
/* Row-hit-cap scheduler: within each ready bank the best request is chosen
 * FR-FCFS, unless that bank's consecutive row-hit streak has exceeded
 * Config.memory.row_hit_cap, in which case plain FCFS is used so row-hit
 * streaks cannot starve other rows. Banks are then arbitrated FCFS, and the
 * chosen bank's streak counter is updated (extended on a hit, reset on a
 * row change). */
public override MemoryRequest get_next_req(MemCtlr mem)
{
    // Shared MCs own a slice of the global bank array; private MCs start at 0.
    int firstBank = Config.memory.is_shared_MC ?
        mem.mem_id * Config.memory.bank_max_per_mem : 0;

    MemoryRequest nextRequest = null;
    for (int b = firstBank; b < firstBank + mem.bank_max; b++)
    {
        if (!bank[b].is_ready() || buf_load[b] == 0)
            continue;

        // Once the streak cap is exceeded, fall back to pure FCFS in this bank.
        bool hitCapReached = (bankRowHitCount[b] > Config.memory.row_hit_cap);

        MemoryRequest bestInBank = buf[b, 0];
        for (int j = 1; j < buf_load[b]; j++)
        {
            MemoryRequest candidate = buf[b, j];
            bool keepCurrent = hitCapReached
                ? helper.is_FCFS(bestInBank, candidate)
                : helper.is_FR_FCFS(bestInBank, candidate);
            if (!keepCurrent)
                bestInBank = candidate;
        }

        // Across banks, arbitrate purely by arrival order (FCFS).
        if (nextRequest == null || !helper.is_FCFS(nextRequest, bestInBank))
            nextRequest = bestInBank;
    }

    if (nextRequest != null)
    {
        // Maintain the row-hit streak for the bank about to be serviced.
        int gb = nextRequest.glob_b_index;
        if (bank[gb].get_cur_row() == nextRequest.r_index)
            bankRowHitCount[gb]++;      // same row: extend the streak
        else
            bankRowHitCount[gb] = 0;    // row changed: reset the streak
    }
    return nextRequest;
}