/**
 * Progress time and (possibly) service the current request.
 * Decrement the time left to fully service the current request.
 * If it reaches zero, service it and notify the processor.
 */
public void tick()
{
    // (A per-source queue-depth sampling loop used to live here; its body was
    // entirely commented out, so it has been removed.)
    Simulator.stats.bank_queuedepth[bank_id].Add(outstandingReqs);

    // Sanity checks: wait_left stays within one worst-case service time, and
    // cur_req/wait_left agree (both idle, or both mid-service).
    Debug.Assert((wait_left >= 0) &&
                 (wait_left <= Config.memory.row_conflict_latency + Config.memory.bus_busy_time));
    Debug.Assert(!(cur_req == null && wait_left != 0));
    Debug.Assert(!(cur_req != null && wait_left == 0));

    // Decrement time left to serve the current request.
    if (wait_left > 0)
    {
        wait_left--;
    }

    // Can't complete anything this cycle: either idle or still serving.
    if (cur_req == null || wait_left != 0)
    {
        return;
    }

    // The current request is now fully serviced.
    // Validate the request BEFORE dereferencing it: the original code indexed
    // outstandingReqs_perapp[cur_req.request.requesterID] first and only then
    // null-checked cur_req.request, so the intended error could never fire.
    Request request = cur_req.request;
    if (request == null)
    {
        throw new InvalidOperationException("No request! don't know who to send it back to!");
    }

    // Serve the request by removing it from the scheduler buffer.
    sched.remove_req(cur_req);

    outstandingReqs--;
    if (outstandingReqs < 0)
    {
        throw new InvalidOperationException("Bank has negative number of requests!");
    }

    outstandingReqs_perapp[request.requesterID]--;
    if (outstandingReqs_perapp[request.requesterID] < 0)
    {
        throw new InvalidOperationException("App has negative number of requests!");
    }

    // Send the serviced request back to the cache (which in turn sends it to
    // the processor).
    cur_req.cb();

    CPU cpu = Simulator.network.nodes[request.requesterID].cpu;
    cpu.outstandingReqsMemory--;
    if (cpu.outstandingReqsMemory == 0)
    {
        // Last outstanding memory request drained: close the memory episode
        // and start timing the next one from the current cycle.
        Simulator.stats.memory_episode_persrc[request.requesterID].Add(
            Simulator.CurrentRound - cpu.outstandingReqsMemoryCycle);
        cpu.outstandingReqsMemoryCycle = Simulator.CurrentRound;
    }

    //----- STATS START -----
    stat.dec(ref BankStat.req_cnt[request.requesterID]);
    if (cur_req.isMarked)
    {
        stat.dec(ref BankStat.marked_req_cnt[request.requesterID]);
    }
    else
    {
        stat.dec(ref BankStat.unmarked_req_cnt[request.requesterID]);
    }
    //----- STATS END ------

    // Reset current request; the bank is idle again.
    cur_req = null;
}
// Chain m2's completion callback onto m1's. The previous callback must be
// captured in a fresh local before reassignment: delegate captures bind the
// variable, not its value, so wrapping repeatedly inside a loop would
// otherwise self-reference and recurse forever.
protected void WrapDelegates(MemoryRequest m1, MemoryRequest m2)
{
    Simulator.Ready chained = m1.cb;
    m1.cb = () =>
    {
        chained();
        m2.cb();
    };
}
// Per-cycle update for this memory channel. Each call:
//   1) samples bank-level parallelism (BLP) / utilization for busy slots,
//   2) advances inter-arrival-time (IAT) counters and buffer-occupancy
//      averages, and updates the CPU-priority trigger,
//   3) retires at most ONE completed request (note the `break`): updates
//      per-class/per-bank counters and stats, deallocates the slot, and
//      invokes the request's callback,
//   4) accumulates per-bank queue-depth counters and ticks the scheduler.
public void Tick()
{
    // --- 1) Sample BLP: one increment per busy buffer slot per requester. ---
    for (int i = 0; i < maxRequests; i++)
    {
        if (buf[i].Busy)
        {
            BLP[buf[i].mreq.request.requesterID]++;
            // Stats only accumulate while the owning CPU's measurement window
            // is active (m_stats_active).
            if (Simulator.network.nodes[buf[i].mreq.request.requesterID].m_cpu.m_stats_active)
            {
                Simulator.stats.BLPTotal[buf[i].mreq.request.requesterID].Add();
            }
            // NOTE(review): condition identical to the one above — could be a
            // single guarded block; kept as-is to preserve the code verbatim.
            if (Simulator.network.nodes[buf[i].mreq.request.requesterID].m_cpu.m_stats_active)
            {
                Simulator.stats.DRAMUtilization[buf[i].mreq.request.requesterID].Add();
            }
        }
    }

    // --- 2) Advance per-group IAT counters and running occupancy share. ---
    for (int i = 0; i < Config.Ng; i++)
    {
        IATCounter[i]++;
        BufferUsed[i] += (float)loadPerProc[i] / (float)maxRequests;
    }

    // CPU-priority trigger: if the last group (index Config.Ng - 1 —
    // presumably the GPU/accelerator group; confirm against Config) has been
    // silent longer than IAT_threshold cycles, raise the trigger. Release is
    // delayed by IAT_thres_delay cycles of hysteresis.
    if (IATCounter[Config.Ng - 1] > Config.IAT_threshold)
    {
        triggerCPUPrio = true;
    }
    else
    {
        if (triggerDelay < Config.IAT_thres_delay)
        {
            triggerDelay++;
        }
        else
        {
            triggerDelay = 0;
            triggerCPUPrio = false;
        }
    }

    // --- 3) Retire at most one completed request this cycle. ---
    for (int i = 0; i < maxRequests; i++)
    {
        if (buf[i].Valid && buf[i].Completed)
        {
#if FIFORRDEBUG
            Console.WriteLine("Mem req. from src {0} finish at {1} (this is where we deallocate buffers queue)", buf[i].mreq.request.requesterID, Simulator.CurrentRound);
#endif
            MemoryRequest mreq = buf[i].mreq;
            // For RBH (row-buffer hit) and BLP stats.
            memServiceCount[mreq.request.requesterID]++;
            loadPerProc[mreq.request.requesterID]--;
            // Classify the requester (GPU / HWA / CPU core) and decrement the
            // matching in-flight counter. (HWA-codebase change; an earlier
            // version tested mreq.from_GPU instead of the node's CPU type.)
            if (Simulator.network.nodes[mreq.request.requesterID].cpu.is_GPU())
            {
                GPURequests--;
            }
            else if (Simulator.network.nodes[mreq.request.requesterID].cpu.is_HWA())
            {
                HWARequests--;
            }
            else
            {
                coreRequests--;
            }
            // Per-direction bookkeeping; no issued activation means the access
            // hit the open row, which counts toward RBHCount.
            if (mreq.request.write)
            {
                writeRequests--;
                if (Simulator.network.nodes[mreq.request.requesterID].m_cpu.m_stats_active)
                {
                    Simulator.stats.DRAMWritesPerSrc[mreq.request.requesterID].Add();
                }
                if (!buf[i].issuedActivation)
                {
                    RBHCount[mreq.request.requesterID]++;
                }
            }
            else
            {
                readRequests--;
                if (Simulator.network.nodes[mreq.request.requesterID].m_cpu.m_stats_active)
                {
                    Simulator.stats.DRAMReadsPerSrc[mreq.request.requesterID].Add();
                }
                if (!buf[i].issuedActivation)
                {
                    RBHCount[mreq.request.requesterID]++;
                }
            }
            // Latency stats (accumulated in Common/stats.cs):
            //   total   = now - arrival, array = now - service start,
            //   queue   = service start - arrival.
            if (Simulator.network.nodes[mreq.request.requesterID].m_cpu.m_stats_active)
            {
                Simulator.stats.DRAMTotalLatencyPerSrc[mreq.request.requesterID].Add(Simulator.CurrentRound - buf[i].whenArrived);
            }
            if (Simulator.network.nodes[mreq.request.requesterID].m_cpu.m_stats_active)
            {
                Simulator.stats.DRAMTotalArrayLatencyPerSrc[mreq.request.requesterID].Add(Simulator.CurrentRound - buf[i].whenStarted);
            }
            if (Simulator.network.nodes[mreq.request.requesterID].m_cpu.m_stats_active)
            {
                Simulator.stats.DRAMTotalQueueLatencyPerSrc[mreq.request.requesterID].Add(buf[i].whenStarted - buf[i].whenArrived);
            }
            // Bank queue bookkeeping lives in the QoS controller, not locally.
            Simulator.QoSCtrl.RequestsPerBank[mreq.bank_index]--;
            // NOTE(review): decrements *CPU*RequestsPerBank when the requester
            // is a HWA — this looks inverted; confirm the intended counter.
            if (Simulator.network.nodes[mreq.request.requesterID].cpu.is_HWA())
            {
                Simulator.QoSCtrl.CPURequestsPerBank[mreq.bank_index]--;
            }
            // Track runs of consecutive row-buffer hits: extend the run on a
            // hit, otherwise record the finished run length and reset.
            if (!buf[i].issuedActivation)
            {
                this.ComboHitsCounts++;
            }
            else
            {
                if (Simulator.network.nodes[mreq.request.requesterID].m_cpu.m_stats_active)
                {
                    Simulator.stats.ComboHitsBin.Add(ComboHitsCounts);
                }
                this.ComboHitsCounts = 0;
            }
            // Inter-arrival-time stats, then reset this source's counter.
            if (Simulator.network.nodes[mreq.request.requesterID].m_cpu.m_stats_active)
            {
                Simulator.stats.CumulativeArrivalTime[mreq.request.requesterID].Add(IATCounter[mreq.request.requesterID]);
            }
            if (Simulator.network.nodes[mreq.request.requesterID].m_cpu.m_stats_active)
            {
                Simulator.stats.ArrivaltimeBin.Add((float)IATCounter[mreq.request.requesterID]);
            }
            IATCounter[mreq.request.requesterID] = 0;
            // Deallocate the slot, notify the requester, and stop: only one
            // request is retired per Tick().
            buf[i].Deallocate();
            mreq.cb();
            break;
        }
    }

    // --- 4) Accumulate per-bank queue-depth stats and tick the scheduler. ---
    for (int i = 0; i < numBanks; i++)
    {
        Simulator.QoSCtrl.dram_bank_req_cnt[Config.memory.numChannels * mem_id + id, i] += (ulong)RequestsPerBank[i];
    }
    Simulator.QoSCtrl.dram_bank_req_cnt_base[Config.memory.numChannels * mem_id + id]++;
    sched.Tick();
}
// Make m1's callback also fire m2's. Copying the old delegate into a local
// before overwriting m1.cb is essential: closures capture variables by
// reference, so without the snapshot a loop that wraps repeatedly would end
// up with a self-referential (infinitely recursive) delegate.
protected void WrapDelegates(MemoryRequest m1, MemoryRequest m2)
{
    Simulator.Ready inner = m1.cb;
    m1.cb = delegate ()
    {
        inner();
        m2.cb();
    };
}