/// <summary>
/// Translate a processor request into the equivalent memory request.
/// </summary>
/// <param name="pro_req_">Processor request to translate.</param>
/// <returns>A newly built memory request mirroring the processor request.</returns>
public static MemRequest transfer(ProcRequest pro_req_)
{
    MemRequest result = new MemRequest
    {
        address = MemorySelector.resize(pro_req_.actual_addr),
        data = 0, //actully we need no data here, but we'll take it in the future.
        block_addr = pro_req_.block_addr,
        cycle = pro_req_.cycle
    };
    result.pid.Add(pro_req_.pid);

    // Map the processor-side request type onto the memory-side type;
    // anything unrecognised is treated as a data return.
    if (pro_req_.type == RequestType.READ)
        result.memtype = MemReqType.READ;
    else if (pro_req_.type == RequestType.WRITE)
        result.memtype = MemReqType.WRITE;
    else if (pro_req_.type == RequestType.FLUSH)
        result.memtype = MemReqType.FLUSH;
    else
        result.memtype = MemReqType.RETURN_DATA;

    return result;
}
/// <summary>
/// Add a request to the MSHR (Miss Status Holding Registers).
/// A request for an address that already has an in-flight entry is
/// merged (no new entry allocated).
/// </summary>
/// <param name="req_">Processor request to track.</param>
/// <returns>False when MSHR is full; else true.</returns>
public bool add_to_mshr(ProcRequest req_)
{
    // Merge with an existing in-flight entry for the same address.
    // (The original scanned the list twice: Exists() followed by an
    // equivalent for-loop; one predicate check is sufficient.)
    if (MSHR.Exists(x => x.actual_addr == req_.actual_addr && x.block_addr == req_.block_addr))
    {
        if (Config.DEBUG_PROC)
        {
            DEBUG.WriteLine("-- MSHR : Merge Reqs : [" + req_.type + "] [0x" + req_.actual_addr.ToString("X") + "]");
        }
        return true;
    }

    // BUGFIX: was `MSHR.Count > Config.mshr_size`, which let the MSHR grow
    // to mshr_size + 1 entries; use >= as the PIM-side add_to_mshr does.
    if (MSHR.Count >= Config.mshr_size)
    {
        if (Config.DEBUG_PROC)
        {
            DEBUG.WriteLine("-- MSHR : Failed to add Req to MSHR.");
        }
        mshr_stalled++;
        return false;
    }

    mshr_loaded++;
    MSHR.Add(req_);
    if (Config.DEBUG_PROC)
    {
        DEBUG.WriteLine("-- MSHR : New Entry : [" + req_.type + "] [0x" + req_.actual_addr.ToString("X") + "]");
    }
    return true;
}
/// <summary>
/// Queue a request for the L1 cache, charging the L1 hit latency, and
/// park the current instruction in the instruction window as a pending
/// memory operation.
/// </summary>
/// <param name="req_">Processor request to enqueue.</param>
public void add_to_cache(ProcRequest req_)
{
    // The request completes once the hit latency has elapsed.
    req_.ts_departure = cycle + Config.l1cache_hit_latency;
    cache_req_queue.Enqueue(req_);

    curr_ins.is_mem = true;
    curr_ins.ready = false;
    ins_w.add_ins(curr_ins, this.cycle);
}
/// <summary>
/// Fetch the next instruction from the instruction pool and rebuild the
/// current processor request from it.
/// </summary>
public void HandleNewRequest()
{
    curr_ins = get_ins_from_insp();

    var next_req = new ProcRequest();
    next_req.parse_ins(curr_ins);
    curr_req = next_req;
}
/// <summary>
/// Handle the most recent request after an L1 miss: re-insert the current
/// instruction into the instruction window as a pending memory operation.
/// No cache re-check is performed in this variant.
/// </summary>
/// <param name="req_">Processor request being retried.</param>
/// <returns>Always true.</returns>
public bool handle_last_req(ProcRequest req_)
{
    //l1$ encounts miss
    curr_ins.ready = false;
    curr_ins.is_mem = true;
    ins_w.add_ins(curr_ins, this.cycle);
    return true;
}
/// <summary>
/// Queue a request for the cache, charging the shared-cache or L1 hit
/// latency, and park the current instruction in the instruction window.
/// </summary>
/// <param name="req_">Processor request to enqueue.</param>
/// <param name="if_shared">True when the request is served by the shared cache.</param>
public void add_to_cache(ProcRequest req_, bool if_shared = false)
{
    // Charge the latency of whichever cache level serves the request.
    var latency = if_shared ? Config.share_cache_hit_latecy : Config.l1cache_hit_latency;
    req_.ts_departure = cycle + latency;
    cache_req_queue.Enqueue(req_);

    curr_ins.is_mem = true;
    curr_ins.ready = false;

    if (Config.DEBUG_PROC)
    {
        DEBUG.WriteLine("CPU [" + this.pid + "] : Add Reqs to Cache_Queue : [" + req_.type + "] [0x" + req_.actual_addr.ToString("X") + "]");
    }
    ins_w.add_ins(curr_ins, this.cycle);
}
/// <summary>
/// Build a process request from the posted view model and persist it.
/// </summary>
/// <param name="requestViewModel">Incoming BPM request data.</param>
/// <returns>HTTP 200 with body "ok" on completion.</returns>
public async Task<ActionResult> RunRequest([FromBody] BpmRequestViewModel requestViewModel)
{
    var request = new ProcRequest
    {
        Name = requestViewModel.RequestName,
        Discription = requestViewModel.Description, // property name is misspelled on ProcRequest itself
        BpmWorkflowName = requestViewModel.WorkflowName,
        WorkflowParameters = requestViewModel.WorkflowParameters
    };

    await _requestRepo.Update(request);
    return Ok("ok");
}
/// <summary>
/// Try to hand the oldest write-back request to the memory controller.
/// The entry is not removed here; the caller dequeues it on success.
/// </summary>
/// <returns>True when a request was accepted by the memory controller;
/// false when the queue is empty or the controller rejected it.</returns>
public bool handle_writeback_queue()
{
    if (writeback_req.Count <= 0)
        return false;

    ProcRequest head = writeback_req[0];
    if (Config.DEBUG_PIM)
    {
        DEBUG.WriteLine("--PIM Proc : Served WriteBack Reqs : [" + head.type + "] [0x" + head.actual_addr.ToString("X") + "]");
    }
    return mctrl.add_to_mctrl(head);
}
/// <summary>
/// Things ctrl done every cycle: advance the controller clock and try to
/// issue every queued processor request whose controller latency has elapsed.
/// </summary>
public static void Step()
{
    cycle++;
    if (Config.DEBUG_MTRL)
    {
        DEBUG.WriteLine();
        DEBUG.WriteLine("---------PIM Memory Controller [" + id + "] Update [Cycle " + cycle + "]------------");
    }
    for (int i = 0; i < wait_queue.Count(); i++)
    {
        ProcRequest peek = wait_queue[i];
        // Issue only requests that have already waited mc_latency cycles.
        if (peek.cycle + (UInt64)Config.mc_latency <= cycle - 1)
        {
            if (Config.DEBUG_MTRL)
            {
                DEBUG.WriteLine("-- Issue ProcRequest : [" + peek.type + "] [0x" + peek.block_addr.ToString("X") + "] [0x" + peek.actual_addr.ToString("X") + "]");
            }
            // NOTE(review): requests are only dispatched under the SpinLock
            // consistency model; other models never drain wait_queue in this
            // method -- confirm they are handled elsewhere.
            if (PIMConfigs.Consistency_Model == Consistency.SpinLock)
            {
                //if (Config.DEBUG_MTRL)
                //    DEBUG.WriteLine("-- Use Coherence : [" + Config.pim_config.Consistency_Model.ToString() + "]");
                Coherence.spin_lock.setlock(peek.actual_addr);

                //when pim units start to perform, flush all relative data in the host core
                if (!Coherence.flush(peek.block_addr))
                {
                    // Host cores still hold dirty data: release the lock and
                    // retry this request on a later cycle.
                    Coherence.spin_lock.relese_lock(peek.actual_addr);
                    DEBUG.WriteLine("-- Waiting Host cores flushing data : [0x" + peek.block_addr.ToString("X") + "] [0x" + peek.actual_addr.ToString("X") + "]");
                    continue;
                }
                // Route the translated request to the memory that owns this address.
                send_queue[MemorySelector.get_id(wait_queue[i].actual_addr)].Enqueue(transfer(wait_queue[i]));
                wait_queue.RemoveAt(i);
                i--; // compensate for the removal during forward iteration
                if (Config.DEBUG_MTRL)
                {
                    DEBUG.WriteLine("-- Sent ProcRequest : [" + peek.type + "] [0x" + peek.block_addr.ToString("X") + "] [0x" + peek.actual_addr.ToString("X") + "]");
                }
            }
        }
    }
    if (Config.DEBUG_MTRL)
    {
        DEBUG.WriteLine();
    }
}
/// <summary>
/// Drain cache requests whose latency has elapsed: install the block in
/// L1 if absent, release matching MSHR entries and wake dependent
/// instructions in the instruction window.
/// </summary>
public void handle_cache_req()
{
    while (cache_req_queue.Count != 0)
    {
        ProcRequest req = cache_req_queue.Peek();

        // BUGFIX: the original spun forever when the head entry was not due
        // yet (nothing was dequeued, so `Peek()` kept returning the same
        // request and the loop condition never changed). Entries are
        // enqueued in arrival order, so stop at the first pending one.
        if (req.ts_departure > cycle)
            break;

        if (!L1Cache.search_block(req.block_addr, RequestType.READ))
        {
            L1Cache.add(req.block_addr, req.type, pid);
        }
        cache_req_queue.Dequeue();
        MSHR.RemoveAll(x => x.block_addr == req.block_addr);
        ins_w.set_ready(req.block_addr, this.cycle);
    }
}
/// <summary>
/// Flush cacheline: if the line at <paramref name="addr"/> is dirty in the
/// L1 (or shared) cache, emit a FLUSH request and evict the line.
/// </summary>
/// <param name="addr">Block address.</param>
/// <param name="actual">When true, translate addr through the TLB first.</param>
/// <returns>True when nothing needs flushing; false when a flush for this
/// address is already queued or was just issued.</returns>
public bool flush(UInt64 addr, bool actual = false)
{
    // NOTE(review): `address` is computed here but never read below -- every
    // later use refers to `addr` directly. Confirm whether the dirty checks
    // and evictions were meant to use the translated address instead.
    var address = addr;
    if (actual)
    {
        address = tlb.scan_page(addr);
    }
    // Without caches there is never dirty data to write back.
    if (!Config.use_cache)
    {
        return (true);
    }
    // A flush for this address is already in flight.
    if (Coherence.flush_queue.Contains(addr))
    {
        return (false);
    }
    if (L1Cache.ifdirty(addr) || (Config.shared_cache ? shared_cache.ifdirty(addr) : false))
    {
        // Build a FLUSH request for the dirty line.
        ProcRequest item = new ProcRequest();
        item.block_addr = addr;
        item.actual_addr = tlb.scan_page(addr);
        item.cycle = GlobalTimer.tick;
        item.if_mem = true;
        item.pid = this.pid;
        item.type = RequestType.FLUSH;
        item.ready = false;
        if (Config.writeback)
        {
            // Defer: the write-back queue is drained one entry per cycle.
            writeback_req.Add(item);
        }
        else
        {
            add_to_mctrl(item);
        }
        total_flushed++;
        // Evict the line so subsequent accesses miss and refetch.
        L1Cache.remove(addr);
        if (Config.shared_cache)
        {
            shared_cache.remove(addr);
        }
        return (false);
    }
    return (true);
}
/// <summary>
/// Add a request to the MSHR (Miss Status Holding Registers).
/// </summary>
/// <param name="req_">Processor request to track.</param>
/// <returns>False when MSHR is full; else true.</returns>
public bool add_to_mshr(ProcRequest req_)
{
    bool is_full = MSHR.Count >= Config.mshr_size;
    if (is_full)
    {
        if (Config.DEBUG_PIM)
        {
            DEBUG.WriteLine("-- MSHR : Failed to add Req to MSHR.");
        }
        mshr_stalled++;
        return false;
    }

    mshr_loaded++;
    MSHR.Add(req_);
    if (Config.DEBUG_PIM)
    {
        DEBUG.WriteLine("-- MSHR : New Entry : [" + req_.type + "] [0x" + req_.actual_addr.ToString("X") + "]");
    }
    return true;
}
/// <summary>
/// Add a processor request to the memory controller's wait queue.
/// </summary>
/// <param name="req_">Request sent by a processor.</param>
/// <returns>True when the request was appended to wait_queue.</returns>
public static bool add_to_mctrl(ProcRequest req_)
{
    // Same bound as the original `Count > crtl_queue_max - 1`.
    if (wait_queue.Count >= Config.crtl_queue_max)
    {
        if (Config.DEBUG_MTRL)
        {
            DEBUG.WriteLine("-- PIM MTRL : Add requests failed : wait_queue full --[" + req_.type + "] [0x" + req_.actual_addr.ToString("X") + "]");
        }
        add_failed++;
        return false;
    }

    wait_queue.Add(req_);
    if (Config.DEBUG_MTRL)
    {
        DEBUG.WriteLine("--PIM MTRL : Add requests : [" + req_.type + "] [0x" + req_.actual_addr.ToString("X") + "]");
    }
    total_add++;
    return true;
}
/// <summary>
/// Handle last requests: retry a previously stalled request. Probes the
/// shared cache first; on a hit the request is queued as a cache access,
/// otherwise the instruction is parked in the instruction window.
/// </summary>
/// <param name="req_">Processor Requests</param>
/// <returns>return false when l1$ hits </returns>
public bool handle_last_req(ProcRequest req_)
{
    //l1$ encounters miss
    bool hit = false;
    if (Config.use_cache)
    {
        if (Config.shared_cache)
        {
            hit = shared_cache.search_block(req_.block_addr, req_.type);
            if (hit)
            {
                //found data in shared cache
                // NOTE(review): enqueues curr_req rather than the req_ parameter --
                // presumably they refer to the same request at every call site;
                // confirm, otherwise the retried request is silently dropped.
                add_to_cache(curr_req, true);
                return (false);
            }
        }
    }
    // Shared cache missed (or caches disabled): mark the instruction as a
    // pending memory operation and keep it in the instruction window.
    curr_ins.is_mem = true;
    curr_ins.ready = false;
    ins_w.add_ins(curr_ins, this.cycle);
    return (true);
}
/// <summary>
/// Forward a processor request to the attached memory controller.
/// </summary>
/// <param name="req_">Processor request.</param>
/// <returns>True when the controller accepted the request.</returns>
public bool add_to_mctrl(ProcRequest req_)
{
    bool accepted = mctrl.add_to_mctrl(req_);
    return accepted;
}
/// <summary>
/// Process the current memory request: merge into the instruction window
/// on a duplicate block address, serve L1 hits, otherwise allocate MSHR
/// and memory-controller entries. Loops while the per-cycle memory budget
/// (mem_restrict) allows.
/// </summary>
public void handle_current_req()
{
    //if core had processed a memory operation, mem_restrict will be set to 0.
    //the loop will not be executed.
    while (mem_restrict.WaitOne())
    {
        if (ins_w.full())
        {
            return;
        }

        // An in-flight request already covers this block address:
        // just link the instruction to it.
        bool hit = ins_w.if_exist(curr_req.block_addr);
        if (hit)
        {
            bool ready = ins_w.get_readyinfo(curr_req.block_addr);
            ins_w.add_ins(curr_ins, this.cycle);
            ins_w.setLast(ready);
            advance_to_next_request();
            continue;
        }

        if (PIMConfigs.use_l1_cache)
        {
            bool l1_hit = L1Cache.search_block(curr_req.block_addr, curr_req.type);
            if (l1_hit)
            {
                //l1 cache hit
                curr_ins.is_mem = true;
                curr_ins.ready = true;
                ins_w.add_ins(curr_ins, this.cycle);
                advance_to_next_request();
                continue;
            }

            bool mshr = add_to_mshr(curr_req);
            if (!mshr)
            {
                // MSHR full: retry this same request next cycle.
                mshr_retry = true;
                return;
            }
            //l1 miss
        }

        curr_ins.is_mem = true;
        curr_ins.ready = false;
        ins_w.add_ins(curr_ins, this.cycle);

        bool mctrl_ = add_to_mctrl(curr_req);
        if (!mctrl_)
        {
            // Controller queue full: retry this same request next cycle.
            mctrl_retry = true;
            return;
        }

        advance_to_next_request();
    }
}

/// <summary>
/// Fetch the next instruction from the instruction pool and rebuild
/// curr_req from it. (Extracted: the original repeated this five-statement
/// sequence verbatim in three places.)
/// </summary>
private void advance_to_next_request()
{
    curr_ins = null;
    curr_ins = get_ins_from_insp();
    curr_req = null;
    curr_req = new ProcRequest();
    curr_req.parse_ins(curr_ins);
}
/// <summary>
/// One cycle of Core (PIM side): advance the clock, fetch/decode the next
/// instruction, feed the ALU or the memory hierarchy, drain cache and
/// write-back queues, and retry requests stalled on MSHR/MCTRL capacity.
/// </summary>
public override void Step()
{
    cycle++;
    if (Config.DEBUG_PIM)
    {
        DEBUG.WriteLine();
        DEBUG.WriteLine("----------PIM CPU [" + this.pid + "] Update [Cycle " + cycle + "]------------");
        DEBUG.WriteLine();
    }

    //reset all restriction
    reset_restrict();

    //period statics
    if (cycle % Config.pim_static_period == 0 && cycle != 0)
    {
        //static
    }

    //init current request and instruction when cycle 1.
    //otherwise current request and instruction cannot be null.
    if (curr_ins == null || curr_req == null)
    {
        curr_ins = get_ins_from_insp();
        if (!started)
        {
            // Skip leading NOPs until the first real instruction arrives.
            if (curr_ins.type == InstructionType.NOP)
            {
                return;
            }
            else
            {
                started = true;
            }
        }
        if (curr_req == null)
        {
            curr_req = new ProcRequest();
        }
        curr_req.parse_ins(curr_ins);
    }

    if (Config.trace_type == Trace_Type.PC)
    {
        pc++;
        Console.WriteLine(pc.ToString("x"));
        //if (curr_req.type != RequestType.NOP)
        //{
        //    if (pc > curr_req.pc)
        //    {
        //        pc = curr_req.pc;
        //    }
        //}
    }

    if (Config.sim_type == SIM_TYPE.cycle)
    {
        //simulater has reach max sim cysle,exit
        if (cycle > Config.sim_cycle)
        {
            return;
        }
    }

    if (Config.trace_type != Trace_Type.PC)
    {
        //if no memory operation, insert ins to ALU
        if (!curr_ins.is_mem)
        {
            //current instruction is an alg ins or NOP
            while (cal_restrict.WaitOne())
            {
                if (curr_ins.type == InstructionType.NOP)
                {
                    continue;
                }
                else
                {
                    alu.add_ins(curr_ins);
                }
            }
        }
        alu.Step();
    }

    if (PIMConfigs.use_l1_cache)
    {
        handle_cache_req();
    }

    update_ins_w();

    if (outstanding_requests())
    {
        memory_cycle++;
    }

    if (Config.writeback)
    {
        //handle write-back queue
        if (writeback_req.Count > 0)
        {
            //each step handles only one write-back req
            bool res = handle_writeback_queue();
            if (res)
            {
                writeback_req.RemoveAt(0);
            }
            res = write_b_stall();
            if (res)
            {
                //too many writeback req to be handled
                return;
            }
        }
    }

    // if MSHR or MCTRL queue are full last cycyle , system has to process last request
    bool prcessed = false;
    if (mshr_retry || mctrl_retry)
    {
        if (ins_w.full())
        {
            if (Config.DEBUG_PROC)
            {
                DEBUG.WriteLine("-- InsWd : Queue Full.");
            }
            return;
        }
        //mshr/mctrl stall
        prcessed = handle_last();
        if (!prcessed)
        {
            return;
        }
        //reissue success
        prcessed = true;
        curr_ins = null;
        curr_ins = get_ins_from_insp();
        curr_req = null;
        curr_req = new ProcRequest();
        curr_req.parse_ins(curr_ins);
    }

    if (curr_req.if_mem)
    {
        handle_current_req();
    }
    else
    {
        // Non-memory instruction: immediately advance to the next one.
        curr_ins = null;
        curr_ins = get_ins_from_insp();
        curr_req = null;
        curr_req = new ProcRequest();
        curr_req.parse_ins(curr_ins);
    }
}
/// <summary>
/// Account 64 bits of transferred traffic, then forward a processor
/// request to the attached memory controller.
/// </summary>
/// <param name="req_">Processor request.</param>
/// <returns>True when the controller accepted the request.</returns>
public bool add_to_mctrl(ProcRequest req_)
{
    bandwidth_bit += 64;
    bool accepted = mctrl.add_to_mctrl(req_);
    return accepted;
}
/// <summary>
/// Drop the in-flight instruction and its request.
/// </summary>
public void Clear()
{
    curr_req = null;
    curr_ins = null;
}
/// <summary>
/// One cycle of Core (host side): advance the clock, fetch/decode the next
/// instruction, feed the ALU or the memory hierarchy, drain cache and
/// write-back queues, and retry requests stalled on MSHR/MCTRL capacity.
/// </summary>
public override void Step()
{
    cycle++;
    if (Config.DEBUG_PROC)
    {
        DEBUG.WriteLine();
        DEBUG.WriteLine("----------Host CPU [" + this.pid + "] Update [Cycle " + cycle + "]------------");
        DEBUG.WriteLine();
    }

    //if (Config.trace_type == Trace_Type.PC)
    //{
    //    if (curr_req != null && curr_req.pc > 0 && pc == 0)
    //        pc = curr_req.pc;
    //    else
    //    {
    //        get_ins_from_insp();
    //        return;
    //    }
    //}

    /**
     * Free all the restricts to enable a new round of CPU cycles.
     **/
    reset_restrict(); //reset all restriction

    //period statics
    if (cycle % Config.proc_static_period == 0 && cycle != 0)
    {
        //static
    }

    //init current request and instruction when cycle 1.
    //otherwise current request and instruction cannot be null.
    if (curr_ins == null || curr_req == null)
    {
        curr_ins = get_ins_from_insp();
        if (curr_req == null)
        {
            curr_req = new ProcRequest();
        }
        curr_req.parse_ins(curr_ins);
    }

    if (Config.trace_type == Trace_Type.PC)
    {
        pc++;
        Console.WriteLine(pc.ToString("x"));
        //if (curr_req.type != RequestType.NOP)
        //{
        //    if (pc > curr_req.pc)
        //    {
        //        pc = curr_req.pc;
        //    }
        //}
    }

    if (Config.sim_type == SIM_TYPE.cycle)
    {
        //simulater has reach max sim cysle,exit
        if (cycle > Config.sim_cycle)
        {
            return;
        }
    }

    if (Config.trace_type != Trace_Type.PC)
    {
        /**
         * In PC trace mode, CPU only simulates cache and memory
         * behaviours. ALU should be disabled due to the lack of
         * detailed instruction information. Because that the trace
         * file is fetched by physical mechines, which provide
         * the correctness of execution. PIMSim just needs to
         * send memory or cache requests at exact time.
         **/
        if (!curr_ins.is_mem)
        {
            //current instruction is an alg ins or NOP
            while (cal_restrict.WaitOne())
            {
                if (curr_ins.type == InstructionType.NOP)
                {
                    continue;
                }
                else
                {
                    alu.add_ins(curr_ins);
                }
            }
        }
        alu.Step();
    }

    if (Config.use_cache)
    {
        handle_cache_req();
    }

    update_ins_w();

    if (outstanding_requests())
    {
        memory_cycle++;
    }

    if (Config.writeback)
    {
        //handle write-back queue
        if (writeback_req.Count > 0)
        {
            //each step handles only one write-back req
            bool res = handle_writeback_queue();
            if (res)
            {
                writeback_req.RemoveAt(0);
            }
            res = write_b_stall();
            if (res)
            {
                //too many writeback req to be handled
                return;
            }
        }
    }

    // If MSHR or MCTRL queues were full last cycle, the stalled request
    // has to be retried before fetching a new one.
    bool prcessed = false;
    if (mshr_retry || mctrl_retry)
    {
        if (ins_w.full())
        {
            if (Config.DEBUG_PROC)
            {
                DEBUG.WriteLine("-- InsWd : Queue Full.");
            }
            return;
        }
        //mshr/mctrl stall
        prcessed = handle_last();
        if (!prcessed)
        {
            return;
        }
        // Consume one memory slot for the reissued request, then fetch anew.
        mem_restrict.WaitOne();
        HandleNewRequest();
    }

    if (curr_req.if_mem)
    {
        handle_current_req();
    }
    else
    {
        HandleNewRequest();
    }
}