Code Example #1
File: Proc.cs Project: sambhavk99/RamulatorSharp
        public void recv_wb_req(Req req)
        {
            //stats
            Stat.procs[pid].write_req_served.collect();
            Stat.procs[pid].write_avg_latency.collect(req.Latency);

            //destroy req
            RequestPool.Enpool(req);
        }
Code Example #2
File: Proc.cs Project: sambhavk99/RamulatorSharp
        // Callback function when a memory request is complete. This retires instructions or inserts data back into caches.
        public void recv_req(Req req)
        {
            // Install the rest of the words in the cacheline
            bool cw_contains_write = false;

            //stats
            if (!req.CpyGenReq)
            {
                Stat.procs[pid].read_req_served.collect();
                Stat.procs[pid].read_avg_latency.collect(req.Latency);
            }

            // Handle a read-write request (a write that was converted to a read for write-allocate)
            if (req.RdWr)
            {
                Dbg.Assert(read_write_q.Contains(req.BlockAddr));
                read_write_q.Remove(req.BlockAddr);
            }

            //free up instruction window and mshr
            bool contains_write = inst_wnd.set_ready(req.BlockAddr);

            contains_write |= cw_contains_write;
            mshr.RemoveAll(x => x == req.BlockAddr);

            Req wb_req = null;

            // Install cachelines and handle dirty block evictions
            if (Config.proc.cache_enabled)
            {
                cache_handler(req, contains_write);
            }
            else
            {
                Dbg.AssertPrint(!contains_write, "Inst window contains write requests.");
                // Writeback based on the cache-filtered traces
                wb_req = req.WbReq;
                if (wb_req != null)
                {
                    bool wb_merge = wb_q.Exists(x => x.BlockAddr == wb_req.BlockAddr);
                    if (!wb_merge)
                    {
                        addWB(wb_req);
                    }
                    else
                    {
                        RequestPool.Enpool(wb_req);
                    }
                }
            }

            //destroy req
            RequestPool.Enpool(req);
            out_read_req--;
        }
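
The MSHR bookkeeping above (mshr.RemoveAll(x => x == req.BlockAddr)) treats the MSHR as a simple list of outstanding block addresses that is drained when a request completes. Below is a minimal, self-contained sketch of that pattern; the class name, capacity, and Main driver are illustrative stand-ins, not RamulatorSharp's actual types.

using System;
using System.Collections.Generic;

// Minimal sketch of list-based MSHR bookkeeping; all names are
// illustrative stand-ins, not RamulatorSharp's actual classes.
class MshrSketch
{
    private readonly List<ulong> mshr = new List<ulong>();
    private readonly int capacity;

    public MshrSketch(int capacity) { this.capacity = capacity; }

    // Returns false when the MSHR is full, which would stall issue.
    public bool TryInsert(ulong blockAddr)
    {
        if (mshr.Count >= capacity) return false;
        mshr.Add(blockAddr);
        return true;
    }

    // On completion, every entry for the block is retired at once,
    // matching mshr.RemoveAll(x => x == req.BlockAddr) above.
    public void Complete(ulong blockAddr)
    {
        mshr.RemoveAll(x => x == blockAddr);
    }

    public static void Main()
    {
        var m = new MshrSketch(2);
        Console.WriteLine(m.TryInsert(0x40)); // True
        Console.WriteLine(m.TryInsert(0x40)); // True (same block, second entry)
        Console.WriteLine(m.TryInsert(0x80)); // False: full, issue would stall
        m.Complete(0x40);                     // both 0x40 entries retired
        Console.WriteLine(m.TryInsert(0x80)); // True
    }
}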
Code Example #3
File: Proc.cs Project: sambhavk99/RamulatorSharp
        // A null upper_c means c is an L1 cache; otherwise c is the L2 and upper_c is the L1
        public void service_cache_hit_queue(Cache c, Cache upper_c = null)
        {
            LinkedList <Req> hit_queue = c.get_hit_queue(pid);

            while (hit_queue.Count != 0)
            {
                Req req     = hit_queue.First.Value;
                int hit_pid = req.Pid;
                Dbg.Assert(hit_pid == pid);
                if ((ulong)req.TsDeparture <= cycles)
                {
                    // Hit in L2 and move L2 $line to L1
                    if (upper_c != null)
                    {
                        Cache l1c = upper_c;
                        Dbg.AssertPrint(!l1c.in_cache(req.BlockAddr),
                                        "$line from an L2 hit shouldn't be in L1.");
                        ulong l1c_wb_addr = l1c.cache_add(req.BlockAddr, req.Type, hit_pid);
                        // Dirty $line eviction from L1, check L2 first.
                        if (l1c_wb_addr != NULL_ADDRESS)
                        {
                            // Miss in L2
                            if (!c.is_cache_hit(l1c_wb_addr, ReqType.WRITE))
                            {
                                // Another potential wb from L2
                                ulong l2c_wb_addr = c.cache_add(l1c_wb_addr, ReqType.WRITE, hit_pid);
                                if (l2c_wb_addr != NULL_ADDRESS)
                                {
                                    gen_cache_wb_req(l2c_wb_addr);
                                }
                            }
                        }
                        Stat.procs[pid].l2_cache_hit_avg_latency.collect((int)(cycles - (ulong)req.TsArrival));
                    }
                    else
                    {
                        Stat.procs[pid].l1_cache_hit_avg_latency.collect((int)(cycles - (ulong)req.TsArrival));
                    }

                    // Retire the hit: remove it from the hit queue, mark the block ready, and recycle the request
                    hit_queue.RemoveFirst();
                    inst_wnd.set_ready(req.BlockAddr);
                    RequestPool.Enpool(req);
                }
                else
                {
                    return;
                }
            }
        }
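
The L2-hit path above chains up to two evictions: installing the hit into L1 may evict a dirty L1 line, which is installed into L2 if it misses there, and that in turn may evict a dirty L2 line that becomes a memory writeback. A condensed sketch of that chain follows, with hypothetical delegates and an assumed NULL_ADDRESS sentinel standing in for the real cache interface.

using System;

// Condensed sketch of the L2-hit promotion and eviction chain.
// The delegates and the NULL_ADDRESS value are assumptions, not the
// project's actual cache API.
static class L2HitSketch
{
    const ulong NULL_ADDRESS = ulong.MaxValue; // assumed "no eviction" sentinel

    public static void Promote(ulong blockAddr,
                               Func<ulong, ulong> l1Add,      // install into L1, return evicted dirty block or sentinel
                               Func<ulong, bool> l2Hit,
                               Func<ulong, ulong> l2Add,      // install into L2, return evicted dirty block or sentinel
                               Action<ulong> genMemWriteback)
    {
        ulong l1Victim = l1Add(blockAddr);        // move the L2 hit up into L1
        if (l1Victim == NULL_ADDRESS) return;     // no dirty L1 eviction
        if (l2Hit(l1Victim)) return;              // victim already resides in L2
        ulong l2Victim = l2Add(l1Victim);         // write the L1 victim back into L2
        if (l2Victim != NULL_ADDRESS)
            genMemWriteback(l2Victim);            // dirty L2 eviction goes to memory
    }
}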
Code Example #4
File: Proc.cs Project: sambhavk99/RamulatorSharp
        public void recv_copy_req(Req req)
        {
            //stats
            Stat.procs[pid].copy_req_served.collect();
            Stat.procs[pid].copy_avg_latency.collect(req.Latency);

            //free up instruction window and mshr
            bool contains_write = inst_wnd.set_ready(req.BlockAddr, true);

            mshr.RemoveAll(x => x == req.BlockAddr);
            Dbg.AssertPrint(!contains_write, "Inst window contains write requests. COPY is not supported in cache mode.");
            Dbg.Assert(req.WbReq == null);

            //destroy req
            RequestPool.Enpool(req);
        }
Code Example #5
File: Proc.cs Project: sambhavk99/RamulatorSharp
        // Generate a new writeback request to memory from an L2 dirty block eviction
        public void gen_cache_wb_req(ulong wb_addr)
        {
            Req wb_req = RequestPool.Depool();

            wb_req.Set(pid, ReqType.WRITE, wb_addr);
            bool wb_merge = wb_q.Exists(x => x.BlockAddr == wb_req.BlockAddr);

            if (!wb_merge)
            {
                addWB(wb_req);
            }
            else
            {
                RequestPool.Enpool(wb_req);
            }
        }
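
Both gen_cache_wb_req and the no-cache path in recv_req use the same merge rule: a new writeback is enqueued only when no pending writeback in wb_q already targets the same block; otherwise the request is returned to the pool. A stand-alone illustration follows, with a hypothetical WbEntry type standing in for the project's Req.

using System;
using System.Collections.Generic;

// Stand-alone illustration of the writeback-merge check; WbEntry is a
// hypothetical stand-in for the project's Req type.
class WbMergeSketch
{
    class WbEntry { public ulong BlockAddr; }

    private readonly List<WbEntry> wb_q = new List<WbEntry>();

    // Enqueue a writeback only if no pending writeback targets the block;
    // returning false means "merged" and the caller recycles its request.
    public bool AddWriteback(ulong blockAddr)
    {
        bool merge = wb_q.Exists(x => x.BlockAddr == blockAddr);
        if (!merge)
            wb_q.Add(new WbEntry { BlockAddr = blockAddr });
        return !merge;
    }

    public static void Main()
    {
        var q = new WbMergeSketch();
        Console.WriteLine(q.AddWriteback(0x100)); // True: enqueued
        Console.WriteLine(q.AddWriteback(0x100)); // False: merged with pending WB
    }
}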
Code Example #6
File: Proc.cs Project: sambhavk99/RamulatorSharp
        public void issue_insts(bool issued_rd_req)
        {
            //issue instructions
            for (int i = 0; i < Config.proc.ipc; i++)
            {
                Dbg.Assert(curr_rd_req != null);
                if (curr_rd_req == null)
                {
                    return;
                }

                // Stats
                if (inst_wnd.is_full())
                {
                    if (i == 0)
                    {
                        Stat.procs[pid].stall_inst_wnd.collect();
                        consec_stalled++;
                    }
                    return;
                }

                //cpu instructions
                if (curr_cpu_inst_cnt > 0)
                {
                    curr_cpu_inst_cnt--;
                    inst_wnd.add(0, false, true, 0); // word oblivious
                    continue;
                }

                //only one memory instruction can be issued per cycle
                if (issued_rd_req)
                {
                    return;
                }

                // Ideal memory
                if (Config.proc.ideal_memory)
                {
                    Dbg.AssertPrint(!Config.proc.cache_enabled, "Cache is not supported in ideal memory mode.");
                    if (curr_rd_req.WbReq != null)
                    {
                        RequestPool.Enpool(curr_rd_req.WbReq);
                    }
                    RequestPool.Enpool(curr_rd_req);
                    curr_rd_req = get_req();
                    return;
                }

                // Mark whether the instruction is a write (in cache mode) or a COPY
                inst_wnd.add(curr_rd_req.BlockAddr, true, false, curr_rd_req.WordOffset,
                             (curr_rd_req.Type == ReqType.WRITE) && Config.proc.cache_enabled, curr_rd_req.Type == ReqType.COPY);

                // Check for a false miss: the block address is already in flight for another instruction
                bool false_miss = inst_wnd.is_duplicate(curr_rd_req.BlockAddr);
                // COPY is a special instruction, so we don't care whether its address duplicates another instruction's
                if (false_miss && Config.proc.issue_on_dup_req && curr_rd_req.Type != ReqType.COPY)
                {
                    Dbg.Assert(curr_rd_req.WbReq == null);
                    RequestPool.Enpool(curr_rd_req);
                    curr_rd_req = get_req();
                    continue;
                }

                // STATS
                collect_inst_stats();

                // Caches
                if (Config.proc.cache_enabled && curr_rd_req.Type != ReqType.COPY)
                {
                    // Check the in-flight read_write_q.
                    // A duplicate write to an in-flight block can simply be dropped.
                    bool in_rd_wr_q = read_write_q.Contains(curr_rd_req.BlockAddr);
                    // L1
                    if (l1c.is_cache_hit(curr_rd_req.BlockAddr, curr_rd_req.Type))
                    {
                        Dbg.AssertPrint(!in_rd_wr_q, "Both in rd_wr_q and L1 cache baddr=" + curr_rd_req.BlockAddr);
                        // HIT: Add to l1 cache hit queue to model the latency
                        add_cache_hit_queue(l1c, curr_rd_req);
                        curr_rd_req   = get_req();
                        issued_rd_req = true;
                        continue;
                    }
                    // L2
                    if (l2c.is_cache_hit(curr_rd_req.BlockAddr, curr_rd_req.Type))
                    {
                        Dbg.Assert(!in_rd_wr_q);
                        // HIT: Add to l2 cache hit queue to model the latency,
                        // add to l1 cache after it is served from the hit queue
                        add_cache_hit_queue(l2c, curr_rd_req);
                        curr_rd_req   = get_req();
                        issued_rd_req = true;
                        continue;
                    }
                    if (in_rd_wr_q)
                    {
                        if (curr_rd_req.Type == ReqType.WRITE)
                        {
                            inst_wnd.set_ready(curr_rd_req.BlockAddr);
                        }
                        RequestPool.Enpool(curr_rd_req);
                        curr_rd_req   = get_req();
                        issued_rd_req = true;
                        continue;
                    }
                    // If write-allocate: 1. make sure subsequent read requests
                    // detect this read request generated from the write, and
                    // 2. don't stall the instruction window.
                    // Convert it into a read request; on receiving the
                    // response, install the line into the cache and mark it dirty.
                    if (curr_rd_req.Type == ReqType.WRITE)
                    {
                        convert_to_read_write(ref curr_rd_req);
                    }
                }

                // **** GO TO MEMORY ****
                //try mshr
                bool mshr_ok = insert_mshr(curr_rd_req);
                if (!mshr_ok)
                {
                    mshr_retry = true;
                    return;
                }

                //try memory controller
                bool mctrl_ok = insert_mctrl(curr_rd_req);
                if (!mctrl_ok)
                {
                    mctrl_retry = true;
                    return;
                }

                //issued memory request
                issued_rd_req = true;

                //get new read request
                curr_rd_req = get_req();
            }
        }
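
For reference, the per-slot decision order inside the issue loop above can be summarized as the outcomes below, checked roughly top to bottom. The enum is purely illustrative; the actual method mutates processor state rather than returning a value, and none of these names appear in RamulatorSharp.

// Illustrative summary of the per-slot decision order in issue_insts;
// the names are not part of RamulatorSharp.
enum IssueOutcome
{
    StallWindowFull,     // instruction window full: stop issuing this cycle
    RetireCpuInst,       // non-memory instruction: consume it and continue
    StopOneMemPerCycle,  // a memory request was already issued this cycle
    CompleteIdealMemory, // ideal-memory mode: the request completes immediately
    DropDuplicate,       // false miss: the block is already in flight
    ServeFromL1,         // L1 hit: latency modeled via the L1 hit queue
    ServeFromL2,         // L2 hit: latency modeled, line then installed into L1
    DropInFlight,        // already in read_write_q: piggyback on that request
    StallMshrFull,       // MSHR insert failed: retry next cycle
    StallMctrlFull,      // memory controller rejected the request: retry next cycle
    IssueToMemory        // request sent to memory; fetch the next trace entry
}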