Example #1
        public void get_req(ref int cpu_inst_cnt, out Req rd_req, out Req wb_req)
        {
            try
            {
                ulong rd_addr = binary_reader.ReadUInt64();   // bit 63 encodes read (0) vs write (1)
                cpu_inst_cnt = binary_reader.ReadInt32();

                // the top bit of the trace address selects the processor-side request type
                ReqType req_type      = ReqType.RD;
                ReqType proc_req_type = (rd_addr >> 63 != 1) ? ReqType.RD : ReqType.WR;

                // tag the upper address bits with the process id
                rd_addr = rd_addr | (((ulong)pid) << 56);


                rd_req = RequestPool.depool();
                rd_req.set(pid, req_type, proc_req_type, rd_addr);
                wb_req = null;
            }
            catch (EndOfStreamException)
            {
                // end of trace: reopen the gzip stream and replay from the beginning
                gzip_reader   = new GZipInputStream(File.OpenRead(trace_fname));
                binary_reader = new BinaryReader(gzip_reader);

                ulong rd_addr = binary_reader.ReadUInt64();
                cpu_inst_cnt = binary_reader.ReadInt32();

                ReqType req_type      = ReqType.RD;
                ReqType proc_req_type = (rd_addr >> 63 != 1) ? ReqType.RD : ReqType.WR;

                rd_addr = rd_addr | (((ulong)pid) << 56);


                rd_req = RequestPool.depool();
                rd_req.set(pid, req_type, proc_req_type, rd_addr);
                wb_req = null;
            }
        }
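Both branches of get_req above decode a trace record identically; only the stream reopening differs. A minimal refactoring sketch, assuming the same binary_reader, pid, and RequestPool members (parse_record is a hypothetical helper name, not part of the original source):

        private Req parse_record(out int cpu_inst_cnt)
        {
            ulong rd_addr = binary_reader.ReadUInt64();   // bit 63 encodes read (0) vs write (1)
            cpu_inst_cnt = binary_reader.ReadInt32();

            ReqType proc_req_type = (rd_addr >> 63 != 1) ? ReqType.RD : ReqType.WR;
            rd_addr |= ((ulong)pid) << 56;                // tag the upper bits with the process id

            Req rd_req = RequestPool.depool();
            rd_req.set(pid, ReqType.RD, proc_req_type, rd_addr);
            return rd_req;
        }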
Example #2
        public void get_req(ref int cpu_inst_cnt, out Req rd_req, out Req wb_req)
        {
            // trace line format: "<cpu_inst_cnt> <rd_addr> [<wb_addr>]"
            string line = read_trace();

            Char[]   delim  = new Char[] { ' ' };
            string[] tokens = line.Split(delim);

            cpu_inst_cnt          = int.Parse(tokens[0]);
            total_cpu_inst_count += (ulong)cpu_inst_cnt;
            ulong rd_addr = ulong.Parse(tokens[1]);

            rd_addr = rd_addr | (((ulong)pid) << 56);

            rd_req = RequestPool.depool();
            rd_req.set(pid, ReqType.RD, rd_addr);

            // no writeback address in this record, or writebacks disabled
            if (!Config.proc.wb || tokens.Length == 2)
            {
                wb_req = null;
                return;
            }

            Dbg.Assert(tokens.Length == 3);
            ulong wb_addr = ulong.Parse(tokens[2]);

            wb_addr = wb_addr | (((ulong)pid) << 56);
            wb_req  = RequestPool.depool();
            wb_req.set(pid, ReqType.WR, wb_addr);
        }
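read_trace() is assumed to return one whitespace-delimited record per call, with two or three tokens: an instruction count, a read address, and an optional writeback address (honored only when Config.proc.wb is set). Two hypothetical records in that format, for illustration only:

        14 1099511627776
        3 1099511631872 1099511627776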
Example #3
        public override bool displace(Req req)
        {
            // get the set index and the block offset within the page
            int temp  = set_hash(req.block_addr);
            int temp1 = set_hash_block(req.block_addr);

            // search for the entry
            for (int n = 0; n < ways; n++)
            {
                if (data[temp][n].valid && data[temp][n].block_valid[temp1] && data[temp][n].addr == (req.block_addr >> Config.proc.page_block_diff_bits))
                {
                    // displace and write back if necessary
                    data[temp][n].valid = false;

                    Sim.Dram_Utilization_size = Sim.Dram_Utilization_size - (ulong)Config.proc.page_block_diff;

                    for (int block_id = 0; block_id < Config.proc.page_block_diff; block_id++)
                    {
                        data[temp][n].block_valid[block_id] = false;
                    }


                    for (int block_id = 0; block_id < Config.proc.page_block_diff; block_id++)
                    {
                        if (data[temp][n].block_dirty[block_id])
                        {
                            Req req_insert2 = new Req();
                            // new dram mapping: the DRAM address is derived from the (way, set) position
                            req_insert2.set(data[temp][n].pid, ReqType.RD, (ulong)((n * sets + temp) << Config.proc.page_size_bits) + (ulong)(block_id << Config.proc.block_size_bits), true);
                            req_insert2.ts_arrival       = cycles;
                            req_insert2.migrated_request = true;
                            reqs.Enqueue(req_insert2);

                            // write the dirty block back to its home address
                            Req wb_req = RequestPool.depool();
                            wb_req.set(data[temp][n].pid, ReqType.WR, (data[temp][n].addr << Config.proc.page_size_bits) + (ulong)(block_id * Config.proc.block_size), true);
                            wb_req.cache_wb         = true;
                            wb_req.migrated_request = true;
                            wbs.Enqueue(wb_req);
                        }
                    }

                    return(true);
                }
            }

            return(false);
        }
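Under the "new dram mapping" comments, the DRAM-side address of a cached block is reconstructed from its cache coordinates rather than from the stored tag. The same computation as a standalone helper (a sketch; dram_addr is a hypothetical name, while sets, Config.proc.page_size_bits, and Config.proc.block_size_bits are the fields used above):

        private ulong dram_addr(int way, int set, int block_id)
        {
            // page index = way-major position in the cache, scaled to a page-aligned address
            ulong page  = (ulong)(way * sets + set) << Config.proc.page_size_bits;
            ulong block = (ulong)block_id << Config.proc.block_size_bits;
            return page + block;
        }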
Example #4
        public void service_mem_queue()
        {
            // retire, in arrival order, every request whose departure time has passed
            while (mem_queue.Count != 0)
            {
                Req first_request = mem_queue.First.Value;
                if ((ulong)first_request.ts_departure <= cycles)
                {
                    Stat.procs[pid].read_req_served.Collect();
                    Stat.procs[pid].read_avg_latency.Collect(first_request.latency);
                    ulong wb_addr = Proc.NULL_ADDRESS;

                    // on a miss, fill the L2 and mirror the fill into the L1, then
                    // invalidate the L1 copy of whatever the L2 evicted
                    if (!cache.has_addr(first_request.block_addr, first_request.proc_req_type))
                    {
                        wb_addr = cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
                        if (!l1_cache.has_addr(first_request.block_addr, first_request.type))
                        {
                            l1_cache.cache_add(first_request.block_addr, first_request.type, (ulong)pid);
                        }
                        l1_cache.cache_remove(wb_addr, ReqType.RD);
                    }
                    if (Config.proc.wb == false)
                    {
                        wb_addr = Proc.NULL_ADDRESS;
                    }
                    if (wb_addr != Proc.NULL_ADDRESS)
                    {
                        Req wb_req = RequestPool.depool();
                        wb_req.set(pid, ReqType.WR, ReqType.NULL, wb_addr);
                        // merge with an already-pending writeback to the same block, if any
                        bool wb_merge = wb_q.Exists(x => x.block_addr == wb_req.block_addr);
                        if (!wb_merge)
                        {
                            wb_q.Add(wb_req);
                        }
                        else
                        {
                            RequestPool.enpool(wb_req);
                        }
                    }
                    mem_queue.RemoveFirst();
                    RequestPool.enpool(first_request);
                    inst_wnd.set_ready(first_request.block_addr);
                }
                else
                {
                    return;
                }
            }
        }
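The writeback path above only queues a WR request when no entry for the same block is already pending; otherwise the freshly depooled request goes straight back to the pool. That merge test as a helper (a sketch; enqueue_wb is a hypothetical name, assuming wb_q is the same list used above):

        private void enqueue_wb(ulong wb_addr)
        {
            Req wb_req = RequestPool.depool();
            wb_req.set(pid, ReqType.WR, ReqType.NULL, wb_addr);
            if (wb_q.Exists(x => x.block_addr == wb_req.block_addr))
                RequestPool.enpool(wb_req);   // an identical writeback is already pending; merge
            else
                wb_q.Add(wb_req);
        }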
Example #5
        public void recv_req(Req req)
        {
            //stats
            Stat.procs[pid].read_req_served.Collect();
            Stat.procs[pid].read_avg_latency.Collect(req.latency);
            total_read_latency += (ulong)req.latency;

            Req   first_request = req;
            ulong wb_addr       = Proc.NULL_ADDRESS;

            //free up instruction window and mshr
            inst_wnd.set_ready(req.block_addr);
            mshr.RemoveAll(x => x == req.block_addr);

            if (!cache.has_addr(first_request.block_addr, ReqType.RD))
            {
                wb_addr = cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
                if (!l1_cache.has_addr(first_request.block_addr, first_request.type))
                {
                    l1_cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
                }
                l1_cache.cache_remove(wb_addr, ReqType.RD);
            }

            //cache_add above returned the evicted block's address, or NULL_ADDRESS if an
            //empty way was filled; an evicted block generates a writeback request below,
            //unless writebacks are disabled in the config
            if (Config.proc.wb == false)
            {
                wb_addr = Proc.NULL_ADDRESS;
            }
            if (wb_addr != Proc.NULL_ADDRESS)
            {
                Req wb_req = RequestPool.depool();
                wb_req.set(pid, ReqType.WR, ReqType.NULL, wb_addr);
                bool wb_merge = wb_q.Exists(x => x.block_addr == wb_req.block_addr);
                if (!wb_merge)
                {
                    wb_q.Add(wb_req);
                }
                else
                {
                    RequestPool.enpool(wb_req);
                }
            }

            //destroy req
            RequestPool.enpool(req);
            out_read_req--;
        }
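A sketch of the fill path used above, under the assumed contract that cache_add returns the evicted block's address, or Proc.NULL_ADDRESS when an empty way was filled (fill_caches is a hypothetical name; the members are the ones used above):

        private void fill_caches(Req req)
        {
            ulong evicted = cache.cache_add(req.block_addr, req.proc_req_type, (ulong)pid);
            if (!l1_cache.has_addr(req.block_addr, req.type))
                l1_cache.cache_add(req.block_addr, req.proc_req_type, (ulong)pid);
            if (evicted != Proc.NULL_ADDRESS)
                l1_cache.cache_remove(evicted, ReqType.RD);   // keep the L1 consistent with the L2 eviction
        }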
Example #6
        public static void migrate()
        {
            ulong key       = migrationlist[0];                               // page to migrate
            ulong paddr     = key * RowStat.page_size;                        // its base physical address
            ulong units     = (ulong)1 << ((int)Config.proc.block_size_bits); // bytes per block
            ulong num_block = (ulong)1 << RowStat.page_bits;                  // blocks per page

            Req req1 = new Req();
            int pid1 = migrationlistPID[0];

            req1.set(pid1, ReqType.RD, paddr + current_block * units, true);
            req1.ts_arrival       = Cycles;
            req1.ts_departure     = Cycles;
            req1.migrated_request = true;

            // enqueue the read half of the migration; if the queue is full, bail out and
            // retry the same block on a later call (current_block has not advanced)
            if (!Sim.mctrls[Sim.get_mctrl(pid1)][req1.addr.cid].is_q_full(pid1, req1.type, req1.addr.rid, req1.addr.bid))
            {
                Sim.mctrls[Sim.get_mctrl(pid1)][req1.addr.cid].enqueue_req(req1);
            }
            else
            {
                return;
            }

            Req req2 = new Req();

            req2.set(pid1, ReqType.WR, paddr + current_block * units, true);
            req2.ts_arrival       = Cycles;
            req2.ts_departure     = Cycles;
            req2.migrated_request = true;
            Sim.caches[0].insert(req2);
            current_block += 1;

            // whole page migrated: reset progress and retire the migration entry
            if (current_block == num_block)
            {
                current_block = 0;
                migrationlist.RemoveAt(0);
                migrationlistPID.RemoveAt(0);
                RowCache.NVMCache.evict(key);
            }
        }
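migrate() moves at most one block per invocation and keeps its progress in current_block, so a caller is expected to invoke it repeatedly until the migration list drains. A hypothetical per-cycle hook under that assumption (tick_migration is not part of the original source):

        public static void tick_migration()
        {
            // moves at most one block; a full memory-controller queue is simply
            // retried on the next cycle, since migrate() returns without advancing
            if (migrationlist.Count > 0)
                migrate();
        }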
Example #7
        public void recv_req(Req req)
        {
            //stats
            Stat.procs[pid].read_req_served.Collect();
            Stat.procs[pid].read_avg_latency.Collect(req.latency);
            total_read_latency += (ulong)req.latency;
            if (pid == Sim.highest_rank_proc)
            {
                high_priority_total_misses += 1;
            }


            inflight_mem_requests--;
            Req   first_request = req;
            ulong wb_addr       = Proc.NULL_ADDRESS;

            //free up instruction window and mshr
            inst_wnd.set_ready(req.block_addr);
            mshr.RemoveAll(x => x.block_addr == req.block_addr);
            // FST: the flagged request has completed, so clear its interference bit
            if (Config.fst)
            {
                if ((req.block_addr == interference_bit_set_addr) && (interference_bit == true))
                {
                    interference_bit      = false;
                    interference_bit_core = Config.N;
                }
            }

            if (!cache.has_addr(first_request.block_addr, ReqType.RD))
            {
                wb_addr = cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
                if (!l1_cache.has_addr(first_request.block_addr, ReqType.RD) && !first_request.is_prefetch)
                {
                    l1_cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
                }
                l1_cache.cache_remove(wb_addr, ReqType.RD);
            }

            if (Config.aux_cache)
            {
                if (!aux_cache.has_addr(first_request.block_addr, ReqType.RD))
                {
                    aux_cache.cache_add(first_request.block_addr, first_request.proc_req_type, (ulong)pid);
                }
            }

            if (Config.proc.wb == false)
            {
                wb_addr = Proc.NULL_ADDRESS;
            }

            if (wb_addr != Proc.NULL_ADDRESS)
            {
                Req wb_req = RequestPool.depool();
                wb_req.set(pid, ReqType.WR, ReqType.NULL, wb_addr);
                bool wb_merge = wb_q.Exists(x => x.block_addr == wb_req.block_addr);
                if (!wb_merge)
                {
                    wb_q.Add(wb_req);
                }
                else
                {
                    RequestPool.enpool(wb_req);
                }
            }

            //destroy req
            RequestPool.enpool(req);
            out_read_req--;
        }
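The Config.fst block above clears a single tracked interference bit when the flagged address completes. A sketch of the matching set side, mirroring those fields (flag_interference and its interfering_core parameter are hypothetical; the original setter is not shown here):

        public void flag_interference(ulong block_addr, int interfering_core)
        {
            // track at most one flagged request at a time
            if (!Config.fst || interference_bit)
                return;
            interference_bit          = true;
            interference_bit_set_addr = block_addr;
            interference_bit_core     = interfering_core;
        }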
Example #8
        public override bool insert(Req req)
        {
            if (is_cached(req))
            {
                return(false);
            }

            // get the set index and the block offset within the page
            int temp  = set_hash(req.block_addr);
            int temp1 = set_hash_block(req.block_addr);

            for (int n = 0; n < ways; n++)
            {
                if (data[temp][n].valid && (!data[temp][n].block_valid[temp1]) && (data[temp][n].addr == (req.block_addr >> Config.proc.page_block_diff_bits)))
                {
                    // page already resident: just bring this one block in
                    Req req_insert1 = new Req();
                    // new dram mapping: the DRAM address is derived from the (way, set) position
                    req_insert1.set(req.pid, ReqType.WR, (ulong)((n * sets + temp) << Config.proc.page_size_bits) + (ulong)(temp1 << Config.proc.block_size_bits), true);

                    req_insert1.ts_arrival       = cycles;
                    req_insert1.migrated_request = true;
                    Sim.Dram_Utilization_size    = Sim.Dram_Utilization_size + 1;
                    reqs.Enqueue(req_insert1);
                    data[temp][n].access             = cycles;
                    data[temp][n].block_valid[temp1] = true;
                    data[temp][n].block_dirty[temp1] = false;
                    return(true);
                }
            }

            // find a candidate for replacement: prefer an invalid way, otherwise
            // fall back to the least recently accessed one
            int  victim        = 0;
            bool victim_status = false;

            for (int n = 0; n < ways; n++)
            {
                if (!data[temp][n].valid)
                {
                    victim        = n;
                    victim_status = true;
                    break;
                }
            }
            if (!victim_status)
            {
                for (int n = 0; n < ways; n++)
                {
                    if (data[temp][n].access < data[temp][victim].access)
                    {
                        victim = n;
                    }
                }
            }


            Dbg.Assert(victim >= 0 && victim < ways);   // victim is an index, not a reference

            if (data[temp][victim].valid == true)
            {
                Sim.Dram_Utilization_size = Sim.Dram_Utilization_size - (ulong)Config.proc.page_block_diff;
            }

            for (int block_id = 0; block_id < Config.proc.page_block_diff; block_id++)
            {
                data[temp][victim].block_valid[block_id] = false;
            }

            // do writeback
            switch (Config.proc.cache_write_policy)
            {
            case "WriteThrough":
                throw new System.Exception("Cache: Dirty data in a write-through cache.");

            case "WriteBack":
                // write data back
                for (int block_id = 0; block_id < Config.proc.page_block_diff; block_id++)
                {
                    if (data[temp][victim].block_dirty[block_id])
                    {
                        Req req_insert2 = new Req();
                        // new dram mapping: the DRAM address is derived from the (way, set) position
                        req_insert2.set(data[temp][victim].pid, ReqType.RD, (ulong)((victim * sets + temp) << Config.proc.page_size_bits) + (ulong)(block_id << Config.proc.block_size_bits), true);

                        req_insert2.ts_arrival       = cycles;
                        req_insert2.migrated_request = true;
                        reqs.Enqueue(req_insert2);

                        // write the dirty block back to its home address
                        Req wb_req = RequestPool.depool();
                        wb_req.set(data[temp][victim].pid, ReqType.WR, (data[temp][victim].addr << Config.proc.page_size_bits) + (ulong)(block_id * Config.proc.block_size), true);
                        wb_req.cache_wb         = true;
                        wb_req.migrated_request = true;
                        wbs.Enqueue(wb_req);
                    }
                }
                break;
            }


            // install the new page in the victim way and issue the fill request
            Req req_insert = new Req();

            // new dram mapping: the DRAM address is derived from the (way, set) position
            req_insert.set(req.pid, ReqType.WR, (ulong)((victim * sets + temp) << Config.proc.page_size_bits) + (ulong)(temp1 << Config.proc.block_size_bits), true);

            req_insert.ts_arrival       = cycles;
            req_insert.migrated_request = true;

            Sim.Dram_Utilization_size = Sim.Dram_Utilization_size + 1;

            reqs.Enqueue(req_insert);
            data[temp][victim].valid = true;
            // new dram mapping: the tag stores the page-granularity address
            data[temp][victim].addr = req.block_addr >> Config.proc.page_block_diff_bits;

            data[temp][victim].access = cycles;
            data[temp][victim].block_dirty[temp1] = false;
            data[temp][victim].pid = req_insert.pid;
            data[temp][victim].block_valid[temp1] = true;
            return(true);
        }
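Victim selection above prefers an invalid way and falls back to the smallest access timestamp. The same policy as a standalone helper (a sketch; pick_victim is a hypothetical name, assuming the data[set][way] layout used above):

        private int pick_victim(int set)
        {
            // an invalid way wins outright
            for (int n = 0; n < ways; n++)
                if (!data[set][n].valid)
                    return n;

            // otherwise evict the least recently accessed way
            int victim = 0;
            for (int n = 1; n < ways; n++)
                if (data[set][n].access < data[set][victim].access)
                    victim = n;
            return victim;
        }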