Example No. 1
        /*
         * public void receivePacket(MemoryPacket p)
         * {
         *  Simulator.Ready cb;
         *
         *  //receive WB or request from memory
         *  if(p.type == MemoryRequestType.RD)
         *  {
         *      cb = delegate()
         *          {
         *              MemoryPacket mp = new MemoryPacket(
         *                  p.request, p.block,
         *                  MemoryRequestType.DAT, p.dest, p.src);
         *
         *              node.queuePacket(mp);
         *          };
         *  }
         *  else
         *  {
         *      // WBs don't need a callback
         *      cb = delegate(){};
         *  }
         *
         *  access(p.request, cb);
         * }
         */

        public void access(Request req, Simulator.Ready cb)
        {
            MemoryRequest mreq = new MemoryRequest(req, cb);

            sched.issue_req(mreq);
            bank[mreq.b_index].outstandingReqs_perapp[req.requesterID]++;
            bank[mreq.b_index].outstandingReqs++;
        }
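
        (Note: the Simulator.Ready delegate type itself does not appear in any of these snippets. Judging from the parameterless anonymous delegates assigned to it (cb = delegate() { ... }) and the bare invocations (cb()), it is presumably declared roughly as follows; this is an inferred sketch, not the simulator's actual declaration.)

        public class Simulator
        {
            // Presumed completion-callback type: a parameterless delegate invoked
            // when a memory or cache request finishes.
            public delegate void Ready();
        }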
Example No. 2
 public CachePacket(int reqNode, int from, int to, int flits, int _class, int _vcclass, Simulator.Ready _cb)
     : base(null, 0, flits, new Coord(from), new Coord(to))
 {
     cb          = _cb;
     m_class     = _class;
     m_VCclass   = mapClass(_vcclass);
     requesterID = reqNode;
 }
Example No. 3
		public CachePacket(int reqNode, int from, int to, int flits, int _class, int _vcclass, Simulator.Ready _cb, CmpCache_Txn txn, bool critical)
			: base(null, 0, flits, new Coord(from), new Coord(to), txn, critical)
        {
            cb = _cb;
            m_class = _class;
            m_VCclass = mapClass(_vcclass);   // assign network VC class here.
            requesterID = reqNode;
        }
Example No. 4
        void access_mem(int requestor, ulong addr, bool write, Simulator.Ready cb)
        {
            Request req = new Request(requestor, addr, write);

            int node = map_addr_mem(requestor, addr);

            Simulator.network.nodes[node].mem.access(req, cb);
        }
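
        (A typical caller packages its own bookkeeping into the callback. A minimal sketch of such a call site is shown below; the names issue_load and pendingLoads are illustrative and do not come from the simulator.)

        int pendingLoads = 0;

        // Hypothetical caller: issue a read and decrement a pending-load counter when it completes.
        void issue_load(int requestor, ulong addr)
        {
            pendingLoads++;
            access_mem(requestor, addr, false /* read */, delegate() { pendingLoads--; });
        }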
Example No. 5
 public CachePacket(int reqNode, int from, int to, int flits, int _class, int _vcclass, Simulator.Ready _cb)
     : base(null, 0, flits, new Coord(from), new Coord(to))
 {
     cb = _cb;
     m_class = _class;
     m_VCclass = mapClass(_vcclass);
     requesterID = reqNode;
 }
Example No. 6
        void send_noc(int reqNode, int from, int to, int flits, Simulator.Ready cb, bool off_crit, int vc)
        {
            // packet class (used for split queues): 0 = ctl, 1 = data, 2 = off-crit (writebacks)
            int cl = off_crit ? 2 :
                     (flits > 1 ? 1 : 0);

            CachePacket p = new CachePacket(reqNode, from, to, flits, cl, vc, cb);

            Simulator.network.nodes[from].queuePacket(p);
        }
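
        (The packet class passed to CachePacket is derived from criticality and size: off-critical traffic such as writebacks gets class 2, multi-flit data packets class 1, and single-flit control packets class 0. Two representative calls are sketched below; the flit count and VC value are illustrative only.)

        // Single-flit control request on the critical path -> class 0 (ctl).
        send_noc(reqNode, from, to, 1, cb, false, 0);

        // Multi-flit data reply on the critical path -> class 1 (data); passing off_crit = true would yield class 2.
        send_noc(reqNode, to, from, 5, cb, false, 0);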
Example No. 7
 // TODO: Rachata, here is the problem: every CachePacket loses its GPU-related information if we need to use the shared CPU-GPU cache
 public CachePacket(int reqNode, int from, int to, int flits, int _class, int _vcclass, Simulator.Ready _cb, bool is_GPU)
     : base(null, 0, flits, new Coord(from), new Coord(to))
 {
     cb          = _cb;
     m_class     = _class;
     m_VCclass   = mapClass(_vcclass);
     requesterID = reqNode;
      // cache requests -- TODO: this is a hack; once we start using a shared CPU-GPU cache, this needs to be fixed
     this.from_GPU = is_GPU;
 }
Example No. 8
 // TODO: Rachata, here is the problem: every CachePacket loses its GPU-related information if we need to use the shared CPU-GPU cache
 public CachePacket(int reqNode, int from, int to, int flits, int _class, int _vcclass, Simulator.Ready _cb, bool is_GPU)
     : base(null, 0, flits, new Coord(from), new Coord(to))
 {
     cb = _cb;
     m_class = _class;
     m_VCclass = mapClass(_vcclass);
     requesterID = reqNode;
      // cache requests -- TODO: this is a hack; once we start using a shared CPU-GPU cache, this needs to be fixed
     this.from_GPU = is_GPU;
 }
Example No. 9
        public MemoryRequest(Request req, Simulator.Ready cb)
        {
            this.cb          = cb;
            request          = req;
            req.beenToMemory = true;

            mapAddr(req.blockAddress, out m_index, out b_index, out r_index, out glob_b_index);

            //scheduling related
            //sched = Config.memory.mem[m_index].sched;
            sched    = null;
            isMarked = false;
        }
Example No. 10
        public MemoryRequest(Request req, Simulator.Ready cb)
        {
            this.cb          = cb;
            request          = req;
            isWrite          = req.write;
            req.beenToMemory = true;

//            mapAddr(req.blockAddress, out shift_row, out mem_index, out channel_index,
//                    out rank_index, out bank_index, out row_index);
            mapAddr(req.requesterID, req.blockAddress, out shift_row, out mem_index, out channel_index,
                    out rank_index, out bank_index, out row_index);

//	    Console.WriteLine("Address:{0:x}, shift_row:{1:x}", req.address, shift_row );

            //scheduling related
            isMarked = false;

            /* HWA CODE */ // Bug Fix??
            this.from_GPU = req.from_GPU;
        }
Example No. 11
        // Called by CPU.cs to issue a request to the MemoryCoalescing
        // This just places the request in the appropriate client queue
        // This cannot be used when we model the network
        public void issueReq(int targetID, Request req, Simulator.Ready cb)
        {
//            Console.WriteLine("In MemoryCoalescing, issueReq is called requester {0}, addr = {1} at cycle {2}", req.requesterID, req.address, Simulator.CurrentRound);
            MemoryRequest mreq = new MemoryRequest(req, cb);

            mreq.from_GPU = true;
            //Console.WriteLine("Get a GPU Request {0}", req.from_GPU);
            int c = (int)req.client;
            int w = req.write?1:0;

            if (Config.useMemoryCoalescing)
            {
//            Console.WriteLine("In MemoryCoalescing, enqueue to the client queue requester {0}, addr = {1} at cycle {2}", req.requesterID, req.address, Simulator.CurrentRound);
                clientQueue[c, w].Enqueue(new Tuple3(targetID, Simulator.CurrentRound, mreq));
            }
            else
            {
                bool l1hit = false, l1upgr = false, l1ev = false, l1wb = false;
                bool l2access = false, l2hit = false, l2ev = false, l2wb = false, c2c = false;
                Simulator.network.cache.access(req.requesterID, req.address, req.write, cb, out l1hit, out l1upgr, out l1ev, out l1wb, out l2access, out l2hit, out l2ev, out l2wb, out c2c);
            }
        }
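
        (Tuple3 is not defined in these snippets. From the way Example No. 12 reads its fields -- t.Item1 as the destination node, t.Item2 as the enqueue cycle, t.Item3 as the request -- its shape is presumably something like the sketch below.)

        // Presumed shape of the entries held in the MemoryCoalescing client queues.
        class Tuple3
        {
            public int           Item1;   // destination (memory controller) node
            public ulong         Item2;   // cycle at which the request was enqueued
            public MemoryRequest Item3;   // the coalesced memory request

            public Tuple3(int dest, ulong cycle, MemoryRequest mreq)
            {
                Item1 = dest;
                Item2 = cycle;
                Item3 = mreq;
            }
        }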
Example No. 12
        // The main scheduling function.  Walk the client queues and decide what to send
        // onward to the main (UNB) memory controller
        public void doStep()
        {
            int  winnerClient = -1;
            int  winnerRead   = -1;
            int  bestWeight   = 0;
            bool seenUrgent   = false;
            bool stillHitting = false;

            // 1. check to see if anyone has reached their urgency limit; if so, issue the oldest
            for (int c = 0; c < numClients; c++)
            {
                for (int rw = 0; rw < 2; rw++)
                {
                    if (clientQueue[c, rw].Count > 0)
                    {
                        Tuple3        t    = clientQueue[c, rw].Peek();
                        int           dest = t.Item1;
                        MemoryRequest mreq = t.Item3;

                        if (BankAvailable(mreq) && Simulator.network.nodes[dest].mem.RequestEnqueueable(mreq))
                        {
                            int age = (int)(Simulator.CurrentRound - t.Item2);
                            if (age > urgentThreshold)
                            {
                                // if urgent, choose oldest
                                if (age > bestWeight)
                                {
                                    bestWeight   = age;
                                    winnerClient = c;
                                    winnerRead   = rw;
                                    seenUrgent   = true;
                                }
                            }
                        }
                    }
                }
            }

            if (!seenUrgent) // nothing urgent
            {
                // TODO: update this to check that requests are still hitting, although they could
                // potentially be in a different bank (i.e., currentRow need not be the same).  Need a
                // rough model of which pages are open.  XXX: doesn't seem like MemoryCoalescing does this, though

                // 2. else check to see if current client still has requests to same row
                //    if so and we haven't hit the streak limit, issue the request
                if (clientQueue[currentClient, currentRead].Count > 0)
                {
                    Tuple3        t    = clientQueue[currentClient, currentRead].Peek();
                    int           dest = t.Item1;
                    MemoryRequest mreq = t.Item3;
                    if (BankAvailable(mreq) &&
                        (mreq.shift_row == currentRow[mreq.channel_index, mreq.rank_index, mreq.bank_index]) &&
                        (consecutiveRequests < streakMax) &&
                        Simulator.network.nodes[dest].mem.RequestEnqueueable(mreq)) // still hitting same page
                    {
                        winnerClient = currentClient;
                        winnerRead   = currentRead;
                        stillHitting = true;
                    }
                }

                if (consecutiveRequests >= streakMax)
                {
                    consecutiveRequests = 0;
                }

                // 3. if we haven't crossed the read/write switch threshold, scan the clients
                //    to see which queue to use next (else switch read/write and do same)
                if (!stillHitting)
                {
                    bool allLazy = true;

                    // This is messy: if we haven't passed the rwSwitchThreshold, then we
                    // only consider the current read/write mode (e.g., if we're reading,
                    // we don't consider any writes).  The two least-significant bits of rwSet
                    // determine which we consider: in binary, ...000wr.  If below the threshold,
                    // we AND out the other bit.
                    int rwSet = 3; // by default, bit mask set to consider both reads and writes
                    if ((Simulator.CurrentRound - whenSwitchReadWrite) < (ulong)rwSwitchThreshold)
                    {
                        rwSet &= ~(1 << (1 - currentRead));
                    }

                    for (int rw = 0; rw < 2; rw++)
                    {
                        if (((rwSet >> rw) & 1) != 0)
                        {
                            for (int c = 0; c < numClients; c++)
                            {
                                if (clientQueue[c, rw].Count > 0)
                                {
                                    Tuple3        t      = clientQueue[c, rw].Peek();
                                    int           dest   = t.Item1;
                                    int           age    = (int)(Simulator.CurrentRound - t.Item2);
                                    MemoryRequest mreq   = t.Item3;
                                    int           weight = ComputeWeight(age, clientQueue[c, rw].Count);
                                    bool          lazy   = age < lazyThreshold;

                                    if (BankAvailable(mreq) &&
                                        Simulator.network.nodes[dest].mem.RequestEnqueueable(mreq))
                                    {
                                        if (!lazy && allLazy)
                                        {
                                            winnerClient = c;
                                            winnerRead   = rw;
                                            allLazy      = false;
                                        }
                                        else if (lazy && !allLazy)
                                        {
                                            // if this request is lazy but a non-lazy request has been seen, skip it
                                        }
                                        else if (weight > bestWeight)
                                        {
                                            winnerClient = c;
                                            winnerRead   = rw;
                                            bestWeight   = weight;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }

//            Console.WriteLine("In MemoryCoalescing Dostep, winner client is {0} at cyc {1}",winnerClient,  Simulator.CurrentRound);
            if (winnerClient != -1)
            {
                Tuple3        t    = clientQueue[winnerClient, winnerRead].Peek();
                int           dest = t.Item1;
                MemoryRequest mreq = t.Item3;

                Simulator.Ready prev_cb = mreq.cb;
                mreq.cb = delegate() {
                    numInFlightRequests[winnerClient, winnerRead]--;
                    prev_cb();
                };

                clientQueue[winnerClient, winnerRead].Dequeue();
                numInFlightRequests[winnerClient, winnerRead]++;

                uint combineMask = GetCombineBit(mreq);

                // if next request is combinable, grab that, too.
                int numCombos = 1;
                int comboMax  = Config.memory.MemoryCoalescingComboMax;
                if (clientQueue[winnerClient, winnerRead].Count > 0)
                {
                    Tuple3 n = clientQueue[winnerClient, winnerRead].Peek();
                    while (Combinable(mreq.request.address, n.Item3.request.address))
                    {
                        WrapDelegates(mreq, n.Item3);

                        combineMask |= GetCombineBit(mreq);
                        Simulator.stats.MemoryCoalescingNumCombinedRequests.Add();

                        clientQueue[winnerClient, winnerRead].Dequeue();
                        numCombos++;
                        if (numCombos >= comboMax) // limit number of requests that can be combined
                        {
                            break;
                        }

                        if (clientQueue[winnerClient, winnerRead].Count > 0)
                        {
                            n = clientQueue[winnerClient, winnerRead].Peek();
                        }
                        else
                        {
                            break;
                        }
                    }
                }

//                Console.WriteLine("In MemoryCoalescing, issueing request, access to NoCs (through CmpCache module requester {0}, addr = {1} at cycle {2}", mreq.request.requesterID, mreq.request.address, Simulator.CurrentRound);
                // edited for NoCs CmpCache
                bool l1hit = false, l1upgr = false, l1ev = false, l1wb = false;
                bool l2access = false, l2hit = false, l2ev = false, l2wb = false, c2c = false;
                Simulator.network.cache.access(mreq.request.requesterID, mreq.request.address, mreq.request.write, mreq.cb, out l1hit, out l1upgr, out l1ev, out l1wb, out l2access, out l2hit, out l2ev, out l2wb, out c2c);
//                Console.WriteLine("In MemoryCoalescing, accessed to NoCs (through CmpCache module requester {0}, addr = {1} at cycle {2}", mreq.request.requesterID, mreq.request.address, Simulator.CurrentRound);

                if ((winnerClient == currentClient) && (winnerRead == currentRead))
                {
                    consecutiveRequests++;
                }
                if (currentRead != winnerRead)
                {
                    whenSwitchReadWrite = Simulator.CurrentRound;
                }

                mreq.mem_size = ConvertMaskToSize(combineMask);

                uint burstLength = mreq.mem_size / Config.memory.busWidth / 2;
                if (currentRow[mreq.channel_index, mreq.rank_index, mreq.bank_index] == mreq.shift_row) // RB hit
                {
                    whenAvailable[mreq.channel_index, mreq.rank_index, mreq.bank_index] = Simulator.CurrentRound + burstLength;
                }
                else
                {
                    whenAvailable[mreq.channel_index, mreq.rank_index, mreq.bank_index] = Simulator.CurrentRound + Config.memory.cRP + Config.memory.cRCD;
                }
                currentRow[mreq.channel_index, mreq.rank_index, mreq.bank_index] = mreq.shift_row;
                currentClient = winnerClient;
                currentRead   = winnerRead;
            }

            int num = 0;

            for (int c = 0; c < numClients; c++)
            {
                for (int rw = 0; rw < 2; rw++)
                {
                    num += numInFlightRequests[c, rw];
                }
            }
        }
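
        (ComputeWeight is not shown in these examples; from its call site it combines the age of the queue's oldest request with the queue occupancy. A purely hypothetical sketch of such a weight function is given below -- the simulator's actual policy may differ.)

        // Hypothetical weight: favor older requests, break ties by queue length.
        int ComputeWeight(int age, int queueLength)
        {
            return age + queueLength;
        }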
Example No. 13
        public void access(int node, ulong addr, bool write, Simulator.Ready cb,
                           out bool L1hit, out bool L1upgr, out bool L1ev, out bool L1wb,
                           out bool L2access, out bool L2hit, out bool L2ev, out bool L2wb, out bool c2c)
        {
            CmpCache_Txn txn      = null;
            int          sh_slice = map_addr(node, addr);

            // ------------- first, we probe the cache (private, and shared if necessary) to
            //               determine current state.

            // probe private cache
            CmpCache_State state;
            bool           prv_state;
            bool           prv_hit = m_prv[node].probe(addr, out prv_state);

            bool sh_hit = false;

            if (m_sh_perfect)
            {
                ulong blk = addr >> m_blkshift;
                sh_hit = true;
                if (m_perf_sh.ContainsKey(blk))
                {
                    state = m_perf_sh[blk];
                }
                else
                {
                    state          = new CmpCache_State();
                    m_perf_sh[blk] = state;
                }
            }
            else
            {
                sh_hit = m_sh.probe(addr, out state);
            }

            bool prv_excl = sh_hit ? (state.excl == node) : false;

            if (prv_hit)
            {
                // we always update the timestamp on the private cache
                m_prv[node].update(addr, Simulator.CurrentRound);
            }

            // out-params
            L1hit    = prv_hit;
            L1upgr   = L1hit && !prv_excl;
            L2hit    = sh_hit;
            c2c      = false; // will be set below for appropriate cases
            L1ev     = false; // will be set below
            L1wb     = false; // will be set below
            L2ev     = false; // will be set below
            L2wb     = false; // will be set below
            L2access = false; // will be set below

            // ----------------- now, we execute one of four cases:
            //                   1a. present in private cache, with appropriate ownership.
            //                   1b. present in private cache, but not excl (for a write)
            //                   2. not present in private cache, but in shared cache.
            //                   3. not present in private or shared cache.
            //
            // in each case, we update functional state and generate the packet DAG as we go.

            if (prv_hit && (!write || prv_excl)) // CASE 1a: present in prv cache, have excl if write
            {
                // just set modified-bit in state, then we're done (no protocol interaction)
                if (write)
                {
                    state.modified = true;
                }
            }
            else if (prv_hit && write && !prv_excl) // CASE 1b: present in prv cache, need upgr
            {
                txn      = new CmpCache_Txn();
                txn.node = node;

                // request packet
                CmpCache_Pkt req_pkt  = add_ctl_pkt(txn, node, sh_slice, false);
                CmpCache_Pkt done_pkt = null;

                // present in others?
                if (state.owners.others_set(node))
                {
                    done_pkt = do_inval(txn, state, req_pkt, node, addr);
                }
                else
                {
                    // not present in others, but we didn't have excl -- send empty grant
                    // (could happen if others have evicted and we are the only one left)
                    done_pkt       = add_ctl_pkt(txn, sh_slice, node, true);
                    done_pkt.delay = m_shdelay;
                    add_dep(req_pkt, done_pkt);
                }

                state.owners.reset();
                state.owners.set(node);
                state.excl     = node;
                state.modified = true;
            }
            else if (!prv_hit && sh_hit) // CASE 2: not in prv cache, but in sh cache
            {
                txn      = new CmpCache_Txn();
                txn.node = node;

                // update functional shared state
                if (!m_sh_perfect)
                {
                    m_sh.update(addr, Simulator.CurrentRound);
                }

                // request packet
                CmpCache_Pkt req_pkt  = add_ctl_pkt(txn, node, sh_slice, false);
                CmpCache_Pkt done_pkt = null;

                if (state.owners.any_set())   // in other caches?
                {
                    if (write)                // need to invalidate?
                    {
                        if (state.excl != -1) // someone else has exclusive -- c-to-c xfer
                        {
                            c2c = true;       // out-param

                            CmpCache_Pkt xfer_req = add_ctl_pkt(txn, sh_slice, state.excl, false);
                            CmpCache_Pkt xfer_dat = add_data_pkt(txn, state.excl, node, true);
                            done_pkt = xfer_dat;

                            xfer_req.delay = m_shdelay;
                            xfer_dat.delay = m_prvdelay;

                            add_dep(req_pkt, xfer_req);
                            add_dep(xfer_req, xfer_dat);

                            bool evicted_state;
                            m_prv[state.excl].inval(addr, out evicted_state);
                        }
                        else // others have it -- inval to all, c-to-c from closest
                        {
                            int close = closest(node, state.owners);
                            if (close != -1)
                            {
                                c2c = true;              // out-param
                            }
                            done_pkt = do_inval(txn, state, req_pkt, node, addr, close);
                        }

                        // for a write, we need exclusive -- update state
                        state.owners.reset();
                        state.owners.set(node);
                        state.excl     = node;
                        state.modified = true;
                    }
                    else // just a read -- joining sharer set, c-to-c from closest
                    {
                        if (state.excl != -1)
                        {
                            CmpCache_Pkt xfer_req = add_ctl_pkt(txn, sh_slice, state.excl, false);
                            CmpCache_Pkt xfer_dat = add_data_pkt(txn, state.excl, node, true);
                            done_pkt = xfer_dat;

                            c2c = true; // out-param

                            xfer_req.delay = m_shdelay;
                            xfer_dat.delay = m_prvdelay;

                            add_dep(req_pkt, xfer_req);
                            add_dep(xfer_req, xfer_dat);

                            // downgrade must also trigger writeback
                            if (state.modified)
                            {
                                CmpCache_Pkt wb_dat = add_data_pkt(txn, state.excl, sh_slice, false);
                                add_dep(xfer_req, wb_dat);
                                state.modified = false;
                                state.sh_dirty = true;
                            }
                        }
                        else
                        {
                            int close = closest(node, state.owners);
                            if (close != -1)
                            {
                                c2c = true;              // out-param
                            }
                            CmpCache_Pkt xfer_req = add_ctl_pkt(txn, sh_slice, close, false);
                            CmpCache_Pkt xfer_dat = add_data_pkt(txn, close, node, true);
                            done_pkt = xfer_dat;

                            xfer_req.delay = m_shdelay;
                            xfer_dat.delay = m_prvdelay;

                            add_dep(req_pkt, xfer_req);
                            add_dep(xfer_req, xfer_dat);
                        }

                        state.owners.set(node);
                        state.excl = -1;
                    }
                }
                else
                {
                    // not in other prv caches, need to get from shared slice
                    L2access = true;

                    CmpCache_Pkt dat_resp = add_data_pkt(txn, sh_slice, node, true);
                    done_pkt = dat_resp;

                    add_dep(req_pkt, done_pkt);

                    dat_resp.delay = m_shdelay;

                    state.owners.reset();
                    state.owners.set(node);
                    state.excl     = node;
                    state.modified = write;
                }

                // insert into private cache, get evicted block (if any)
                ulong evict_addr;
                bool  evict_data;
                bool  evicted = m_prv[node].insert(addr, true, out evict_addr, out evict_data, Simulator.CurrentRound);

                // add either a writeback or a release packet
                if (evicted)
                {
                    L1ev = true;
                    do_evict(txn, done_pkt, node, evict_addr, out L1wb);
                }
            }
            else if (!prv_hit && !sh_hit) // CASE 3: not in prv or shared cache
            {
                // here, we need to go to memory
                Debug.Assert(!m_sh_perfect);

                txn      = new CmpCache_Txn();
                txn.node = node;

                L2access = true;

                // request packet
                CmpCache_Pkt req_pkt = add_ctl_pkt(txn, node, sh_slice, false);

                // cache response packet
                CmpCache_Pkt resp_pkt = add_data_pkt(txn, sh_slice, node, true);
                resp_pkt.delay = m_opdelay; // req already active -- just a pass-through op delay here

                // memory request packet
                int          mem_slice  = map_addr_mem(node, addr);
                CmpCache_Pkt memreq_pkt = add_ctl_pkt(txn, sh_slice, mem_slice, false);
                memreq_pkt.delay = m_shdelay;

                // memory-access virtual node
                CmpCache_Pkt mem_access = add_ctl_pkt(txn, 0, 0, false);
                mem_access.send          = false;
                mem_access.mem           = true;
                mem_access.mem_addr      = addr;
                mem_access.mem_write     = false; // cache-line fill
                mem_access.mem_requestor = node;

                // memory response packet
                CmpCache_Pkt memresp_pkt = add_data_pkt(txn, mem_slice, sh_slice, false);

                // connect up the critical path first
                add_dep(req_pkt, memreq_pkt);
                add_dep(memreq_pkt, mem_access);
                add_dep(mem_access, memresp_pkt);
                add_dep(memresp_pkt, resp_pkt);

                // now, handle replacement in the shared cache...
                CmpCache_State new_state = new CmpCache_State();

                new_state.owners.reset();
                new_state.owners.set(node);
                new_state.excl     = node;
                new_state.modified = write;
                new_state.sh_dirty = false;

                ulong          sh_evicted_addr;
                CmpCache_State sh_evicted_state;
                bool           evicted = m_sh.insert(addr, new_state, out sh_evicted_addr, out sh_evicted_state, Simulator.CurrentRound);

                if (evicted)
                {
                    // shared-cache eviction (different from the private-cache evictions elsewhere):
                    // we must evict any private-cache copies, because we model an inclusive hierarchy.

                    L2ev = true;

                    CmpCache_Pkt prv_evict_join = add_joinpt(txn, false);

                    if (sh_evicted_state.excl != -1) // evicted block lives only in one prv cache
                    {
                        // invalidate request to prv cache before sh cache does eviction
                        CmpCache_Pkt prv_invl = add_ctl_pkt(txn, sh_slice, sh_evicted_state.excl, false);
                        add_dep(memresp_pkt, prv_invl);
                        CmpCache_Pkt prv_wb;

                        prv_invl.delay = m_opdelay;

                        if (sh_evicted_state.modified)
                        {
                            // writeback
                            prv_wb       = add_data_pkt(txn, sh_evicted_state.excl, sh_slice, false);
                            prv_wb.delay = m_prvdelay;
                            sh_evicted_state.sh_dirty = true;
                        }
                        else
                        {
                            // simple ACK
                            prv_wb       = add_ctl_pkt(txn, sh_evicted_state.excl, sh_slice, false);
                            prv_wb.delay = m_prvdelay;
                        }

                        add_dep(prv_invl, prv_wb);
                        add_dep(prv_wb, prv_evict_join);

                        bool prv_evicted_dat;
                        m_prv[sh_evicted_state.excl].inval(sh_evicted_addr, out prv_evicted_dat);
                    }
                    else if (sh_evicted_state.owners.any_set()) // evicted block has greater-than-one sharer set
                    {
                        for (int i = 0; i < m_N; i++)
                        {
                            if (sh_evicted_state.owners.is_set(i))
                            {
                                CmpCache_Pkt prv_invl = add_ctl_pkt(txn, sh_slice, i, false);
                                CmpCache_Pkt prv_ack  = add_ctl_pkt(txn, i, sh_slice, false);

                                prv_invl.delay = m_opdelay;
                                prv_ack.delay  = m_prvdelay;

                                add_dep(memresp_pkt, prv_invl);
                                add_dep(prv_invl, prv_ack);
                                add_dep(prv_ack, prv_evict_join);

                                bool prv_evicted_dat;
                                m_prv[i].inval(sh_evicted_addr, out prv_evicted_dat);
                            }
                        }
                    }
                    else // evicted block has no owners (was only in shared cache)
                    {
                        add_dep(memresp_pkt, prv_evict_join);
                    }

                    // now writeback to memory, if we were dirty
                    if (sh_evicted_state.sh_dirty)
                    {
                        CmpCache_Pkt mem_wb = add_data_pkt(txn, sh_slice, mem_slice, false);
                        mem_wb.delay = m_opdelay;
                        add_dep(prv_evict_join, mem_wb);
                        CmpCache_Pkt mem_wb_op = add_ctl_pkt(txn, 0, 0, false);
                        mem_wb_op.send          = false;
                        mem_wb_op.mem           = true;
                        mem_wb_op.mem_addr      = sh_evicted_addr;
                        mem_wb_op.mem_write     = true;
                        mem_wb_op.mem_requestor = node;
                        add_dep(mem_wb, mem_wb_op);
                        L2wb = true;
                    }
                }

                // ...and insert and handle replacement in the private cache
                ulong evict_addr;
                bool  evict_data;
                bool  prv_evicted = m_prv[node].insert(addr, true, out evict_addr, out evict_data, Simulator.CurrentRound);

                // add either a writeback or a release packet
                if (prv_evicted)
                {
                    L1ev = true;
                    do_evict(txn, resp_pkt, node, evict_addr, out L1wb);
                }
            }
            else // shouldn't happen.
            {
                Debug.Assert(false);
            }

            // now start the transaction, if one was needed
            if (txn != null)
            {
                txn.cb = cb;

                assignVCclasses(txn.pkts);

                // start running the protocol DAG. It may be an empty graph (for a silent upgr), in
                // which case the deferred start (after the cache delay) simply completes the transaction.
                Simulator.Defer(delegate()
                {
                    start_pkts(txn);
                }, Simulator.CurrentRound + m_prvdelay);
            }
            // no transaction -- just the cache access delay. schedule deferred callback.
            else
            {
                Simulator.Defer(cb, Simulator.CurrentRound + m_prvdelay);
            }
        }
Example No. 14
 public static void Defer(Simulator.Ready cb, ulong cyc)
 {
     m_deferQueue.Enqueue(cb, cyc);
 }
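
 (Defer is how the cache model schedules work for a future cycle. Examples No. 13 and No. 16 use it both with an already-built callback and with an anonymous delegate, e.g.:)

     // run the completion callback after the private-cache access delay
     Simulator.Defer(cb, Simulator.CurrentRound + m_prvdelay);

     // or defer an arbitrary block of work
     Simulator.Defer(delegate() { start_pkts(txn); }, Simulator.CurrentRound + m_prvdelay);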
Example No. 15
        public MemoryRequest(Request req, Simulator.Ready cb)
        {
            this.cb = cb;
            request = req;
            isWrite = req.write;
            req.beenToMemory = true;

//            mapAddr(req.blockAddress, out shift_row, out mem_index, out channel_index,
//                    out rank_index, out bank_index, out row_index);
            mapAddr(req.requesterID, req.blockAddress, out shift_row, out mem_index, out channel_index,
                    out rank_index, out bank_index, out row_index);

//	    Console.WriteLine("Address:{0:x}, shift_row:{1:x}", req.address, shift_row );

            //scheduling related
            isMarked = false;

            /* HWA CODE */ // Bug Fix??
            this.from_GPU = req.from_GPU;
        }
Example No. 16
        public void access(int node, ulong addr, bool write, Simulator.Ready cb,
                           out bool L1hit, out bool L1upgr, out bool L1ev, out bool L1wb,
                           out bool L2access, out bool L2hit, out bool L2ev, out bool L2wb, out bool c2c)
        {
            CmpCache_Txn txn      = null;
            int          sh_slice = map_addr(node, addr);
            bool         sh_hit   = false;
            // probe private cache
            CmpCache_State state = new CmpCache_State();
            bool           prv_state;
            bool           prv_hit = m_prv[node].probe(addr, out prv_state);

            // -- GPU - TODO: need to fix this
            if (Simulator.network.nodes[node].cpu.is_GPU())
            {
                prv_hit = false;
                sh_hit  = false;
            }
            /* HWA CODE */
            else if (Simulator.network.nodes[node].cpu.is_HWA() ||
                     Config.is_through_all_cache)
            {
                prv_hit = false;
                sh_hit  = false;
            }
            /* HWA CODE END */
            else
            {
                // ------------- first, we probe the cache (private, and shared if necessary) to
                //               determine current state.

                if (m_sh_perfect)
                {
                    ulong blk = addr >> m_blkshift;
                    sh_hit = true;
                    if (m_perf_sh.ContainsKey(blk))
                    {
                        state = m_perf_sh[blk];
                    }
                    else
                    {
                        m_perf_sh[blk] = state;
                    }
                }
                else
                {
                    sh_hit = m_sh.probe(addr, out state);
                }

                /////// TODO: buggy code. Fix (remove) this!!

                /*Hashtable pHT = Simulator.network.nodes[node].cpu.addr_l2m;
                 * if (pHT.ContainsKey(addr))
                 * {
                 *  //if ((bool)pHT[addr] != sh_hit)
                 *  //   throw new Exception(String.Format("Inconsistent value for L2 status. stored {0} addr {1}",pHT[addr],addr));
                 *  pHT.Remove(addr);
                 *  pHT.Add(addr,sh_hit);
                 * }
                 * else
                 *  pHT.Add(addr,sh_hit);*/
                ///////
            }
            bool prv_excl = sh_hit ? (state.excl == node) : false;

            if (prv_hit)
            {
                // we always update the timestamp on the private cache
                m_prv[node].update(addr, Simulator.CurrentRound);
            }

            // out-params
            L1hit    = prv_hit;
            L1upgr   = L1hit && !prv_excl;
            L2hit    = sh_hit;
            c2c      = false; // will be set below for appropriate cases
            L1ev     = false; // will be set below
            L1wb     = false; // will be set below
            L2ev     = false; // will be set below
            L2wb     = false; // will be set below
            L2access = false; // will be set below

            // ----------------- now, we execute one of four cases:
            //                   1a. present in private cache, with appropriate ownership.
            //                   1b. present in private cache, but not excl (for a write)
            //                   2. not present in private cache, but in shared cache.
            //                   3. not present in private or shared cache.
            //
            // in each case, we update functional state and generate the packet DAG as we go.

            if (prv_hit && (!write || prv_excl)) // CASE 1a: present in prv cache, have excl if write
            {
                // just set modified-bit in state, then we're done (no protocol interaction)
                if (write)
                {
                    state.modified = true;
                }
            }
            else if (prv_hit && write && !prv_excl) // CASE 1b: present in prv cache, need upgr
            {
                txn      = new CmpCache_Txn();
                txn.node = node;

                // request packet
                CmpCache_Pkt req_pkt  = add_ctl_pkt(txn, node, sh_slice, false, false);
                CmpCache_Pkt done_pkt = null;

                // present in others?
                if (state.owners.others_set(node))
                {
                    done_pkt = do_inval(txn, state, req_pkt, node, addr);
                }
                else
                {
                    // not present in others, but we didn't have excl -- send empty grant
                    // (could happen if others have evicted and we are the only one left)
                    done_pkt       = add_ctl_pkt(txn, sh_slice, node, true, false);
                    done_pkt.delay = m_shdelay;
                    add_dep(req_pkt, done_pkt);
                }

                state.owners.reset();
                state.owners.set(node);
                state.excl     = node;
                state.modified = true;
            }
            else if (!prv_hit && sh_hit) // CASE 2: not in prv cache, but in sh cache
            {
                txn      = new CmpCache_Txn();
                txn.node = node;

                // update functional shared state
                if (!m_sh_perfect)
                {
                    m_sh.update(addr, Simulator.CurrentRound);
                }

                // request packet
                CmpCache_Pkt req_pkt  = add_ctl_pkt(txn, node, sh_slice, false, false);
                CmpCache_Pkt done_pkt = null;

                if (state.owners.any_set())   // in other caches?
                {
                    if (write)                // need to invalidate?
                    {
                        if (state.excl != -1) // someone else has exclusive -- c-to-c xfer
                        {
                            c2c = true;       // out-param

                            CmpCache_Pkt xfer_req = add_ctl_pkt(txn, sh_slice, state.excl, false, false);
                            CmpCache_Pkt xfer_dat = add_data_pkt(txn, state.excl, node, true, false);
                            done_pkt = xfer_dat;

                            xfer_req.delay = m_shdelay;
                            xfer_dat.delay = m_prvdelay;

                            add_dep(req_pkt, xfer_req);
                            add_dep(xfer_req, xfer_dat);

                            bool evicted_state;
                            m_prv[state.excl].inval(addr, out evicted_state);
                        }
                        else // others have it -- inval to all, c-to-c from closest
                        {
                            int close = closest(node, state.owners);
                            if (close != -1)
                            {
                                c2c = true;              // out-param
                            }
                            done_pkt = do_inval(txn, state, req_pkt, node, addr, close);
                        }

                        // for a write, we need exclusive -- update state
                        state.owners.reset();
                        state.owners.set(node);
                        state.excl     = node;
                        state.modified = true;
                    }
                    else // just a read -- joining sharer set, c-to-c from closest
                    {
                        if (state.excl != -1)
                        {
                            CmpCache_Pkt xfer_req = add_ctl_pkt(txn, sh_slice, state.excl, false, false);
                            CmpCache_Pkt xfer_dat = add_data_pkt(txn, state.excl, node, true, false);
                            done_pkt = xfer_dat;

                            c2c = true; // out-param

                            xfer_req.delay = m_shdelay;
                            xfer_dat.delay = m_prvdelay;

                            add_dep(req_pkt, xfer_req);
                            add_dep(xfer_req, xfer_dat);

                            // downgrade must also trigger writeback
                            if (state.modified)
                            {
                                CmpCache_Pkt wb_dat = add_data_pkt(txn, state.excl, sh_slice, false, false);
                                add_dep(xfer_req, wb_dat);
                                state.modified = false;
                                state.sh_dirty = true;
                            }
                        }
                        else
                        {
                            int close = closest(node, state.owners);
                            if (close != -1)
                            {
                                c2c = true;              // out-param
                            }
                            CmpCache_Pkt xfer_req = add_ctl_pkt(txn, sh_slice, close, false, false);
                            CmpCache_Pkt xfer_dat = add_data_pkt(txn, close, node, true, false);
                            done_pkt = xfer_dat;

                            xfer_req.delay = m_shdelay;
                            xfer_dat.delay = m_prvdelay;

                            add_dep(req_pkt, xfer_req);
                            add_dep(xfer_req, xfer_dat);
                        }

                        state.owners.set(node);
                        state.excl = -1;
                    }
                }
                else
                {
                    // not in other prv caches, need to get from shared slice
                    L2access = true;

                    CmpCache_Pkt dat_resp = add_data_pkt(txn, sh_slice, node, true, false);
                    done_pkt = dat_resp;

                    add_dep(req_pkt, done_pkt);

                    dat_resp.delay = m_shdelay;

                    state.owners.reset();
                    state.owners.set(node);
                    state.excl     = node;
                    state.modified = write;
                }

                // insert into private cache, get evicted block (if any)
                ulong evict_addr;
                bool  evict_data;
                bool  evicted = m_prv[node].insert(addr, true, out evict_addr, out evict_data, Simulator.CurrentRound);

                // add either a writeback or a release packet
                if (evicted)
                {
                    L1ev = true;
                    do_evict(txn, done_pkt, node, evict_addr, out L1wb);
                }
            }
            else if (!prv_hit && !sh_hit) // CASE 3: not in prv or shared cache
            {
                // here, we need to go to memory
                Debug.Assert(!m_sh_perfect);

                txn      = new CmpCache_Txn();
                txn.node = node;

                /* HWA CODE */
                // The HWA does not access the private or shared caches; it sends a packet directly to the memory controller
                if ((Simulator.network.nodes[node].cpu.is_HWA()) ||
                    (Config.is_through_all_cache))
                {
                    int mem_slice = map_addr_mem(node, addr);
                    if (write)
                    {
                        CmpCache_Pkt mem_wr = add_data_pkt(txn, node, mem_slice, false, false);
                        mem_wr.delay = 0; // Is it right??
                        // Send virtual node
                        CmpCache_Pkt mem_wr_op = add_ctl_pkt(txn, 0, 0, true, false);
                        mem_wr_op.send          = false;
                        mem_wr_op.mem           = true;
                        mem_wr_op.mem_addr      = addr;
                        mem_wr_op.mem_write     = true;
                        mem_wr_op.mem_requestor = node;
                        add_dep(mem_wr, mem_wr_op); // on a write, the HWA does not wait for a response packet from memory
                    }
                    else
                    {
                        CmpCache_Pkt memreq_pkt = add_ctl_pkt(txn, node, mem_slice, false, false);
                        memreq_pkt.delay = 0; // Is it right??
                        CmpCache_Pkt mem_access = add_ctl_pkt(txn, 0, 0, false, false);
                        mem_access.send          = false;
                        mem_access.mem           = true;
                        mem_access.mem_addr      = addr;
                        mem_access.mem_write     = false;
                        mem_access.mem_requestor = node;
                        // on a read, the HWA waits for a response packet from memory
                        CmpCache_Pkt memresp_pkt = add_data_pkt(txn, mem_slice, node, true, false);
                        memresp_pkt.delay = 0;

                        add_dep(memreq_pkt, mem_access);
                        add_dep(mem_access, memresp_pkt);
                    }
                }
                else
                {
                    /* HWA CODE END */

                    L2access = true;

                    //TODO: This seems to be true ... but not getting propagated...
                    //Console.WriteLine("This is a GPU request, going into case 3 in the access function, isGPU = {0}",Simulator.network.nodes[node].cpu.is_GPU());
                    //

                    // request packet
                    CmpCache_Pkt req_pkt = add_ctl_pkt(txn, node, sh_slice, false, Simulator.network.nodes[node].cpu.is_GPU());

                    // cache response packet
                    CmpCache_Pkt resp_pkt = add_data_pkt(txn, sh_slice, node, true, Simulator.network.nodes[node].cpu.is_GPU());
                    resp_pkt.delay = m_opdelay; // req already active -- just a pass-through op delay here

                    // memory request packet
                    int mem_slice = map_addr_mem(node, addr);
                    // TODO: Rachata: check this part. This has to include the GPU tag
                    CmpCache_Pkt memreq_pkt = add_ctl_pkt(txn, sh_slice, mem_slice, false, Simulator.network.nodes[node].cpu.is_GPU());
                    memreq_pkt.delay = m_shdelay;

                    // memory-access virtual node
                    CmpCache_Pkt mem_access = add_ctl_pkt(txn, 0, 0, false, Simulator.network.nodes[node].cpu.is_GPU());
                    mem_access.send          = false;
                    mem_access.mem           = true;
                    mem_access.mem_addr      = addr;
                    mem_access.mem_write     = false; // cache-line fill
                    mem_access.mem_requestor = node;

                    // memory response packet
                    // TODO: Rachata: same here: check this part. This has to include the GPU tag for the return packet
                    CmpCache_Pkt memresp_pkt = add_data_pkt(txn, mem_slice, sh_slice, false, Simulator.network.nodes[node].cpu.is_GPU());

                    // connect up the critical path first
                    add_dep(req_pkt, memreq_pkt);
                    add_dep(memreq_pkt, mem_access);
                    add_dep(mem_access, memresp_pkt);
                    add_dep(memresp_pkt, resp_pkt);

                    // -- not the GPU -- can evict
                    //  TODO: check this
//                if(!Simulator.network.nodes[node].cpu.is_GPU())
                    if (!Simulator.network.nodes[node].cpu.is_GPU())
                    // test
                    //if(true)
                    {
                        // now, handle replacement in the shared cache...
                        CmpCache_State new_state = new CmpCache_State();

                        new_state.owners.reset();
                        new_state.owners.set(node);
                        new_state.excl     = node;
                        new_state.modified = write;
                        new_state.sh_dirty = false;

                        ulong          sh_evicted_addr;
                        CmpCache_State sh_evicted_state;
                        bool           evicted = m_sh.insert(addr, new_state, out sh_evicted_addr, out sh_evicted_state, Simulator.CurrentRound);

                        if (evicted)
                        {
                            // shared-cache eviction (different from the private-cache evictions elsewhere):
                            // we must evict any private-cache copies, because we model an inclusive hierarchy.

                            L2ev = true;

                            CmpCache_Pkt prv_evict_join = add_joinpt(txn, false);

                            if (sh_evicted_state.excl != -1) // evicted block lives only in one prv cache
                            {
                                // invalidate request to prv cache before sh cache does eviction
                                CmpCache_Pkt prv_invl = add_ctl_pkt(txn, sh_slice, sh_evicted_state.excl, false, false);
                                add_dep(memresp_pkt, prv_invl);
                                CmpCache_Pkt prv_wb;

                                prv_invl.delay = m_opdelay;

                                if (sh_evicted_state.modified)
                                {
                                    // writeback
                                    prv_wb       = add_data_pkt(txn, sh_evicted_state.excl, sh_slice, false, false);
                                    prv_wb.delay = m_prvdelay;
                                    sh_evicted_state.sh_dirty = true;
                                }
                                else
                                {
                                    // simple ACK
                                    prv_wb       = add_ctl_pkt(txn, sh_evicted_state.excl, sh_slice, false, false);
                                    prv_wb.delay = m_prvdelay;
                                }

                                add_dep(prv_invl, prv_wb);
                                add_dep(prv_wb, prv_evict_join);

                                bool prv_evicted_dat;
                                m_prv[sh_evicted_state.excl].inval(sh_evicted_addr, out prv_evicted_dat);
                            }
                            else if (sh_evicted_state.owners.any_set()) // evicted block has greater-than-one sharer set
                            {
                                for (int i = 0; i < m_N; i++)
                                {
                                    if (sh_evicted_state.owners.is_set(i))
                                    {
                                        CmpCache_Pkt prv_invl = add_ctl_pkt(txn, sh_slice, i, false, false);
                                        CmpCache_Pkt prv_ack  = add_ctl_pkt(txn, i, sh_slice, false, false);

                                        prv_invl.delay = m_opdelay;
                                        prv_ack.delay  = m_prvdelay;

                                        add_dep(memresp_pkt, prv_invl);
                                        add_dep(prv_invl, prv_ack);
                                        add_dep(prv_ack, prv_evict_join);

                                        bool prv_evicted_dat;
                                        m_prv[i].inval(sh_evicted_addr, out prv_evicted_dat);
                                    }
                                }
                            }
                            else // evicted block has no owners (was only in shared cache)
                            {
                                add_dep(memresp_pkt, prv_evict_join);
                            }

                            // now writeback to memory, if we were dirty
                            if (sh_evicted_state.sh_dirty)
                            {
                                CmpCache_Pkt mem_wb = add_data_pkt(txn, sh_slice, mem_slice, false, false);
                                mem_wb.delay = m_opdelay;
                                add_dep(prv_evict_join, mem_wb);
                                CmpCache_Pkt mem_wb_op = add_ctl_pkt(txn, 0, 0, false, false);
                                mem_wb_op.send          = false;
                                mem_wb_op.mem           = true;
                                mem_wb_op.mem_addr      = sh_evicted_addr;
                                mem_wb_op.mem_write     = true;
                                mem_wb_op.mem_requestor = node;
                                add_dep(mem_wb, mem_wb_op);
                                L2wb = true;
                            }
                        }

                        // ...and insert and handle replacement in the private cache
                        ulong evict_addr;
                        bool  evict_data;
                        bool  prv_evicted = m_prv[node].insert(addr, true, out evict_addr, out evict_data, Simulator.CurrentRound);

                        // add either a writeback or a release packet
                        if (prv_evicted)
                        {
                            L1ev = true;
                            do_evict(txn, resp_pkt, node, evict_addr, out L1wb);
                        }
                    }
                    else // GPU -- not doing anything
                    {
                    }
                    /* HWA CODE */
                }
                /* HWA CODE END */
            }
            else // shouldn't happen.
            {
                Debug.Assert(false);
            }

            // now start the transaction, if one was needed
            if (txn != null)
            {
                txn.cb = cb;

                assignVCclasses(txn.pkts);

                // start running the protocol DAG. It may be an empty graph (for a silent upgr), in
                // which case the deferred start (after the cache delay) simply completes the transaction.
                Simulator.Defer(delegate()
                {
                    start_pkts(txn);
                }, Simulator.CurrentRound + m_prvdelay);
            }
            // no transaction -- just the cache access delay. schedule deferred callback.
            else
            {
                Simulator.Defer(cb, Simulator.CurrentRound + m_prvdelay);
            }
        }
Example No. 17
 // This is needed just because of the wacky binding/scoping rules used by
 // delegates; without this, you can't keep recursively wrapping delegates
 // within a loop.
 protected void WrapDelegates(MemoryRequest m1, MemoryRequest m2)
 {
     Simulator.Ready prev_cb = m1.cb;
     m1.cb = delegate() { prev_cb(); m2.cb(); };
 }
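
 (The "wacky binding/scoping" the comment alludes to is C#'s closure-capture behavior: a delegate captures variables, not their values at creation time. The helper copies the old callback into a fresh local, prev_cb, before wrapping, so each wrap chains to the previous callback; this is what lets the combining loop in Example No. 12 wrap repeatedly. One broken variant is sketched below for contrast -- an illustration of the presumed pitfall, not code from the simulator.)

 // Broken: the anonymous delegate captures m1 and re-reads m1.cb at invocation time,
 // by which point m1.cb is the new delegate itself -- calling it recurses forever.
 protected void WrapDelegatesBroken(MemoryRequest m1, MemoryRequest m2)
 {
     m1.cb = delegate() { m1.cb(); m2.cb(); };
 }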
Example No. 18
        public MemoryRequest(Request req, Simulator.Ready cb)
        {
            this.cb = cb;
            request = req;
            req.beenToMemory = true;

            mapAddr(req.blockAddress, out m_index, out b_index, out r_index, out glob_b_index);

            //scheduling related
            //sched = Config.memory.mem[m_index].sched;
            sched = null;
            isMarked = false;
        }
Example No. 19
File: Mem.cs  Project: hirous/test
        public void access(Request req, Simulator.Ready cb)
        {
            MemoryRequest mreq = new MemoryRequest(req, cb);

            ReceivePacket(mreq);
        }