Code example #1
        public bool is_BATCH(MemoryRequest r1, MemoryRequest r2)
        {
            bool det = false;
            bool result;

            result = BATCH_A(r1, r2, ref det);
            if (det)
            {
                return(result);
            }

            result = BATCH_B(r1, r2, ref det);
            if (det)
            {
                return(result);
            }

            result = FR(r1, r2, ref det);
            if (det)
            {
                return(result);
            }

            result = FCFS(r1, r2, ref det);
            return(result);
        }
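
All of the is_* comparators above share the same tie-breaking protocol: each rule receives two requests and a ref bool det flag, sets det = true when it can decide which request has priority, and leaves det = false to defer to the next rule in the chain. Below is a minimal, self-contained sketch of that protocol, using a hypothetical Req type and rule set rather than the repository's MemoryRequest or helper methods:

using System;

// Minimal sketch (not repository code) of the det/result tie-break chaining
// used by the is_* helpers: a rule decides (det = true) or defers (det = false).
class TieBreakSketch
{
    // Hypothetical stand-in for MemoryRequest with just the fields the rules need.
    class Req { public bool IsMarked; public ulong TimeOfArrival; }

    static bool MARK(Req r1, Req r2, ref bool det)
    {
        det = true;
        if (r1.IsMarked && !r2.IsMarked) return true;   // r1 wins
        if (!r1.IsMarked && r2.IsMarked) return false;  // r2 wins
        det = false;                                    // undecided, fall through
        return false;
    }

    static bool FCFS(Req r1, Req r2, ref bool det)
    {
        det = true;
        if (r1.TimeOfArrival < r2.TimeOfArrival) return true;
        if (r1.TimeOfArrival > r2.TimeOfArrival) return false;
        det = false;
        return false;
    }

    // Chained comparator in the same style as is_BATCH: earlier rules dominate.
    static bool IsMarkFcfs(Req r1, Req r2)
    {
        bool det = false;
        bool result = MARK(r1, r2, ref det);
        if (det) return result;
        return FCFS(r1, r2, ref det);
    }

    static void Main()
    {
        var a = new Req { IsMarked = false, TimeOfArrival = 10 };
        var b = new Req { IsMarked = true,  TimeOfArrival = 20 };
        Console.WriteLine(IsMarkFcfs(a, b)); // False: b is marked, so it has priority
    }
}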
Code example #2
        public bool is_MARK_PRIO_FR_RANK_FCFS(MemoryRequest r1, MemoryRequest r2, int prio1, int prio2, int rank1, int rank2)
        {
            bool det = false;
            bool result;

            result = MARK(r1, r2, ref det);
            if (det)
            {
                return(result);
            }

            result = PRIO(prio1, prio2, ref det);
            if (det)
            {
                return(result);
            }

            result = FR(r1, r2, ref det);
            if (det)
            {
                return(result);
            }

            result = RANK(rank1, rank2, ref det);
            if (det)
            {
                return(result);
            }

            result = FCFS(r1, r2, ref det);
            return(result);
        }
Code example #3
        /**
         * Remove a memory request from the buffer (that holds requests from each processor to banks)
         * 
         * @param req the memory request to remove
         */
        public override void remove_req(MemoryRequest req)
        {
            int threadID = req.request.requesterID;

            //stats
            bstat.inc(threadID, ref bstat.total_serviced[threadID]);

            if (req.isMarked) {

                cur_marked_req--;
                cur_marked_per_proc[threadID]--;
                cur_marked_per_procbank[threadID, req.glob_b_index]--;

                //----- STATS START -----
                bstat.inc(threadID, ref bstat.total_serviced_marked[threadID]);

                if (cur_marked_req == 0)
                    Config.memory.tavgNumReqPBPerProcRemark += MemCtlr.cycle % Config.memory.mark_interval;

                if (cur_marked_per_proc[threadID] == 0 && cur_period_marked_per_proc[threadID] > 0) {
                    bstat.inc(threadID, ref bstat.total_finished_batch_duration[threadID], MemCtlr.cycle - batch_start_time);

                    bstat.inc(threadID, ref bstat.total_finished_batch_cnt[threadID]);
                }
                //----- STATS END -----
            }

            base.remove_req(req);
        }
Code example #4
        private void req_insert(MemoryRequest[] markable, MemoryRequest req)
        {
            int insert = -1;

            for (int i = 0; i < markable.Length; i++) {

                MemoryRequest cur = markable[i];

                if (cur == null) {
                    insert = i;
                    continue;
                }

                if (cur.timeOfArrival <= req.timeOfArrival)
                    break;

                insert = i;
            }

            if (insert == -1)
                return;

            if (markable[insert] != null) {
                //shift younger requests to left
                for (int i = 0; i < insert; i++) {
                    markable[i] = markable[i + 1];
                }
            }
            markable[insert] = req;
        }
Code example #5
File: Mem.cs Project: hirous/test
        public bool RequestEnqueueable(MemoryRequest mreq)
        {
            // Check either maxCoreRequests or maxGPURequests, and also check the sum
            // of current coreRequests and GPURequests.
#if DETAILEDPACKETDUMP
            Console.WriteLine("in RequestEnqueueable, mreq from {0}, GPURequests = {1}, maxGPURequests = {2}, currentRequests = {3}, maxRequests = {4}, coreRequests = {5}, maxCoreRequests = {6}", mreq.request.requesterID, GPURequests, maxGPURequests, currentRequests, maxRequests, coreRequests, maxCoreRequests);
#endif
            /* HWA Code */ // bug fixed??
            //                    if(mreq.from_GPU)
            if (Simulator.network.nodes[mreq.request.requesterID].cpu.is_GPU())
            {
                /* HWA Code End */
                return((GPURequests < maxGPURequests) && (currentRequests < maxRequests));
            }
            /* HWA Code */
            else if (Simulator.network.nodes[mreq.request.requesterID].cpu.is_HWA())
            {
                return((HWARequests < maxHWARequests) && (currentRequests < maxRequests));
            }
            /* HWA Code End */
            else
            {
                return((coreRequests < maxCoreRequests) && (currentRequests < maxRequests));
            }
        }
Code example #6
File: Bank.cs Project: rachmadvwp/NOCulator
        /**
         * Set a memory request to the bank.
         * This can only be done if there are no requests currently being serviced.
         * Time left to service the request is set to full value.
         * @param req the memory request
         */
        public void add_req(MemoryRequest req)
        {
            //check if current request has been serviced
            Debug.Assert(cur_req == null);

            //proceed to service new request; update as the current request
            cur_req       = req;
            is_cur_marked = cur_req.isMarked;

            //----- STATS START -----
            stat.inc(ref BankStat.req_cnt[cur_req.request.requesterID]);
            //Simulator.stats.bank_access_persrc[bank_id, cur_req.request.requesterID].Add();
            if (cur_req.isMarked)
            {
                stat.inc(ref BankStat.marked_req_cnt[cur_req.request.requesterID]);
            }
            else
            {
                stat.inc(ref BankStat.unmarked_req_cnt[cur_req.request.requesterID]);
            }
            //----- STATS END ------

            //time to serve the request; bus latency
            wait_left = Config.memory.bus_busy_time;

            //time to serve the request; row access latency
            if (state == RowState.Closed)
            {
                //row is closed
                wait_left += Config.memory.row_closed_latency;
                state      = RowState.Open;
            }
            else
            {
                //row is open
                if (cur_req.r_index == cur_row && !Config.memory.row_same_latency)
                {
                    //hit
                    stat.inc(ref stat.row_hit);
                    stat.inc(ref stat.row_hit_per_proc[cur_req.request.requesterID]);
                    //Simulator.stats.bank_rowhits_persrc[bank_id, cur_req.request.requesterID].Add();

                    wait_left += Config.memory.row_hit_latency;
                }
                else
                {
                    //conflict
                    stat.inc(ref stat.row_miss);
                    stat.inc(ref stat.row_miss_per_proc[cur_req.request.requesterID]);

                    wait_left += Config.memory.row_conflict_latency;

                    //Close row, mark last cycle row to be closed was open
                    lastOpen[cur_row] = Simulator.CurrentRound;
                }
            }

            //set as current row
            cur_row = cur_req.r_index;
        }
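
add_req charges every request the bus latency plus a row-access latency that depends on the row-buffer state: closed row, row hit, or row conflict. The following small, self-contained sketch shows just that latency rule; the constants are made-up example values standing in for the Config.memory parameters:

using System;

// Minimal sketch (hypothetical constants, not repository code) of the latency
// rule add_req applies when a request is assigned to a bank.
class RowBufferLatencySketch
{
    enum RowCase { Closed, Hit, Conflict }

    // Example latency parameters; the real values come from Config.memory.
    const int BusBusyTime = 4, RowClosedLatency = 100, RowHitLatency = 50, RowConflictLatency = 150;

    static int ServiceLatency(RowCase c)
    {
        int wait = BusBusyTime; // bus latency always applies
        switch (c)
        {
            case RowCase.Closed:   wait += RowClosedLatency;   break; // row must be activated first
            case RowCase.Hit:      wait += RowHitLatency;      break; // requested row already open
            default:               wait += RowConflictLatency; break; // close the open row, then activate
        }
        return wait;
    }

    static void Main()
    {
        Console.WriteLine(ServiceLatency(RowCase.Closed));   // 104
        Console.WriteLine(ServiceLatency(RowCase.Hit));      // 54
        Console.WriteLine(ServiceLatency(RowCase.Conflict)); // 154
    }
}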
Code example #7
File: Mem.cs Project: hirous/test
        public void ReceivePacket(MemoryRequest mreq)
        {
            /* HWA CODE */
//            queueFromCores[mreq.channel_index].Enqueue(mreq);
            queueFromCores[mreq.channel_index].Add(mreq);
            /* HWA CODE END */
        }
Code example #8
        private bool PLL(MemoryRequest r1, MemoryRequest r2, ref bool det)
        {
            det = true;

            int bank_cnt1 = 0, bank_cnt2 = 0;

            for (int b = 0; b < bank_max; b++)
            {
                if (!bank[b].is_ready() && (bank[b].get_cur_req().request.requesterID == r1.request.requesterID))
                {
                    bank_cnt1++;
                }

                if (!bank[b].is_ready() && (bank[b].get_cur_req().request.requesterID == r2.request.requesterID))
                {
                    bank_cnt2++;
                }
            }

            if (bank_cnt1 > bank_cnt2)
            {
                return(true);
            }

            if (bank_cnt1 < bank_cnt2)
            {
                return(false);
            }

            det = false;
            return(false);
        }
Code example #9
        public bool is_NONWB_FR_X_FCFS(MemoryRequest r1, MemoryRequest r2, double x1, double x2)
        {
            bool det = false;
            bool result;

            result = NONWB(r1, r2, ref det);
            if (det)
            {
                return(result);
            }

            result = FR(r1, r2, ref det);
            if (det)
            {
                return(result);
            }

            result = X(x1, x2, ref det);
            if (det)
            {
                return(result);
            }

            result = FCFS(r1, r2, ref det);
            return(result);
        }
Code example #10
File: Sched.cs Project: hirous/test
 public void Allocate(MemoryRequest mreq)
 {
     this.mreq = mreq;
     mreq.buf_index = index;
     whenArrived = Simulator.CurrentRound;
     moreCommands = true;
     burstLength = mreq.mem_size / Config.memory.busWidth / 2;
 }
Code example #11
File: Mem.cs Project: rachmadvwp/NOCulator
        /*
         * public void receivePacket(MemoryPacket p)
         * {
         *  Simulator.Ready cb;
         *
         *  //receive WB or request from memory
         *  if(p.type == MemoryRequestType.RD)
         *  {
         *      cb = delegate()
         *          {
         *              MemoryPacket mp = new MemoryPacket(
         *                  p.request, p.block,
         *                  MemoryRequestType.DAT, p.dest, p.src);
         *
         *              node.queuePacket(mp);
         *          };
         *  }
         *  else
         *  {
         *      // WB don't need a callback
         *      cb = delegate(){};
         *  }
         *
         *  access(p.request, cb);
         * }
         */

        public void access(Request req, Simulator.Ready cb)
        {
            MemoryRequest mreq = new MemoryRequest(req, cb);

            sched.issue_req(mreq);
            bank[mreq.b_index].outstandingReqs_perapp[req.requesterID]++;
            bank[mreq.b_index].outstandingReqs++;
        }
Code example #12
File: Trace.cs Project: hirous/test
        private bool findBankIdx(mem_req req)
        {
            ulong s_row;
            int   mem_idx, ch_idx, rank_idx, bank_idx, row_idx;

            MemoryRequest.mapAddr(req.address >> Config.cache_block, out s_row, out mem_idx, out ch_idx, out rank_idx, out bank_idx, out row_idx);
            return(bank_idx == search_bank_idx);
        }
Code example #13
File: Sched.cs Project: hirous/test
 public void Allocate(MemoryRequest mreq)
 {
     this.mreq      = mreq;
     mreq.buf_index = index;
     whenArrived    = Simulator.CurrentRound;
     moreCommands   = true;
     burstLength    = mreq.mem_size / Config.memory.busWidth / 2;
 }
Code example #14
        public bool is_FCFS(MemoryRequest r1, MemoryRequest r2)
        {
            bool det = false;
            bool result;

            result = FCFS(r1, r2, ref det);
            return(result);
        }
Code example #15
 public override void remove_req(MemoryRequest request)
 {
     if (request.isMarked)
     {
         markedReqThisBatchPerPriority[threadPriority[request.request.requesterID]]--;
     }
     base.remove_req(request);
 }
Code example #16
File: OUR.cs Project: rachmadvwp/NOCulator
        /**
         * This is the main function of interest. Another memory scheduler needs to
         * implement this function in a different way!
         */
        public override MemoryRequest get_next_req(MemCtlr mem)
        {
            MemoryRequest next_req = null;
            //search in global/local banks
            int bank_index;
            int next_req_bank = -1;

            if (Config.memory.is_shared_MC)
            {
                bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
            }
            else
            {
                bank_index = 0;
            }
            //Select a memory request according to priorities:
            //Order: Ready > Timeout > priority > rowHit > rank > totAU > BankRank > time
            for (int b = bank_index; b < bank_index + mem.bank_max; b++)  // for each bank
            {
                //Rule 1: Ready
                if (!bank[b].is_ready() || buf_load[b] == 0)
                {
                    continue;
                }
                for (int j = 0; j < buf_load[b]; j++)
                {
                    //Mark, priority, row hit, rank, totAU, bank rank, bank rank per thread, FCFS
                    if (next_req == null)
                    {
                        next_req      = buf[b, j];
                        next_req_bank = b;
                        continue;
                    }

                    if (!helper.is_MARK_PRIO_FR_RANK_TOTAU_BR_BRT_FCFS(next_req, buf[b, j],
                                                                       rank[next_req.memoryRequesterID], rank[buf[b, j].memoryRequesterID],
                                                                       priority[getGroup(next_req.memoryRequesterID)], priority[getGroup(buf[b, j].memoryRequesterID)],
                                                                       bankRank[next_req.memoryRequesterID], bankRank[buf[b, j].memoryRequesterID],
                                                                       bankRankPerThread[next_req.glob_b_index, next_req.memoryRequesterID], bankRankPerThread[b, buf[b, j].memoryRequesterID],
                                                                       totAU[next_req.memoryRequesterID], totAU[buf[b, j].memoryRequesterID]))
                    {
                        next_req      = buf[b, j];
                        next_req_bank = b;
                    }
                }
            }
            //Note: All requests (in this simulator) are either load, store, or writeback
            if (next_req != null)
            {
                bankRank[next_req.memoryRequesterID]--;
                bankRankPerThread[next_req_bank, next_req.memoryRequesterID]--;
                utility[next_req.memoryRequesterID]++;
                utilityMinFrame[next_req.memoryRequesterID]++;
            }

            return(next_req);
        }
Code example #17
File: FR_FCFS_Cap.cs Project: rachmadvwp/NOCulator
        public override MemoryRequest get_next_req(MemCtlr mem)
        {
            MemoryRequest nextRequest = null;

            bool ScheduleWB = (this.get_wb_fraction() > Config.memory.wb_full_ratio);

            // Get the highest priority request.
            int bank_index;
            if (Config.memory.is_shared_MC) {
                bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
            }
            else {
                bank_index = 0;
            }
            for (int b = bank_index; b < bank_index + mem.bank_max; b++)  // for each bank
            {
                bool cap = ((MemCtlr.cycle - bankTimers[b]) >= Config.memory.prio_inv_thresh);
                if (Config.memory.prio_inv_thresh == 0) {
                    if (cap == false) {
                        Environment.Exit(1);
                    }
                }

                if (bank[b].is_ready() && buf_load[b] > 0) {
                    // Find the highest priority request for this bank
                    MemoryRequest nextBankRequest = buf[b, 0];
                    for (int j = 1; j < buf_load[b]; j++) {

                        if (cap) {
                            if (ScheduleWB && !helper.is_FCFS(nextBankRequest, buf[b, j]) ||
                               (!ScheduleWB && !helper.is_NONWB_FCFS(nextBankRequest, buf[b, j]))) {
                                nextBankRequest = buf[b, j];
                            }
                        }
                        else {
                            if (ScheduleWB && !helper.is_FR_FCFS(nextBankRequest, buf[b, j]) ||
                               (!ScheduleWB && !helper.is_NONWB_FR_FCFS(nextBankRequest, buf[b, j]))) {
                                nextBankRequest = buf[b, j];
                            }

                        }
                    }
                    // Compare between highest priority between different banks
                    if (nextRequest == null || !helper.is_FCFS(nextRequest, nextBankRequest)) {
                        nextRequest = nextBankRequest;
                    }
                }
            }

            if (nextRequest != null) {
                // Update the bank timers if the row has changed!
                if (bank[nextRequest.glob_b_index].get_cur_row() != nextRequest.r_index)
                    bankTimers[nextRequest.glob_b_index] = MemCtlr.cycle;
            }

            return nextRequest;
        }
Code example #18
File: Mem.cs Project: hirous/test
        public void Enqueue(MemoryRequest mreq)
        {
            mreq.timeOfArrival = Simulator.CurrentRound;
//            Console.WriteLine("Incrementing load at src {0}", mreq.request.requesterID);
            loadPerProc[mreq.request.requesterID]++;
            // Walk array until empty entry is found; make sure to record buf index in mreq.
            for (int i = 0; i < maxRequests; i++)
            {
#if PACKETDUMP
                Console.WriteLine("Enqueueing mreq from {0} to the buffers", mreq.request.requesterID);
#endif
                if (buf[i].Available)
                {
                    buf[i].Allocate(mreq);
                    /* HWA Code */ // bug fixed??
//                    if(mreq.from_GPU)
                    if (Simulator.network.nodes[mreq.request.requesterID].cpu.is_GPU())
                    {
                        /* HWA Code End */
                        GPURequests++;
                    }
                    /* HWA Code */
                    else if (Simulator.network.nodes[mreq.request.requesterID].cpu.is_HWA())
                    {
                        HWARequests++;
                        HWAUnIssueRequests++;
                    }

                    /* HWA Code End */
                    else
                    {
                        coreRequests++;
                    }

                    RequestsPerBank[mreq.bank_index]++;
                    Simulator.QoSCtrl.RequestsPerBank[mreq.bank_index]++;
                    if (!Simulator.network.nodes[mreq.request.requesterID].cpu.is_HWA())
                    {
                        Simulator.QoSCtrl.CPURequestsPerBank[mreq.bank_index]++;
                    }


//		    Console.WriteLine("EnQueue: CPU:{0},GPU:{1},HWA:{2}", coreRequests, GPURequests, HWARequests);
                    unIssueRequestsPerCore[mreq.request.requesterID]++;
                    if (!mreq.isWrite)
                    {
                        unIssueReadRequestsPerCore[mreq.request.requesterID]++;
                    }
                    Simulator.QoSCtrl.mem_req_enqueue(mreq.request.requesterID, mreq.request.address, mem_id);
//		    Console.WriteLine("EnQueue({0}) from {1}, addr:{2:x}", mem_id, mreq.request.requesterID, mreq.request.address);
                    return;
                }
            }
            Console.WriteLine("Failed to allocate ({0} req): {1} coreRequests, {2} GPUrequests, {3} buf size ({4} current reqs)",
                              mreq.from_GPU?"GPU":"core", coreRequests, GPURequests, buf.Length, currentRequests);
            System.Environment.Exit(-1);
        }
Code example #19
 private void mark_old_requests()
 {
     for (int i = 0; i < chan.buf.Length; i++)
     {
         MemoryRequest req = chan.buf[i].mreq;
         if (req != null && (Simulator.CurrentRound - req.timeOfArrival > Config.sched.threshold_cycles))
         {
             req.is_marked = true;
         }
     }
 }
Code example #20
        public BatchMemSchedHelper bhelper;         //ranking algorithm helper

        /**
         * Constructor
         */
        public BatchMemSched(int buf_size, Bank[] bank, RankAlgo rank_algo, BatchSchedAlgo batch_sched_algo)
            : base(buf_size, bank)
        {
            //stat
            bstat = new BatchMemSchedStat(this);

            //components
            this.rank_algo = rank_algo;
            this.batch_sched_algo = batch_sched_algo;

            rank_to_proc = new int[Config.N];
            proc_to_rank = new int[Config.N];
            for (int p = 0; p < Config.N; p++) {
                rank_to_proc[p] = p;
                proc_to_rank[p] = Config.N - 1;
            }

            //batch formulation; marking
            markable = new MemoryRequest[Config.N][][];
            for (int p = 0; p < Config.N; p++) {
                markable[p] = new MemoryRequest[bank_max][];
                for (int b = 0; b < bank_max; b++) {
                    markable[p][b] = new MemoryRequest[markable_len];
                }
            }

            markable_cnt = new int[Config.N][];
            for (int p = 0; p < Config.N; p++) {
                markable_cnt[p] = new int[bank_max];
            }

            markable_cnt_unbound = new int[Config.N][];
            for (int p = 0; p < Config.N; p++) {
                markable_cnt_unbound[p] = new int[bank_max];
            }

            //marked requests per processor
            cur_marked_per_proc = new int[Config.N];
            cur_period_marked_per_proc = new int[Config.N];

            //marked requests per processor, bank
            cur_marked_per_procbank = new int[Config.N, bank_max];
            cur_period_marked_per_procbank = new int[Config.N, bank_max];

            //bhelper
            bhelper = new BatchMemSchedHelper(this);

            Console.WriteLine("Initialized BATCH_MemoryScheduler");
            Console.WriteLine("Ranking Scheme: " + rank_algo.ToString());
            Console.WriteLine("WithinBatch Priority: " + batch_sched_algo.ToString());
            Console.WriteLine("BatchingCap: " + Config.memory.batch_cap);
        }
Code example #21
File: MemSchedHelper.cs Project: hoangt/NOCulator
        private bool BATCH_A(MemoryRequest r1, MemoryRequest r2, ref bool det)
        {
            det = true;

            if (((MemCtlr.cycle - r1.timeOfArrival) > 500) && ((MemCtlr.cycle - r2.timeOfArrival) < 500))
                return true;

            if (((MemCtlr.cycle - r1.timeOfArrival) < 500) && ((MemCtlr.cycle - r2.timeOfArrival) > 500))
                return false;

            det = false;
            return false;
        }
Code example #22
File: MemSchedHelper.cs Project: hoangt/NOCulator
        private bool BATCH_B(MemoryRequest r1, MemoryRequest r2, ref bool det)
        {
            det = true;

            if ((cur_max_load_per_proc[r1.request.requesterID] < 5) && (cur_max_load_per_proc[r2.request.requesterID] >= 5))
                return true;

            if ((cur_max_load_per_proc[r1.request.requesterID] >= 5) && (cur_max_load_per_proc[r2.request.requesterID] < 5))
                return false;

            det = false;
            return false;
        }
Code example #23
File: LAS_BA_FR.cs Project: rachmadvwp/NOCulator
        /**
         * This is the main function of interest. Another memory scheduler needs to
         * implement this function in a different way!
         */
        public override MemoryRequest get_next_req(MemCtlr mem)
        {
            MemoryRequest next_req = null;

            //search in global/local banks
            int bank_index;

            if (Config.memory.is_shared_MC)
            {
                bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
            }
            else
            {
                bank_index = 0;
            }

            if (mem.mem_id == 0 && MemCtlr.cycle % 10000 == 0)
            {
                Console.Write("Ranking: ");
                for (int p = 0; p < Config.N; p++)
                {
                    Console.Write(service_rank[p].ToString() + '\t');
                }
                Console.WriteLine("");
            }

            //search for next request
            for (int b = bank_index; b < bank_index + mem.bank_max; b++)  // for each bank
            {
                if (!bank[b].is_ready() || buf_load[b] == 0)
                {
                    continue;
                }

                MemoryRequest cur_req  = buf[b, 0];
                int           cur_proc = cur_req.request.requesterID;
                for (int j = 1; j < buf_load[b]; j++)
                {
                    if (!helper.is_MARK_RANK_FR_FCFS(cur_req, buf[b, j], service_rank[cur_proc], service_rank[buf[b, j].request.requesterID]))
                    {
                        cur_req  = buf[b, j];
                        cur_proc = cur_req.request.requesterID; //keep the rank argument in sync with the current best request
                    }
                }

                if (next_req == null || !helper.is_MARK_RANK_FR_FCFS(next_req, cur_req, service_rank[next_req.request.requesterID], service_rank[cur_req.request.requesterID]))
                {
                    next_req = cur_req;
                }
            }
            return(next_req);
        }
Code example #24
File: MemSched.cs Project: rachmadvwp/NOCulator
        /**
         * Removes a request from the memory request buffer when it is finished. The implementation
         * removes this request from the memory request buffer and updates all statistic
         * variables. If some data structure needs to be updated for the scheduler, this
         * method should be overridden.
         */
        virtual public void remove_req(MemoryRequest req)
        {
            int bank_index = req.glob_b_index;
            int slot       = req.buf_index;

            Debug.Assert(buf[bank_index, slot] == req);

            //overwrite last req to the req to be removed
            int buf_last_index = buf_load[bank_index] - 1;

            buf[bank_index, buf_last_index].buf_index = slot;
            buf[bank_index, slot] = buf[bank_index, buf_last_index];

            //clear the last buffer entry
            buf[bank_index, buf_last_index] = null;
            buf_load[bank_index]--;
            Debug.Assert(buf_load[bank_index] >= 0);

            //----- STATS START -----
            //shared load needs to be updated before cur_load_per_proc
            if (cur_load_per_proc[req.request.requesterID] > Config.memory.buf_size_per_proc)
            {
                cur_shared_load--;
            }

            cur_load--;
            cur_load_per_proc[req.request.requesterID]--;
            cur_load_per_procbank[req.request.requesterID, req.glob_b_index]--;

            Debug.Assert(cur_load >= 0);
            Debug.Assert(cur_load_per_proc[req.request.requesterID] >= 0);
            Debug.Assert(cur_load_per_procbank[req.request.requesterID, req.glob_b_index] >= 0);

            if (req.type != MemoryRequestType.WB)
            {
                cur_nonwb_load--;
                cur_nonwb_per_proc[req.request.requesterID]--;
                cur_nonwb_per_procbank[req.request.requesterID, req.glob_b_index]--;
            }
            //----- STATS END ------

            //find maximum load for any bank for this request
            cur_max_load_per_proc[req.request.requesterID] = 0;
            for (int b = 0; b < bank_max; b++)
            {
                if (cur_max_load_per_proc[req.request.requesterID] < cur_load_per_procbank[req.request.requesterID, b])
                {
                    cur_max_load_per_proc[req.request.requesterID] = cur_load_per_procbank[req.request.requesterID, b];
                }
            }
        }
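
remove_req frees a buffer slot in constant time by copying the last occupied entry into the vacated slot and patching its buf_index, rather than compacting the whole bank queue. A minimal sketch of that swap-remove idiom, using a hypothetical Entry type and container rather than the scheduler's buffers:

using System;
using System.Collections.Generic;

// Minimal sketch (not repository code) of the O(1) swap-remove idiom used by remove_req.
class SwapRemoveSketch
{
    class Entry { public int Id; public int BufIndex; }

    static void SwapRemove(List<Entry> buf, int slot)
    {
        int last = buf.Count - 1;
        buf[last].BufIndex = slot;  // patch the moved entry's back-pointer
        buf[slot] = buf[last];      // overwrite the removed slot with the last entry
        buf.RemoveAt(last);         // drop the now-duplicated tail
    }

    static void Main()
    {
        var buf = new List<Entry>();
        for (int i = 0; i < 4; i++) buf.Add(new Entry { Id = i, BufIndex = i });
        SwapRemove(buf, 1);         // remove the entry with Id = 1
        foreach (var e in buf) Console.WriteLine($"slot {e.BufIndex}: Id {e.Id}");
        // slots now hold Ids 0, 3, 2
    }
}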
Code example #25
        public bool is_RANK_FCFS(MemoryRequest r1, MemoryRequest r2, int rank1, int rank2)
        {
            bool det = false;
            bool result;

            result = RANK(rank1, rank2, ref det);
            if (det)
            {
                return(result);
            }

            result = FCFS(r1, r2, ref det);
            return(result);
        }
Code example #26
        public bool is_MARK_PRIO_FR_RANK_TOTAU_BR_BRT_FCFS(MemoryRequest r1, MemoryRequest r2, int rank1, int rank2, int prio1, int prio2, int brank1, int brank2, int brankt1, int brankt2, float totAU1, float totAU2)
        {
            bool det = false;
            bool result;

            result = MARK(r1, r2, ref det);
            if (det)
            {
                return(result);
            }

            result = PRIO(prio1, prio2, ref det);
            if (det)
            {
                return(result);
            }

            result = FR(r1, r2, ref det);
            if (det)
            {
                return(result);
            }

            result = RANK(rank1, rank2, ref det);
            if (det)
            {
                return(result);
            }

            result = TOTAU(totAU1, totAU2, ref det);
            if (det)
            {
                return(result);
            }

            result = BRANK(brank1, brank2, ref det);
            if (det)
            {
                return(result);
            }

            result = BRT(brankt1, brankt2, ref det);
            if (det)
            {
                return(result);
            }

            result = FCFS(r1, r2, ref det);
            return(result);
        }
Code example #27
        public bool is_PLL(MemoryRequest r1, MemoryRequest r2)
        {
            bool det = false;
            bool result;

            result = PLL(r1, r2, ref det);
            if (det)
            {
                return(result);
            }

            result = FCFS(r1, r2, ref det);
            return(result);
        }
Code example #28
        public override void tick()
        {
            base.tick();

            //update servicing banks
            for (int p = 0; p < Config.N; p++)
            {
                service_bank[p] = 0;
            }

            for (int b = 0; b < bank_max; b++)  // for each bank
            {
                MemoryRequest cur_req = bank[b].get_cur_req();
                if (cur_req == null)
                {
                    continue;
                }

                service_bank[cur_req.request.requesterID]++;
            }

            //update serviced time
            for (int p = 0; p < Config.N; p++)
            {
                service_time[p] += service_bank[p];

                //wrap-around
                if (service_time[p] >= 4000)
                {
                    service_time[p] = 0;
                }
            }

            //rank
            for (int cur_proc = 0; cur_proc < Config.N; cur_proc++)
            {
                int cur_rank = 0;

                for (int p = 0; p < Config.N; p++)
                {
                    if (service_time[p] > service_time[cur_proc])
                    {
                        cur_rank++;
                    }
                }

                service_rank[cur_proc] = cur_rank;
            }
        }
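
The ranking step at the end of tick() assigns each processor a rank equal to the number of processors that have accumulated strictly more service time, so processors with equal service share a rank. A small, self-contained sketch of that counting rule, with hypothetical array contents:

using System;
using System.Linq;

// Minimal sketch (not repository code) of the service-time ranking rule in tick().
class ServiceRankSketch
{
    static int[] ComputeRanks(int[] serviceTime)
    {
        // rank[p] = number of processors with strictly larger service time than p
        return serviceTime
            .Select(t => serviceTime.Count(other => other > t))
            .ToArray();
    }

    static void Main()
    {
        int[] serviceTime = { 120, 40, 40, 300 };
        Console.WriteLine(string.Join(" ", ComputeRanks(serviceTime))); // 1 2 2 0
    }
}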
Code example #29
File: Sched.cs Project: hirous/test
        public void Deallocate()
        {
            mreq.buf_index = -1;
            mreq=null;
            whenArrived=ulong.MaxValue;
            whenStarted=ulong.MaxValue;
            whenCompleted=ulong.MaxValue;
            whenIdle = 0;
            issuedActivation = false;
            marked = false;
            rank = -1;
            /* HWA CODE */
            wait_num = 0;
            /* HWA CODE END */
        }
Code example #30
File: Sched.cs Project: hirous/test
 public void Deallocate()
 {
     mreq.buf_index   = -1;
     mreq             = null;
     whenArrived      = ulong.MaxValue;
     whenStarted      = ulong.MaxValue;
     whenCompleted    = ulong.MaxValue;
     whenIdle         = 0;
     issuedActivation = false;
     marked           = false;
     rank             = -1;
     /* HWA CODE */
     wait_num = 0;
     /* HWA CODE END */
 }
Code example #31
File: TCM.cs Project: hirous/test
        // Override this for other algorithms
        override protected SchedBuf Pick(SchedBuf winner, SchedBuf candidate)
        {
            if (winner == null)
            {
                winner = candidate;
            }
            MemoryRequest req1  = winner.mreq;
            MemoryRequest req2  = candidate.mreq;
            int           rank1 = rank[req1.request.requesterID];
            int           rank2 = rank[req2.request.requesterID];

            if (rank1 != rank2)
            {
                if (rank1 > rank2)
                {
                    return(winner);
                }
                else
                {
                    return(candidate);
                }
            }

            bool hit1 = winner.IsRowBufferHit;
            bool hit2 = candidate.IsRowBufferHit;

            if (hit1 ^ hit2)
            {
                if (hit1)
                {
                    return(winner);
                }
                else
                {
                    return(candidate);
                }
            }

            if (candidate.IsOlderThan(winner))
            {
                return(candidate);
            }
            else
            {
                return(winner);
            }
        }
Code example #32
File: LAS_FCFS_F2.cs Project: rachmadvwp/NOCulator
        /**
         * This is the main function of interest. Another memory scheduler needs to
         * implement this function in a different way!
         */
        public override MemoryRequest get_next_req(MemCtlr mem)
        {
            MemoryRequest next_req = null;

            //search in global/local banks
            int bank_index;

            if (Config.memory.is_shared_MC)
            {
                bank_index = mem.mem_id * Config.memory.bank_max_per_mem;
            }
            else
            {
                bank_index = 0;
            }

            //search for next request
            for (int b = bank_index; b < bank_index + mem.bank_max; b++)  // for each bank
            {
                if (!bank[b].is_ready() || buf_load[b] == 0)
                {
                    continue;
                }

                MemoryRequest cur_req  = buf[b, 0];
                int           cur_proc = cur_req.request.requesterID;
                for (int j = 1; j < buf_load[b]; j++)
                {
                    if (!helper.is_RANK_FCFS(cur_req, buf[b, j], service_rank[cur_proc], service_rank[buf[b, j].request.requesterID]))
                    {
                        cur_req  = buf[b, j];
                        cur_proc = cur_req.request.requesterID; //keep the rank argument in sync with the current best request
                    }
                }

                if (next_req == null || !helper.is_RANK_FCFS(next_req, cur_req, service_rank[next_req.request.requesterID], service_rank[cur_req.request.requesterID]))
                {
                    next_req = cur_req;
                }
            }

            if (next_req != null)
            {
                last_service_time[next_req.request.requesterID] = Simulator.CurrentRound;
            }

            return(next_req);
        }
Code example #33
        //private bool RANK_FR_SR(MemoryRequest r1, Req r2, int rank1, int rank2, ref bool det)
        //{
        //    det = true;

        //    bool FR_det = new bool();
        //    bool FR_res = FR(r1, r2, ref FR_det);

        //    if (!FR_det) {
        //        bool RANK_det = new bool();
        //        bool RANK_res = RANK(rank1, rank2, ref RANK_det);

        //        det = RANK_det;
        //        return RANK_res;
        //    }

        //    if (FR_res) {
        //        //r1 is FR and r2 is FR
        //        if (rank1 >= Simulator.NumberOfApplication_Processors / 2)
        //            return true;
        //        else
        //            return false;
        //    }

        //    //r2 is FR and r1 is non-FR
        //    if (rank2 >= Simulator.NumberOfApplication_Processors / 2)
        //        return false;
        //    else
        //        return true;
        //}

        //private bool RANK_FR_SAS(MemoryRequest r1, Req r2, int as1, int as2, ref bool det)
        //{
        //    det = true;

        //    bool FR_det = new bool();
        //    bool FR_res = FR(r1, r2, ref FR_det);

        //    if (!FR_det) {
        //        bool RANK_det = new bool();
        //        bool RANK_res = RANK(rank1, rank2, ref RANK_det);

        //        det = RANK_det;
        //        return RANK_res;
        //    }

        //    if (FR_res) {
        //        //r1 is FR and r2 is FR
        //        if (rank1 >= Simulator.NumberOfApplication_Processors / 2)
        //            return true;
        //        else
        //            return false;
        //    }

        //    //r2 is FR and r1 is non-FR
        //    if (rank2 >= Simulator.NumberOfApplication_Processors / 2)
        //        return false;
        //    else
        //        return true;
        //}

        private bool FCFS(MemoryRequest r1, MemoryRequest r2, ref bool det)
        {
            det = true;

            if (r1.timeOfArrival < r2.timeOfArrival)
            {
                return(true);
            }

            if (r1.timeOfArrival > r2.timeOfArrival)
            {
                return(false);
            }

            det = false;
            return(false);
        }
Code example #34
        private bool BATCH_B(MemoryRequest r1, MemoryRequest r2, ref bool det)
        {
            det = true;

            if ((cur_max_load_per_proc[r1.request.requesterID] < 5) && (cur_max_load_per_proc[r2.request.requesterID] >= 5))
            {
                return(true);
            }

            if ((cur_max_load_per_proc[r1.request.requesterID] >= 5) && (cur_max_load_per_proc[r2.request.requesterID] < 5))
            {
                return(false);
            }

            det = false;
            return(false);
        }
Code example #35
File: MemSchedHelper.cs Project: hoangt/NOCulator
        private bool PLL(MemoryRequest r1, MemoryRequest r2, ref bool det)
        {
            det = true;

            int bank_cnt1 = 0, bank_cnt2 = 0;
            for (int b = 0; b < bank_max; b++) {

                if (!bank[b].is_ready() && (bank[b].get_cur_req().request.requesterID == r1.request.requesterID))
                    bank_cnt1++;

                if (!bank[b].is_ready() && (bank[b].get_cur_req().request.requesterID == r2.request.requesterID))
                    bank_cnt2++;
            }

            if (bank_cnt1 > bank_cnt2)
                return true;

            if (bank_cnt1 < bank_cnt2)
                return false;

            det = false;
            return false;
        }
Code example #36
 public override void remove_req(MemoryRequest request)
 {
     if (request.isMarked) {
         markedReqThisBatchPerPriority[threadPriority[request.request.requesterID]]--;
     }
     base.remove_req(request);
 }
Code example #37
File: MemoryCoalescing.cs Project: hirous/test
 protected uint GetCombineBit(MemoryRequest mreq)
 {
     int x = (int)(mreq.request.address & MemoryCoalescingCombineMask) >> MemoryCoalescingCombineShift;
     if(mreq.mem_size == 32)
         return (uint)1 << x;
     else if(mreq.mem_size == 64)
         return (uint)3 << x;
     else
         throw new Exception(String.Format("unsupported MemoryCoalescing mreq.mem_size: {0}",mreq.mem_size));
 }
Code example #38
File: OtherMemSched.cs Project: hoangt/NOCulator
        public override void remove_req(MemoryRequest request)
        {
            int threadID = request.request.requesterID;
            nrTotalRequests[threadID]++;
            if (request.isMarked)
            {
                nrTotalMarkedRequests[threadID]++;
                curMarkedPerProc[threadID]--;
                curMarkedPerProcBank[threadID, request.glob_b_index]--;
                // do not touch thisPeriodMarkedPerProcBank[procID, bank]

                // If there had been requests marked, but they are all done now, 
                // increase the stats variables!
                if (curMarkedPerProc[threadID] == 0 && thisPeriodMarkedPerProc[threadID] > 0)
                {
                    totalBatchCompletionTime[threadID] += MemCtlr.cycle - currentBatchStartTime;
                    numberOfActiveBatches[threadID]++;

                    if (samplingRankingPolicy)
                    {
                        sample_totalBatchCompletionTime[threadID] += MemCtlr.cycle - currentBatchStartTime;
                        sample_numberOfActiveBatches[threadID]++;

                        thisBatchCompletionTime += MemCtlr.cycle - currentBatchStartTime;
                        thisBatchCompletedThreadCount++;

                    }

                }


                markedReqThisBatch--;
                if (markedReqThisBatch == 0)
                    Config.memory.tavgNumReqPBPerProcRemark += MemCtlr.cycle % Config.memory.mark_interval;
            }
            base.remove_req(request);
        }
Code example #39
File: OtherMemSched.cs Project: hoangt/NOCulator
        /**
       * Returns true if r1 has higher priority than r2, if both requests are in the same bank!
       */
        protected bool higherFVDPriority(MemoryRequest r1, MemoryRequest r2, ulong VFT1, ulong VFT2)
        {
            // 1. Priority: First-Virtual Deadline first (regardless of row-buffer)
            // 2. Priority: If from same thread -> use open row buffer first!

            if (r1.request.requesterID != r2.request.requesterID)
                return (VFT1 < VFT2);

            // NOTE: Nesbit does not actually write it this way. It should always have a cap!!!
            else  // if two requests from the same thread - probably prioritize open row hit first ?!
            {
                bool isRowHit1 = (r1.r_index == bank[r1.glob_b_index].get_cur_row());
                bool isRowHit2 = (r2.r_index == bank[r2.glob_b_index].get_cur_row());
                if (!isRowHit1 && isRowHit2)
                    return false;
                else if (isRowHit1 && !isRowHit2)
                    return true;
                else // either both the same row or both a different row!
                    return (r1.timeOfArrival < r2.timeOfArrival);

            }
        }
Code example #40
File: MemoryCoalescing.cs Project: hirous/test
        // Called by CPU.cs to issue a request to the MemoryCoalescing
        // This just places the request in the appropriate client queue
        // This cannot be used when we model the network
        public void issueReq(int targetID, Request req, Simulator.Ready cb)
        {
//            Console.WriteLine("In MemoryCoalescing, issueReq is called requester {0}, addr = {1} at cycle {2}", req.requesterID, req.address, Simulator.CurrentRound);
            MemoryRequest mreq = new MemoryRequest(req, cb);
            mreq.from_GPU = true;
            //Console.WriteLine("Get a GPU Request {0}", req.from_GPU);
            int c = (int)req.client;
            int w = req.write?1:0;
            if(Config.useMemoryCoalescing)
            {
//            Console.WriteLine("In MemoryCoalescing, enqueue to the client queue requester {0}, addr = {1} at cycle {2}", req.requesterID, req.address, Simulator.CurrentRound);
                clientQueue[c,w].Enqueue(new Tuple3(targetID,Simulator.CurrentRound,mreq));
            }
            else
            {
                bool l1hit = false, l1upgr = false, l1ev = false, l1wb = false;
                bool l2access = false, l2hit = false, l2ev = false, l2wb = false, c2c = false;
                Simulator.network.cache.access(req.requesterID, req.address, req.write, cb, out l1hit, out l1upgr, out l1ev, out l1wb, out l2access, out l2hit, out l2ev, out l2wb, out c2c);
            }
        }
Code example #41
File: Mem.cs Project: hoangt/NOCulator
        /*
        public void receivePacket(MemoryPacket p)
        {
            Simulator.Ready cb;
            
            //receive WB or request from memory        
            if(p.type == MemoryRequestType.RD)
            {
                cb = delegate()
                    {
                        MemoryPacket mp = new MemoryPacket(
                            p.request, p.block,
                            MemoryRequestType.DAT, p.dest, p.src);

                        node.queuePacket(mp);
                    };                
            }
            else
            {
                // WB don't need a callback
                cb = delegate(){};
            }
                        
            access(p.request, cb);
        }
        */

        public void access(Request req, Simulator.Ready cb)
        {
            MemoryRequest mreq = new MemoryRequest(req, cb);
            sched.issue_req(mreq);
            bank[mreq.b_index].outstandingReqs_perapp[req.requesterID]++;
            bank[mreq.b_index].outstandingReqs++;
        }
Code example #42
File: Bank.cs Project: anderson1008/NOCulator
        /**
         * Set a memory request to the bank.
         * This can only be done if there are no requests currently being serviced.
         * Time left to service the request is set to full value.
         * @param req the memory request
         */
        public void add_req(MemoryRequest req)
        {
            //check if current request has been serviced
            Debug.Assert(cur_req == null);

            //proceed to service new request; update as the current request
            cur_req = req;
            is_cur_marked = cur_req.isMarked;

            //----- STATS START -----
            stat.inc(ref BankStat.req_cnt[cur_req.request.requesterID]);
            //Simulator.stats.bank_access_persrc[bank_id, cur_req.request.requesterID].Add();
            if (cur_req.isMarked)
                stat.inc(ref BankStat.marked_req_cnt[cur_req.request.requesterID]);
            else
                stat.inc(ref BankStat.unmarked_req_cnt[cur_req.request.requesterID]);
            //----- STATS END ------
            
            //time to serve the request; bus latency
            wait_left = Config.memory.bus_busy_time;

            //time to serve the request; row access latency
            if (state == RowState.Closed) {
                //row is closed
                wait_left += Config.memory.row_closed_latency;
                state = RowState.Open;
            }
            else {
                //row is open
                if (cur_req.r_index == cur_row && !Config.memory.row_same_latency) {
                    //hit
                    stat.inc(ref stat.row_hit);
                    stat.inc(ref stat.row_hit_per_proc[cur_req.request.requesterID]);
                    //Simulator.stats.bank_rowhits_persrc[bank_id, cur_req.request.requesterID].Add();

                    wait_left += Config.memory.row_hit_latency;
                }
                else {
                    //conflict
                    stat.inc(ref stat.row_miss);
                    stat.inc(ref stat.row_miss_per_proc[cur_req.request.requesterID]);

                    wait_left += Config.memory.row_conflict_latency;

                    //Close row, mark last cycle row to be closed was open
                    lastOpen[cur_row] = Simulator.CurrentRound;
                }
            }

            //set as current row
            cur_row = cur_req.r_index;

        }
Code example #43
File: OtherMemSched.cs Project: hoangt/NOCulator
        /**
      * Returns true if r1 has higher priority than r2, if both requests are in the same bank!
         * Writebacks are deprioritized
      */
        protected bool higherNesbitPriorityWB(MemoryRequest r1, MemoryRequest r2, ulong VFT1, ulong VFT2)
        {
            // 1. Priority: Requests with the open row hit
            // 2. Priority: Read request before writeback request
            // 3. Priority: Thread with smaller virtual finish time
            // 4. Priority: Requests with earlier arrival time
            bool isRowHit1 = (r1.r_index == bank[r1.glob_b_index].get_cur_row());
            bool isRowHit2 = (r2.r_index == bank[r2.glob_b_index].get_cur_row());
            bool isWriteback1 = (r1.type == MemoryRequestType.WB);
            bool isWriteback2 = (r2.type == MemoryRequestType.WB);

            if (!isRowHit1 && !isRowHit2)
            {
                if (isWriteback1 && !isWriteback2)
                    return false;
                else if (!isWriteback1 && isWriteback2)
                    return true;
                else if (r1.request.requesterID != r2.request.requesterID) // which thread should be prioritized?
                    return (VFT1 < VFT2);
                else  // if two requests from the same thread 
                    return (r1.timeOfArrival < r2.timeOfArrival);
            }
            else if (!isRowHit1 && isRowHit2)
                return false;
            else if (isRowHit1 && !isRowHit2)
                return true;
            // (r1.rowIndex != currentRow1 && r2.rowIndex != currentRow2)
            else
            {
                if (isWriteback1 && !isWriteback2)
                    return false;
                else if (!isWriteback1 && isWriteback2)
                    return true;
                else if (r1.request.requesterID != r2.request.requesterID)
                    return (VFT1 < VFT2);
                else  // if two requests from the same thread 
                    return (r1.timeOfArrival < r2.timeOfArrival);
            }

        }
Code example #44
File: OtherMemSched.cs Project: hoangt/NOCulator
        /**
         * First checks whether the value has already been computed!
         * If so, don't compute it again!
         */
        protected override ulong computeVirtualFinishTime(MemoryRequest r)
        {
            if (bank[r.glob_b_index].get_cur_row() == r.r_index)
                if (VFTHitCache[r.request.requesterID, r.glob_b_index] != 0)
                {
                    return VFTHitCache[r.request.requesterID, r.glob_b_index];
                }
                else
                {
                    VFTHitCache[r.request.requesterID, r.glob_b_index] = base.computeVirtualFinishTime(r);
                    return VFTHitCache[r.request.requesterID, r.glob_b_index];
                }
            else  // if bank[r.bankIndex].getCurrentRowIndex() != r.rowIndex
            {
                if (VFTMissCache[r.request.requesterID, r.glob_b_index] != 0)
                {
                    return VFTMissCache[r.request.requesterID, r.glob_b_index];
                }
                else
                {
                    VFTMissCache[r.request.requesterID, r.glob_b_index] = base.computeVirtualFinishTime(r);
                    return VFTMissCache[r.request.requesterID, r.glob_b_index];
                }
            }

        }
Code example #45
File: OtherMemSched.cs Project: hoangt/NOCulator
 public override bool issue_req(MemoryRequest request)
 {
     //if (curMarkedPerProcBank[request.threadID, request.bankIndex] < Simulator.BatchingCap)
     if (cur_period_marked_per_procbank[request.request.requesterID, request.glob_b_index] < Config.memory.batch_cap)
     {
         request.isMarked = true;
         cur_marked_per_procbank[request.request.requesterID, request.glob_b_index]++;
         cur_period_marked_per_procbank[request.request.requesterID, request.glob_b_index]++;
         cur_marked_req++;
         bank[request.glob_b_index].update_marking();
     }
     return base.issue_req(request);
 }
Code example #46
File: OtherMemSched.cs Project: hoangt/NOCulator
        /**
      * Returns true if r1 has higher priority than r2, if both requests are in the same bank!
      */
        protected bool higherFVDPriorityWB(MemoryRequest r1, MemoryRequest r2, ulong VFT1, ulong VFT2)
        {
            // 1. Priority: Read operation before Writeback 
            // 2. Priority: First-Virtual Deadline first (regardless of row-buffer)
            // 3. Priority: If from same thread -> use open row buffer first!
            bool isWriteback1 = (r1.type == MemoryRequestType.WB);
            bool isWriteback2 = (r2.type == MemoryRequestType.WB);

            if (isWriteback1 && !isWriteback2)
                return false;
            else if (!isWriteback1 && isWriteback2)
                return true;
            else if (r1.request.requesterID != r2.request.requesterID)
                return (VFT1 < VFT2);
            else  // if two requests from the same thread - probably prioritize open row hit first ?!
            {
                bool isRowHit1 = (r1.r_index == bank[r1.glob_b_index].get_cur_row());
                bool isRowHit2 = (r2.r_index == bank[r2.glob_b_index].get_cur_row());
                if (!isRowHit1 && isRowHit2)
                    return false;
                else if (isRowHit1 && !isRowHit2)
                    return true;
                else // either both the same row or both a different row!
                    return (r1.timeOfArrival < r2.timeOfArrival);

            }
        }
Code example #47
File: OtherMemSched.cs Project: hoangt/NOCulator
 public override void remove_req(MemoryRequest request)
 {
     nrOfSamples[request.request.requesterID]++;
     updateIdealLatencies(request);
     // Remove the request from the buffer. 
     base.remove_req(request);
 }
Code example #48
File: MemoryCoalescing.cs Project: hirous/test
 // This is needed just because of the wacky binding/scoping rules used by
 // delegates; without this, you can't keep recursively wrapping delegates
 // within a loop.
 protected void WrapDelegates(MemoryRequest m1, MemoryRequest m2)
 {
     Simulator.Ready prev_cb = m1.cb;
     m1.cb = delegate() { prev_cb(); m2.cb(); };
 }
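
WrapDelegates exists because a lambda that read m1.cb directly would capture the field itself and, after the reassignment, end up invoking its own replacement; snapshotting the old delegate into a local first breaks that cycle. A tiny, self-contained illustration of the same pitfall, using a hypothetical Node type and Action callbacks rather than Simulator.Ready:

using System;

// Minimal sketch (not repository code) of the closure pitfall WrapDelegates avoids.
class DelegateWrapSketch
{
    class Node { public Action cb = () => { }; }

    static void Wrap(Node n, Action extra)
    {
        Action prev = n.cb;                 // snapshot the current callback
        n.cb = () => { prev(); extra(); };  // new callback runs the old one, then the extra work
        // Writing "n.cb = () => { n.cb(); extra(); };" instead would capture the field
        // itself and recurse endlessly once invoked.
    }

    static void Main()
    {
        var node = new Node();
        Wrap(node, () => Console.WriteLine("first wrap"));
        Wrap(node, () => Console.WriteLine("second wrap"));
        node.cb(); // prints "first wrap" then "second wrap"
    }
}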
Code example #49
File: OtherMemSched.cs Project: hoangt/NOCulator
 protected virtual void updateIdealLatencies(MemoryRequest request)
 {
     // Here, update latencies and currentRowBuffers!
     if (currentRowBuffer[request.request.requesterID, request.glob_b_index] == EMPTY_SLOT)
     {   // Row closed. 
         idealLatency[request.request.requesterID] += (ulong)(Config.memory.bus_busy_time + Config.memory.row_closed_latency);
         currentRowBuffer[request.request.requesterID, request.glob_b_index] = request.r_index;
     }
     else if (request.r_index == currentRowBuffer[request.request.requesterID, request.glob_b_index])
     {   // Row hit. 
         idealLatency[request.request.requesterID] += (ulong)(Config.memory.bus_busy_time + Config.memory.row_hit_latency);
     }
     else
     {   // Row conflict. 
         idealLatency[request.request.requesterID] += (ulong)(Config.memory.bus_busy_time + Config.memory.row_conflict_latency);
         currentRowBuffer[request.request.requesterID, request.glob_b_index] = request.r_index;
     }
 }
Code example #50
File: MemoryCoalescing.cs Project: hirous/test
 public Tuple3(int i1, ulong i2, MemoryRequest i3)
 {
     Item1 = i1;
     Item2 = i2;
     Item3 = i3;
 }
Code example #51
File: OtherMemSched.cs Project: hoangt/NOCulator
 protected override void updateIdealLatencies(MemoryRequest request)
 {
     if (request.type != MemoryRequestType.WB)
     {
         base.updateIdealLatencies(request);
     }
 }
Code example #52
File: MemoryCoalescing.cs Project: hirous/test
 protected bool BankAvailable(MemoryRequest mreq)
 {
     return (mreq.shift_row == currentRow[mreq.channel_index,mreq.rank_index,mreq.bank_index]) ||
             whenAvailable[mreq.channel_index,mreq.rank_index,mreq.bank_index] <= Simulator.CurrentRound;
 }
Code example #53
File: OtherMemSched.cs Project: hoangt/NOCulator
        /**
         * Updates the Time-of-arrival queue and calls the base function
         */
        public override bool issue_req(MemoryRequest request)
        {
            bool success = base.issue_req(request);

            if (success)
            {
                // if it is the first request in the buffer issued by this thread, adjust arrivalTime
                if (oldestArrivalTime[request.request.requesterID] == EMPTY_SLOT)
                    oldestArrivalTime[request.request.requesterID] = request.timeOfArrival;

                // update queue
                toaQueue[request.request.requesterID, youngest[request.request.requesterID]] = request.timeOfArrival;
                youngest[request.request.requesterID]++;
                //     Console.WriteLine(Memory.memoryTime + "   Issue: " + youngest[request.threadID] + "   " + oldest[request.threadID] + "  " + request);
                if (youngest[request.request.requesterID] == queueSize) youngest[request.request.requesterID] = 0;
            }
            return success;
        }
Code example #54
File: MemSchedHelper.cs Project: hoangt/NOCulator
        public bool is_BATCH(MemoryRequest r1, MemoryRequest r2)
        {
            bool det = false;
            bool result;

            result = BATCH_A(r1, r2, ref det);
            if (det)
                return result;

            result = BATCH_B(r1, r2, ref det);
            if (det)
                return result;

            result = FR(r1, r2, ref det);
            if (det)
                return result;

            result = FCFS(r1, r2, ref det);
            return result;
        }
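Each helper in this chain follows the same contract: it sets det only when it can order the pair, and the first rule that decides wins, with FCFS sitting last as the fallback. A sketch of what such a rule might look like (this illustrates the contract only, not the library's actual helpers):

            // Sketch only: a ref-det tie-breaking rule in the style of the helpers above.
            // Returns true if r1 should be scheduled before r2; det reports whether this
            // rule could decide at all (the return value is ignored when det stays false).
            bool OLDEST_FIRST(MemoryRequest r1, MemoryRequest r2, ref bool det)
            {
                if (r1.timeOfArrival == r2.timeOfArrival)
                {
                    det = false;     // undecided: fall through to the next rule
                    return false;
                }
                det = true;          // decided: the older request wins
                return r1.timeOfArrival < r2.timeOfArrival;
            }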
コード例 #55
0
ファイル: OtherMemSched.cs プロジェクト: hoangt/NOCulator
        /**
         * Update the virtual-time (VTMS) registers and the time-of-arrival
         * queue for the request that has been selected for service.
         */
        protected void updateVMTSRegisters(MemoryRequest nextRequest)
        {
            int thread = nextRequest.request.requesterID;
            int bk = nextRequest.glob_b_index;

            // first update the oldest virtual arrival time!
            int cur = oldest[thread];
            int sentinel = oldest[thread] - 1;
            if (sentinel == -1) sentinel = queueSize - 1;
            while (nextRequest.timeOfArrival != toaQueue[thread, cur] && cur != sentinel)
            {
                cur++;
                if (cur == queueSize) cur = 0;
            }
            if (nextRequest.timeOfArrival != toaQueue[thread, cur])
                throw new Exception("Time-of-Arrival Queue has been corrupted. Entry is missing.");

            oldestArrivalTime[thread] = toaQueue[thread, oldest[thread]];

            // The cleanup below is intentionally ordered after oldestArrivalTime[thread] has been captured above.
            toaQueue[thread, cur] = EMPTY_SLOT;
            //      Console.WriteLine(Memory.memoryTime + "   Remove1: " + youngest[thread] + "   " + oldest[thread] + "  " + nextRequest);

            while (toaQueue[thread, oldest[thread]] == EMPTY_SLOT && oldest[thread] != youngest[thread])
            {
                oldest[thread]++;
                if (oldest[thread] == queueSize) oldest[thread] = 0;
            }
            //       Console.WriteLine(Memory.memoryTime + "   Remove2: " + youngest[thread] + "   " + oldest[thread]);
            // End of the reordered cleanup.

            // now update the vtms registers
            int serviceTime = Config.memory.row_conflict_latency;
            if (nextRequest.r_index == bank[bk].get_cur_row()) serviceTime = Config.memory.row_hit_latency;

            // TODO: THIS IS ONLY TRUE IF PRIORITIES ARE EQUAL!!!
            if (Config.memory.use_weight > 0)
            {
                vtmsBankFinishTime[thread, bk] =
                    Math.Max(oldestArrivalTime[thread], vtmsBankFinishTime[thread, bk]) + (ulong)((double)serviceTime * (double)(1.0 / Config.memory.weight[thread]));
                vtmsBusFinishTime[thread] =
                    Math.Max(vtmsBankFinishTime[thread, bk], vtmsBusFinishTime[thread]) + (ulong)((double)Config.memory.bus_busy_time * (double)(1.0 / Config.memory.weight[thread]));
            }
            else
            {
                vtmsBankFinishTime[thread, bk] =
                    Math.Max(oldestArrivalTime[thread], vtmsBankFinishTime[thread, bk]) + (ulong)(serviceTime * Config.N);
                vtmsBusFinishTime[thread] =
                    Math.Max(vtmsBankFinishTime[thread, bk], vtmsBusFinishTime[thread]) + (ulong)(Config.memory.bus_busy_time * Config.N);
            }
            // ONUR
            //System.Console.WriteLine("T: " + thread + " B: " + bk + " Bank: " + vtmsBankFinishTime[thread, bk] + " Bus: " + vtmsBusFinishTime[thread] + " R0:" + currentNonWBPerProcBank[0,0] + " R1:" + currentNonWBPerProcBank[1,0] + " A0: " + currentLoadPerProcBank[0, 0] + " A1: " + currentLoadPerProcBank[1, 0]);

        }
コード例 #56
0
ファイル: OtherMemSched.cs プロジェクト: hoangt/NOCulator
        protected virtual ulong computeVirtualFinishTime(MemoryRequest r)
        {
            int serviceTime = Config.memory.row_conflict_latency;
            if (r.r_index == bank[r.glob_b_index].get_cur_row()) serviceTime = Config.memory.row_hit_latency;

            if (Config.memory.use_weight > 0)
            {
                return Math.Max(
                        Math.Max(
                            oldestArrivalTime[r.request.requesterID],
                            vtmsBankFinishTime[r.request.requesterID, r.glob_b_index]
                        ) + (ulong)((double)serviceTime * (double)(1.0 / Config.memory.weight[r.request.requesterID])),
                        vtmsBusFinishTime[r.request.requesterID]
                    ) + (ulong)((double)Config.memory.bus_busy_time * (double)(1.0 / Config.memory.weight[r.request.requesterID]));
            }
            else
            {
                return Math.Max(
                           Math.Max(
                               oldestArrivalTime[r.request.requesterID],
                               vtmsBankFinishTime[r.request.requesterID, r.glob_b_index]
                           ) + (ulong)(serviceTime * Config.N),
                           vtmsBusFinishTime[r.request.requesterID]
                       ) + (ulong)(Config.memory.bus_busy_time * Config.N);
            }
        }
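The two branches differ only in how the service and bus times are scaled: by 1/weight[thread] when per-thread weights are enabled, and by the thread count Config.N otherwise. As a worked example with assumed values row_hit_latency = 10 and bus_busy_time = 4: a row-hit request from a thread with weight 0.25 pushes the virtual bank finish time forward by 10 / 0.25 = 40 and the virtual bus finish time by a further 4 / 0.25 = 16, whereas with weights disabled and N = 8 the same request adds 10 × 8 = 80 and 4 × 8 = 32. In both cases the max() terms start the request's virtual service no earlier than the thread's oldest outstanding arrival and no earlier than the virtual work already charged to that bank and to the bus.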
コード例 #57
0
ファイル: Bank.cs プロジェクト: anderson1008/NOCulator
        /**
         * Progress time and (possibly) service the current request. 
         * Decrement the time left to fully service the current request.
         * If it reaches zero, service it and notify the processor.
         */
        public void tick()
        {
            //for (int i = 0; i < Config.N; i++){
                //Console.WriteLine(bank_id.ToString() + '\t' + i.ToString());
                //Simulator.stats.bank_queuedepth_persrc[bank_id, i].Add(outstandingReqs_perapp[i]);
            //}
            Simulator.stats.bank_queuedepth[bank_id].Add(outstandingReqs);

            //sanity check
            Debug.Assert((wait_left >= 0) && (wait_left <= Config.memory.row_conflict_latency + Config.memory.bus_busy_time));
            Debug.Assert(!(cur_req == null && wait_left != 0));
            Debug.Assert(!(cur_req != null && wait_left == 0));

            //decrement time left to serve current request
            if (wait_left > 0)
                wait_left--;

            //can't serve current request
            if (cur_req == null || wait_left != 0)
                return;

            //we can now serve the current request

            //Console.WriteLine("Request complete, sending reply");

            //serve request by removing current request from scheduler buffer
            sched.remove_req(cur_req);
            outstandingReqs--;
            if (outstandingReqs < 0)
                throw new Exception("Bank has negative number of requests!");
            outstandingReqs_perapp[cur_req.request.requesterID]--;
            if (outstandingReqs_perapp[cur_req.request.requesterID] < 0)
                throw new Exception("App has negative number of requests!");
            cur_req.cb();

            //send back the serviced request to cache (which in turn sends it to processor)
            Request request = cur_req.request;
            if (request == null) throw new Exception("No request! don't know who to send it back to!");
            //Console.WriteLine("Returning mc_data packet to cache slice at Proc {0}, ({1},{2})", mcaddrpacket.source.ID, mcaddrpacket.source.x, mcaddrpacket.source.y);


            CPU cpu = Simulator.network.nodes[request.requesterID].cpu;
            cpu.outstandingReqsMemory--;
            if (cpu.outstandingReqsMemory == 0)
            {
                Simulator.stats.memory_episode_persrc[request.requesterID].Add(Simulator.CurrentRound - cpu.outstandingReqsMemoryCycle);
                cpu.outstandingReqsMemoryCycle = Simulator.CurrentRound;
            }

            //----- STATS START -----
            stat.dec(ref BankStat.req_cnt[request.requesterID]);
            if (cur_req.isMarked)
                stat.dec(ref BankStat.marked_req_cnt[request.requesterID]);
            else
                stat.dec(ref BankStat.unmarked_req_cnt[request.requesterID]);
            //----- STATS END ------

            //reset current req
            cur_req = null;
        }
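The assertions at the top of tick() capture the bank's invariant: cur_req and wait_left are set and cleared together. A hypothetical driver illustrating how a controller might pump the banks each cycle (is_ready and issue_next are assumed helper names, not members of this class):

            // Hypothetical per-cycle driver; the helpers marked below are assumptions.
            foreach (Bank b in banks)
                b.tick();                   // counts down wait_left; completes a request when it reaches 0
            foreach (Bank b in banks)
                if (b.is_ready())           // assumed: true when cur_req == null
                    b.issue_next();         // assumed: installs cur_req and a nonzero wait_left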