/// <summary>
/// Return true if the memory request type is a Fetch (any width/variant);
/// false for all other cycle types (Stores, None, etc.).
/// </summary>
public bool IsFetch(MemoryCycle c)
{
    switch (c)
    {
        case MemoryCycle.Fetch:
        case MemoryCycle.Fetch2:
        case MemoryCycle.Fetch4:
        case MemoryCycle.Fetch4R:
            return true;

        default:
            return false;
    }
}
/// <summary>
/// Requests a specific memory cycle type at the specified address.  Here we
/// route the request to the separate fetch and store queues, which vastly
/// simplifies the complicated overlapped operation of RasterOp.
/// </summary>
/// <param name="id">Transaction ID</param>
/// <param name="address">Starting address</param>
/// <param name="cycleType">Any Fetch or Store type</param>
public void RequestMemoryCycle(long id, int address, MemoryCycle cycleType)
{
#if TRACING_ENABLED
    if (Trace.TraceOn)
    {
        Trace.Log(LogType.MemoryState, "\nMemory: Requested {0} cycle in T{1} ID={2} addr={3:x5}", cycleType, _Tstate, id, address);
    }
#endif
    //
    // Queue up the request.  We're in no-man's land at the bottom of the CPU
    // cycle, but the queue controller will have stalled the processor until
    // the correct time, which will start at the next Tick().  In some cases
    // (buggy microcode) the hardware would actually ignore memory references;
    // we don't do that here, but read all the gory details in the comments at
    // the end of MemoryController.cs.
    //
    if (!IsFetch(cycleType))
    {
        // Stores go to the MDO (Memory Data Out) queue
        _mdoQueue.Request(id, address, cycleType);
    }
    else
    {
        // Fetches go to the MDI (Memory Data In) queue
        _mdiQueue.Request(id, address, cycleType);
    }
}
/// <summary>
/// Clocks this memory queue's state machine, setting flags appropriately for
/// the current running request, or setting up for the next one.  Called from
/// Memory.Tick(), this executes at the top of the microcycle, so it may abort
/// the current instruction if a new request is issued at the wrong time.
/// </summary>
/// <param name="nextCycle">Cycle type the CPU is requesting for the coming
/// microcycle (MemoryCycle.None if this queue is idle this tick).</param>
public void Clock(MemoryCycle nextCycle)
{
#if TRACING_ENABLED
    if (Trace.TraceOn)
    {
        // Note: the format string labels both {4} (incoming cycle) and
        // {6} (_nextState) as "next="; read them positionally.
        Trace.Log(LogType.MemoryState, "{0} queue IN: Clock T{1} cycle={2} bkm={3} next={4} state={5} next={6}", _name, Tstate, _current.CycleType, _bookmark, nextCycle, _state, _nextState);
    }
#endif
    // The order of these three calls is the contract of this method:

    // Update the current op
    Recognize();

    // Update state and set flags for this cycle
    RunStateMachine();

    // Update bookmarks for the next cycle
    UpdateBookmarks(nextCycle);

#if TRACING_ENABLED
    if (Trace.TraceOn)
    {
        Trace.Log(LogType.MemoryState, "{0} queue OUT: Clock T{1} cycle={2} bkm={3} next={4} state={5} next={6}", _name, Tstate, _current.CycleType, _bookmark, nextCycle, _state, _nextState);
    }
#endif
}
/// <summary>
/// Accept a new memory request (at the bottom of the CPU cycle, after R is
/// computed).  Because the CPU now aborts until the correct cycle when issuing
/// new memory operations, we simply latch the new request and let the state
/// machine mechanism do all the right magic at the next Clock().
/// </summary>
/// <param name="id">Transaction ID</param>
/// <param name="startAddr">Starting address of the request</param>
/// <param name="cycleType">Fetch or Store variant being requested</param>
public void Request(long id, int startAddr, MemoryCycle cycleType)
{
#if DEBUG
    // Should never happen: a new request arriving while one is still latched
    if (_pending.Active)
    {
        Console.WriteLine("{0} queue: ** new Request() when _pending already Active?", _name);
    }
#endif
    // Latch the request; the state machine picks it up at the next Clock()
    _pending.CycleType = cycleType;
    _pending.StartAddress = startAddr;
    _pending.RequestID = id;
    _pending.Bookmark = _nextBookmark;
    _pending.Active = true;
}
/// <summary>
/// First half of the memory cycle: clocks the state counter, clocks the MDI
/// and MDO queues, then sets up and executes the current Fetch (if any).
/// Asserts the Wait signal if the CPU should abort this cycle.
/// </summary>
/// <param name="cycleType">Cycle type requested by the CPU this tick</param>
public void Tick(MemoryCycle cycleType)
{
    // Advance the T-state counter, wrapping T0..T3
    _Tstate = (_Tstate + 1) & 0x3;

#if TRACING_ENABLED
    if (Trace.TraceOn)
    {
        Trace.Log(LogType.MemoryState, "\nMemory: Tick! T{0} cycle={1}", _Tstate, cycleType);
    }
#endif

    // Segregate Fetches and Stores into separate queues; the queue not
    // receiving this request is clocked with MemoryCycle.None.
    if (!IsFetch(cycleType))
    {
        _mdiQueue.Clock(MemoryCycle.None);
        _mdoQueue.Clock(cycleType);
    }
    else
    {
        _mdiQueue.Clock(cycleType);
        _mdoQueue.Clock(MemoryCycle.None);
    }

    ExecuteFetch();

    //
    // Set the wait flag if we need to abort the current instruction.  If
    // output is pending we never wait; otherwise the combined status of the
    // two request queues determines the result.  (Short-circuit evaluation
    // preserves the original if/else semantics exactly.)
    //
    _wait = !MDONeeded && (_mdiQueue.Wait || _mdoQueue.Wait);
}
/// <summary>
/// Sets bookmarks for the next cycle, and modifies the current one if necessary.
/// WARNING: THIS IS WHERE THE SAUSAGE IS MADE.
/// </summary>
/// <param name="nextCycle">Memory cycle requested by the next microinstruction
/// (MemoryCycle.None if it makes no memory reference).</param>
private void UpdateBookmarks(MemoryCycle nextCycle)
{
    if (nextCycle == MemoryCycle.None)
    {
        // If no active or pending op, reset our bookmark
        if (!_current.Active && !_pending.Active)
        {
            _bookmark = 0;
        }
    }
    else
    {
        // This microinstruction specifies a new memory request: initialize
        // the next bookmark value based on the request type.  (The bookmark
        // is an index into the BKM16 ROM consulted by GetBookmarkEntry below.)
        int book = (int)nextCycle;

        //
        // Special cases for RasterOp
        //
        if (RasterOp.Instance.Enabled)
        {
            if (Tstate == 0)
            {
                // First: we're allowed to issue Store4/4R in T0, ahead of the
                // usual T3.  So we tweak the cycle type to index the bookmark
                // ROM with the modified timings.
                if (nextCycle == MemoryCycle.Store4R)
                {
                    book = 0x2;     // "RopStore4R"
                }
                else if (nextCycle == MemoryCycle.Store4)
                {
                    book = 0x4;     // "RopStore4"
                }
            }
            else if (Tstate == 3)
            {
                //
                // Second: Fetch4/4Rs are issued back-to-back (in the correct t3)
                // but must NOT introduce the possible CPU abort of a WaitT2 state;
                // MDI must remain valid AND the index values must count down correctly
                // for the operation in progress, so that after the t0,t1 complete the
                // next op's four words arrive in the four subsequent Tstates.  This
                // introduces two additional fake cycle types, as with the case above.
                // Note that these cases update _bookmark immediately, not just the
                // value latched for next cycle.  Ugh..
                //
                if (_current.CycleType == MemoryCycle.Fetch4R && nextCycle == MemoryCycle.Fetch4R)
                {
                    _bookmark = book = 0x1;     // "RopFetch4R"
                }
                else if (_current.CycleType == MemoryCycle.Fetch4 && nextCycle == MemoryCycle.Fetch4)
                {
                    _bookmark = book = 0x3;     // "RopFetch4"
                }
            }
        }

        // For RasterOp special cases, use the modified bookmark for the entire
        // cycle.  (May be overwritten again by the overlapped-Fetch cases below.)
        _nextBookmark = book;

        //
        // Special cases for indirect or overlapped Fetches (non-RasterOp)
        //
        if (MemoryBoard.Instance.IsFetch(nextCycle))
        {
            //
            // Back-to-back Fetch or Fetch2 requests present unique timing challenges
            // (and allow a small performance boost by eliminating some wait states).
            // To accommodate this with as little embarrassment as possible, we use a
            // transitional bookmark value to cover the overlap.  For a Fetch, this may
            // terminate the op early, invalidating one or more time slots where MDI is
            // valid (and forcing a CPU wait so that incorrect data is not returned).
            // In other cases we have to let the current op retire normally but drop
            // immediately into a WaitT2 (rather than WaitT3) for the new op.  There's
            // no pretty way to deal with this...
            //
            // Gory details in the comments below.  Look away now for "plausible deniability".
            //
            if (_current.CycleType == MemoryCycle.Fetch || _current.CycleType == MemoryCycle.Fetch2)
            {
                book = 0x6;                     // "IndFetch" covers the overlap...
                _bookmark = book;               // ...force immediate switch for the (t2,t3)...
                _nextBookmark = (int)nextCycle; // ...but switch back to the real cycle type in Request()
            }
            else if (_current.CycleType == MemoryCycle.Fetch4 && !RasterOp.Instance.Enabled)
            {
                book = 0x7;                     // "IndFetch4" is for the specific case of a RefillOp
                _bookmark = book;               // followed immediately by another Fetch; can't clobber the
                _nextBookmark = (int)nextCycle; // last index word, or the last two OpFile bytes are screwed
            }
        }

        // Get a new set of flags -- these may modify the current cycle!
        BookmarkEntry flags = GetBookmarkEntry(book, _nextState);

#if DEBUG
        // If the Recognize flag is not set, we're really out in left field...
        // ... but all of this can go away entirely once things are fully debugged.
        if (!flags.Recognize)
        {
            Console.WriteLine("-->\t{0} queue: Recognize not set for new {1} request in T{2}!", _name, nextCycle, Tstate);

            // If the Abort flag isn't set either, our BKM16 ROM is buggy; force an
            // abort and just hope for the best?
            if (!flags.Abort)
            {
                Console.WriteLine("-->\tForced abort in T{0} due to new request in wrong cycle.", Tstate);
                Console.WriteLine("\tFlags: {0}", flags);
                DumpQueue();
                flags.Abort = true;
                // PERQSystem.Instance.Break();
            }
        }
#endif

        // If the done flag is set, retire the current op (may be early,
        // if a Fetch is overlapped)
        if (flags.Complete)
        {
#if TRACING_ENABLED
            if (Trace.TraceOn)
            {
                Trace.Log(LogType.MemoryState, "{0} queue: Terminated {1}", _name, _current);
            }
#endif
            _current.Clear();
        }

        // Set the wait and next state based on the new flags
        _wait = flags.Abort;
        _nextState = flags.NextState;
    }
}