//-------------------------------------------------
//  start - called by interface_pre_start so we
//  can set ourselves up
//-------------------------------------------------
public void start(device_execute_interface execute, int linenum)
{
    // remember the owning execute interface and which input line we are
    m_execute = execute;
    m_linenum = linenum;

    // put the line into its default state
    reset();
}
// for emergencies only!
//-------------------------------------------------
//  eat_all_cycles - eat a ton of cycles on all
//  CPUs to force a quick exit
//-------------------------------------------------
public void eat_all_cycles()
{
    // walk the linked list of executing devices and discard a huge
    // number of cycles on each, so every CPU leaves its execution
    // loop as quickly as possible
    device_execute_interface exec = m_execute_list;
    while (exec != null)
    {
        exec.eat_cycles(1000000000);
        exec = exec.m_nextexec;
    }
}
int m_qindex;   // index within the queue

//-------------------------------------------------
//  device_input - constructor
//-------------------------------------------------
public device_input()
{
    // no owning interface or line number until start() is called
    m_execute = null;
    m_linenum = 0;

    // default vector and line state
    m_stored_vector = 0;
    m_curvector = 0;
    m_curstate = (u8)line_state.CLEAR_LINE;

    // start with an empty event queue
    m_qindex = 0;
    Array.Clear(m_queue, 0, m_queue.Length);    // std::fill(std::begin(m_queue), std::end(m_queue), 0);
}
//-------------------------------------------------
//  start - called by interface_pre_start so we
//  can set ourselves up
//-------------------------------------------------
public void start(device_execute_interface execute, int linenum)
{
    // remember the owning execute interface and which input line we are
    m_execute = execute;
    m_linenum = linenum;

    reset();

    // register our state with the save machinery, keyed by line number
    device_t owner = m_execute.device();
    owner.save_item(m_stored_vector, "m_stored_vector", m_linenum);
    owner.save_item(m_curvector, "m_curvector", m_linenum);
    owner.save_item(m_curstate, "m_curstate", m_linenum);
}
// scheduling helpers

//-------------------------------------------------
//  compute_perfect_interleave - compute the
//  "perfect" interleave interval
//-------------------------------------------------
public void compute_perfect_interleave()
{
    // ensure we have a list of executing devices
    if (m_execute_list == null)
        rebuild_execute_list();

    // nothing to do without at least one executing device
    device_execute_interface first = m_execute_list;
    if (first == null)
        return;

    // scan for the smallest and second-smallest minimum quantum;
    // seed "smallest" from the first device and "perfect" with just
    // under one full second
    attoseconds_t smallest = first.minimum_quantum();
    attoseconds_t perfect = ATTOSECONDS_PER_SECOND - 1;
    device_execute_interface exec = first.m_nextexec;
    while (exec != null)
    {
        attoseconds_t curquantum = exec.minimum_quantum();
        if (curquantum < smallest)
        {
            // new smallest; previous smallest becomes the runner-up
            perfect = smallest;
            smallest = curquantum;
        }
        else if (curquantum < perfect)
        {
            perfect = curquantum;
        }

        exec = exec.m_nextexec;
    }

    // if this is a new minimum quantum, apply it
    if (m_quantum_minimum != perfect)
    {
        // adjust all the actuals; this doesn't affect the current
        m_quantum_minimum = perfect;
        for (quantum_slot quant = m_quantum_list.first(); quant != null; quant = quant.next())
            quant.actual_set(std.max(quant.requested(), m_quantum_minimum));
    }
}
//-------------------------------------------------
//  apply_suspend_changes - applies suspend/resume
//  changes to all device_execute_interfaces
//-------------------------------------------------
void apply_suspend_changes()
{
    // commit the pending (next) suspend state on every device, tracking
    // whether any bits actually changed
    u32 suspendchanged = 0;
    device_execute_interface exec = m_execute_list;
    while (exec != null)
    {
        suspendchanged |= exec.m_suspend ^ exec.m_nextsuspend;
        exec.m_suspend = exec.m_nextsuspend;
        // the TIMESLICE reason is cleared from the pending state each
        // time changes are applied
        exec.m_nextsuspend &= ~SUSPEND_REASON_TIMESLICE;
        exec.m_eatcycles = exec.m_nexteatcycles;
        exec = exec.m_nextexec;
    }

    // recompute the execute list if any CPUs changed their suspension state
    if (suspendchanged != 0)
        rebuild_execute_list();
    else
        m_suspend_changes_pending = false;
}
public attoseconds_t m_attoseconds_per_cycle;    // attoseconds per adjusted clock cycle

// construction/destruction

//-------------------------------------------------
//  device_execute_interface - constructor
//-------------------------------------------------
public device_execute_interface(machine_config mconfig, device_t device)
    : base(device, "execute")
{
    // scheduler hookup / configuration
    m_scheduler = null;
    m_disabled = false;

    // interrupt configuration
    m_vblank_interrupt = null;
    m_vblank_interrupt_screen = null;
    m_timed_interrupt = null;
    m_timed_interrupt_period = attotime.zero;
    m_driver_irq = null;
    m_timedint_timer = null;

    // execution state
    m_nextexec = null;
    m_profiler = profile_type.PROFILER_IDLE;
    m_icountptr = null;
    m_cycles_running = 0;
    m_cycles_stolen = 0;

    // suspend / trigger state
    m_suspend = 0;
    m_nextsuspend = 0;
    m_eatcycles = 0;
    m_nexteatcycles = 0;
    m_trigger = 0;
    m_inttrigger = 0;

    // clock / cycle accounting
    m_totalcycles = 0;
    m_divisor = 0;
    m_divshift = 0;
    m_cycles_per_second = 0;
    m_attoseconds_per_cycle = 0;

    // create the per-line input objects
    for (int i = 0; i < m_input.Length; i++)
        m_input[i] = new device_input();

    // configure the fast accessor
    assert(device.interfaces().m_execute == null);
    device.interfaces().m_execute = this;
}
//-------------------------------------------------
//  rebuild_execute_list - rebuild the list of
//  executing CPUs, moving suspended CPUs to the
//  end
//-------------------------------------------------
void rebuild_execute_list()
{
    // if we haven't yet set a scheduling quantum, do it now
    if (m_quantum_list.empty())
    {
        // set the core scheduling quantum, ensuring it's no longer than 60Hz
        attotime min_quantum = machine().config().maximum_quantum(attotime.from_hz(60));

        // if the configuration specifies a device to make perfect, pick that as the minimum
        device_execute_interface perfect_exec = machine().config().perfect_quantum_device();
        if (perfect_exec != null)
            min_quantum = std.min(new attotime(0, perfect_exec.minimum_quantum()), min_quantum);

        // inform the timer system of our decision
        add_scheduling_quantum(min_quantum, attotime.never);
    }

    // collect running devices first and suspended devices after them;
    // this replaces the original C++ tail-pointer list splicing
    List<device_execute_interface> ordered = new List<device_execute_interface>();
    List<device_execute_interface> suspended = new List<device_execute_interface>();
    foreach (device_execute_interface exec in new execute_interface_enumerator(machine().root_device()))
    {
        exec.m_nextexec = null;
        if (exec.m_suspend == 0)
            ordered.Add(exec);
        else
            suspended.Add(exec);
    }

    // suspended devices go at the end of the active list
    ordered.AddRange(suspended);

    // rebuild the singly-linked execute list in that order
    if (ordered.Count > 0)
    {
        m_execute_list = ordered[0];
        for (int i = 0; i + 1 < ordered.Count; i++)
            ordered[i].m_nextexec = ordered[i + 1];
        ordered[ordered.Count - 1].m_nextexec = null;
    }
}
// execution

//-------------------------------------------------
//  timeslice - execute all devices for a single
//  timeslice
//-------------------------------------------------
// Core scheduling loop: runs every executing device up to the next
// timer expiration, advancing each device's local time and then the
// scheduler's base time, quantum by quantum. Finally fires any timers
// that are now due via execute_timers().
public void timeslice()
{
    // debugger hooks are only invoked when debugging is enabled
    bool call_debugger = (machine().debug_flags & DEBUG_FLAG_ENABLED) != 0;

    // build the execution list if we don't have one yet
    //if (UNEXPECTED(m_execute_list == null))
    if (m_execute_list == null)
    {
        rebuild_execute_list();
    }

    // if the current quantum has expired, find a new one
    // (expired quanta are returned to the allocator for reuse)
    while (m_basetime >= m_quantum_list.first().expire())
    {
        m_quantum_allocator.reclaim(m_quantum_list.detach_head());
    }

    // loop until we hit the next timer
    while (m_basetime < m_timer_list.expire())
    {
        // by default, assume our target is the end of the next quantum
        attotime target = m_basetime + new attotime(0, m_quantum_list.first().actual());

        // however, if the next timer is going to fire before then, override
        if (m_timer_list.expire() < target)
        {
            target = m_timer_list.expire();
        }

        // throttled logging: only one frame in 1000
        if (machine().video().frame_update_count() % 1000 == 0)
        {
            //LOG(("------------------\n"));
            LOG("device_scheduler.timeslice() - cpu_timeslice: target = {0}, m_timer_list.expire: {1}\n", target.as_string(), m_timer_list.expire().as_string());
        }

        // do we have pending suspension changes?
        if (m_suspend_changes_pending)
        {
            apply_suspend_changes();
        }

        // loop over all CPUs
        for (device_execute_interface exec = m_execute_list; exec != null; exec = exec.m_nextexec)
        {
            // only process if this CPU is executing or truly halted (not yielding)
            // and if our target is later than the CPU's current time (coarse check)
            if ((exec.m_suspend == 0 || exec.m_eatcycles > 0) && target.seconds() >= exec.m_localtime.seconds())
            //if (EXPECTED((exec->m_suspend == 0 || exec->m_eatcycles) && target.seconds() >= exec->m_localtime.seconds()))
            {
                // compute how many attoseconds to execute this CPU
                attoseconds_t delta = target.attoseconds() - exec.m_localtime.attoseconds();
                // borrow one second's worth of attoseconds if the
                // subtraction crossed a seconds boundary
                if (delta < 0 && target.seconds() > exec.m_localtime.seconds())
                {
                    delta += ATTOSECONDS_PER_SECOND;
                }
                assert(delta == (target - exec.m_localtime).as_attoseconds());

                // a device with no cycle time simply jumps to the target
                if (exec.m_attoseconds_per_cycle == 0)
                {
                    exec.m_localtime = target;
                }
                // if we have enough for at least 1 cycle, do the math
                else if (delta >= exec.m_attoseconds_per_cycle)
                {
                    // compute how many cycles we want to execute
                    int ran = exec.m_cycles_running = (int)divu_64x32((u64)delta >> exec.m_divshift, (u32)exec.m_divisor);

                    if (machine().video().frame_update_count() % 1000 == 0)
                    {
                        LOG("device_scheduler.timeslice() - cpu '{0}': {1} ({2} cycles)\n", exec.device().tag(), delta, exec.m_cycles_running);
                    }

                    // if we're not suspended, actually execute
                    if (exec.m_suspend == 0)
                    {
                        g_profiler.start(exec.m_profiler);

                        // note that this global variable cycles_stolen can be modified
                        // via the call to cpu_execute
                        exec.m_cycles_stolen = 0;
                        m_executing_device = exec;
                        exec.m_icountptr.i = exec.m_cycles_running;  // *exec->m_icountptr = exec->m_cycles_running;
                        if (!call_debugger)
                        {
                            exec.run();
                        }
                        else
                        {
                            // bracket the run with debugger hooks
                            exec.debugger_start_cpu_hook(target);
                            exec.run();
                            exec.debugger_stop_cpu_hook();
                        }

                        // adjust for any cycles we took back
                        //throw new emu_unimplemented();
#if false
                        assert(ran >= *exec->m_icountptr);
#endif
                        // whatever the device left in its icount was not run
                        ran -= exec.m_icountptr.i;  //ran -= *exec->m_icountptr;
                        //throw new emu_unimplemented();
#if false
                        assert(ran >= exec->m_cycles_stolen);
#endif
                        ran -= exec.m_cycles_stolen;
                        g_profiler.stop();
                    }

                    // account for these cycles
                    exec.m_totalcycles += (u64)ran;

                    // update the local time for this CPU
                    attotime deltatime;
                    if (ran < exec.m_cycles_per_second)
                    {
                        // common case: less than one second of cycles
                        deltatime = new attotime(0, exec.m_attoseconds_per_cycle * ran);
                    }
                    else
                    {
                        // split into whole seconds plus remainder cycles
                        u32 remainder;
                        s32 secs = (s32)divu_64x32_rem((u64)ran, exec.m_cycles_per_second, out remainder);
                        deltatime = new attotime(secs, remainder * exec.m_attoseconds_per_cycle);
                    }

                    assert(deltatime >= attotime.zero);
                    exec.m_localtime += deltatime;

                    if (machine().video().frame_update_count() % 100 == 0)
                    {
                        LOG("device_scheduler.timeslice() - {0} ran, {1} total, time = {2}\n", ran, exec.m_totalcycles, exec.m_localtime.as_string());
                    }

                    // if the new local CPU time is less than our target, move the target up, but not before the base
                    if (exec.m_localtime < target)
                    {
                        target = std.max(exec.m_localtime, m_basetime);

                        if (machine().video().frame_update_count() % 1000 == 0)
                        {
                            LOG("device_scheduler.timeslice() - (new target)\n");
                        }
                    }
                }
            }
        }
        m_executing_device = null;

        // update the base time
        m_basetime = target;
    }

    // execute timers
    execute_timers();
}