From 657dc3df0f9961e55c65cfc6b1e5a3893f591e9f Mon Sep 17 00:00:00 2001
From: Christian Vogelgsang
Date: Tue, 8 Oct 2024 22:06:57 +0200
Subject: [PATCH] added wait/signals/forbid/permit to scheduler

---
 amitools/vamos/schedule/scheduler.py |  42 ++++--
 amitools/vamos/schedule/task.py      |  67 ++++++++-
 pyproject.toml                       |   2 +-
 test/unit/schedule_scheduler.py      | 201 ++++++++++++++++++++++++++-
 4 files changed, 298 insertions(+), 14 deletions(-)

diff --git a/amitools/vamos/schedule/scheduler.py b/amitools/vamos/schedule/scheduler.py
index 3dbeebf0..ca457cb7 100644
--- a/amitools/vamos/schedule/scheduler.py
+++ b/amitools/vamos/schedule/scheduler.py
@@ -58,18 +58,23 @@ def schedule(self):
             if self.get_num_tasks() == 0:
                 break
 
-            # find a task to run
-            task = self._find_run_task()
-            if task is None:
-                log_schedule.error("schedule(): no task to run?!")
-                return False
+            # is the current task in forbid state?
+            if self.cur_task and self.cur_task.is_forbidden():
+                log_schedule.debug("run: keep current (forbid state)")
+                task = self.cur_task
+            else:
+                # find a task to run
+                task = self._find_run_task()
+                if task is None:
+                    log_schedule.error("schedule(): no task to run?!")
+                    return False
 
             # current tasks stays the same?
             # no context switch required. simply switch to it
             if task == self.cur_task:
                 self.num_switch_same += 1
                 log_schedule.debug("run: current %s", task.name)
-                task.switch()
+                task.keep_scheduled()
             else:
                 self.num_switch_other += 1
                 # switch out old
@@ -88,9 +93,13 @@ def schedule(self):
                 log_schedule.debug("run: switch in %s", task.name)
                 task.restore_ctx()
 
-            task.switch()
+                # enter the task's greenlet and resume it
+                task.switch()
+
+        # end of scheduling
         self._make_current(None)
+
         log_schedule.info(
             "schedule(): done (switches: same=%d, other=%d)",
             self.num_switch_same,
@@ -121,6 +130,21 @@ def _make_current(self, task):
         if self.cur_task_hook:
             self.cur_task_hook(task)
 
+    def wait_task(self, task):
+        """set the given task into wait state"""
+        log_schedule.debug("wait_task: task %s", task.name)
+        self.waiting_tasks.append(task)
+        self.reschedule(task)
+
+    def wake_up_task(self, task):
+        """take the task off the waiting list and allow it to be scheduled again"""
+        log_schedule.debug("wake_up_task: task %s", task.name)
+        self.waiting_tasks.remove(task)
+        # add to front of ready list
+        self.ready_tasks.insert(0, task)
+        # directly reschedule
+        self.reschedule(task)
+
     def add_task(self, task):
         """add a new task and prepare for execution.
 
@@ -134,7 +158,7 @@ def add_task(self, task):
         return True
 
     def rem_task(self, task):
-        # find task: is it current?
+        # find task: is it current? (a task may remove itself)
        if self.cur_task == task:
             self.cur_task = None
         # in ready list?
@@ -148,7 +172,7 @@ def rem_task(self, task):
             self.added_tasks.remove(task)
         # not found
         else:
-            log_schedule.warn("rem_task: unknown task %s", task.name)
+            log_schedule.warning("rem_task: unknown task %s", task.name)
             return False
         # mark as removed
         task.set_state(TaskState.TS_REMOVED)
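
The forbid-aware selection added to schedule() is the core of this change: a task
in forbid state is never switched away from; a suppressed switch is only recorded
via keep_scheduled(). A minimal sketch of that decision in isolation (ToyTask and
pick_next are hypothetical stand-ins, not the real Scheduler/TaskBase types):

    # toy model of the forbid-aware task selection above
    class ToyTask:
        def __init__(self, name, forbid_cnt=0):
            self.name = name
            self.forbid_cnt = forbid_cnt

        def is_forbidden(self):
            return self.forbid_cnt > 0

    def pick_next(cur_task, ready_tasks):
        # keep the current task while it is in forbid state,
        # otherwise take the next ready task (or None)
        if cur_task and cur_task.is_forbidden():
            return cur_task
        return ready_tasks.pop(0) if ready_tasks else None

    a, b = ToyTask("a", forbid_cnt=1), ToyTask("b")
    assert pick_next(a, [b]) is a  # forbid keeps "a" scheduled
    a.forbid_cnt = 0
    assert pick_next(a, [b]) is b  # after permit() switching resumes
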
diff --git a/amitools/vamos/schedule/task.py b/amitools/vamos/schedule/task.py
index 82599d03..120abebd 100644
--- a/amitools/vamos/schedule/task.py
+++ b/amitools/vamos/schedule/task.py
@@ -28,6 +28,10 @@ def __init__(self, name, machine):
         self.exit_code = None
         self.glet = None
         self.cpu_ctx = None
+        self.forbid_cnt = 0
+        self.forbid_reschedule = False
+        self.sigmask_wait = 0
+        self.sigmask_received = 0
 
     def __repr__(self):
         return "TaskBase(%r, %s)" % (self.name, self.state)
@@ -60,6 +64,65 @@ def reschedule(self):
         """give up this tasks execution and allow the scheduler to run another task"""
         self.scheduler.reschedule(self)
 
+    def forbid(self):
+        """do not switch away from this task until permit() is called"""
+        self.forbid_cnt += 1
+
+    def permit(self):
+        """re-enable task switching"""
+        self.forbid_cnt -= 1
+        if self.forbid_cnt == 0:
+            # if a schedule was triggered during the forbid state then reschedule
+            if self.forbid_reschedule:
+                self.forbid_reschedule = False
+                self.reschedule()
+
+    def is_forbidden(self):
+        return self.forbid_cnt > 0
+
+    def keep_scheduled(self):
+        """called by the scheduler when the task stays scheduled"""
+        if self.forbid_cnt > 0:
+            # remember that a switch was suppressed while in forbid state
+            self.forbid_reschedule = True
+
+    def wait(self, sigmask):
+        """suspend the task in wait state until a signal in sigmask arrives"""
+        # are matching signals already set?
+        # then return without waiting
+        got_mask = self.sigmask_received & sigmask
+        if got_mask != 0:
+            return got_mask
+
+        # set our wait mask
+        self.sigmask_wait = sigmask
+
+        # go waiting. reschedule
+        self.scheduler.wait_task(self)
+
+        # we return here after being taken off the waiting list,
+        # i.e. our got mask is not empty anymore
+        got_mask = self.sigmask_received & sigmask
+
+        # now reset wait mask
+        self.sigmask_wait = 0
+
+        return got_mask
+
+    def get_signal(self):
+        return self.sigmask_received
+
+    def set_signal(self, new_signals, sigmask):
+        """replace the signals selected by sigmask with new_signals"""
+        self.sigmask_received = (new_signals & sigmask) | (self.sigmask_received & ~sigmask)
+        # check if the task is waiting for some of the signals
+        if self.sigmask_wait != 0:
+            got_mask = self.sigmask_received & self.sigmask_wait
+            if got_mask != 0:
+                self.scheduler.wake_up_task(self)
+        # return current mask
+        return self.sigmask_received
+
     def free(self):
         """clean up task resources, e.g.
         stack"""
         pass
@@ -78,12 +141,12 @@ def switch(self):
     def save_ctx(self):
         if self.runtime.is_running():
             log_schedule.debug("%s: save cpu context", self)
-            self.cpu_ctx = self.machine.cpu.get_context()
+            self.cpu_ctx = self.machine.cpu.get_cpu_context()
 
     def restore_ctx(self):
         if self.runtime.is_running():
             log_schedule.debug("%s: restore cpu context", self)
-            self.machine.cpu.set_context(self.cpu_ctx)
+            self.machine.cpu.set_cpu_context(self.cpu_ctx)
 
 
 class NativeTask(TaskBase):
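
The bitmask arithmetic behind wait()/set_signal() can be traced standalone. A
sketch using the same masks as the new wait unit test below (plain integers,
scheduler interaction omitted):

    # signal bookkeeping as plain integers (mirrors the patch)
    received = 0b0000
    wait_mask = 0b0011  # wait(3): sleep until signal bit 0 or bit 1 arrives

    # set_signal(1, 1): replace bit 0, keep all other bits
    new_signals, sigmask = 0b0001, 0b0001
    received = (new_signals & sigmask) | (received & ~sigmask)

    got_mask = received & wait_mask
    assert got_mask == 1  # wait() returns 1, exactly as the wait test asserts
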
stack""" pass @@ -78,12 +141,12 @@ def switch(self): def save_ctx(self): if self.runtime.is_running(): log_schedule.debug("%s: save cpu context", self) - self.cpu_ctx = self.machine.cpu.get_context() + self.cpu_ctx = self.machine.cpu.get_cpu_context() def restore_ctx(self): if self.runtime.is_running(): log_schedule.debug("%s: restore cpu context", self) - self.machine.cpu.set_context(self.cpu_ctx) + self.machine.cpu.set_cpu_context(self.cpu_ctx) class NativeTask(TaskBase): diff --git a/pyproject.toml b/pyproject.toml index fe14be9d..9ea0b013 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ dynamic = ["version", "readme"] [project.optional-dependencies] -vamos = ["machine68k"] +vamos = ["machine68k", "greenlet"] [tool.setuptools] zip-safe = false diff --git a/test/unit/schedule_scheduler.py b/test/unit/schedule_scheduler.py index 57c6fbb4..2b21dbeb 100644 --- a/test/unit/schedule_scheduler.py +++ b/test/unit/schedule_scheduler.py @@ -5,9 +5,9 @@ from amitools.vamos.machine.regs import * -def setup(): +def setup(slice_cycles=1000): machine = Machine() - sched = Scheduler(machine) + sched = Scheduler(machine, slice_cycles=slice_cycles) alloc = MemoryAlloc.for_machine(machine) return machine, sched, alloc @@ -104,3 +104,200 @@ def trap(op, pc): assert alloc.is_all_free() machine.cleanup() assert tasks == [task, None] + + +def schedule_scheduler_native_task_remove_test(): + tasks = [] + my_task = None + + def cb(task): + tasks.append(task) + + machine, sched, alloc = setup() + sched.set_cur_task_callback(cb) + mem = alloc.get_mem() + pc = machine.get_scratch_begin() + + def trap(op, pc): + # remove my task to end execution + sched.rem_task(my_task) + + addr = machine.setup_quick_trap(trap) + + # task setup + mem.w16(pc, op_jmp) + mem.w32(pc + 2, addr) + mem.w16(pc + 6, op_jmp) + mem.w32(pc + 8, pc) + + task = create_native_task(machine, alloc, pc, {REG_D0: 42}) + my_task = task + + # add task + assert sched.add_task(task) + # run scheduler + sched.schedule() + exit_code = task.get_exit_code() + assert exit_code == 42 + assert alloc.is_all_free() + machine.cleanup() + assert tasks == [task, None] + + +def schedule_scheduler_native_task_multi_test(): + tasks = [] + my_task = None + + def cb(task): + tasks.append(task) + + machine, sched, alloc = setup(slice_cycles=30) + sched.set_cur_task_callback(cb) + mem = alloc.get_mem() + pc = machine.get_scratch_begin() + + def trap1(op, pc): + pass + + def trap2(op, pc): + pass + + addr1 = machine.setup_quick_trap(trap1) + addr2 = machine.setup_quick_trap(trap2) + + # task1 setup + mem.w16(pc, op_jmp) + mem.w32(pc + 2, addr1) + off = pc + 6 + for i in range(100): + mem.w16(off, op_nop) + off += 2 + mem.w16(off, op_jmp) + mem.w32(off + 2, addr2) + mem.w16(off + 6, op_rts) + + task1 = create_native_task(machine, alloc, pc, {REG_D0: 42}, name="task1") + my_task = task1 + + # task2 setup + pc2 = off + 8 + mem.w16(pc2, op_rts) + + task2 = create_native_task(machine, alloc, pc2, {REG_D0: 23}, name="task2") + + # add task + assert sched.add_task(task1) + assert sched.add_task(task2) + # run scheduler + sched.schedule() + assert task1.get_exit_code() == 42 + assert task2.get_exit_code() == 23 + assert alloc.is_all_free() + machine.cleanup() + # check that the tasks switched + assert tasks == [task1, task2, task1, None] + + +def schedule_scheduler_native_task_multi_forbid_test(): + tasks = [] + my_task = None + + def cb(task): + tasks.append(task) + + machine, sched, alloc = setup(slice_cycles=30) + sched.set_cur_task_callback(cb) 
diff --git a/test/unit/schedule_scheduler.py b/test/unit/schedule_scheduler.py
index 57c6fbb4..2b21dbeb 100644
--- a/test/unit/schedule_scheduler.py
+++ b/test/unit/schedule_scheduler.py
@@ -5,9 +5,9 @@
 from amitools.vamos.machine.regs import *
 
 
-def setup():
+def setup(slice_cycles=1000):
     machine = Machine()
-    sched = Scheduler(machine)
+    sched = Scheduler(machine, slice_cycles=slice_cycles)
     alloc = MemoryAlloc.for_machine(machine)
     return machine, sched, alloc
 
@@ -104,3 +104,200 @@ def trap(op, pc):
     assert alloc.is_all_free()
     machine.cleanup()
     assert tasks == [task, None]
+
+
+def schedule_scheduler_native_task_remove_test():
+    tasks = []
+    my_task = None
+
+    def cb(task):
+        tasks.append(task)
+
+    machine, sched, alloc = setup()
+    sched.set_cur_task_callback(cb)
+    mem = alloc.get_mem()
+    pc = machine.get_scratch_begin()
+
+    def trap(op, pc):
+        # remove my own task to end execution
+        sched.rem_task(my_task)
+
+    addr = machine.setup_quick_trap(trap)
+
+    # task setup
+    mem.w16(pc, op_jmp)
+    mem.w32(pc + 2, addr)
+    mem.w16(pc + 6, op_jmp)
+    mem.w32(pc + 8, pc)
+
+    task = create_native_task(machine, alloc, pc, {REG_D0: 42})
+    my_task = task
+
+    # add task
+    assert sched.add_task(task)
+    # run scheduler
+    sched.schedule()
+    exit_code = task.get_exit_code()
+    assert exit_code == 42
+    assert alloc.is_all_free()
+    machine.cleanup()
+    assert tasks == [task, None]
+
+
+def schedule_scheduler_native_task_multi_test():
+    tasks = []
+    my_task = None
+
+    def cb(task):
+        tasks.append(task)
+
+    machine, sched, alloc = setup(slice_cycles=30)
+    sched.set_cur_task_callback(cb)
+    mem = alloc.get_mem()
+    pc = machine.get_scratch_begin()
+
+    def trap1(op, pc):
+        pass
+
+    def trap2(op, pc):
+        pass
+
+    addr1 = machine.setup_quick_trap(trap1)
+    addr2 = machine.setup_quick_trap(trap2)
+
+    # task1 setup
+    mem.w16(pc, op_jmp)
+    mem.w32(pc + 2, addr1)
+    off = pc + 6
+    for i in range(100):
+        mem.w16(off, op_nop)
+        off += 2
+    mem.w16(off, op_jmp)
+    mem.w32(off + 2, addr2)
+    mem.w16(off + 6, op_rts)
+
+    task1 = create_native_task(machine, alloc, pc, {REG_D0: 42}, name="task1")
+    my_task = task1
+
+    # task2 setup
+    pc2 = off + 8
+    mem.w16(pc2, op_rts)
+
+    task2 = create_native_task(machine, alloc, pc2, {REG_D0: 23}, name="task2")
+
+    # add tasks
+    assert sched.add_task(task1)
+    assert sched.add_task(task2)
+    # run scheduler
+    sched.schedule()
+    assert task1.get_exit_code() == 42
+    assert task2.get_exit_code() == 23
+    assert alloc.is_all_free()
+    machine.cleanup()
+    # check that the tasks switched
+    assert tasks == [task1, task2, task1, None]
+
+
+def schedule_scheduler_native_task_multi_forbid_test():
+    tasks = []
+    my_task = None
+
+    def cb(task):
+        tasks.append(task)
+
+    machine, sched, alloc = setup(slice_cycles=30)
+    sched.set_cur_task_callback(cb)
+    mem = alloc.get_mem()
+    pc = machine.get_scratch_begin()
+
+    def trap1(op, pc):
+        my_task.forbid()
+
+    def trap2(op, pc):
+        my_task.permit()
+
+    addr1 = machine.setup_quick_trap(trap1)
+    addr2 = machine.setup_quick_trap(trap2)
+
+    # task1 setup
+    mem.w16(pc, op_jmp)
+    mem.w32(pc + 2, addr1)
+    off = pc + 6
+    for i in range(100):
+        mem.w16(off, op_nop)
+        off += 2
+    mem.w16(off, op_jmp)
+    mem.w32(off + 2, addr2)
+    mem.w16(off + 6, op_rts)
+
+    task1 = create_native_task(machine, alloc, pc, {REG_D0: 42}, name="task1")
+    my_task = task1
+
+    # task2 setup
+    pc2 = off + 8
+    mem.w16(pc2, op_rts)
+
+    task2 = create_native_task(machine, alloc, pc2, {REG_D0: 23}, name="task2")
+
+    # add tasks
+    assert sched.add_task(task1)
+    assert sched.add_task(task2)
+    # run scheduler
+    sched.schedule()
+    assert task1.get_exit_code() == 42
+    assert task2.get_exit_code() == 23
+    assert alloc.is_all_free()
+    machine.cleanup()
+    # check that task1 was not preempted while in forbid state
+    assert tasks == [task1, task2, None]
+
+
+def schedule_scheduler_native_task_multi_wait_test():
+    tasks = []
+    my_task = None
+
+    def cb(task):
+        tasks.append(task)
+
+    machine, sched, alloc = setup(slice_cycles=30)
+    sched.set_cur_task_callback(cb)
+    mem = alloc.get_mem()
+    pc = machine.get_scratch_begin()
+
+    def trap1(op, pc):
+        got = my_task.wait(3)
+        assert got == 1
+
+    def trap2(op, pc):
+        my_task.set_signal(1, 1)
+
+    addr1 = machine.setup_quick_trap(trap1)
+    addr2 = machine.setup_quick_trap(trap2)
+
+    # task1 setup
+    mem.w16(pc, op_jmp)
+    mem.w32(pc + 2, addr1)
+    mem.w16(pc + 6, op_rts)
+
+    task1 = create_native_task(machine, alloc, pc, {REG_D0: 42}, name="task1")
+    my_task = task1
+
+    # task2 setup
+    pc2 = pc + 8
+    mem.w16(pc2, op_jmp)
+    mem.w32(pc2 + 2, addr2)
+    mem.w16(pc2 + 6, op_rts)
+
+    task2 = create_native_task(machine, alloc, pc2, {REG_D0: 23}, name="task2")
+
+    # add tasks
+    assert sched.add_task(task1)
+    assert sched.add_task(task2)
+    # run scheduler
+    sched.schedule()
+    assert task1.get_exit_code() == 42
+    assert task2.get_exit_code() == 23
+    assert alloc.is_all_free()
+    machine.cleanup()
+    # check the wait / wake-up switch sequence
+    assert tasks == [task1, task2, task1, task2, None]
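
One property the forbid test does not exercise is nesting: forbid()/permit() use a
counter, so only the outermost permit() releases a suppressed reschedule. A
standalone sketch of that behavior (ForbidModel is a hypothetical replica of the
forbid_cnt/forbid_reschedule logic from task.py above, with reschedule() stubbed):

    class ForbidModel:
        def __init__(self):
            self.forbid_cnt = 0
            self.forbid_reschedule = False
            self.rescheduled = False

        def forbid(self):
            self.forbid_cnt += 1

        def permit(self):
            self.forbid_cnt -= 1
            if self.forbid_cnt == 0 and self.forbid_reschedule:
                self.forbid_reschedule = False
                self.rescheduled = True  # stands in for self.reschedule()

    m = ForbidModel()
    m.forbid()
    m.forbid()                   # nested forbid
    m.forbid_reschedule = True   # a switch was suppressed (keep_scheduled)
    m.permit()                   # still forbidden: no reschedule yet
    assert not m.rescheduled
    m.permit()                   # outermost permit releases the reschedule
    assert m.rescheduled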