diff --git a/jobrunner/queries.py b/jobrunner/queries.py
index 6d301a58..b0ea228f 100644
--- a/jobrunner/queries.py
+++ b/jobrunner/queries.py
@@ -4,7 +4,7 @@
 from operator import attrgetter

 from jobrunner.lib.database import find_all, find_one, find_where, upsert
-from jobrunner.models import Flag, Job, State
+from jobrunner.models import Flag, Job


 def calculate_workspace_state(workspace):
@@ -19,10 +19,6 @@ def calculate_workspace_state(workspace):
     for action, jobs in group_by(all_jobs, attrgetter("action")):
         if action == "__error__":
             continue
-        # Remove running jobs from the list - this function is used to compare a
-        # job to a previously completed job, if we include running jobs here
-        # we will compare a job to itself (with hilarious consequences!)
-        jobs = filter(lambda x: x.state != State.RUNNING, jobs)
         ordered_jobs = sorted(jobs, key=attrgetter("created_at"), reverse=True)
         latest_jobs.append(ordered_jobs[0])
     return latest_jobs
diff --git a/jobrunner/run.py b/jobrunner/run.py
index 58c94dda..0d2e2862 100644
--- a/jobrunner/run.py
+++ b/jobrunner/run.py
@@ -380,12 +380,8 @@ def handle_job(job, api, mode=None, paused=None):
        # final state - we have finished!
        results = api.get_results(job_definition)

-        # We need to calculate obsolete before saving the results, as when we save
-        # the job will move to State.SUCCEEDED & then `calculate_workspace_state()`
-        # will mean that we are comparing results.outputs to itself (instead of
-        # the previous job)
-        obsolete = get_obsolete_files(job_definition, results.outputs)
        save_results(job, job_definition, results)
+        obsolete = get_obsolete_files(job_definition, results.outputs)

        if obsolete:
            errors = api.delete_files(job_definition.workspace, Privacy.HIGH, obsolete)
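
For context, after the queries.py hunk, calculate_workspace_state() simply picks the most recently created job per action, with no filtering on job state. The sketch below illustrates that selection in isolation; group_by and the Job fields are simplified stand-ins, not the real helpers from the jobrunner codebase.

```python
# Illustrative sketch of the latest-job-per-action selection used by
# calculate_workspace_state() after this change. `group_by` and `Job` here
# are simplified assumptions, not the actual jobrunner implementations.
from dataclasses import dataclass
from itertools import groupby
from operator import attrgetter


@dataclass
class Job:
    action: str
    created_at: int


def group_by(iterable, key):
    # Assumed behaviour: sort, then group consecutive items by key
    return groupby(sorted(iterable, key=key), key=key)


def latest_job_per_action(all_jobs):
    latest_jobs = []
    for action, jobs in group_by(all_jobs, attrgetter("action")):
        if action == "__error__":
            continue
        # With the State.RUNNING filter removed, a currently running job can
        # also be the "latest" job for its action.
        ordered_jobs = sorted(jobs, key=attrgetter("created_at"), reverse=True)
        latest_jobs.append(ordered_jobs[0])
    return latest_jobs


jobs = [Job("model", 1), Job("model", 3), Job("analysis", 2)]
print([(j.action, j.created_at) for j in latest_job_per_action(jobs)])
# [('analysis', 2), ('model', 3)]
```

Running the sketch prints the newest job per action for the sample data, e.g. [('analysis', 2), ('model', 3)].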