Commit 11dbaa9

fix:ruff

1 parent 61cb395 · commit 11dbaa9

File tree

2 files changed: 23 additions & 23 deletions
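
Every one of the 23 changed lines applies the same mechanical fix: log calls whose messages were written as f-strings with no placeholders are rewritten as plain string literals, which is what ruff flags (rule F541, "f-string without any placeholders"). A minimal sketch of the pattern, using the standard logging module rather than the worker's own self.log helper and a hypothetical logger name:

import logging

logger = logging.getLogger("scheduler")  # hypothetical logger name, for illustration only

# Before: flagged by ruff F541 -- the f prefix does nothing when there are no placeholders.
logger.log(logging.DEBUG, f"Stop Scheduler requested")

# After: a plain string literal, the change applied throughout this commit.
logger.log(logging.DEBUG, "Stop Scheduler requested")

# f-strings that actually interpolate values are left untouched, e.g.:
max_idle_time = 30
logger.log(logging.INFO, f"idle for {max_idle_time} seconds, quitting")

The behavior is unchanged at runtime; dropping the unnecessary f prefix silences the lint warning and avoids formatting a constant message as an f-string.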

scheduler/worker/scheduler.py

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@ def start(self) -> None:

     def request_stop_and_wait(self) -> None:
         """Toggle self._stop_requested that's checked on every loop"""
-        self.log(DEBUG, f"Stop Scheduler requested")
+        self.log(DEBUG, "Stop Scheduler requested")
         self._stop_requested = True
         if self._thread is not None:
             self._thread.join()

scheduler/worker/worker.py

Lines changed: 22 additions & 22 deletions
@@ -182,7 +182,7 @@ def clean_registries(self) -> None:
     def _install_signal_handlers(self) -> None:
         """Installs signal handlers for handling SIGINT and SIGTERM gracefully."""
         if threading.current_thread() is not threading.main_thread():
-            self.log(DEBUG, f"Running in a thread, skipping signal handlers installation")
+            self.log(DEBUG, "Running in a thread, skipping signal handlers installation")
             return
         signal.signal(signal.SIGINT, self.request_stop)
         signal.signal(signal.SIGTERM, self.request_stop)
@@ -213,14 +213,14 @@ def work(self, max_jobs: Optional[int] = None, max_idle_time: Optional[int] = No
                 self.run_maintenance_tasks()

                 if self._model.shutdown_requested_date:
-                    self.log(INFO, f"stopping on request")
+                    self.log(INFO, "stopping on request")
                     break

                 timeout = None if self.burst else (SCHEDULER_CONFIG.DEFAULT_WORKER_TTL - 15)
                 job, queue = self.dequeue_job_and_maintain_ttl(timeout, max_idle_time)
                 if job is None or queue is None:
                     if self.burst:
-                        self.log(INFO, f"done, quitting")
+                        self.log(INFO, "done, quitting")
                         break
                     elif max_idle_time is not None:
                         self.log(INFO, f"idle for {max_idle_time} seconds, quitting")
@@ -239,13 +239,13 @@ def work(self, max_jobs: Optional[int] = None, max_idle_time: Optional[int] = No
                     return self._model.completed_jobs > 0

         except TimeoutErrorTypes:
-            self.log(ERROR, f"Redis connection timeout, quitting...")
+            self.log(ERROR, "Redis connection timeout, quitting...")
         except StopRequested:
-            self.log(INFO, f"Worker was requested to stop, quitting")
+            self.log(INFO, "Worker was requested to stop, quitting")
         except SystemExit:  # Cold shutdown detected
             raise
         except Exception:
-            self.log(ERROR, f"found an unhandled exception, quitting...", exc_info=True)
+            self.log(ERROR, "found an unhandled exception, quitting...", exc_info=True)
         finally:
             self.teardown()
         return False
@@ -264,10 +264,10 @@ def handle_job_failure(self, job: JobModel, queue: Queue, exc_string: str = "")
         stopped_job_name = self._model.get_field("stopped_job_name", self.connection)
         self._model.current_job_name = None
         if stopped_job_name == job.name:
-            self.log(DEBUG, f"Job was stopped, setting status to STOPPED")
+            self.log(DEBUG, "Job was stopped, setting status to STOPPED")
             new_job_status = JobStatus.STOPPED
         else:
-            self.log(DEBUG, f"Job has failed, setting status to FAILED")
+            self.log(DEBUG, "Job has failed, setting status to FAILED")
             new_job_status = JobStatus.FAILED

         queue.job_handle_failure(new_job_status, job, exc_string)
@@ -309,12 +309,12 @@ def _check_for_suspension(self, burst: bool) -> None:
         while self._model.is_suspended:
             if burst:
                 self.log(
-                    INFO, f"Suspended in burst mode, exiting, Note: There could still be unfinished jobs on the queue"
+                    INFO, "Suspended in burst mode, exiting, Note: There could still be unfinished jobs on the queue"
                 )
                 raise StopRequested()

             if not notified:
-                self.log(INFO, f"Worker suspended, trigger ResumeCommand")
+                self.log(INFO, "Worker suspended, trigger ResumeCommand")
                 before_state = self._model.state
                 self._model.set_field("state", WorkerStatus.SUSPENDED, connection=self.connection)
                 notified = True
@@ -332,14 +332,14 @@ def run_maintenance_tasks(self) -> None:
         if not self.with_scheduler:
             return
         if self.scheduler is None and self.with_scheduler:
-            self.log(DEBUG, f"Creating scheduler")
+            self.log(DEBUG, "Creating scheduler")
             self.scheduler = WorkerScheduler(self.queues, worker_name=self.name, connection=self.connection)
         if self.scheduler.status == SchedulerStatus.STOPPED:
-            self.log(DEBUG, f"Starting scheduler thread")
+            self.log(DEBUG, "Starting scheduler thread")
             self.scheduler.start()
             self._model.has_scheduler = True
         if self.burst:
-            self.log(DEBUG, f"Stopping scheduler thread (burst mode)")
+            self.log(DEBUG, "Stopping scheduler thread (burst mode)")
             self.scheduler.request_stop_and_wait()
             self._model.has_scheduler = False
             self._model.save(connection=self.connection)
@@ -407,7 +407,7 @@ def _validate_name_uniqueness(self) -> None:

     def worker_start(self) -> None:
         """Registers its own birth."""
-        self.log(DEBUG, f"Registering birth")
+        self.log(DEBUG, "Registering birth")
         now = utcnow()
         self._model.birth = now
         self._model.last_heartbeat = now
@@ -426,7 +426,7 @@ def _kill_job_execution_process(self, sig: signal.Signals = SIGKILL) -> None:
         except OSError as e:
             if e.errno != errno.ESRCH:  # "No such process" is fine with us
                 raise
-            self.log(DEBUG, f"Job execution process already dead")
+            self.log(DEBUG, "Job execution process already dead")

     def _wait_for_job_execution_process(self) -> Tuple[Optional[int], Optional[int]]:
         """Waits for the job execution process to complete.
@@ -449,10 +449,10 @@ def request_force_stop(self, signum: int, frame: Optional[FrameType]) -> None:
         # when a user hits Ctrl+C. In this case, if we receive the second signal within 1 second, we ignore it.
         shutdown_date = self._model.shutdown_requested_date
         if shutdown_date is not None and (utcnow() - shutdown_date) < timedelta(seconds=1):
-            self.log(DEBUG, f"Shutdown signal ignored, received twice in less than 1 second")
+            self.log(DEBUG, "Shutdown signal ignored, received twice in less than 1 second")
             return

-        self.log(WARNING, f"Could shut down")
+        self.log(WARNING, "Could shut down")

         # Take down the job execution process with the worker
         if self._model.job_execution_process_pid:
@@ -472,7 +472,7 @@ def request_stop(self, signum: int, frame: Optional[FrameType]) -> None:
         signal.signal(signal.SIGINT, self.request_force_stop)
         signal.signal(signal.SIGTERM, self.request_force_stop)

-        self.log(INFO, f"warm shut down requested")
+        self.log(INFO, "warm shut down requested")

         self.stop_scheduler()
         # If shutdown is requested in the middle of a job, wait until finish before shutting down and save the request.
@@ -481,7 +481,7 @@ def request_stop(self, signum: int, frame: Optional[FrameType]) -> None:

             self.log(
                 DEBUG,
-                f"Stopping after current job execution process is finished. Press Ctrl+C again for a cold shutdown.",
+                "Stopping after current job execution process is finished. Press Ctrl+C again for a cold shutdown.",
             )
         else:
             raise StopRequested()
@@ -526,7 +526,7 @@ def stop_scheduler(self) -> None:
             return
         self.log(INFO, f"Stopping scheduler thread {self.scheduler.pid}")
         self.scheduler.request_stop_and_wait()
-        self.log(DEBUG, f"Scheduler thread stopped")
+        self.log(DEBUG, "Scheduler thread stopped")
         self.scheduler = None

     def refresh(self, update_queues: bool = False) -> None:
@@ -626,10 +626,10 @@ def monitor_job_execution_process(self, job: JobModel, queue: Queue) -> None:
         stopped_job_name = self._model.get_field("stopped_job_name", self.connection)

         if job_status is None:
-            self.log(WARNING, f"Job status is None, completed and expired?")
+            self.log(WARNING, "Job status is None, completed and expired?")
             return
         elif stopped_job_name == job.name:  # job execution process killed deliberately
-            self.log(WARNING, f"Job stopped by user, moving job to failed-jobs-registry")
+            self.log(WARNING, "Job stopped by user, moving job to failed-jobs-registry")
             job.call_stopped_callback()
             self.handle_job_failure(
                 job, queue=queue, exc_string="Job stopped by user, job execution process terminated."
