From 9424e3f383e04a16c95a57ef1f60237ef3889374 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 22 Sep 2023 20:54:06 -0700
Subject: [PATCH] Bump ruff from 0.0.270 to 0.0.290 (#1331)

Signed-off-by: dependabot[bot]
Signed-off-by: Luciano Resende
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Luciano Resende
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml                       |  2 +-
 enterprise_gateway/client/gateway_client.py   |  4 +-
 enterprise_gateway/enterprisegatewayapp.py    |  8 +--
 enterprise_gateway/mixins.py                  |  4 +-
 .../services/kernels/remotemanager.py         | 10 ++-
 .../services/kernelspecs/handlers.py          |  2 +-
 .../services/kernelspecs/kernelspec_cache.py  | 26 +++-----
 .../services/processproxies/conductor.py      | 30 +++------
 .../services/processproxies/container.py      |  8 +--
 .../services/processproxies/distributed.py    | 10 ++-
 .../services/processproxies/docker_swarm.py   |  4 +-
 .../services/processproxies/processproxy.py   | 62 ++++++++-----------
 .../services/processproxies/yarn.py           | 46 +++++++-------
 .../services/sessions/kernelsessionmanager.py |  4 +-
 .../services/sessions/sessionmanager.py       |  2 +-
 .../R/scripts/server_listener.py              | 11 ++--
 pyproject.toml                                |  2 +-
 17 files changed, 90 insertions(+), 145 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a7379e10..2cad912d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -37,7 +37,7 @@ repos:
       - id: black
 
   - repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: v0.0.270
+    rev: v0.0.290
     hooks:
       - id: ruff
         args: ["--fix"]
diff --git a/enterprise_gateway/client/gateway_client.py b/enterprise_gateway/client/gateway_client.py
index 7b92c4ba..beaf53d3 100644
--- a/enterprise_gateway/client/gateway_client.py
+++ b/enterprise_gateway/client/gateway_client.py
@@ -237,9 +237,7 @@ def interrupt(self):
             self.log.debug(f"Kernel {self.kernel_id} interrupted")
             return True
         else:
-            msg = "Unexpected response interrupting kernel {}: {}".format(
-                self.kernel_id, response.content
-            )
+            msg = f"Unexpected response interrupting kernel {self.kernel_id}: {response.content}"
             raise RuntimeError(msg)
 
     def restart(self, timeout=REQUEST_TIMEOUT):
diff --git a/enterprise_gateway/enterprisegatewayapp.py b/enterprise_gateway/enterprisegatewayapp.py
index aab63bf1..3b10fe3f 100644
--- a/enterprise_gateway/enterprisegatewayapp.py
+++ b/enterprise_gateway/enterprisegatewayapp.py
@@ -12,7 +12,7 @@
 import sys
 import time
 import weakref
-from typing import List, Optional
+from typing import ClassVar, List, Optional
 
 from jupyter_client.kernelspec import KernelSpecManager
 from jupyter_core.application import JupyterApp, base_aliases
@@ -78,7 +78,7 @@ class EnterpriseGatewayApp(EnterpriseGatewayConfigMixin, JupyterApp):
     """
 
     # Also include when generating help options
-    classes = [
+    classes: ClassVar = [
         KernelSpecCache,
         FileKernelSessionManager,
         WebhookKernelSessionManager,
@@ -369,7 +369,7 @@ def _signal_stop(self, sig, frame) -> None:
         self.io_loop.add_callback_from_signal(self.io_loop.stop)
 
     _last_config_update = int(time.time())
-    _dynamic_configurables = {}
+    _dynamic_configurables: ClassVar = {}
 
     def update_dynamic_configurables(self) -> bool:
         """
@@ -403,7 +403,7 @@ def update_dynamic_configurables(self) -> bool:
 
             self.log.info(
                 "Configuration file changes detected.  Instances for the following "
-                "configurables have been updated: {}".format(configs)
+                f"configurables have been updated: {configs}"
             )
         return updated
diff --git a/enterprise_gateway/mixins.py b/enterprise_gateway/mixins.py
index 1e9b017c..9d2e4ee1 100644
--- a/enterprise_gateway/mixins.py
+++ b/enterprise_gateway/mixins.py
@@ -9,7 +9,7 @@
 import traceback
 from distutils.util import strtobool
 from http.client import responses
-from typing import Any, Awaitable, Dict, List, Optional, Set
+from typing import Any, Awaitable, ClassVar, Dict, List, Optional, Set
 
 from tornado import web
 from tornado.log import LogFormatter
@@ -36,7 +36,7 @@ class CORSMixin:
     Mixes CORS headers into tornado.web.RequestHandlers.
     """
 
-    SETTINGS_TO_HEADERS = {
+    SETTINGS_TO_HEADERS: ClassVar = {
         "eg_allow_credentials": "Access-Control-Allow-Credentials",
         "eg_allow_headers": "Access-Control-Allow-Headers",
         "eg_allow_methods": "Access-Control-Allow-Methods",
diff --git a/enterprise_gateway/services/kernels/remotemanager.py b/enterprise_gateway/services/kernels/remotemanager.py
index a8028cc8..b277b2d2 100644
--- a/enterprise_gateway/services/kernels/remotemanager.py
+++ b/enterprise_gateway/services/kernels/remotemanager.py
@@ -10,7 +10,7 @@
 import signal
 import time
 import uuid
-from typing import Any
+from typing import Any, ClassVar
 
 from jupyter_client.ioloop.manager import AsyncIOLoopKernelManager
 from jupyter_client.kernelspec import KernelSpec
@@ -136,7 +136,7 @@ class TrackPendingRequests:
     """
 
     _pending_requests_all = 0
-    _pending_requests_user = {}
+    _pending_requests_user: ClassVar = {}
 
     def increment(self, username: str) -> None:
         """Increment the requests for a username."""
@@ -570,9 +570,7 @@ async def _launch_kernel(
             del env["KG_AUTH_TOKEN"]
 
         self.log.debug(
-            "Launching kernel: '{}' with command: {}".format(
-                self.kernel_spec.display_name, kernel_cmd
-            )
+            f"Launching kernel: '{self.kernel_spec.display_name}' with command: {kernel_cmd}"
         )
 
         proxy = await self.process_proxy.launch_process(kernel_cmd, **kwargs)
@@ -660,7 +658,7 @@ async def signal_kernel(self, signum: int) -> None:
             if alt_sigint:
                 try:
                     sig_value = getattr(signal, alt_sigint)
-                    if type(sig_value) is int:  # Python 2
+                    if isinstance(sig_value, int):  # Python 2
                         self.sigint_value = sig_value
                     else:  # Python 3
                         self.sigint_value = sig_value.value
diff --git a/enterprise_gateway/services/kernelspecs/handlers.py b/enterprise_gateway/services/kernelspecs/handlers.py
index ed957d48..72f23423 100644
--- a/enterprise_gateway/services/kernelspecs/handlers.py
+++ b/enterprise_gateway/services/kernelspecs/handlers.py
@@ -20,7 +20,7 @@ def apply_user_filter(
     kernelspec_model: Dict[str, object],
     global_authorized_list: Set,
     global_unauthorized_list: Set,
-    kernel_user: str = None,
+    kernel_user: Optional[str] = None,
 ) -> Optional[Dict[str, object]]:
     """
     If authorization lists are configured - either within the kernelspec or globally, ensure
diff --git a/enterprise_gateway/services/kernelspecs/kernelspec_cache.py b/enterprise_gateway/services/kernelspecs/kernelspec_cache.py
index aef89e18..28234621 100644
--- a/enterprise_gateway/services/kernelspecs/kernelspec_cache.py
+++ b/enterprise_gateway/services/kernelspecs/kernelspec_cache.py
@@ -4,7 +4,7 @@
 
 import os
-from typing import Dict, Optional, Union
+from typing import ClassVar, Dict, Optional, Union
 
 from jupyter_client.kernelspec import KernelSpec
 from jupyter_server.utils import ensure_async
 
@@ -105,11 +105,7 @@ def get_item(self, kernel_name: str) -> Optional[KernelSpec]:
                 pass
 
         if not kernelspec:
             self.cache_misses += 1
-            self.log.debug(
-                "Cache miss ({misses}) for kernelspec: {kernel_name}".format(
-                    misses=self.cache_misses, kernel_name=kernel_name
-                )
-            )
+            self.log.debug(f"Cache miss ({self.cache_misses}) for kernelspec: {kernel_name}")
         return kernelspec
 
     def get_all_items(self) -> Dict[str, CacheItemType]:
@@ -147,11 +143,7 @@ def put_item(self, kernel_name: str, cache_item: Union[KernelSpec, CacheItemType
                 observed_dir = os.path.dirname(resource_dir)
                 if observed_dir not in self.observed_dirs:
                     # New directory to watch, schedule it...
-                    self.log.debug(
-                        "KernelSpecCache: observing directory: {observed_dir}".format(
-                            observed_dir=observed_dir
-                        )
-                    )
+                    self.log.debug(f"KernelSpecCache: observing directory: {observed_dir}")
                     self.observed_dirs.add(observed_dir)
                     self.observer.schedule(KernelSpecChangeHandler(self), observed_dir, recursive=True)
@@ -186,19 +178,15 @@ def _initialize(self):
         for kernel_dir in self.kernel_spec_manager.kernel_dirs:
             if kernel_dir not in self.observed_dirs:
                 if os.path.exists(kernel_dir):
-                    self.log.info(
-                        "KernelSpecCache: observing directory: {kernel_dir}".format(
-                            kernel_dir=kernel_dir
-                        )
-                    )
+                    self.log.info(f"KernelSpecCache: observing directory: {kernel_dir}")
                     self.observed_dirs.add(kernel_dir)
                     self.observer.schedule(
                         KernelSpecChangeHandler(self), kernel_dir, recursive=True
                     )
                 else:
                     self.log.warning(
-                        "KernelSpecCache: kernel_dir '{kernel_dir}' does not exist"
-                        " and will not be observed.".format(kernel_dir=kernel_dir)
+                        f"KernelSpecCache: kernel_dir '{kernel_dir}' does not exist"
+                        " and will not be observed."
                     )
         self.observer.start()
@@ -223,7 +211,7 @@ class KernelSpecChangeHandler(FileSystemEventHandler):
     # Events related to these files trigger the management of the KernelSpec cache. Should we find
     # other files qualify as indicators of a kernel specification's state (like perhaps detached parameter
     # files in the future) should be added to this list - at which time it should become configurable.
-    watched_files = ["kernel.json"]
+    watched_files: ClassVar = ["kernel.json"]
 
     def __init__(self, kernel_spec_cache: KernelSpecCache, **kwargs):
         """Initialize the handler."""
diff --git a/enterprise_gateway/services/processproxies/conductor.py b/enterprise_gateway/services/processproxies/conductor.py
index a9d474fc..17b8c3e7 100644
--- a/enterprise_gateway/services/processproxies/conductor.py
+++ b/enterprise_gateway/services/processproxies/conductor.py
@@ -13,7 +13,7 @@
 import subprocess
 import time
 from random import randint
-from typing import Any
+from typing import Any, ClassVar
 
 from jupyter_client import localinterfaces
 from jupyter_server.utils import url_unescape
@@ -32,8 +32,8 @@ class ConductorClusterProcessProxy(RemoteProcessProxy):
     Kernel lifecycle management for Conductor clusters.
""" - initial_states = {"SUBMITTED", "WAITING", "RUNNING"} - final_states = {"FINISHED", "KILLED", "RECLAIMED"} # Don't include FAILED state + initial_states: ClassVar = {"SUBMITTED", "WAITING", "RUNNING"} + final_states: ClassVar = {"FINISHED", "KILLED", "RECLAIMED"} # Don't include FAILED state def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): """Initialize the proxy.""" @@ -227,9 +227,7 @@ def _update_notebook_master_rest_url(self, env_dict: dict) -> None: if updated_one_notebook_master_rest_url and updated_one_notebook_master_web_submission_url: self.log.debug( - "Updating KERNEL_NOTEBOOK_MASTER_REST to '{}'.".format( - updated_one_notebook_master_rest_url - ) + f"Updating KERNEL_NOTEBOOK_MASTER_REST to '{updated_one_notebook_master_rest_url}'." ) os.environ["KERNEL_NOTEBOOK_MASTER_REST"] = updated_one_notebook_master_rest_url env_dict["KERNEL_NOTEBOOK_MASTER_REST"] = updated_one_notebook_master_rest_url @@ -416,9 +414,7 @@ async def handle_timeout(self) -> None: ) if time_interval > self.kernel_launch_timeout: - reason = "Application failed to start within {} seconds.".format( - self.kernel_launch_timeout - ) + reason = f"Application failed to start within {self.kernel_launch_timeout} seconds." error_http_code = 500 if self._get_application_id(True): if self._query_app_state_by_driver_id(self.driver_id) != "WAITING": @@ -435,9 +431,7 @@ async def handle_timeout(self) -> None: self.application_id, self.kernel_launch_timeout ) await asyncio.get_event_loop().run_in_executor(None, self.kill) - timeout_message = "KernelID: '{}' launch timeout due to: {}".format( - self.kernel_id, reason - ) + timeout_message = f"KernelID: '{self.kernel_id}' launch timeout due to: {reason}" self.log_and_raise(http_status_code=error_http_code, reason=timeout_message) def _get_application_id(self, ignore_final_states: bool = False) -> str: @@ -473,9 +467,7 @@ def _get_application_id(self, ignore_final_states: bool = False) -> str: ) else: self.log.debug( - "ApplicationID not yet assigned for KernelID: '{}' - retrying...".format( - self.kernel_id - ) + f"ApplicationID not yet assigned for KernelID: '{self.kernel_id}' - retrying..." ) return self.application_id @@ -525,9 +517,7 @@ def _query_app_by_driver_id(self, driver_id: str) -> dict | None: response = None if not response or not response["applist"] else response["applist"] except Exception as e: self.log.warning( - "Getting application with cmd '{}' failed with exception: '{}'. Continuing...".format( - cmd, e - ) + f"Getting application with cmd '{cmd}' failed with exception: '{e}'. Continuing..." ) return response @@ -557,9 +547,7 @@ def _query_app_by_id(self, app_id: str) -> dict | None: response = None if response is None or not response["applist"] else response["applist"] except Exception as e: self.log.warning( - "Getting application with cmd '{}' failed with exception: '{}'. Continuing...".format( - cmd, e - ) + f"Getting application with cmd '{cmd}' failed with exception: '{e}'. Continuing..." 
             )
         return response
diff --git a/enterprise_gateway/services/processproxies/container.py b/enterprise_gateway/services/processproxies/container.py
index e913d7bf..feee9c63 100644
--- a/enterprise_gateway/services/processproxies/container.py
+++ b/enterprise_gateway/services/processproxies/container.py
@@ -112,17 +112,13 @@ def _enforce_prohibited_ids(self, **kwargs: dict[str, Any] | None) -> None:
         if kernel_uid in prohibited_uids:
             http_status_code = 403
             error_message = (
-                "Kernel's UID value of '{}' has been denied via EG_PROHIBITED_UIDS!".format(
-                    kernel_uid
-                )
+                f"Kernel's UID value of '{kernel_uid}' has been denied via EG_PROHIBITED_UIDS!"
             )
             self.log_and_raise(http_status_code=http_status_code, reason=error_message)
         elif kernel_gid in prohibited_gids:
             http_status_code = 403
             error_message = (
-                "Kernel's GID value of '{}' has been denied via EG_PROHIBITED_GIDS!".format(
-                    kernel_gid
-                )
+                f"Kernel's GID value of '{kernel_gid}' has been denied via EG_PROHIBITED_GIDS!"
             )
             self.log_and_raise(http_status_code=http_status_code, reason=error_message)
diff --git a/enterprise_gateway/services/processproxies/distributed.py b/enterprise_gateway/services/processproxies/distributed.py
index ff160934..164934a3 100644
--- a/enterprise_gateway/services/processproxies/distributed.py
+++ b/enterprise_gateway/services/processproxies/distributed.py
@@ -10,7 +10,7 @@
 import signal
 from socket import gethostbyname
 from subprocess import STDOUT
-from typing import Any
+from typing import Any, ClassVar
 
 from ..kernels.remotemanager import RemoteKernelManager
 from .processproxy import BaseProcessProxyABC, RemoteProcessProxy
@@ -24,8 +24,8 @@ class TrackKernelOnHost:
     """A class for tracking a kernel on a host."""
 
-    _host_kernels = {}
-    _kernel_host_mapping = {}
+    _host_kernels: ClassVar = {}
+    _kernel_host_mapping: ClassVar = {}
 
     def add_kernel_id(self, host: str, kernel_id: str) -> None:
         """Add a kernel to a host."""
@@ -229,9 +229,7 @@ async def handle_timeout(self) -> None:
                 self.kernel_launch_timeout, self.assigned_host, self.kernel_log
             )
         )
-        timeout_message = "KernelID: '{}' launch timeout due to: {}".format(
-            self.kernel_id, reason
-        )
+        timeout_message = f"KernelID: '{self.kernel_id}' launch timeout due to: {reason}"
         await asyncio.get_event_loop().run_in_executor(None, self.kill)
         self.log_and_raise(http_status_code=500, reason=timeout_message)
diff --git a/enterprise_gateway/services/processproxies/docker_swarm.py b/enterprise_gateway/services/processproxies/docker_swarm.py
index 1c5336cc..f3b05aac 100644
--- a/enterprise_gateway/services/processproxies/docker_swarm.py
+++ b/enterprise_gateway/services/processproxies/docker_swarm.py
@@ -261,9 +261,7 @@ def terminate_container_resources(self) -> bool | None:
                     container.remove(force=True)  # Container still exists, attempt forced removal
             except Exception as err:
                 self.log.debug(
-                    "Container termination for container: {} raised exception: {}".format(
-                        container.name, err
-                    )
+                    f"Container termination for container: {container.name} raised exception: {err}"
                 )
                 if isinstance(err, NotFound):
                     pass  # okay if its not found
diff --git a/enterprise_gateway/services/processproxies/processproxy.py b/enterprise_gateway/services/processproxies/processproxy.py
index edca9124..177c4949 100644
--- a/enterprise_gateway/services/processproxies/processproxy.py
+++ b/enterprise_gateway/services/processproxies/processproxy.py
@@ -357,8 +357,8 @@ def _decode_payload(self, data: json) -> dict:
                 new_connection_info["kernel_id"] = kernel_id
                 connection_info_str = json.dumps(new_connection_info)
                 self.log.warning(
-                    "WARNING!!!! Legacy kernel response received for kernel_id '{}'! "
-                    "Update kernel launchers to current version!".format(kernel_id)
+                    f"WARNING!!!! Legacy kernel response received for kernel_id '{kernel_id}'! "
+                    "Update kernel launchers to current version!"
                 )
                 break  # If we're here, we made it!
             except Exception as ex2:
@@ -464,7 +464,7 @@ def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):  #
         if self._use_gss_raw.lower() not in ("", "true", "false"):
             msg = (
                 "Invalid Value for EG_REMOTE_GSS_SSH expected one of "
-                '"", "True", "False", got {!r}'.format(self._use_gss_raw)
+                f'"", "True", "False", got {self._use_gss_raw!r}'
             )
             raise ValueError(msg)
         self.use_gss = self._use_gss_raw == "true"
@@ -842,10 +842,8 @@ def _raise_authorization_error(self, kernel_username: str, differentiator_clause
         kernel_name = self.kernel_manager.kernel_spec.display_name
         kernel_clause = f" '{kernel_name}'." if kernel_name is not None else "s."
         error_message = (
-            "User '{}' is {} to start kernel{} "
-            "Ensure KERNEL_USERNAME is set to an appropriate value and retry the request.".format(
-                kernel_username, differentiator_clause, kernel_clause
-            )
+            f"User '{kernel_username}' is {differentiator_clause} to start kernel{kernel_clause} "
+            "Ensure KERNEL_USERNAME is set to an appropriate value and retry the request."
         )
         self.log_and_raise(http_status_code=403, reason=error_message)
@@ -921,26 +919,26 @@ def _validate_port_range(self) -> None:
             if self.lower_port < 1024 or self.lower_port > 65535:
                 self.log_and_raise(
                     http_status_code=500,
-                    reason="Invalid port range '{}' specified. "
-                    "Range for valid port numbers is (1024, 65535).".format(port_range),
+                    reason=f"Invalid port range '{port_range}' specified. "
+                    "Range for valid port numbers is (1024, 65535).",
                 )
             if self.upper_port < 1024 or self.upper_port > 65535:
                 self.log_and_raise(
                     http_status_code=500,
-                    reason="Invalid port range '{}' specified. "
-                    "Range for valid port numbers is (1024, 65535).".format(port_range),
+                    reason=f"Invalid port range '{port_range}' specified. "
+                    "Range for valid port numbers is (1024, 65535).",
                 )
         except ValueError as ve:
             self.log_and_raise(
                 http_status_code=500,
-                reason="Port range validation failed for range: '{}'. "
-                "Error was: {}".format(port_range, ve),
+                reason=f"Port range validation failed for range: '{port_range}'. "
+                f"Error was: {ve}",
             )
         except IndexError as ie:
             self.log_and_raise(
                 http_status_code=500,
-                reason="Port range validation failed for range: '{}'. "
-                "Error was: {}".format(port_range, ie),
+                reason=f"Port range validation failed for range: '{port_range}'. "
+                f"Error was: {ie}",
             )
 
         self.kernel_manager.port_range = port_range
@@ -1131,14 +1129,14 @@ def detect_launch_failure(self) -> None:
         if poll_result and poll_result > 0:
             self.local_proc.wait()
             error_message = (
-                "Error occurred during launch of KernelID: {}. "
-                "Check Enterprise Gateway log for more information.".format(self.kernel_id)
+                f"Error occurred during launch of KernelID: {self.kernel_id}. "
+                "Check Enterprise Gateway log for more information."
             )
             self.local_proc = None
             self.log_and_raise(http_status_code=500, reason=error_message)
 
     def _tunnel_to_kernel(
-        self, connection_info: dict, server: str, port: int = ssh_port, key: str = None
+        self, connection_info: dict, server: str, port: int = ssh_port, key: str | None = None
     ) -> tuple:
         """
         Tunnel connections to a kernel over SSH
@@ -1188,7 +1186,7 @@ def _tunnel_to_port(
         remote_port: int,
         server: str,
         port: int = ssh_port,
-        key: str = None,
+        key: str | None = None,
     ) -> int:
         """
         Analogous to _tunnel_to_kernel, but deals with a single port.  This will typically be called for
@@ -1228,9 +1226,7 @@ def _create_ssh_tunnel(
         except Exception as e:
             self.log_and_raise(
                 http_status_code=500,
-                reason="Could not open SSH tunnel for port {}. Exception: '{}'".format(
-                    channel_name, e
-                ),
+                reason=f"Could not open SSH tunnel for port {channel_name}. Exception: '{e}'",
             )
 
     def _spawn_ssh_tunnel(
@@ -1420,8 +1416,8 @@ def _update_connection(self, connect_info: dict) -> None:
             )
         else:
             error_message = (
-                "Unexpected runtime encountered for Kernel ID '{}' - "
-                "connection information is null!".format(self.kernel_id)
+                f"Unexpected runtime encountered for Kernel ID '{self.kernel_id}' - "
+                "connection information is null!"
             )
             self.log_and_raise(http_status_code=500, reason=error_message)
@@ -1451,9 +1447,7 @@ def _extract_pid_info(self, connect_info: dict) -> None:
                 self.pid = int(pid)
             except ValueError:
                 self.log.warning(
-                    "pid returned from kernel launcher is not an integer: {} - ignoring.".format(
-                        pid
-                    )
+                    f"pid returned from kernel launcher is not an integer: {pid} - ignoring."
                 )
                 pid = None
         pgid = connect_info.pop("pgid", None)
@@ -1462,9 +1456,7 @@ def _extract_pid_info(self, connect_info: dict) -> None:
                 self.pgid = int(pgid)
             except ValueError:
                 self.log.warning(
-                    "pgid returned from kernel launcher is not an integer: {} - ignoring.".format(
-                        pgid
-                    )
+                    f"pgid returned from kernel launcher is not an integer: {pgid} - ignoring."
                 )
                 pgid = None
         if (
@@ -1487,12 +1479,8 @@ async def handle_timeout(self):
 
         if time_interval > self.kernel_launch_timeout:
             error_http_code = 500
-            reason = "Waited too long ({}s) to get connection file".format(
-                self.kernel_launch_timeout
-            )
-            timeout_message = "KernelID: '{}' launch timeout due to: {}".format(
-                self.kernel_id, reason
-            )
+            reason = f"Waited too long ({self.kernel_launch_timeout}s) to get connection file"
+            timeout_message = f"KernelID: '{self.kernel_id}' launch timeout due to: {reason}"
             await asyncio.get_event_loop().run_in_executor(None, self.kill)
             self.log_and_raise(http_status_code=error_http_code, reason=timeout_message)
@@ -1645,7 +1633,7 @@ def load_process_info(self, process_info):
             # communication socket (comm_ip, comm_port) members as well.
             self._setup_connection_info(process_info["tunneled_connect_info"])
 
-    def log_and_raise(self, http_status_code: int = None, reason: str = None):
+    def log_and_raise(self, http_status_code: int | None = None, reason: str | None = None):
         """
         Override log_and_raise method in order to verify that the response socket is
         properly closed before raise exception
diff --git a/enterprise_gateway/services/processproxies/yarn.py b/enterprise_gateway/services/processproxies/yarn.py
index 3981a515..c50b7cd5 100644
--- a/enterprise_gateway/services/processproxies/yarn.py
+++ b/enterprise_gateway/services/processproxies/yarn.py
@@ -11,7 +11,7 @@
 import signal
 import socket
 import time
-from typing import Any
+from typing import Any, ClassVar
 
 from jupyter_client import localinterfaces
 from yarn_api_client.base import Response
@@ -43,8 +43,8 @@ class YarnClusterProcessProxy(RemoteProcessProxy):
     Kernel lifecycle management for YARN clusters.
     """
 
-    initial_states = {"NEW", "SUBMITTED", "ACCEPTED", "RUNNING"}
-    final_states = {"FINISHED", "KILLED", "FAILED"}
+    initial_states: ClassVar = {"NEW", "SUBMITTED", "ACCEPTED", "RUNNING"}
+    final_states: ClassVar = {"FINISHED", "KILLED", "FAILED"}
 
     def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):
         """Initialize the proxy."""
@@ -211,8 +211,8 @@ def confirm_yarn_queue_availability(self, **kwargs: dict[str, Any] | None) -> No
 
         if self.candidate_queue is None:
             self.log.warning(
-                "Queue: {} not found in cluster."
-                "Availability check will not be performed".format(candidate_queue_name)
+                f"Queue: {candidate_queue_name} not found in cluster."
+                "Availability check will not be performed"
             )
             return
@@ -222,16 +222,14 @@ def confirm_yarn_queue_availability(self, **kwargs: dict[str, Any] | None) -> No
 
         if self.candidate_partition is None:
             self.log.debug(
-                "Partition: {} not found in {} queue."
-                "Availability check will not be performed".format(node_label, candidate_queue_name)
+                f"Partition: {node_label} not found in {candidate_queue_name} queue."
+                "Availability check will not be performed"
             )
             return
 
         self.log.debug(
-            "Checking endpoint: {} if partition: {} "
-            "has used capacity <= {}%".format(
-                self.yarn_endpoint, self.candidate_partition, partition_availability_threshold
-            )
+            f"Checking endpoint: {self.yarn_endpoint} if partition: {self.candidate_partition} "
+            f"has used capacity <= {partition_availability_threshold}%"
         )
 
         yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(
@@ -421,9 +419,7 @@ async def handle_timeout(self) -> None:
             )
         )
         await asyncio.get_event_loop().run_in_executor(None, self.kill)
-        timeout_message = "KernelID: '{}' launch timeout due to: {}".format(
-            self.kernel_id, reason
-        )
+        timeout_message = f"KernelID: '{self.kernel_id}' launch timeout due to: {reason}"
         self.log_and_raise(http_status_code=error_http_code, reason=timeout_message)
 
     def get_process_info(self) -> dict[str, Any]:
@@ -460,7 +456,7 @@ def _get_application_id(self, ignore_final_states: bool = False) -> str:
         if not self.application_id:
             app = self._query_app_by_name(self.kernel_id)
             state_condition = True
-            if type(app) is dict:
+            if isinstance(app, dict):
                 state = app.get("state")
                 self.last_known_state = state
@@ -480,9 +476,7 @@ def _get_application_id(self, ignore_final_states: bool = False) -> str:
                 )
             if not self.application_id:
                 self.log.debug(
-                    "ApplicationID not yet assigned for KernelID: '{}' - retrying...".format(
-                        self.kernel_id
-                    )
+                    f"ApplicationID not yet assigned for KernelID: '{self.kernel_id}' - retrying..."
                 )
         return self.application_id
@@ -524,7 +518,11 @@ def _query_app_by_name(self, kernel_id: str) -> dict:
             )
         else:
             data = response.data
-            if type(data) is dict and type(data.get("apps")) is dict and "app" in data.get("apps"):
+            if (
+                isinstance(data, dict)
+                and isinstance(data.get("apps"), dict)
+                and "app" in data.get("apps")
+            ):
                 for app in data["apps"]["app"]:
                     if app.get("name", "").find(kernel_id) >= 0 and app.get("id") > top_most_app_id:
                         target_app = app
@@ -542,13 +540,11 @@ def _query_app_by_id(self, app_id: str) -> dict:
             response = self.resource_mgr.cluster_application(application_id=app_id)
         except Exception as e:
             self.log.warning(
-                "Query for application ID '{}' failed with exception: '{}'. Continuing...".format(
-                    app_id, e
-                )
+                f"Query for application ID '{app_id}' failed with exception: '{e}'. Continuing..."
             )
         else:
             data = response.data
-            if type(data) is dict and "app" in data:
+            if isinstance(data, dict) and "app" in data:
                 app = data["app"]
 
         return app
@@ -564,8 +560,8 @@ def _query_app_state_by_id(self, app_id: str) -> str:
             response = self.resource_mgr.cluster_application_state(application_id=app_id)
         except Exception as e:
             self.log.warning(
-                "Query for application '{}' state failed with exception: '{}'. "
-                "Continuing with last known state = '{}'...".format(app_id, e, state)
+                f"Query for application '{app_id}' state failed with exception: '{e}'. "
+                f"Continuing with last known state = '{state}'..."
             )
         else:
             state = response.data["state"]
diff --git a/enterprise_gateway/services/sessions/kernelsessionmanager.py b/enterprise_gateway/services/sessions/kernelsessionmanager.py
index cf5450ae..487b3571 100644
--- a/enterprise_gateway/services/sessions/kernelsessionmanager.py
+++ b/enterprise_gateway/services/sessions/kernelsessionmanager.py
@@ -226,7 +226,7 @@ def _delete_sessions(self, kernel_ids: list[str]) -> None:
     @staticmethod
     def pre_save_transformation(session: dict) -> dict:
         """Handle a pre_save for a session."""
-        kernel_id = list(session.keys())[0]
+        kernel_id = next(iter(session.keys()))
         session_info = session[kernel_id]
         if session_info.get("connection_info"):
             info = session_info["connection_info"]
@@ -239,7 +239,7 @@ def pre_save_transformation(session: dict) -> dict:
     @staticmethod
     def post_load_transformation(session: dict) -> dict:
         """Handle a post_load for a session."""
-        kernel_id = list(session.keys())[0]
+        kernel_id = next(iter(session.keys()))
         session_info = session[kernel_id]
         if session_info.get("connection_info"):
             info = session_info["connection_info"]
diff --git a/enterprise_gateway/services/sessions/sessionmanager.py b/enterprise_gateway/services/sessions/sessionmanager.py
index 101d03f1..4ca5aa0d 100644
--- a/enterprise_gateway/services/sessions/sessionmanager.py
+++ b/enterprise_gateway/services/sessions/sessionmanager.py
@@ -170,7 +170,7 @@ def get_session(self, **kwargs) -> dict:
 
         # multiple columns are never passed into kwargs so just using the
        # first and only one.
-        column = list(kwargs.keys())[0]
+        column = next(iter(kwargs.keys()))
         row = self.get_session_by_key(column, kwargs[column])
 
         if not row:
diff --git a/etc/kernel-launchers/R/scripts/server_listener.py b/etc/kernel-launchers/R/scripts/server_listener.py
index 5ab41a31..9fd57987 100644
--- a/etc/kernel-launchers/R/scripts/server_listener.py
+++ b/etc/kernel-launchers/R/scripts/server_listener.py
@@ -67,8 +67,7 @@ def return_connection_info(
     response_parts = response_addr.split(":")
     if len(response_parts) != 2:
         logger.error(
-            "Invalid format for response address '{}'. "
-            "Assuming 'pull' mode...".format(response_addr)
+            f"Invalid format for response address '{response_addr}'. Assuming 'pull' mode..."
         )
         return
 
@@ -77,8 +76,8 @@ def return_connection_info(
         response_port = int(response_parts[1])
     except ValueError:
         logger.error(
-            "Invalid port component found in response address '{}'. "
-            "Assuming 'pull' mode...".format(response_addr)
+            f"Invalid port component found in response address '{response_addr}'. "
+            "Assuming 'pull' mode..."
         )
         return
 
@@ -113,9 +112,7 @@ def prepare_comm_socket(lower_port, upper_port):
     """
     sock = _select_socket(lower_port, upper_port)
    logger.info(
-        "Signal socket bound to host: {}, port: {}".format(
-            sock.getsockname()[0], sock.getsockname()[1]
-        )
+        f"Signal socket bound to host: {sock.getsockname()[0]}, port: {sock.getsockname()[1]}"
     )
     sock.listen(1)
     sock.settimeout(5)
diff --git a/pyproject.toml b/pyproject.toml
index 260f5547..157db6ba 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -68,7 +68,7 @@ lint = [
   "black[jupyter]==23.3.0",
   "mdformat>0.7",
   "mdformat-gfm>=0.3.5",
-  "ruff==0.0.270"
+  "ruff==0.0.290"
 ]
 
 [tool.ruff.pylint]