Merge remote-tracking branches 'benoitc/pr/2407', 'benoitc/pr/3157', 'benoitc/pr/3273', 'benoitc/pr/3271', 'benoitc/pr/3275', 'benoitc/pr/3148', 'benoitc/pr/3250' and 'pajod/test-proxy' into integration-v23.1.0
pajod committed Aug 24, 2024
9 parents 4f28f6c + d5aa52e + 497ad24 + 56b3e42 + 2096e42 + ef94875 + 7ecea2d + 7756175 + d3461ed commit 3c2d724
Showing 20 changed files with 1,199 additions and 285 deletions.
5 changes: 5 additions & 0 deletions .github/workflows/tox.yml
@@ -54,6 +54,11 @@ jobs:
cache-dependency-path: requirements_test.txt
check-latest: true
allow-prereleases: ${{ matrix.unsupported }}
- name: Add test utils
if: matrix.os == 'ubuntu-latest'
run: |
sudo systemctl mask nginx.service
sudo apt install nginx openssl wrk
- name: Install Dependencies
run: |
python -m pip install --upgrade pip
22 changes: 21 additions & 1 deletion docs/source/settings.rst
@@ -82,6 +82,10 @@ The default behavior is to attempt inotify with a fallback to file
system polling. Generally, inotify should be preferred if available
because it consumes less system resources.

.. note::
If the application fails to load while this option is used,
the (potentially sensitive!) traceback will be shared in
the response to subsequent HTTP requests.
.. note::
In order to use the inotify reloader, you must have the ``inotify``
package installed.
@@ -114,10 +118,13 @@ Valid engines are:

**Default:** ``[]``

Extends :ref:`reload` option to also watch and reload on additional files
Alternative or extension to :ref:`reload` option to (also) watch
and reload on additional files
(e.g., templates, configurations, specifications, etc.).

.. versionadded:: 19.8
.. versionchanged:: 23.FIXME
Option no longer silently ignored if used without :ref:`reload`.

.. _spew:

@@ -461,6 +468,19 @@ if not provided).

.. versionadded:: 19.2

.. _enable-backlog-metric:

``enable_backlog_metric``
~~~~~~~~~~~~~~~~~~~~~~~~~

**Command line:** ``--enable-backlog-metric``

**Default:** ``False``

Enable socket backlog metric (only supported on Linux).

.. versionadded:: 23.1

Process Naming
--------------

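As a usage sketch for the settings documented above: a minimal gunicorn.conf.py combining the reload options with the new backlog metric. The file paths, the statsd address, and the application module below are illustrative assumptions, not taken from this commit.

    # gunicorn.conf.py -- illustrative sketch only; paths and the statsd
    # address are assumptions, not part of this commit.
    bind = "127.0.0.1:8000"
    workers = 2

    # Restart workers when application code changes (development use).
    reload = True

    # Also watch non-Python files; after this change the option is no longer
    # silently ignored when used without --reload.
    reload_extra_files = ["templates/base.html", "config/settings.yml"]

    # New in this commit: emit the listen-socket backlog as a metric
    # (Linux only), typically alongside a statsd sink.
    enable_backlog_metric = True
    statsd_host = "localhost:8125"

Started with something like ``gunicorn -c gunicorn.conf.py myapp.wsgi:app``, roughly the same options could be passed on the command line as ``--reload --reload-extra-file templates/base.html --enable-backlog-metric``.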
174 changes: 70 additions & 104 deletions gunicorn/arbiter.py
@@ -4,12 +4,12 @@
import errno
import os
import random
import select
import signal
import socket
import sys
import time
import traceback
import queue

from gunicorn.errors import HaltServer, AppImportError
from gunicorn.pidfile import Pidfile
@@ -37,16 +37,12 @@ class Arbiter:

LISTENERS = []
WORKERS = {}
PIPE = []

# I love dynamic languages
SIG_QUEUE = []
SIGNALS = [getattr(signal, "SIG%s" % x)
for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
SIG_NAMES = dict(
(getattr(signal, name), name[3:].lower()) for name in dir(signal)
if name[:3] == "SIG" and name[3] != "_"
)
SIG_QUEUE = queue.SimpleQueue()
SIGNALS = [getattr(signal.Signals, "SIG%s" % x)
for x in "CHLD HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
WAKEUP_REQUEST = signal.NSIG

def __init__(self, app):
os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE
@@ -76,6 +72,11 @@ def __init__(self, app):
0: sys.executable
}

self.SIG_HANDLERS = dict(
(sig, getattr(self, "handle_%s" % sig.name[3:].lower()))
for sig in self.SIGNALS
)

def _get_num_workers(self):
return self._num_workers

@@ -172,27 +173,25 @@ def init_signals(self):
Initialize master signal handling. Most of the signals
are queued. Child signals only wake up the master.
"""
# close old PIPE
for p in self.PIPE:
os.close(p)

# initialize the pipe
self.PIPE = pair = os.pipe()
for p in pair:
util.set_non_blocking(p)
util.close_on_exec(p)

self.log.close_on_exec()

# initialize all signals
for s in self.SIGNALS:
signal.signal(s, self.signal)
signal.signal(signal.SIGCHLD, self.handle_chld)

def signal(self, sig, frame):
if len(self.SIG_QUEUE) < 5:
self.SIG_QUEUE.append(sig)
self.wakeup()
""" Note: Signal handler! No logging allowed. """
self.wakeup(due_to_signal=sig)

# Some UNIXes require SIGCHLD to be reinstalled, see python signal docs
if sig == signal.SIGCHLD:
signal.signal(sig, self.signal)

def wakeup(self, due_to_signal=None):
"""\
Wake up the main master loop.
"""
self.SIG_QUEUE.put(due_to_signal or self.WAKEUP_REQUEST)

def run(self):
"Main master loop."
@@ -205,25 +204,17 @@ def run(self):
while True:
self.maybe_promote_master()

sig = self.SIG_QUEUE.pop(0) if self.SIG_QUEUE else None
if sig is None:
self.sleep()
self.murder_workers()
self.manage_workers()
continue

if sig not in self.SIG_NAMES:
self.log.info("Ignoring unknown signal: %s", sig)
continue
try:
sig = self.SIG_QUEUE.get(timeout=1)
if sig != self.WAKEUP_REQUEST:
if sig != signal.SIGCHLD:
self.log.info("Handling signal: %s", signal.Signals(sig).name)
self.SIG_HANDLERS[sig]()
except queue.Empty:
pass

signame = self.SIG_NAMES.get(sig)
handler = getattr(self, "handle_%s" % signame, None)
if not handler:
self.log.error("Unhandled signal: %s", signame)
continue
self.log.info("Handling signal: %s", signame)
handler()
self.wakeup()
self.murder_workers()
self.manage_workers()
except (StopIteration, KeyboardInterrupt):
self.halt()
except HaltServer as inst:
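The loop above replaces the old self-pipe-and-select wakeup with a signal queue drained on a one-second timeout. A standalone sketch of the same pattern, with illustrative names (this is not the arbiter itself):

    import queue
    import signal

    SIG_QUEUE = queue.SimpleQueue()   # SimpleQueue.put() is reentrant, so it is
    WAKEUP_REQUEST = signal.NSIG      # safe inside a signal handler; NSIG serves
                                      # as a "no signal, just wake up" sentinel

    def on_signal(sig, frame):
        # Signal handler: no logging, no locks -- just enqueue and return.
        SIG_QUEUE.put(sig)

    def wakeup(due_to_signal=None):
        # Anything else that wants the loop to run immediately can nudge it.
        SIG_QUEUE.put(due_to_signal or WAKEUP_REQUEST)

    def main_loop():
        for s in (signal.SIGHUP, signal.SIGTERM, signal.SIGCHLD):
            signal.signal(s, on_signal)
        while True:
            try:
                sig = SIG_QUEUE.get(timeout=1)   # sleep at most one second
                if sig not in (WAKEUP_REQUEST, signal.SIGCHLD):
                    print("handling", signal.Signals(sig).name)
                if sig == signal.SIGTERM:
                    return
            except queue.Empty:
                pass
            # Periodic housekeeping runs whether or not a signal arrived
            # (murder_workers()/manage_workers() in the real arbiter).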
@@ -238,10 +229,9 @@ def run(self):
self.pidfile.unlink()
sys.exit(-1)

def handle_chld(self, sig, frame):
def handle_chld(self):
"SIGCHLD handling"
self.reap_workers()
self.wakeup()

def handle_hup(self):
"""\
@@ -328,16 +318,6 @@ def maybe_promote_master(self):
# reset proctitle
util._setproctitle("master [%s]" % self.proc_name)

def wakeup(self):
"""\
Wake up the arbiter by writing to the PIPE
"""
try:
os.write(self.PIPE[1], b'.')
except OSError as e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise

def halt(self, reason=None, exit_status=0):
""" halt arbiter """
self.stop()
@@ -352,25 +332,6 @@ def halt(self, reason=None, exit_status=0):
self.cfg.on_exit(self)
sys.exit(exit_status)

def sleep(self):
"""\
Sleep until PIPE is readable or we timeout.
A readable PIPE means a signal occurred.
"""
try:
ready = select.select([self.PIPE[0]], [], [], 1.0)
if not ready[0]:
return
while os.read(self.PIPE[0], 1):
pass
except OSError as e:
# TODO: select.error is a subclass of OSError since Python 3.3.
error_number = getattr(e, 'errno', e.args[0])
if error_number not in [errno.EAGAIN, errno.EINTR]:
raise
except KeyboardInterrupt:
sys.exit()

def stop(self, graceful=True):
"""\
Stop workers
@@ -395,6 +356,7 @@ def stop(self, graceful=True):
# wait until the graceful timeout
while self.WORKERS and time.time() < limit:
time.sleep(0.1)
self.reap_workers()

self.kill_workers(signal.SIGKILL)

@@ -519,44 +481,38 @@ def reap_workers(self):
break
if self.reexec_pid == wpid:
self.reexec_pid = 0
else:
# A worker was terminated. If the termination reason was
# that it could not boot, we'll shut it down to avoid
# infinite start/stop cycles.
exitcode = status >> 8
if exitcode != 0:
self.log.error('Worker (pid:%s) exited with code %s', wpid, exitcode)
continue

if os.WIFEXITED(status):
# A worker was normally terminated. If the termination
# reason was that it could not boot, we'll halt the server
# to avoid infinite start/stop cycles.
exitcode = os.WEXITSTATUS(status)
log = self.log.error if exitcode != 0 else self.log.debug
log('Worker (pid:%s) exited with code %s', wpid, exitcode)
if exitcode == self.WORKER_BOOT_ERROR:
reason = "Worker failed to boot."
raise HaltServer(reason, self.WORKER_BOOT_ERROR)
if exitcode == self.APP_LOAD_ERROR:
reason = "App failed to load."
raise HaltServer(reason, self.APP_LOAD_ERROR)

if exitcode > 0:
# If the exit code of the worker is greater than 0,
# let the user know.
self.log.error("Worker (pid:%s) exited with code %s.",
wpid, exitcode)
elif status > 0:
# If the exit code of the worker is 0 and the status
# is greater than 0, then it was most likely killed
# via a signal.
try:
sig_name = signal.Signals(status).name
except ValueError:
sig_name = "code {}".format(status)
msg = "Worker (pid:{}) was sent {}!".format(
wpid, sig_name)

# Additional hint for SIGKILL
if status == signal.SIGKILL:
msg += " Perhaps out of memory?"
self.log.error(msg)

worker = self.WORKERS.pop(wpid, None)
if not worker:
continue
elif os.WIFSIGNALED(status):
# A worker was terminated by a signal.
sig = os.WTERMSIG(status)
try:
sig_name = signal.Signals(sig).name
except ValueError:
sig_name = "signal {}".format(sig)
msg = "Worker (pid:{}) was terminated by {}!".format(
wpid, sig_name)

# Additional hint for SIGKILL
if sig == signal.SIGKILL:
msg += " Perhaps out of memory?"
self.log.error(msg)

worker = self.WORKERS.pop(wpid, None)
if worker:
worker.tmp.close()
self.cfg.child_exit(self, worker)
except OSError as e:
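reap_workers now decodes the status returned by os.waitpid() with the os.WIF* helpers instead of shifting bits by hand, so normal exits and signal deaths are reported separately. A small stdlib-only sketch of that decoding, with an illustrative helper name:

    import os
    import signal

    def describe_wait_status(wpid, status):
        """Human-readable summary of a status value returned by os.waitpid()."""
        if os.WIFEXITED(status):
            # Normal termination: report the exit code.
            return "worker %s exited with code %s" % (wpid, os.WEXITSTATUS(status))
        if os.WIFSIGNALED(status):
            # Terminated by a signal; SIGKILL often means the OOM killer struck.
            sig = os.WTERMSIG(status)
            try:
                name = signal.Signals(sig).name
            except ValueError:
                name = "signal %d" % sig
            hint = " Perhaps out of memory?" if sig == signal.SIGKILL else ""
            return "worker %s was terminated by %s!%s" % (wpid, name, hint)
        return "worker %s reported raw status %s" % (wpid, status)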
@@ -585,6 +541,16 @@ def manage_workers(self):
"value": active_worker_count,
"mtype": "gauge"})

if self.cfg.enable_backlog_metric:
backlog = sum(sock.get_backlog() or 0
for sock in self.LISTENERS)

if backlog >= 0:
self.log.debug("socket backlog: {0}".format(backlog),
extra={"metric": "gunicorn.backlog",
"value": backlog,
"mtype": "histogram"})

def spawn_worker(self):
self.worker_age += 1
worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS,
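The new metric in manage_workers sums sock.get_backlog() over the listeners; get_backlog() lives in gunicorn/sock.py, which is not shown in this excerpt. On Linux, the usual way to read a listening socket's current accept-queue length is the tcpi_unacked field of the TCP_INFO socket option. The sketch below shows that general technique under stated assumptions and is not necessarily how this commit implements it:

    import socket
    import struct

    def get_listen_backlog(sock):
        """Best-effort accept-queue length for a LISTEN socket (Linux only)."""
        tcp_info = getattr(socket, "TCP_INFO", None)
        if tcp_info is None:
            return None                      # not available on this platform
        try:
            data = sock.getsockopt(socket.IPPROTO_TCP, tcp_info, 104)
            # struct tcp_info begins with eight single-byte fields followed by
            # 32-bit counters; the fifth counter, tcpi_unacked, holds the
            # current backlog for a listening socket.
            fields = struct.unpack("8B6I", data[:32])
            return fields[12]
        except (OSError, struct.error):
            return None

Returning None on platforms without TCP_INFO lines up with the ``sock.get_backlog() or 0`` guard in the manage_workers hunk above.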
23 changes: 22 additions & 1 deletion gunicorn/config.py
@@ -921,6 +921,10 @@ class Reload(Setting):
system polling. Generally, inotify should be preferred if available
because it consumes less system resources.
.. note::
If the application fails to load while this option is used,
the (potentially sensitive!) traceback will be shared in
the response to subsequent HTTP requests.
.. note::
In order to use the inotify reloader, you must have the ``inotify``
package installed.
@@ -956,10 +960,13 @@ class ReloadExtraFiles(Setting):
validator = validate_list_of_existing_files
default = []
desc = """\
Extends :ref:`reload` option to also watch and reload on additional files
Alternative or extension to :ref:`reload` option to (also) watch
and reload on additional files
(e.g., templates, configurations, specifications, etc.).
.. versionadded:: 19.8
.. versionchanged:: 23.FIXME
Option no longer silently ignored if used without :ref:`reload`.
"""


@@ -1693,6 +1700,20 @@ class StatsdPrefix(Setting):
"""


class BacklogMetric(Setting):
name = "enable_backlog_metric"
section = "Logging"
cli = ["--enable-backlog-metric"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable socket backlog metric (only supported on Linux).
.. versionadded:: 23.1
"""


class Procname(Setting):
name = "proc_name"
section = "Process Naming"
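For context on how the new BacklogMetric setting reaches the command line: gunicorn builds its argument parser from each Setting's cli, action, and default attributes, so the class above should translate into an argparse option roughly like the following approximation (not code from this commit):

    import argparse

    parser = argparse.ArgumentParser(prog="gunicorn")
    parser.add_argument("--enable-backlog-metric",
                        dest="enable_backlog_metric",
                        action="store_true",
                        # the Setting's default; gunicorn's own parser wiring
                        # may differ in detail
                        default=False,
                        help="Enable socket backlog metric (only supported on Linux).")

    args = parser.parse_args(["--enable-backlog-metric"])
    assert args.enable_backlog_metric is True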