Improve language.
felixfontein committed Dec 28, 2024
1 parent 04c9772 commit f69536e
Showing 34 changed files with 89 additions and 89 deletions.
18 changes: 9 additions & 9 deletions plugins/connection/docker.py
@@ -86,7 +86,7 @@
# - key: extra_env
# section: docker_connection
# ansible-core's config manager does NOT support converting JSON strings (or other things) to dictionaries,
# it only accepts actual dictionaries (which don't happen to come from env and ini vars). So there's no way
# it only accepts actual dictionaries (which do not happen to come from env and ini vars). So there's no way
# to actually provide this parameter from env and ini sources... :-(
vars:
- name: ansible_docker_extra_env
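
A minimal sketch of the conversion that ansible-core's config manager skips here, assuming the value were supplied as a JSON string through an environment variable (the variable name and value below are illustrative, not taken from the diff):

    import json
    import os

    # Hypothetical: pretend the extra environment had been supplied as JSON text.
    raw = os.environ.get("ANSIBLE_DOCKER_EXTRA_ENV", '{"HTTP_PROXY": "http://proxy.example:3128"}')

    # This json.loads() step is what the config manager does not perform, which is
    # why only real dictionaries (for example from play vars) are accepted.
    extra_env = json.loads(raw)
    assert isinstance(extra_env, dict)
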
@@ -153,7 +153,7 @@ def __init__(self, play_context, new_stdin, *args, **kwargs):
# Note: docker supports running as non-root in some configurations.
# (For instance, setting the UNIX socket file to be readable and
# writable by a specific UNIX group and then putting users into that
# group). Therefore we don't check that the user is root when using
# group). Therefore we do not check that the user is root when using
# this connection. But if the user is getting a permission denied
# error it probably means that docker on their system is only
# configured to be connected to by root and they are not running as
@@ -333,9 +333,9 @@ def _get_actual_user(self):
.format(self.docker_version, self.actual_user or u'?'))
return actual_user
elif self._display.verbosity > 2:
# Since we're not setting the actual_user, look it up so we have it for logging later
# Since we are not setting the actual_user, look it up so we have it for logging later
# Only do this if display verbosity is high enough that we'll need the value
# This saves overhead from calling into docker when we don't need to.
# This saves overhead from calling into docker when we do not need to.
return self._get_docker_remote_user()
else:
return None
@@ -418,11 +418,11 @@ def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
ssh chooses $HOME but we are not guaranteed that a home dir will
exist in any given chroot. So for now we are choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
Can revisit using $HOME instead if it is a problem
'''
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath
@@ -444,7 +444,7 @@ def put_file(self, in_path, out_path):
"file or module does not exist: %s" % to_native(in_path))

out_path = shlex_quote(out_path)
# Older docker doesn't have native support for copying files into
# Older docker does not have native support for copying files into
# running containers, so we use docker exec to implement this
# Although docker version 1.8 and later provide support, the
# owner and group of the files are always set to root
@@ -490,7 +490,7 @@ def fetch_file(self, in_path, out_path):
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))

if p.returncode != 0:
# Older docker doesn't have native support for fetching files command `cp`
# Older docker does not have native support for fetching files command `cp`
# If `cp` fails, try to use `dd` instead
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
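
A rough standalone sketch of that dd-based fallback for fetching a file, driven through the docker CLI rather than the connection plugin (container name, paths, and block size are placeholders):

    import subprocess

    CONTAINER = "my-container"   # placeholder container name
    IN_PATH = "/etc/hostname"    # placeholder path inside the container
    OUT_PATH = "hostname.copy"   # placeholder local destination
    BUFSIZE = 65536

    # Stream the remote file through dd running inside the container and write
    # the captured stdout to a local file.
    cmd = ["docker", "exec", CONTAINER, "/bin/sh", "-c",
           "dd if=%s bs=%s" % (IN_PATH, BUFSIZE)]
    with open(OUT_PATH, "wb") as out:
        subprocess.run(cmd, stdout=out, check=True)
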
12 changes: 6 additions & 6 deletions plugins/connection/docker_api.py
@@ -79,7 +79,7 @@
# - key: extra_env
# section: docker_connection
# ansible-core's config manager does NOT support converting JSON strings (or other things) to dictionaries,
# it only accepts actual dictionaries (which don't happen to come from env and ini vars). So there's no way
# it only accepts actual dictionaries (which do not happen to come from env and ini vars). So there's no way
# to actually provide this parameter from env and ini sources... :-(
vars:
- name: ansible_docker_extra_env
@@ -205,9 +205,9 @@ def _connect(self, port=None):
self._connected = True

if self.actual_user is None and display.verbosity > 2:
# Since we're not setting the actual_user, look it up so we have it for logging later
# Since we are not setting the actual_user, look it up so we have it for logging later
# Only do this if display verbosity is high enough that we'll need the value
# This saves overhead from calling into docker when we don't need to
# This saves overhead from calling into docker when we do not need to
display.vvv(u"Trying to determine actual user")
result = self._call_client(lambda: self.client.get_json('/containers/{0}/json', self.get_option('remote_addr')))
if result.get('Config'):
@@ -319,11 +319,11 @@ def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
ssh chooses $HOME but we are not guaranteed that a home dir will
exist in any given chroot. So for now we are choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
Can revisit using $HOME instead if it is a problem
'''
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath
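
For context on the actual_user lookup above, a minimal sketch that asks the docker CLI which user a container is configured to run as (the container name is a placeholder; an empty Config.User means the container runs as root):

    import subprocess

    CONTAINER = "my-container"  # placeholder container name

    # .Config.User is empty when neither the image nor the container sets an
    # explicit user, in which case processes run as root.
    user = subprocess.run(
        ["docker", "inspect", "--format", "{{.Config.User}}", CONTAINER],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    print(user or "root")
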
4 changes: 2 additions & 2 deletions plugins/inventory/docker_containers.py
@@ -33,7 +33,7 @@
plugin:
description:
- The name of this plugin, it should always be set to V(community.docker.docker_containers)
for this plugin to recognize it as it's own.
for this plugin to recognize it as its own.
type: str
required: true
choices: [ community.docker.docker_containers ]
@@ -162,7 +162,7 @@
# Next accept all containers whose inventory_hostname starts with 'a'
- include: >-
inventory_hostname.startswith("a")
# Exclude all containers that didn't match any of the above filters
# Exclude all containers that did not match any of the above filters
- exclude: true
'''

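
A small illustration of how such an ordered include/exclude list behaves, assuming first-match-wins semantics as the comments in the example suggest (the helper below is illustrative, not the plugin's implementation):

    # Each rule pairs an action with a condition; the first condition that
    # matches decides whether the host is kept.
    rules = [
        ("include", lambda name: name.startswith("a")),
        ("exclude", lambda name: True),  # catch-all for anything not matched above
    ]

    def accepted(name):
        for action, condition in rules:
            if condition(name):
                return action == "include"
        return True  # assumption: hosts matching no rule are kept

    print(accepted("alpine-test"))  # True
    print(accepted("db-1"))         # False
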
2 changes: 1 addition & 1 deletion plugins/inventory/docker_machine.py
@@ -167,7 +167,7 @@ def _get_docker_daemon_variables(self, machine_name):
return vars

def _get_machine_names(self):
# Filter out machines that are not in the Running state as we probably can't do anything useful actions
# Filter out machines that are not in the Running state as we probably cannot do anything useful actions
# with them.
ls_command = ['ls', '-q']
if self.get_option('running_required'):
2 changes: 1 addition & 1 deletion plugins/inventory/docker_swarm.py
@@ -32,7 +32,7 @@
options:
plugin:
description: The name of this plugin, it should always be set to V(community.docker.docker_swarm)
for this plugin to recognize it as it's own.
for this plugin to recognize it as its own.
type: str
required: true
choices: [ docker_swarm, community.docker.docker_swarm ]
16 changes: 8 additions & 8 deletions plugins/module_utils/_api/api/client.py
@@ -276,7 +276,7 @@ def _result(self, response, json=False, binary=False):
return response.text

def _post_json(self, url, data, **kwargs):
# Go <1.1 can't unserialize null to a string
# Go <1.1 cannot unserialize null to a string
# so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
@@ -316,8 +316,8 @@ def _get_raw_response_socket(self, response):
# close TLS sockets.
sock._response = response
except AttributeError:
# UNIX sockets can't have attributes set on them, but that's
# fine because we won't be doing TLS over them
# UNIX sockets cannot have attributes set on them, but that's
# fine because we will not be doing TLS over them
pass

return sock
@@ -340,7 +340,7 @@ def _stream_helper(self, response, decode=False):
data += reader.read(reader._fp.chunk_left)
yield data
else:
# Response isn't chunked, meaning we probably
# Response is not chunked, meaning we probably
# encountered an error immediately
yield self._result(response, json=decode)

@@ -419,7 +419,7 @@ def _read_from_socket(self, response, stream, tty=True, demux=False):
response.close()

def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
""" Depending on the combination of python version and whether we are
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
@@ -440,7 +440,7 @@ def _disable_socket_timeout(self, socket):
if hasattr(s, 'gettimeout'):
timeout = s.gettimeout()

# Don't change the timeout if it is already disabled.
# Do not change the timeout if it is already disabled.
if timeout is None or timeout == 0.0:
continue
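
Condensed into a standalone sketch, the timeout handling above looks roughly like this (a paraphrase for illustration, not a drop-in replacement):

    def disable_socket_timeout(sock):
        # Try both the wrapper object and its underlying _sock, since which of
        # the two exposes settimeout() depends on the Python version and on
        # whether the connection is http or https.
        for s in (sock, getattr(sock, "_sock", None)):
            if s is None or not hasattr(s, "settimeout"):
                continue
            timeout = -1
            if hasattr(s, "gettimeout"):
                timeout = s.gettimeout()
            # Leave already-disabled timeouts (None or 0.0) untouched.
            if timeout is None or timeout == 0.0:
                continue
            s.settimeout(None)
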

@@ -456,7 +456,7 @@ def _get_result(self, container, stream, res):

def _get_result_tty(self, stream, res, is_tty):
# We should also use raw streaming (without keep-alive)
# if we're dealing with a tty-enabled container.
# if we are dealing with a tty-enabled container.
if is_tty:
return self._stream_raw_result(res) if stream else \
self._result(res, binary=True)
@@ -506,7 +506,7 @@ def reload_config(self, dockercfg_path=None):
def _set_auth_headers(self, headers):
log.debug('Looking for auth config')

# If we don't have any auth data so far, try reloading the config
# If we do not have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if not self._auth_configs or self._auth_configs.is_empty:
log.debug("No auth config in memory - loading from filesystem")
4 changes: 2 additions & 2 deletions plugins/module_utils/_api/api/daemon.py
@@ -72,7 +72,7 @@ def login(self, username, password=None, email=None, registry=None,
If the server returns an error.
"""

# If we don't have any auth data so far, try reloading the config file
# If we do not have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
@@ -107,7 +107,7 @@ def login(self, username, password=None, email=None, registry=None,
def ping(self):
"""
Checks the server is responsive. An exception will be raised if it
isn't responding.
is not responding.
Returns:
(bool) The response from the server.
4 changes: 2 additions & 2 deletions plugins/module_utils/_api/auth.py
@@ -170,7 +170,7 @@ def load_config(cls, config_path, config_dict, credstore_env=None):
with open(config_file) as f:
config_dict = json.load(f)
except (IOError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# Likely missing new Docker config file or it is in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
@@ -194,7 +194,7 @@ def load_config(cls, config_path, config_dict, credstore_env=None):
return cls(res, credstore_env)

log.debug(
"Couldn't find auth-related section ; attempting to interpret "
"Could not find auth-related section ; attempting to interpret "
"as auth-only file"
)
return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
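
The fallback above, reduced to a standalone sketch (assuming the usual config.json layout, where newer files keep credentials under an "auths" key and legacy auth-only files are the credential mapping itself):

    def normalize_auth_config(config_dict):
        # Newer config.json: credentials live under "auths" (possibly next to
        # "credsStore"/"credHelpers"). Legacy auth-only files have no such
        # section, so wrap the whole document as the auths mapping.
        if any(key in config_dict for key in ("auths", "credsStore", "credHelpers")):
            return config_dict
        return {"auths": config_dict}

    print(normalize_auth_config({"https://index.docker.io/v1/": {"auth": "base64-credentials-placeholder"}}))
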
2 changes: 1 addition & 1 deletion plugins/module_utils/_api/errors.py
@@ -52,7 +52,7 @@ class APIError(_HTTPError, DockerException):
"""
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
# requests 1.1 does not
super(APIError, self).__init__(message)
self.response = response
self.explanation = explanation
2 changes: 1 addition & 1 deletion plugins/module_utils/_api/transport/npipeconn.py
@@ -106,7 +106,7 @@ def get_connection(self, url, proxies=None):

def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
# doesn't have a hostname, like is the case when using a UNIX socket.
# does not have a hostname, like is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-sdk-python/issues/811
2 changes: 1 addition & 1 deletion plugins/module_utils/_api/transport/unixconn.py
@@ -107,7 +107,7 @@ def get_connection(self, url, proxies=None):

def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
# doesn't have a hostname, like is the case when using a UNIX socket.
# does not have a hostname, like is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
2 changes: 1 addition & 1 deletion plugins/module_utils/_api/types/daemon.py
@@ -67,7 +67,7 @@ def close(self):
sock = sock_raw._sock

elif hasattr(sock_fp, 'channel'):
# We're working with a paramiko (SSH) channel, which doesn't
# We are working with a paramiko (SSH) channel, which does not
# support cancelable streams with the current implementation
raise DockerException(
'Cancellable streams not supported for the SSH protocol'
8 changes: 4 additions & 4 deletions plugins/module_utils/_api/utils/build.py
@@ -100,7 +100,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
i.mtime = int(i.mtime)

if IS_WINDOWS_PLATFORM:
# Windows doesn't keep track of the execute bit, so we make files
# Windows does not keep track of the execute bit, so we make files
# and directories executable by default.
i.mode = i.mode & 0o755 | 0o111

@@ -113,7 +113,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
'Can not read file in context: {0}'.format(full_path)
)
else:
# Directories, FIFOs, symlinks... don't need to be read.
# Directories, FIFOs, symlinks... do not need to be read.
t.addfile(i, None)

for name, contents in extra_files:
@@ -210,10 +210,10 @@ def rec_walk(current_dir):
continue

if match:
# If we want to skip this file and it's a directory
# If we want to skip this file and it is a directory
# then we should first check to see if there's an
# excludes pattern (e.g. !dir/file) that starts with this
# dir. If so then we can't skip this dir.
# dir. If so then we cannot skip this dir.
skip = True

for pat in self.patterns:
Expand Down
2 changes: 1 addition & 1 deletion plugins/module_utils/_api/utils/config.py
@@ -70,7 +70,7 @@ def load_general_config(config_path=None):
with open(config_file) as f:
return json.load(f)
except (IOError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't
# In the case of a legacy `.dockercfg` file, we will not
# be able to load any JSON data.
log.debug(e)

4 changes: 2 additions & 2 deletions plugins/module_utils/_api/utils/fnmatch.py
@@ -48,7 +48,7 @@ def fnmatch(name, pat):
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
If you do not want this, use fnmatchcase(FILENAME, PATTERN).
"""

name = name.lower()
@@ -58,7 +58,7 @@

def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
This is a version of fnmatch() which does not case-normalize
its arguments.
"""

4 changes: 2 additions & 2 deletions plugins/module_utils/_api/utils/socket.py
@@ -64,7 +64,7 @@ def read(socket, n=4096):
len(e.args) > 0 and
e.args[0] == NPIPE_ENDED)
if is_pipe_ended:
# npipes don't support duplex sockets, so we interpret
# npipes do not support duplex sockets, so we interpret
# a PIPE_ENDED error as a close operation (0-length read).
return ''
raise
@@ -73,7 +73,7 @@
def read_exactly(socket, n):
"""
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
Raises SocketError if there is not enough data
"""
data = binary_type()
while len(data) < n:
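
A condensed sketch of that exact-read loop (socket.error stands in for the SocketError used above; the buffer size is arbitrary):

    import socket

    def read_exactly(sock, n, bufsize=4096):
        # Keep reading until n bytes have accumulated; a zero-length recv means
        # the peer closed the stream before enough data arrived.
        data = b""
        while len(data) < n:
            chunk = sock.recv(min(bufsize, n - len(data)))
            if not chunk:
                raise socket.error(
                    "unexpected EOF: got %d of %d bytes" % (len(data), n))
            data += chunk
        return data
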
4 changes: 2 additions & 2 deletions plugins/module_utils/_api/utils/utils.py
@@ -160,7 +160,7 @@ def convert_volume_binds(binds):
mode = 'rw'

# NOTE: this is only relevant for Linux hosts
# (doesn't apply in Docker Desktop)
# (does not apply in Docker Desktop)
propagation_modes = [
'rshared',
'shared',
@@ -391,7 +391,7 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):

if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it's not set already then set it to false.
# so if it is not set already then set it to false.
assert_hostname = False

params['tls'] = TLSConfig(
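
A simplified sketch of the surrounding environment handling, assuming the usual DOCKER_HOST / DOCKER_TLS_VERIFY variables (the real function also builds a TLSConfig from DOCKER_CERT_PATH):

    import os

    def docker_env_hints(environ=None, assert_hostname=None):
        environ = os.environ if environ is None else environ
        host = environ.get("DOCKER_HOST")
        tls_verify = bool(environ.get("DOCKER_TLS_VERIFY"))
        # Hostname assertion is a subset of TLS verification: if verification is
        # off and the caller expressed no preference, switch assertion off too.
        if not tls_verify and assert_hostname is None:
            assert_hostname = False
        return {"host": host, "tls_verify": tls_verify, "assert_hostname": assert_hostname}

    print(docker_env_hints({"DOCKER_HOST": "unix:///var/run/docker.sock"}))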