diff --git a/src/isilon_hadoop_tools/directories.py b/src/isilon_hadoop_tools/directories.py
index 6790de9..b3bfd44 100644
--- a/src/isilon_hadoop_tools/directories.py
+++ b/src/isilon_hadoop_tools/directories.py
@@ -56,30 +56,27 @@ def create_directories(
             # and modifying /ifs can break NFS/SMB.
             raise HDFSRootDirectoryError(hdfs_root)
         assert hdfs_root.startswith(zone_root)
-        zone_hdfs = hdfs_root[len(zone_root) :]
+        zone_hdfs = posixpath.relpath(hdfs_root, start=zone_root)
         if setup:
             setup(zone_root, hdfs_root, zone_hdfs)
         for directory in directories:
-            path = posixpath.join(zone_hdfs, directory.path.lstrip(posixpath.sep))
-            LOGGER.info("mkdir '%s%s'", zone_root, path)
+            zone_path = posixpath.join(zone_hdfs, directory.path.lstrip(sep))
+            path = posixpath.join(zone_root, zone_path)
+            LOGGER.info("mkdir '%s'", path)
             try:
-                (mkdir or self.onefs.mkdir)(path, directory.mode, zone=self.onefs_zone)
+                (mkdir or self.onefs.mkdir)(
+                    zone_path, directory.mode, zone=self.onefs_zone
+                )
             except isilon_hadoop_tools.onefs.APIError as exc:
                 if exc.dir_path_already_exists_error():
-                    LOGGER.warning("%s%s already exists. ", zone_root, path)
+                    LOGGER.warning("%s already exists. ", path)
                 else:
                     raise
-            LOGGER.info("chmod '%o' '%s%s'", directory.mode, zone_root, path)
-            (chmod or self.onefs.chmod)(path, directory.mode, zone=self.onefs_zone)
-            LOGGER.info(
-                "chown '%s:%s' '%s%s'",
-                directory.owner,
-                directory.group,
-                zone_root,
-                path,
-            )
+            LOGGER.info("chmod '%o' '%s'", directory.mode, path)
+            (chmod or self.onefs.chmod)(zone_path, directory.mode, zone=self.onefs_zone)
+            LOGGER.info("chown '%s:%s' '%s'", directory.owner, directory.group, path)
             (chown or self.onefs.chown)(
-                path,
+                zone_path,
                 owner=directory.owner,
                 group=directory.group,
                 zone=self.onefs_zone,
@@ -181,7 +178,9 @@ def cdp_directories(identity_suffix=None):
         HDFSDirectory("/user/yarn/mapreduce", "hdfs", "supergroup", 0o775),
         HDFSDirectory("/user/yarn/mapreduce/mr-framework", "yarn", "hadoop", 0o775),
         HDFSDirectory("/user/yarn/services", "hdfs", "supergroup", 0o775),
-        HDFSDirectory("/user/yarn/services/service-framework", "hdfs", "supergroup", 0o775),
+        HDFSDirectory(
+            "/user/yarn/services/service-framework", "hdfs", "supergroup", 0o775
+        ),
         HDFSDirectory("/user/zeppelin", "zeppelin", "zeppelin", 0o775),
         HDFSDirectory("/warehouse", "hdfs", "supergroup", 0o775),
         HDFSDirectory("/warehouse/tablespace", "hdfs", "supergroup", 0o775),
diff --git a/src/isilon_hadoop_tools/onefs.py b/src/isilon_hadoop_tools/onefs.py
index 1c86171..a7d4bb8 100644
--- a/src/isilon_hadoop_tools/onefs.py
+++ b/src/isilon_hadoop_tools/onefs.py
@@ -1054,11 +1054,10 @@ def flush_auth_cache(self, zone=None):
                 response.raise_for_status()
             except requests.exceptions.HTTPError as exc:
                 raise NonSDKAPIError("The auth cache could not be flushed.") from exc
-            else:
-                assert bool(
-                    response.status_code
-                    == requests.codes.no_content,  # pylint: disable=no-member
-                )
+            assert bool(
+                response.status_code
+                == requests.codes.no_content,  # pylint: disable=no-member
+            )
         else:
             try:
                 self._sdk.AuthApi(self._api_client).create_auth_cache_item(
diff --git a/tox.ini b/tox.ini
index c5d1855..b0c1d52 100644
--- a/tox.ini
+++ b/tox.ini
@@ -25,9 +25,9 @@ python =
 [testenv:static]
 basepython = python3.8
 deps =
-    black ~= 23.1.0
+    black ~= 23.7.0
     flake8 ~= 6.0.0
-    pylint ~= 2.16.0
+    pylint[spelling] ~= 2.17.0
 commands =
     black --check src setup.py tests
     flake8 src setup.py tests
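
A note on the directories.py hunk above, since the behavioral difference is easy to miss: posixpath.relpath() returns a path relative to start with no leading separator, whereas the old slice hdfs_root[len(zone_root):] kept the separator. A minimal sketch of the difference, using hypothetical zone_root/hdfs_root values:

    import posixpath

    zone_root = "/ifs/zone1"         # hypothetical access-zone root
    hdfs_root = "/ifs/zone1/hadoop"  # hypothetical HDFS root inside that zone

    # Old approach: slicing keeps the leading separator.
    assert hdfs_root[len(zone_root):] == "/hadoop"

    # New approach: relpath yields a zone-relative path with no leading "/".
    assert posixpath.relpath(hdfs_root, start=zone_root) == "hadoop"

Because zone_hdfs (and hence zone_path) is now zone-relative, the patch builds the absolute path for log messages explicitly with posixpath.join(zone_root, zone_path) instead of concatenating "%s%s".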