From db6993ca7c7098bfd909a0a6d31d86294a4d90cf Mon Sep 17 00:00:00 2001
From: Ilia Kurenkov
Date: Mon, 16 Sep 2024 17:13:04 +0200
Subject: [PATCH] Remove use of 'six' package from most tests (and haproxy and
 istio) (#18593)

* Remove use of 'six' package from most tests

* fix redisdb test
---
 amazon_msk/tests/conftest.py                 |  2 +-
 clickhouse/tests/test_unit.py                |  5 +--
 consul/tests/consul_mocks.py                 |  4 +-
 datadog_checks_base/tests/test_metadata.py   |  9 +----
 datadog_checks_dev/tests/test_conditions.py  |  2 +-
 disk/tests/test_unit.py                      | 13 +++----
 dns_check/tests/mocks.py                     |  3 +-
 elastic/tests/test_integration.py            |  7 ++--
 esxi/tests/ssh_tunnel.py                     |  7 +---
 go_expvar/tests/test_integration.py          |  3 +-
 go_expvar/tests/test_unit.py                 |  5 +--
 .../datadog_checks/haproxy/legacy/haproxy.py | 39 ++++++++-----------
 hdfs_datanode/tests/test_hdfs_datanode.py    |  3 +-
 hdfs_namenode/tests/test_hdfs_namenode.py    |  7 ++--
 ibm_mq/tests/conftest.py                     |  1 -
 ibm_mq/tests/test_ibm_mq_int.py              |  5 +--
 ibm_mq/tests/test_ibm_mq_unit.py             |  5 +--
 istio/datadog_checks/istio/istio.py          | 12 +-----
 kubelet/tests/test_kubelet.py                |  3 +-
 lighttpd/tests/conftest.py                   |  4 +-
 mapr/tests/test_unit.py                      |  3 +-
 mapreduce/tests/test_unit.py                 | 25 ++++++------
 mesos_master/tests/test_check.py             |  7 ++--
 mesos_master/tests/test_integration_e2e.py   |  3 +-
 mesos_slave/tests/test_integration_e2e.py    |  3 +-
 mesos_slave/tests/test_unit.py               |  5 +--
 network/tests/common.py                      | 17 +-------
 network/tests/test_ethtool.py                | 17 ++++----
 network/tests/test_linux.py                  | 16 ++++----
 network/tests/test_network.py                |  4 --
 network/tests/test_windows.py                | 14 +++----
 openstack/tests/test_openstack.py            |  3 +-
 openstack_controller/tests/ssh_tunnel.py     |  7 +---
 postfix/tests/test_e2e.py                    |  3 +-
 postfix/tests/test_integration.py            |  3 +-
 postgres/tests/test_statements.py            |  5 +--
 postgres/tests/test_unit.py                  |  3 +-
 process/tests/test_process.py                |  3 +-
 redisdb/tests/test_unit.py                   |  7 ++--
 snmp/tests/common.py                         |  3 +-
 system_core/tests/test_system_core.py        |  3 +-
 tls/tests/conftest.py                        |  5 +--
 tls/tests/utils.py                           |  3 +-
 vault/tests/test_vault.py                    |  2 +-
 vsphere/tests/common.py                      |  2 +-
 vsphere/tests/mocked_api.py                  |  7 ++--
 vsphere/tests/test_cache.py                  |  9 ++---
 yarn/tests/conftest.py                       |  2 +-
 yarn/tests/test_yarn.py                      | 21 +++++-----
 zk/tests/conftest.py                         |  2 +-
 50 files changed, 127 insertions(+), 219 deletions(-)

diff --git a/amazon_msk/tests/conftest.py b/amazon_msk/tests/conftest.py
index 5c3e6dc629c52..1298134d93e65 100644
--- a/amazon_msk/tests/conftest.py
+++ b/amazon_msk/tests/conftest.py
@@ -2,10 +2,10 @@
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import json
+from urllib.parse import urlparse
 
 import mock
 import pytest
-from six.moves.urllib.parse import urlparse
 
 from datadog_checks.dev import docker_run
 from datadog_checks.dev.http import MockResponse
diff --git a/clickhouse/tests/test_unit.py b/clickhouse/tests/test_unit.py
index 8d191056e3e22..3734bfa6c04bc 100644
--- a/clickhouse/tests/test_unit.py
+++ b/clickhouse/tests/test_unit.py
@@ -4,7 +4,6 @@
 import mock
 import pytest
 from clickhouse_driver.errors import Error, NetworkError
-from six import PY3
 
 from datadog_checks.clickhouse import ClickhouseCheck, queries
 
@@ -65,9 +64,7 @@ def test_error_query(instance, dd_run_check):
     ids=['SystemMetrics', 'SystemEvents'],
 )
 def test_latest_metrics_supported(metrics, ignored_columns, metric_source_url):
-    # While we're here, also check key order
-    if PY3:
-        assert list(metrics) == sorted(metrics)
+    assert list(metrics) == sorted(metrics)
 
     described_metrics = parse_described_metrics(metric_source_url)
diff --git a/consul/tests/consul_mocks.py b/consul/tests/consul_mocks.py
index e9f1ad98f813d..be6cd803f221b 100644
--- a/consul/tests/consul_mocks.py
+++ b/consul/tests/consul_mocks.py
@@ -3,8 +3,6 @@
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import random
 
-from six import iteritems
-
 MOCK_CONFIG = {'url': 'http://localhost:8500', 'catalog_checks': True}
 MOCK_CONFIG_DISABLE_SERVICE_TAG = {
     'url': 'http://localhost:8500',
@@ -30,7 +28,7 @@
 
 
 def mock_check(check, mocks):
-    for f_name, m in iteritems(mocks):
+    for f_name, m in mocks.items():
         if not hasattr(check, f_name):
             continue
         else:
diff --git a/datadog_checks_base/tests/test_metadata.py b/datadog_checks_base/tests/test_metadata.py
index 727669d164fbb..f2de6cd886172 100644
--- a/datadog_checks_base/tests/test_metadata.py
+++ b/datadog_checks_base/tests/test_metadata.py
@@ -8,7 +8,6 @@
 
 import mock
 import pytest
-from six import PY3
 
 from datadog_checks.base import AgentCheck, ensure_bytes, ensure_unicode
 
@@ -57,12 +56,8 @@ class NewAgentCheck(AgentCheck):
     def test_encoding(self):
         check = AgentCheck('test', {}, [{}])
         check.check_id = 'test:123'
-        if PY3:
-            constructor = ensure_bytes
-            finalizer = ensure_unicode
-        else:
-            constructor = ensure_unicode
-            finalizer = ensure_bytes
+        constructor = ensure_bytes
+        finalizer = ensure_unicode
 
         name = constructor(u'nam\u00E9')
         value = constructor(u'valu\u00E9')
diff --git a/datadog_checks_dev/tests/test_conditions.py b/datadog_checks_dev/tests/test_conditions.py
index cc32c46bfac6a..a4e96660e7fb3 100644
--- a/datadog_checks_dev/tests/test_conditions.py
+++ b/datadog_checks_dev/tests/test_conditions.py
@@ -3,9 +3,9 @@
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import os
 import sys
+from urllib.response import addinfourl
 
 import pytest
-from six.moves.urllib.response import addinfourl
 
 from datadog_checks.dev.conditions import CheckCommandOutput, CheckDockerLogs, CheckEndpoints, WaitFor
 from datadog_checks.dev.errors import RetryError
diff --git a/disk/tests/test_unit.py b/disk/tests/test_unit.py
index fa92719850075..ed896a4d1672b 100644
--- a/disk/tests/test_unit.py
+++ b/disk/tests/test_unit.py
@@ -6,7 +6,6 @@
 
 import mock
 import pytest
-from six import iteritems
 
 from datadog_checks.base.utils.platform import Platform
 from datadog_checks.base.utils.timeout import TimeoutException
@@ -65,10 +64,10 @@ def test_default(aggregator, gauge_metrics, rate_metrics, count_metrics, dd_run_
     else:
         tags = []
 
-    for name, value in iteritems(gauge_metrics):
+    for name, value in gauge_metrics.items():
         aggregator.assert_metric(name, value=value, count=1, metric_type=aggregator.GAUGE, tags=tags)
 
-    for name, value in iteritems(rate_metrics):
+    for name, value in rate_metrics.items():
         aggregator.assert_metric(
             name,
             value=value,
@@ -77,7 +76,7 @@
             tags=['device:{}'.format(DEFAULT_DEVICE_NAME), 'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME)],
         )
 
-    for name, value in iteritems(count_metrics):
+    for name, value in count_metrics.items():
         aggregator.assert_metric(
             name,
             value=value,
@@ -110,14 +109,14 @@ def test_use_mount(aggregator, instance_basic_mount, gauge_metrics, rate_metrics
     c = Disk('disk', {}, [instance_basic_mount])
     dd_run_check(c)
 
-    for name, value in iteritems(gauge_metrics):
+    for name, value in gauge_metrics.items():
         aggregator.assert_metric(
             name,
            value=value,
             tags=['device:{}'.format(DEFAULT_MOUNT_POINT), 'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME)],
         )
 
-    for name, value in chain(iteritems(rate_metrics), iteritems(count_metrics)):
+    for name, value in chain(rate_metrics.items(), count_metrics.items()):
         aggregator.assert_metric(
             name,
             value=value,
@@ -155,7 +154,7 @@ def test_device_tagging(aggregator, gauge_metrics, rate_metrics, count_metrics,
         'device_label:mylab',
     ]
 
-    for name, value in chain(iteritems(gauge_metrics), iteritems(rate_metrics), iteritems(count_metrics)):
+    for name, value in chain(gauge_metrics.items(), rate_metrics.items(), count_metrics.items()):
         aggregator.assert_metric(
             name,
             value=value,
diff --git a/dns_check/tests/mocks.py b/dns_check/tests/mocks.py
index e4f9974fb4ceb..4d33ed376366f 100644
--- a/dns_check/tests/mocks.py
+++ b/dns_check/tests/mocks.py
@@ -3,7 +3,6 @@
 # Licensed under Simplified BSD License (see LICENSE)
 
 from dns.resolver import NXDOMAIN
-from six import PY3
 
 
 class MockDNSAnswer:
@@ -18,7 +17,7 @@ def __init__(self, address):
         else:
             items = [MockDNSAnswer.MockItem(address)]
 
-        self.items = {item: None for item in items} if PY3 else items
+        self.items = {item: None for item in items}
 
     class MockItem:
         def __init__(self, address):
diff --git a/elastic/tests/test_integration.py b/elastic/tests/test_integration.py
index c02bfb0c91e4c..77e7bba3c7263 100644
--- a/elastic/tests/test_integration.py
+++ b/elastic/tests/test_integration.py
@@ -6,7 +6,6 @@
 import pytest
 import requests
 from packaging import version
-from six import iteritems
 
 from datadog_checks.dev.utils import get_metadata_metrics
 from datadog_checks.elastic import ESCheck
@@ -276,7 +275,7 @@ def test_node_name_as_host(dd_environment, instance_normalize_hostname, aggregat
     elastic_check.check(None)
 
     node_name = node_tags[-1].split(':')[1]
-    for m_name, _ in iteritems(STATS_METRICS):
+    for m_name in STATS_METRICS:
         aggregator.assert_metric(m_name, count=1, tags=node_tags, hostname=node_name)
 
 
@@ -288,7 +287,7 @@ def test_pshard_metrics(dd_environment, aggregator):
     elastic_check.check(None)
 
     pshard_stats_metrics = pshard_stats_for_version(es_version)
-    for m_name, desc in iteritems(pshard_stats_metrics):
+    for m_name, desc in pshard_stats_metrics.items():
         if desc[0] == 'gauge':
             aggregator.assert_metric(m_name)
 
@@ -310,7 +309,7 @@ def test_detailed_index_stats(dd_environment, aggregator):
     es_version = elastic_check._get_es_version()
     elastic_check.check(None)
     pshard_stats_metrics = pshard_stats_for_version(es_version)
-    for m_name, desc in iteritems(pshard_stats_metrics):
+    for m_name, desc in pshard_stats_metrics.items():
         if desc[0] == 'gauge' and desc[1].startswith('_all.'):
             aggregator.assert_metric(m_name)
 
diff --git a/esxi/tests/ssh_tunnel.py b/esxi/tests/ssh_tunnel.py
index 75fbab232fa66..e456444087fd0 100644
--- a/esxi/tests/ssh_tunnel.py
+++ b/esxi/tests/ssh_tunnel.py
@@ -4,21 +4,16 @@
 from __future__ import absolute_import
 
 import os
+import subprocess
 from contextlib import contextmanager
 
 import psutil
-from six import PY3
 
 from datadog_checks.dev.conditions import WaitForPortListening
 from datadog_checks.dev.env import environment_run
 from datadog_checks.dev.structures import LazyFunction, TempDir
 from datadog_checks.dev.utils import ON_WINDOWS, find_free_port, get_ip
 
-if PY3:
-    import subprocess
-else:
-    import subprocess32 as subprocess
-
 
 PID_FILE = 'ssh.pid'
 
diff --git a/go_expvar/tests/test_integration.py b/go_expvar/tests/test_integration.py
index 19dbd7ae95b96..d1ba30869feb8 100644
--- a/go_expvar/tests/test_integration.py
+++ b/go_expvar/tests/test_integration.py
@@ -5,7 +5,6 @@
 import logging
 
 import pytest
-from six import iteritems
 
 from . import common
 
@@ -25,7 +24,7 @@ def test_go_expvar(check, aggregator):
         aggregator.assert_metric(gauge, count=1, tags=shared_tags)
     for rate in common.CHECK_RATES:
         aggregator.assert_metric(rate, count=1, tags=shared_tags)
-    for rate, value in iteritems(CHECK_RATES_CUSTOM):
+    for rate, value in CHECK_RATES_CUSTOM.items():
         aggregator.assert_metric(rate, count=1, value=value, tags=shared_tags)
     for count in common.CHECK_COUNT:
         aggregator.assert_metric(count, count=1, metric_type=3, tags=shared_tags)
diff --git a/go_expvar/tests/test_unit.py b/go_expvar/tests/test_unit.py
index d235da0b9aade..12047947dd59d 100644
--- a/go_expvar/tests/test_unit.py
+++ b/go_expvar/tests/test_unit.py
@@ -6,7 +6,6 @@
 import logging
 
 import pytest
-from six import iteritems
 
 from . import common
 
@@ -83,7 +82,7 @@ def test_go_expvar_mocked(go_expvar_mock, check, aggregator):
         aggregator.assert_metric(
             gauge.format(common.CHECK_NAME), metric_type=aggregator.GAUGE, count=1, tags=shared_tags
         )
-    for gauge, tags in iteritems(CHECK_GAUGES_CUSTOM_MOCK):
+    for gauge, tags in CHECK_GAUGES_CUSTOM_MOCK.items():
         aggregator.assert_metric(
             gauge.format(common.CHECK_NAME), metric_type=aggregator.GAUGE, count=1, tags=shared_tags + tags
         )
@@ -145,7 +144,7 @@ def test_go_expvar_mocked_namespace(go_expvar_mock, check, aggregator):
     for gauge in CHECK_GAUGES:
         aggregator.assert_metric(gauge.format(metric_namespace), count=1, tags=shared_tags)
 
-    for gauge, tags in iteritems(CHECK_GAUGES_CUSTOM_MOCK):
+    for gauge, tags in CHECK_GAUGES_CUSTOM_MOCK.items():
         aggregator.assert_metric(gauge.format(metric_namespace), count=1, tags=shared_tags + tags)
 
     for rate in CHECK_RATES:
diff --git a/haproxy/datadog_checks/haproxy/legacy/haproxy.py b/haproxy/datadog_checks/haproxy/legacy/haproxy.py
index 96c7a000cbdd3..f34bd9de9dd90 100644
--- a/haproxy/datadog_checks/haproxy/legacy/haproxy.py
+++ b/haproxy/datadog_checks/haproxy/legacy/haproxy.py
@@ -9,9 +9,7 @@
 import socket
 import time
 from collections import defaultdict, namedtuple
-
-from six import PY2, iteritems
-from six.moves.urllib.parse import urlparse
+from urllib.parse import urlparse
 
 from datadog_checks.base import AgentCheck, is_affirmative, to_string
 from datadog_checks.base.errors import CheckException
@@ -115,20 +113,15 @@
 
     @staticmethod
     def _decode_response(response):
-        # it only needs additional decoding in py3, so skip it if it's py2
-        if PY2:
-            return response.content.splitlines()
-        else:
-            content = response.content
-
-            # If the content is a string, it can't be decoded again
-            # But if it's bytes, it can be decoded.
-            # So, check if it has the decode method
-            decode_fn = getattr(content, "decode", None)
-            if callable(decode_fn):
-                content = content.decode('utf-8')
+        content = response.content
+        # If the content is a string, it can't be decoded again
+        # But if it's bytes, it can be decoded.
+        # So, check if it has the decode method
+        decode_fn = getattr(content, "decode", None)
+        if callable(decode_fn):
+            content = content.decode('utf-8')
 
-            return content.splitlines()
+        return content.splitlines()
 
     @staticmethod
     def _parse_uptime(uptime):
@@ -443,7 +436,7 @@ def _tag_from_regex(self, service_name):
 
         # match.groupdict() returns tags dictionary in the form of {'name': 'value'}
         # convert it to Datadog tag LIST: ['name:value']
-        return ["%s:%s" % (name, value) for name, value in iteritems(match.groupdict())]
+        return ["%s:%s" % (name, value) for name, value in match.groupdict().items()]
 
     @staticmethod
     def _normalize_status(status):
@@ -463,7 +456,7 @@ def _process_backend_hosts_metric(self, active_tag=None):
         agg_statuses = defaultdict(lambda: {status: 0 for status in Services.COLLATED_STATUSES})
         active_tag = [] if active_tag is None else active_tag
 
-        for host_status, count in iteritems(self.hosts_statuses):
+        for host_status, count in self.hosts_statuses.items():
             try:
                 service, back_or_front, hostname, status = host_status
             except ValueError:
@@ -512,7 +505,7 @@ def _process_status_metric(
                 reported_statuses_dict[reported_status] = 0
         statuses_counter = defaultdict(lambda: copy.copy(reported_statuses_dict))
 
-        for host_status, count in iteritems(self.hosts_statuses):
+        for host_status, count in self.hosts_statuses.items():
             hostname = None
             try:
                 service, _, hostname, status = host_status
@@ -555,13 +548,13 @@
                 status_key = Services.STATUS_TO_COLLATED.get(status, Services.UNAVAILABLE)
                 agg_statuses_counter[tuple(agg_tags)][status_key] += count
 
-        for tags, count_per_status in iteritems(statuses_counter):
-            for status, count in iteritems(count_per_status):
+        for tags, count_per_status in statuses_counter.items():
+            for status, count in count_per_status.items():
                 self.gauge('haproxy.count_per_status', count, tags=tags + ('status:%s' % status,))
 
         # Send aggregates
-        for service_tags, service_agg_statuses in iteritems(agg_statuses_counter):
-            for status, count in iteritems(service_agg_statuses):
+        for service_tags, service_agg_statuses in agg_statuses_counter.items():
+            for status, count in service_agg_statuses.items():
                 self.gauge("haproxy.count_per_status", count, tags=service_tags + ('status:%s' % status,))
 
     def _process_metrics(self, data, custom_tags=None, active_tag=None):
diff --git a/hdfs_datanode/tests/test_hdfs_datanode.py b/hdfs_datanode/tests/test_hdfs_datanode.py
index 7bc755316619b..a444f95c9fa45 100644
--- a/hdfs_datanode/tests/test_hdfs_datanode.py
+++ b/hdfs_datanode/tests/test_hdfs_datanode.py
@@ -3,7 +3,6 @@
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import mock
 import pytest
-from six import iteritems
 
 from datadog_checks.hdfs_datanode import HDFSDataNode
 
@@ -38,7 +37,7 @@ def test_check(aggregator, mocked_request):
         HDFSDataNode.JMX_SERVICE_CHECK, status=HDFSDataNode.OK, tags=HDFS_DATANODE_METRIC_TAGS + CUSTOM_TAGS, count=1
     )
 
-    for metric, value in iteritems(HDFS_DATANODE_METRICS_VALUES):
+    for metric, value in HDFS_DATANODE_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=HDFS_DATANODE_METRIC_TAGS + CUSTOM_TAGS, count=1)
 
 
diff --git a/hdfs_namenode/tests/test_hdfs_namenode.py b/hdfs_namenode/tests/test_hdfs_namenode.py
index 0112dabc49222..ade519bbe47f4 100644
--- a/hdfs_namenode/tests/test_hdfs_namenode.py
+++ b/hdfs_namenode/tests/test_hdfs_namenode.py
@@ -3,7 +3,6 @@
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import mock
 import pytest
-from six import iteritems
 
 from datadog_checks.hdfs_namenode import HDFSNameNode
 
@@ -34,13 +33,13 @@ def test_check(aggregator, dd_run_check, mocked_request):
         HDFSNameNode.JMX_SERVICE_CHECK, HDFSNameNode.OK, tags=HDFS_NAMESYSTEM_METRIC_TAGS + CUSTOM_TAGS, count=1
     )
 
-    for metric, value in iteritems(HDFS_NAMESYSTEM_STATE_METRICS_VALUES):
+    for metric, value in HDFS_NAMESYSTEM_STATE_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=HDFS_NAMESYSTEM_METRIC_TAGS + CUSTOM_TAGS, count=1)
 
-    for metric, value in iteritems(HDFS_NAMESYSTEM_METRICS_VALUES):
+    for metric, value in HDFS_NAMESYSTEM_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=HDFS_NAMESYSTEM_METRIC_TAGS + CUSTOM_TAGS, count=1)
 
-    for metric, value in iteritems(HDFS_NAMESYSTEM_MUTUAL_METRICS_VALUES):
+    for metric, value in HDFS_NAMESYSTEM_MUTUAL_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=HDFS_NAMESYSTEM_METRIC_TAGS + CUSTOM_TAGS, count=2)
 
     aggregator.assert_all_metrics_covered()
diff --git a/ibm_mq/tests/conftest.py b/ibm_mq/tests/conftest.py
index 8e9936f47a48e..824795fc3e70b 100644
--- a/ibm_mq/tests/conftest.py
+++ b/ibm_mq/tests/conftest.py
@@ -7,7 +7,6 @@
 import re
 
 import pytest
-from six.moves import range
 
 from datadog_checks.dev import docker_run
 from datadog_checks.dev.conditions import CheckDockerLogs, WaitFor
diff --git a/ibm_mq/tests/test_ibm_mq_int.py b/ibm_mq/tests/test_ibm_mq_int.py
index 0892df56c511e..72c2655eebc8e 100644
--- a/ibm_mq/tests/test_ibm_mq_int.py
+++ b/ibm_mq/tests/test_ibm_mq_int.py
@@ -7,7 +7,6 @@
 
 import mock
 import pytest
-from six import iteritems
 
 from datadog_checks.base import AgentCheck
 from datadog_checks.base.utils.time import ensure_aware_datetime
@@ -280,7 +279,7 @@ def test_check_channel_count(aggregator, get_check, instance_queue_regex_tag, se
         'my_channel', pymqi.CMQCFC.MQCHS_RUNNING, ["channel:my_channel"]
     )
 
-    for status, expected_value in iteritems(metrics_to_assert):
+    for status, expected_value in metrics_to_assert.items():
         aggregator.assert_metric(
             'ibm_mq.channel.count', expected_value, tags=["channel:my_channel", "status:" + status]
         )
@@ -305,7 +304,7 @@ def test_check_channel_count_status_unknown(aggregator, get_check, instance_queu
     check = get_check(instance_queue_regex_tag)
     check.channel_metric_collector._submit_channel_count('my_channel', 123, ["channel:my_channel"])
 
-    for status, expected_value in iteritems(metrics_to_assert):
+    for status, expected_value in metrics_to_assert.items():
         aggregator.assert_metric(
             'ibm_mq.channel.count', expected_value, tags=["channel:my_channel", "status:" + status]
         )
diff --git a/ibm_mq/tests/test_ibm_mq_unit.py b/ibm_mq/tests/test_ibm_mq_unit.py
index 7a62d19c199d2..ac9f4ef87437c 100644
--- a/ibm_mq/tests/test_ibm_mq_unit.py
+++ b/ibm_mq/tests/test_ibm_mq_unit.py
@@ -3,7 +3,6 @@
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import mock
 import pytest
-from six import iteritems
 
 from datadog_checks.base import AgentCheck, ConfigurationError
 from datadog_checks.dev.testing import requires_py3
@@ -37,7 +36,7 @@ def test_channel_status_service_check_default_mapping(aggregator, get_check, ins
         'my_channel', status, ["channel:my_channel_{}".format(status)]
     )
 
-    for status, service_check_status in iteritems(service_check_map):
+    for status, service_check_status in service_check_map.items():
         aggregator.assert_service_check(
             'ibm_mq.channel.status', service_check_status, tags=["channel:my_channel_{}".format(status)]
         )
@@ -80,7 +79,7 @@ def test_channel_status_service_check_custom_mapping(aggregator, get_check, inst
         'my_channel', status, ["channel:my_channel_{}".format(status)]
    )
 
-    for status, service_check_status in iteritems(service_check_map):
+    for status, service_check_status in service_check_map.items():
         aggregator.assert_service_check(
             'ibm_mq.channel.status', service_check_status, tags=["channel:my_channel_{}".format(status)]
         )
diff --git a/istio/datadog_checks/istio/istio.py b/istio/datadog_checks/istio/istio.py
index 26528d51fd0d6..5bccdbf84fc0e 100644
--- a/istio/datadog_checks/istio/istio.py
+++ b/istio/datadog_checks/istio/istio.py
@@ -1,10 +1,9 @@
 # (C) Datadog, Inc. 2018-Present
 # All rights reserved
 # Licensed under Simplified BSD License (see LICENSE)
-from six import PY2
-
 from datadog_checks.base import ConfigurationError, OpenMetricsBaseCheck, is_affirmative
 
+from .check import IstioCheckV2
 from .constants import BLACKLIST_LABELS
 from .legacy_1_4 import LegacyIstioCheck_1_4
 from .metrics import ISTIOD_METRICS
@@ -46,15 +45,6 @@
         instance = instances[0]
 
         if is_affirmative(instance.get('use_openmetrics', False)):
-            if PY2:
-                raise ConfigurationError(
-                    "Openmetrics on this integration is only available when using py3. "
-                    "Check https://docs.datadoghq.com/agent/guide/agent-v6-python-3 "
-                    "for more information"
-                )
-            # TODO: when we drop Python 2 move this import up top
-            from .check import IstioCheckV2
-
             return IstioCheckV2(name, init_config, instances)
         else:
             if instance.get('istiod_endpoint'):
diff --git a/kubelet/tests/test_kubelet.py b/kubelet/tests/test_kubelet.py
index fcc6c88b78271..4328df2671443 100644
--- a/kubelet/tests/test_kubelet.py
+++ b/kubelet/tests/test_kubelet.py
@@ -11,7 +11,6 @@
 import pytest
 import requests
 import requests_mock
-from six import iteritems
 
 from datadog_checks.base.checks.kubelet_base.base import KubeletCredentials
 from datadog_checks.base.errors import SkipInstanceError
@@ -466,7 +465,7 @@ def test_bad_config():
 
 
 def test_parse_quantity():
-    for raw, res in iteritems(QUANTITIES):
+    for raw, res in QUANTITIES.items():
         assert KubeletCheck.parse_quantity(raw) == res
 
 
diff --git a/lighttpd/tests/conftest.py b/lighttpd/tests/conftest.py
index 0338a98b7ea33..bf086334e83fe 100644
--- a/lighttpd/tests/conftest.py
+++ b/lighttpd/tests/conftest.py
@@ -2,10 +2,10 @@
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
 from copy import deepcopy
+from urllib import error
+from urllib.request import urlopen
 
 import pytest
-from six.moves.urllib import error
-from six.moves.urllib.request import urlopen
 
 from datadog_checks.dev import WaitFor, docker_run
 from datadog_checks.lighttpd import Lighttpd
diff --git a/mapr/tests/test_unit.py b/mapr/tests/test_unit.py
index a4595c6d03416..08364463296ff 100644
--- a/mapr/tests/test_unit.py
+++ b/mapr/tests/test_unit.py
@@ -2,7 +2,6 @@
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import pytest
-from six import iteritems
 
 from datadog_checks.dev.utils import get_metadata_metrics
 from datadog_checks.mapr import MaprCheck
@@ -29,7 +28,7 @@ def test_metrics_constants():
 
 @pytest.mark.unit
 def test_get_stream_id():
-    for (text, rng), value in iteritems(STREAM_ID_FIXTURE):
+    for (text, rng), value in STREAM_ID_FIXTURE.items():
         assert get_stream_id_for_topic(text, rng=rng) == value
 
 
diff --git a/mapreduce/tests/test_unit.py b/mapreduce/tests/test_unit.py
index 7f701b8c683f1..5d7c7b333086c 100644
--- a/mapreduce/tests/test_unit.py
+++ b/mapreduce/tests/test_unit.py
@@ -1,7 +1,6 @@
 # (C) Datadog, Inc. 2024-present
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
-from six import iteritems
 
 from datadog_checks.mapreduce import MapReduceCheck
 
@@ -41,19 +40,19 @@ def test_check(aggregator, dd_run_check, mocked_request):
     expected_tags = COMMON_TAGS + CLUSTER_TAGS
 
     # Check the MapReduce job metrics
-    for metric, value in iteritems(MAPREDUCE_JOB_METRIC_VALUES):
+    for metric, value in MAPREDUCE_JOB_METRIC_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=expected_tags, count=1)
 
     # Check the map task metrics
-    for metric, value in iteritems(MAPREDUCE_MAP_TASK_METRIC_VALUES):
+    for metric, value in MAPREDUCE_MAP_TASK_METRIC_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=MAPREDUCE_MAP_TASK_METRIC_TAGS + expected_tags, count=1)
 
     # Check the reduce task metrics
-    for metric, value in iteritems(MAPREDUCE_REDUCE_TASK_METRIC_VALUES):
+    for metric, value in MAPREDUCE_REDUCE_TASK_METRIC_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=MAPREDUCE_REDUCE_TASK_METRIC_TAGS + expected_tags, count=1)
 
     # Check the MapReduce job counter metrics
-    for metric, attributes in iteritems(MAPREDUCE_JOB_COUNTER_METRIC_VALUES_READ):
+    for metric, attributes in MAPREDUCE_JOB_COUNTER_METRIC_VALUES_READ.items():
         aggregator.assert_metric(
             metric,
             value=attributes["value"],
@@ -62,7 +61,7 @@
         )
 
     # Check the MapReduce job counter metrics
-    for metric, attributes in iteritems(MAPREDUCE_JOB_COUNTER_METRIC_VALUES_WRITTEN):
+    for metric, attributes in MAPREDUCE_JOB_COUNTER_METRIC_VALUES_WRITTEN.items():
         aggregator.assert_metric(
             metric,
             value=attributes["value"],
@@ -71,7 +70,7 @@
         )
 
     # Check the MapReduce job counter metrics
-    for metric, attributes in iteritems(MAPREDUCE_JOB_COUNTER_METRIC_VALUES_RECORDS):
+    for metric, attributes in MAPREDUCE_JOB_COUNTER_METRIC_VALUES_RECORDS.items():
         aggregator.assert_metric(
             metric,
             value=attributes["value"],
@@ -131,19 +130,19 @@ def test_disable_legacy_cluster_tag(aggregator, dd_run_check, mocked_request):
         expected_tags.append(MAPREDUCE_CLUSTER_TAG)
 
     # Check the MapReduce job metrics
-    for metric, value in iteritems(MAPREDUCE_JOB_METRIC_VALUES):
+    for metric, value in MAPREDUCE_JOB_METRIC_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=expected_tags, count=1)
 
     # Check the map task metrics
-    for metric, value in iteritems(MAPREDUCE_MAP_TASK_METRIC_VALUES):
+    for metric, value in MAPREDUCE_MAP_TASK_METRIC_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=MAPREDUCE_MAP_TASK_METRIC_TAGS + expected_tags, count=1)
 
     # Check the reduce task metrics
-    for metric, value in iteritems(MAPREDUCE_REDUCE_TASK_METRIC_VALUES):
+    for metric, value in MAPREDUCE_REDUCE_TASK_METRIC_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=MAPREDUCE_REDUCE_TASK_METRIC_TAGS + expected_tags, count=1)
 
     # Check the MapReduce job counter metrics
-    for metric, attributes in iteritems(MAPREDUCE_JOB_COUNTER_METRIC_VALUES_READ):
+    for metric, attributes in MAPREDUCE_JOB_COUNTER_METRIC_VALUES_READ.items():
         aggregator.assert_metric(
             metric,
             value=attributes["value"],
@@ -152,7 +151,7 @@
         )
 
     # Check the MapReduce job counter metrics
-    for metric, attributes in iteritems(MAPREDUCE_JOB_COUNTER_METRIC_VALUES_WRITTEN):
+    for metric, attributes in MAPREDUCE_JOB_COUNTER_METRIC_VALUES_WRITTEN.items():
         aggregator.assert_metric(
             metric,
             value=attributes["value"],
@@ -161,7 +160,7 @@
         )
 
     # Check the MapReduce job counter metrics
-    for metric, attributes in iteritems(MAPREDUCE_JOB_COUNTER_METRIC_VALUES_RECORDS):
+    for metric, attributes in MAPREDUCE_JOB_COUNTER_METRIC_VALUES_RECORDS.items():
         aggregator.assert_metric(
             metric,
             value=attributes["value"],
diff --git a/mesos_master/tests/test_check.py b/mesos_master/tests/test_check.py
index 4707201f31fd2..721acce7a3613 100644
--- a/mesos_master/tests/test_check.py
+++ b/mesos_master/tests/test_check.py
@@ -4,7 +4,6 @@
 import mock
 import pytest
 import requests
-from six import iteritems
 
 from datadog_checks.base import AgentCheck
 from datadog_checks.base.errors import CheckException
@@ -26,11 +25,11 @@ def test_check(check, instance, aggregator):
     ):
         metrics.update(d)
 
-    for _, v in iteritems(check.FRAMEWORK_METRICS):
+    for v in check.FRAMEWORK_METRICS.values():
         aggregator.assert_metric(v[0])
-    for _, v in iteritems(metrics):
+    for v in metrics.values():
         aggregator.assert_metric(v[0])
-    for _, v in iteritems(check.ROLE_RESOURCES_METRICS):
+    for v in check.ROLE_RESOURCES_METRICS.values():
         aggregator.assert_metric(v[0])
 
     aggregator.assert_metric('mesos.cluster.total_frameworks')
diff --git a/mesos_master/tests/test_integration_e2e.py b/mesos_master/tests/test_integration_e2e.py
index 88334b1a16ebe..482d985e8f0ed 100644
--- a/mesos_master/tests/test_integration_e2e.py
+++ b/mesos_master/tests/test_integration_e2e.py
@@ -2,7 +2,6 @@
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import pytest
-from six import iteritems
 
 from datadog_checks.mesos_master import MesosMaster
 
@@ -37,7 +36,7 @@ def assert_metric_coverage(aggregator):
         check.CLUSTER_FRAMEWORK_METRICS,
         check.STATS_METRICS,
     ):
-        for _, m in iteritems(d):
+        for m in d.values():
             metrics.append(m[0])
 
     for m in metrics:
diff --git a/mesos_slave/tests/test_integration_e2e.py b/mesos_slave/tests/test_integration_e2e.py
index 43ca56c798f27..564d06f099103 100644
--- a/mesos_slave/tests/test_integration_e2e.py
+++ b/mesos_slave/tests/test_integration_e2e.py
@@ -2,7 +2,6 @@
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import pytest
-from six import iteritems
 
 from datadog_checks.base import AgentCheck
 from datadog_checks.mesos_slave import MesosSlave
@@ -41,7 +40,7 @@ def assert_metrics_covered(aggregator):
 
     expected_tags = ["instance:mytag1", "url:{}/metrics/snapshot".format(URL), "mesos_node:slave"]
 
-    for _, v in iteritems(metrics):
+    for v in metrics.values():
         aggregator.assert_metric(v[0])
         for tag in expected_tags:
             aggregator.assert_metric_has_tag(v[0], tag)
diff --git a/mesos_slave/tests/test_unit.py b/mesos_slave/tests/test_unit.py
index 9a62af2830111..0cc7b699b6d96 100644
--- a/mesos_slave/tests/test_unit.py
+++ b/mesos_slave/tests/test_unit.py
@@ -5,7 +5,6 @@
 
 import mock
 import pytest
-from six import iteritems
 
 from datadog_checks.base import AgentCheck
 from datadog_checks.mesos_slave import MesosSlave
@@ -26,9 +25,9 @@ def test_fixtures(check, instance, aggregator):
     ):
         metrics.update(d)
 
-    for _, v in iteritems(check.TASK_METRICS):
+    for v in check.TASK_METRICS.values():
         aggregator.assert_metric(v[0])
-    for _, v in iteritems(metrics):
+    for v in metrics.values():
         aggregator.assert_metric(v[0])
 
     service_check_tags = [
diff --git a/network/tests/common.py b/network/tests/common.py
index 812e06cedf64d..eeffd58363045 100644
--- a/network/tests/common.py
+++ b/network/tests/common.py
@@ -3,8 +3,6 @@
 # Licensed under Simplified BSD License (see LICENSE)
 import os
 
-from six import PY3
-
 from datadog_checks.dev import get_here
 
 HERE = get_here()
@@ -159,16 +157,5 @@
 }
 
 
-if PY3:
-    long = int
-    ESCAPE_ENCODING = 'unicode-escape'
-
-    def decode_string(s):
-        return s.decode(ESCAPE_ENCODING)
-
-else:
-    ESCAPE_ENCODING = 'string-escape'
-
-    def decode_string(s):
-        s.decode(ESCAPE_ENCODING)
-        return s.decode("utf-8")
+def decode_string(s):
+    return s.decode('unicode-escape')
diff --git a/network/tests/test_ethtool.py b/network/tests/test_ethtool.py
index c9d314cc0ea91..3b5e8b5a446bb 100644
--- a/network/tests/test_ethtool.py
+++ b/network/tests/test_ethtool.py
@@ -9,7 +9,6 @@
 
 import mock
 import pytest
-from six import PY3, iteritems
 
 from datadog_checks.dev.utils import get_metadata_metrics
 from datadog_checks.network import ethtool
@@ -445,9 +444,9 @@
 
 def send_ethtool_ioctl_mock(iface, sckt, data):
     for input, result in common.ETHTOOL_IOCTL_INPUTS_OUTPUTS.items():
-        if input == (iface, data.tobytes() if PY3 else data.tostring()):
+        if input == (iface, data.tobytes()):
             data[:] = array.array('B', [])
-            data.frombytes(result) if PY3 else data.fromstring(result)
+            data.frombytes(result)
             return
     raise ValueError("Couldn't match any iface/data combination in the test data")
 
@@ -558,8 +557,8 @@ def test_submit_ena_ethtool_metrics(is_linux, is_bsd, send_ethtool_ioctl, check,
     send_ethtool_ioctl.side_effect = send_ethtool_ioctl_mock
     check_instance._handle_ethtool_stats('eth0', [])
 
-    for tag, metrics in iteritems(ENA_ETHTOOL_VALUES):
-        for metric_suffix, value in iteritems(metrics):
+    for tag, metrics in ENA_ETHTOOL_VALUES.items():
+        for metric_suffix, value in metrics.items():
             aggregator.assert_metric(
                 'system.net.' + metric_suffix,
                 count=1,
@@ -581,8 +580,8 @@ def test_submit_hv_netvsc_ethtool_metrics(is_linux, is_bsd, send_ethtool_ioctl,
     send_ethtool_ioctl.side_effect = send_ethtool_ioctl_mock
     check_instance._handle_ethtool_stats('hv_netvsc', [])
 
-    for tag, metrics in iteritems(HV_NETVSC_ETHTOOL_VALUES):
-        for metric_suffix, value in iteritems(metrics):
+    for tag, metrics in HV_NETVSC_ETHTOOL_VALUES.items():
+        for metric_suffix, value in metrics.items():
             aggregator.assert_metric(
                 'system.net.' + metric_suffix,
                 count=1,
@@ -604,8 +603,8 @@ def test_submit_gve_ethtool_metrics(is_linux, is_bsd, send_ethtool_ioctl, check,
     send_ethtool_ioctl.side_effect = send_ethtool_ioctl_mock
     check_instance._handle_ethtool_stats('gve', [])
 
-    for tag, metrics in iteritems(GVE_ETHTOOL_VALUES):
-        for metric_suffix, value in iteritems(metrics):
+    for tag, metrics in GVE_ETHTOOL_VALUES.items():
+        for metric_suffix, value in metrics.items():
             aggregator.assert_metric(
                 'system.net.' + metric_suffix,
                 count=1,
diff --git a/network/tests/test_linux.py b/network/tests/test_linux.py
index aec259dfb9a98..7ebd5e148daf6 100644
--- a/network/tests/test_linux.py
+++ b/network/tests/test_linux.py
@@ -7,7 +7,6 @@
 
 import mock
 import pytest
-from six import PY3, iteritems
 
 from datadog_checks.base.utils.platform import Platform
 from datadog_checks.base.utils.subprocess_output import get_subprocess_output
@@ -221,13 +220,13 @@
     with mock.patch('datadog_checks.network.check_linux.get_subprocess_output') as out:
         out.side_effect = ss_subprocess_mock
         check_instance.check(instance)
-        for metric, value in iteritems(CX_STATE_GAUGES_VALUES):
+        for metric, value in CX_STATE_GAUGES_VALUES.items():
            aggregator.assert_metric(metric, value=value)
 
         aggregator.reset()
         out.side_effect = netstat_subprocess_mock
         check_instance.check(instance)
-        for metric, value in iteritems(CX_STATE_GAUGES_VALUES):
+        for metric, value in CX_STATE_GAUGES_VALUES.items():
            aggregator.assert_metric(metric, value=value)
 
     aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@@ -242,7 +241,7 @@
 
     check_instance.check({})
 
-    for metric, value in iteritems(LINUX_SYS_NET_STATS):
+    for metric, value in LINUX_SYS_NET_STATS.items():
         aggregator.assert_metric(metric, value=value[0], tags=['iface:lo'])
         aggregator.assert_metric(metric, value=value[1], tags=['iface:ens5'])
 
@@ -259,13 +258,13 @@
         check_instance.get_net_proc_base_location = lambda x: FIXTURE_DIR
         check_instance.check({})
 
-        for metric, value in iteritems(CX_STATE_GAUGES_VALUES):
+        for metric, value in CX_STATE_GAUGES_VALUES.items():
             aggregator.assert_metric(metric, value=value)
 
         aggregator.reset()
         out.side_effect = netstat_subprocess_mock
         check_instance.check({})
-        for metric, value in iteritems(CX_STATE_GAUGES_VALUES):
+        for metric, value in CX_STATE_GAUGES_VALUES.items():
             aggregator.assert_metric(metric, value=value)
 
     aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@@ -283,14 +282,13 @@
         subprocess.return_value = mocked_conntrack_stats, None, None
         check_instance._add_conntrack_stats_metrics(None, None, ['foo:bar'])
 
-    for metric, value in iteritems(CONNTRACK_STATS):
+    for metric, value in CONNTRACK_STATS.items():
         aggregator.assert_metric(metric, value=value[0], tags=['foo:bar', 'cpu:0'])
         aggregator.assert_metric(metric, value=value[1], tags=['foo:bar', 'cpu:1'])
 
     aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
 
 
-@pytest.mark.skipif(not PY3, reason="mock builtins only works on Python 3")
 def test_proc_permissions_error(aggregator, caplog):
     instance = copy.deepcopy(common.INSTANCE)
     instance['collect_connection_state'] = False
@@ -330,7 +328,7 @@
     check_instance.get_net_proc_base_location = lambda x: FIXTURE_DIR
     check_instance.check({})
 
-    for metric, value in iteritems(PROC_NET_STATS):
+    for metric, value in PROC_NET_STATS.items():
         aggregator.assert_metric(metric, value=value)
 
     aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
diff --git a/network/tests/test_network.py b/network/tests/test_network.py
index 5061413fe763c..c27fe30b3171d 100644
--- a/network/tests/test_network.py
+++ b/network/tests/test_network.py
@@ -5,15 +5,11 @@
 
 import mock
 import pytest
-from six import PY3
 
 from datadog_checks.dev import EnvVars
 
 from . import common
 
-if PY3:
-    long = int
-
 
 @pytest.mark.parametrize(
     "proc_location, envs, expected_net_proc_base_location",
diff --git a/network/tests/test_windows.py b/network/tests/test_windows.py
index 8762ac25cc66e..88c448c99ab25 100644
--- a/network/tests/test_windows.py
+++ b/network/tests/test_windows.py
@@ -16,15 +16,11 @@
 from collections import namedtuple
 
 import mock
-from six import PY3, iteritems
 
 from datadog_checks.network.check_windows import TCPSTATS, WindowsNetwork
 
 from . import common
 
-if PY3:
-    long = int
-
 
 @mock.patch('datadog_checks.network.network.Platform.is_linux', return_value=False)
 @mock.patch('datadog_checks.network.network.Platform.is_bsd', return_value=False)
@@ -107,7 +103,7 @@ def test_get_tcp_stats(aggregator):
     with mock.patch('datadog_checks.network.check_windows.WindowsNetwork._get_tcp_stats') as mock_get_tcp_stats:
         mock_get_tcp_stats.return_value = mock_stats  # Make _get_tcp_stats return my mock object
         check_instance.check({})
-    for name, value in iteritems(expected_mets):
+    for name, value in expected_mets.items():
         aggregator.assert_metric(name, value=value)
 
 
@@ -230,7 +226,7 @@ def test_cx_state_psutil(aggregator):
         mock_psutil.net_connections.return_value = conn
         check_instance._setup_metrics({})
         check_instance._cx_state_psutil()
-        for _, m in iteritems(aggregator._metrics):
+        for m in aggregator._metrics.values():
             assert results[m[0].name] == m[0].value
 
 
@@ -240,8 +236,8 @@
     )
     counters = {
         'Ethernet': snetio(
-            bytes_sent=long(3096403230),
-            bytes_recv=long(3280598526),
+            bytes_sent=int(3096403230),
+            bytes_recv=int(3280598526),
             packets_sent=6777924,
             packets_recv=32888147,
             errin=0,
@@ -262,7 +258,7 @@
     with mock.patch('datadog_checks.network.check_windows.psutil') as mock_psutil:
         mock_psutil.net_io_counters.return_value = counters
         check_instance._cx_counters_psutil()
-        for _, m in iteritems(aggregator._metrics):
+        for m in aggregator._metrics.values():
             assert 'device:Ethernet' in m[0].tags
             if 'bytes_rcvd' in m[0].name:
                 assert m[0].value == 3280598526
diff --git a/openstack/tests/test_openstack.py b/openstack/tests/test_openstack.py
index b25c5d49cc2a2..b44a1dea155b4 100644
--- a/openstack/tests/test_openstack.py
+++ b/openstack/tests/test_openstack.py
@@ -7,7 +7,6 @@
 
 import mock
 import pytest
-from six import iteritems
 
 from datadog_checks.base import AgentCheck
 from datadog_checks.dev.testing import requires_py3
@@ -139,7 +138,7 @@ def test_unscoped_from_config():
     assert scope.auth_token == 'fake_token'
     assert len(scope.project_scope_map) == 1
 
-    for _, project_scope in iteritems(scope.project_scope_map):
+    for project_scope in scope.project_scope_map.values():
         assert isinstance(project_scope, OpenStackProjectScope)
         assert project_scope.auth_token == 'fake_token'
         assert project_scope.tenant_id == '263fd9'
diff --git a/openstack_controller/tests/ssh_tunnel.py b/openstack_controller/tests/ssh_tunnel.py
index 1f9b3a0f359c0..38639a1bc8163 100644
--- a/openstack_controller/tests/ssh_tunnel.py
+++ b/openstack_controller/tests/ssh_tunnel.py
@@ -4,21 +4,16 @@
 from __future__ import absolute_import
 
 import os
+import subprocess
 from contextlib import contextmanager
 
 import psutil
-from six import PY3
 
 from datadog_checks.dev.conditions import WaitForPortListening
 from datadog_checks.dev.env import environment_run
 from datadog_checks.dev.structures import LazyFunction, TempDir
 from datadog_checks.dev.utils import ON_WINDOWS, find_free_port, get_ip
 
-if PY3:
-    import subprocess
-else:
-    import subprocess32 as subprocess
-
 
 PID_FILE = 'ssh.pid'
 
diff --git a/postfix/tests/test_e2e.py b/postfix/tests/test_e2e.py
index 3f0aa9e94b7e9..0d890175895ce 100644
--- a/postfix/tests/test_e2e.py
+++ b/postfix/tests/test_e2e.py
@@ -2,7 +2,6 @@
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import pytest
-from six import iteritems
 
 from .common import get_e2e_instance, get_e2e_instance_postqueue, get_queue_counts
 
@@ -11,7 +10,7 @@
 def test_check_default(dd_agent_check):
     aggregator = dd_agent_check(get_e2e_instance())
 
-    for queue, count in iteritems(get_queue_counts()):
+    for queue, count in get_queue_counts().items():
         tags = ['instance:postfix_data', 'queue:{}'.format(queue)]
         aggregator.assert_metric('postfix.queue.size', value=count[0], tags=tags)
 
diff --git a/postfix/tests/test_integration.py b/postfix/tests/test_integration.py
index 8e808df1036a3..a797ad3a34fd9 100644
--- a/postfix/tests/test_integration.py
+++ b/postfix/tests/test_integration.py
@@ -2,7 +2,6 @@
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import pytest
-from six import iteritems
 
 from datadog_checks.postfix import PostfixCheck
 
@@ -15,6 +14,6 @@ def test_check(aggregator):
     check = PostfixCheck('postfix', {}, [instance])
     check.check(instance)
 
-    for queue, count in iteritems(get_queue_counts()):
+    for queue, count in get_queue_counts().items():
         tags = ['instance:postfix_data', 'queue:{}'.format(queue)]
         aggregator.assert_metric('postfix.queue.size', value=count[0], tags=tags)
diff --git a/postgres/tests/test_statements.py b/postgres/tests/test_statements.py
index 66c5c774a5cb8..f486cbf57f3ba 100644
--- a/postgres/tests/test_statements.py
+++ b/postgres/tests/test_statements.py
@@ -13,7 +13,6 @@
 import pytest
 from dateutil import parser
 from semver import VersionInfo
-from six import string_types
 
 from datadog_checks.base.utils.db.sql import compute_sql_signature
 from datadog_checks.base.utils.db.utils import DBMAsyncJob
@@ -798,7 +797,7 @@ def test_statement_samples_collect(
 
     dbm_samples = aggregator.get_event_platform_events("dbm-samples")
 
-    expected_query = query % ('\'' + arg + '\'' if isinstance(arg, string_types) else arg)
+    expected_query = query % ('\'' + arg + '\'' if isinstance(arg, str) else arg)
 
     # Find matching events by checking if the expected query starts with the event statement. Using this
     # instead of a direct equality check covers cases of truncated statements
@@ -1479,7 +1478,7 @@ def test_statement_samples_dbstrict(aggregator, integration_check, dbm_instance,
     dbm_samples = aggregator.get_event_platform_events("dbm-samples")
 
     for _, _, dbname, query, arg in SAMPLE_QUERIES:
-        expected_query = query % ('\'' + arg + '\'' if isinstance(arg, string_types) else arg)
+        expected_query = query % ('\'' + arg + '\'' if isinstance(arg, str) else arg)
         matching = [e for e in dbm_samples if e['db']['statement'] == expected_query]
         if not dbstrict or dbname == dbm_instance['dbname']:
             # when dbstrict=True we expect to only capture those queries for the initial database to which the
diff --git a/postgres/tests/test_unit.py b/postgres/tests/test_unit.py
index 8143c6b46a5c5..1fa3164fa1f22 100644
--- a/postgres/tests/test_unit.py
+++ b/postgres/tests/test_unit.py
@@ -8,7 +8,6 @@
 import pytest
 from pytest import fail
 from semver import VersionInfo
-from six import iteritems
 
 from datadog_checks.postgres import PostgreSql, util
 
@@ -100,7 +99,7 @@ def test_version_metadata(check, test_case, params):
     check.check_id = 'test:123'
     with mock.patch('datadog_checks.base.stubs.datadog_agent.set_check_metadata') as m:
         check.set_metadata('version', test_case)
-        for name, value in iteritems(params):
+        for name, value in params.items():
             m.assert_any_call('test:123', name, value)
         m.assert_any_call('test:123', 'version.scheme', 'semver')
         m.assert_any_call('test:123', 'version.raw', test_case)
diff --git a/process/tests/test_process.py b/process/tests/test_process.py
index a1dbd67184c13..8eed74076d967 100644
--- a/process/tests/test_process.py
+++ b/process/tests/test_process.py
@@ -7,7 +7,6 @@
 import psutil
 import pytest
 from mock import patch
-from six import iteritems
 
 from datadog_checks.process import ProcessCheck
 
@@ -329,7 +328,7 @@ def test_relocated_procfs(aggregator, dd_run_check):
     my_procfs = tempfile.mkdtemp()
 
     def _fake_procfs(arg, root=my_procfs):
-        for key, val in iteritems(arg):
+        for key, val in arg.items():
             path = os.path.join(root, key)
             if isinstance(val, dict):
                 os.mkdir(path)
diff --git a/redisdb/tests/test_unit.py b/redisdb/tests/test_unit.py
index 2666d2edb8a5e..83c9666812709 100644
--- a/redisdb/tests/test_unit.py
+++ b/redisdb/tests/test_unit.py
@@ -3,7 +3,6 @@
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import mock
 import pytest
-from six import iteritems
 
 from datadog_checks.dev.utils import get_metadata_metrics
 
@@ -22,18 +21,18 @@
 
     # create a connection
     check._get_conn(instance)
-    key1, conn1 = next(iteritems(check.connections))
+    key1, conn1 = next(iter(check.connections.items()))
 
     # assert connection is cached
     check._get_conn(instance)
-    key2, conn2 = next(iteritems(check.connections))
+    key2, conn2 = next(iter(check.connections.items()))
     assert key2 == key1
     assert conn2 == conn1
 
     # disable cache and assert connection has changed
     instance['disable_connection_cache'] = True
     check._get_conn(instance)
-    key2, conn2 = next(iteritems(check.connections))
+    key2, conn2 = next(iter(check.connections.items()))
     assert key2 == key1
     assert conn2 != conn1
 
diff --git a/snmp/tests/common.py b/snmp/tests/common.py
index 289f7e4f19a0e..bb7073a6af2b8 100644
--- a/snmp/tests/common.py
+++ b/snmp/tests/common.py
@@ -11,7 +11,6 @@
 from collections import defaultdict
 
 import pytest
-from six import iteritems
 
 from datadog_checks.base.stubs.aggregator import AggregatorStub
 from datadog_checks.base.utils.common import get_docker_hostname, to_native_string
@@ -336,7 +335,7 @@ def dd_agent_check_wrapper(dd_agent_check, *args, **kwargs):
     """
     aggregator = dd_agent_check(*args, **kwargs)
     new_agg_metrics = defaultdict(list)
-    for metric_name, metric_list in iteritems(aggregator._metrics):
+    for metric_name, metric_list in aggregator._metrics.items():
         new_metrics = []
         for metric in metric_list:
             # metric is a Namedtuple, to modify namedtuple fields we need to use `._replace()`
diff --git a/system_core/tests/test_system_core.py b/system_core/tests/test_system_core.py
index 64a259b60e9ab..fcee3f0da6302 100644
--- a/system_core/tests/test_system_core.py
+++ b/system_core/tests/test_system_core.py
@@ -4,7 +4,6 @@
 from collections import defaultdict
 
 import mock
-from six import iteritems
 
 from datadog_checks.base.utils.platform import Platform
 from datadog_checks.system_core import SystemCore
@@ -57,7 +56,7 @@ def fake_cpu_times(percpu=False):
         sum_dict = defaultdict(float)
 
         for cputimes in common.MOCK_PSUTIL_CPU_TIMES:
-            for key, value in iteritems(cputimes._asdict()):
+            for key, value in cputimes._asdict().items():
                 sum_dict[key] += value / len(common.MOCK_PSUTIL_CPU_TIMES)
 
         return common.MOCK_PSUTIL_CPU_TIMES[0].__class__(**sum_dict)
diff --git a/tls/tests/conftest.py b/tls/tests/conftest.py
index a657e7fb3e986..12483f28b2dab 100644
--- a/tls/tests/conftest.py
+++ b/tls/tests/conftest.py
@@ -5,7 +5,6 @@
 
 import pytest
 from datadog_test_libs.utils.mock_dns import mock_local
-from six import iteritems
 
 from datadog_checks.dev import TempDir, docker_run
 from datadog_checks.tls.utils import days_to_seconds
@@ -51,11 +50,11 @@ def certs(dd_environment):
     }
     certs = {}
     with TempDir('certs') as tmp_dir:
-        for address, name in iteritems(downloads):
+        for address, name in downloads.items():
             filepath = os.path.join(tmp_dir, name)
             download_cert(filepath, address)
             certs[name] = filepath
-        for address, name in iteritems(raw_downloads):
+        for address, name in raw_downloads.items():
             filepath = os.path.join(tmp_dir, name)
             certs[name] = download_cert(filepath, address, raw=True)
             certs[name] = filepath
diff --git a/tls/tests/utils.py b/tls/tests/utils.py
index 91ef75cf8f97d..6aa4702e28112 100644
--- a/tls/tests/utils.py
+++ b/tls/tests/utils.py
@@ -6,8 +6,7 @@
 import ssl
 import time
 from contextlib import contextmanager
-
-from six.moves.urllib.parse import urlparse
+from urllib.parse import urlparse
 
 from datadog_checks.dev import TempDir
 from datadog_checks.tls.utils import closing
diff --git a/vault/tests/test_vault.py b/vault/tests/test_vault.py
index a262414801478..81c37dceff840 100644
--- a/vault/tests/test_vault.py
+++ b/vault/tests/test_vault.py
@@ -2,11 +2,11 @@
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
 import re
+from urllib.parse import urlparse
 
 import mock
 import pytest
 import requests
-from six.moves.urllib.parse import urlparse
 
 from datadog_checks.dev.http import MockResponse
 from datadog_checks.vault import Vault
diff --git a/vsphere/tests/common.py b/vsphere/tests/common.py
index 4f9390cb82f00..562b990e34dbb 100644
--- a/vsphere/tests/common.py
+++ b/vsphere/tests/common.py
@@ -3,10 +3,10 @@
 # Licensed under Simplified BSD License (see LICENSE)
 import os
 import re
+from urllib.parse import urlparse
 
 import mock
 from pyVmomi import vim, vmodl
-from six.moves.urllib.parse import urlparse
 
 from datadog_checks.base.utils.time import get_current_datetime
 from datadog_checks.dev.http import MockResponse
diff --git a/vsphere/tests/mocked_api.py b/vsphere/tests/mocked_api.py
index d193ccc480f58..dcd053fe78b96 100644
--- a/vsphere/tests/mocked_api.py
+++ b/vsphere/tests/mocked_api.py
@@ -9,7 +9,6 @@
 from mock import MagicMock
 from pyVmomi import vim
 from requests import Response
-from six import iteritems
 
 from datadog_checks.vsphere.api import VersionInfo
 from tests.common import HERE, VSPHERE_VERSION
@@ -57,7 +56,7 @@ def recursive_parse_topology(self, subtree, parent=None):
             self.infrastructure_data[current_mor]['guest.hostName'] = subtree['guest.hostName']
         if self.config.should_collect_attributes and 'customValue' in subtree:
             mor_attr = []
-            for key_name, value in iteritems(subtree['customValue']):
+            for key_name, value in subtree['customValue'].items():
                 mor_attr.append('{}{}:{}'.format(self.config.attr_prefix, key_name, value))
             self.infrastructure_data[current_mor]['attributes'] = mor_attr
 
@@ -68,9 +67,9 @@ def recursive_parse_topology(self, subtree, parent=None):
             return
 
        # Resolve the runtime.host_moId into pointers to the mocked mors.
-        for _, props in iteritems(self.infrastructure_data):
+        for props in self.infrastructure_data.values():
             if 'runtime.host_moid' in props:
-                hosts = [m for m, p in iteritems(self.infrastructure_data) if p['name'] == props['runtime.host_moid']]
+                hosts = [m for m, p in self.infrastructure_data.items() if p['name'] == props['runtime.host_moid']]
                 props['runtime.host'] = hosts[0] if hosts else object()
                 del props['runtime.host_moid']
 
diff --git a/vsphere/tests/test_cache.py b/vsphere/tests/test_cache.py
index 64155a06271a9..c74a2a5bf413b 100644
--- a/vsphere/tests/test_cache.py
+++ b/vsphere/tests/test_cache.py
@@ -6,7 +6,6 @@
 import pytest
 from mock import MagicMock, patch
 from pyVmomi import vim
-from six import iteritems
 
 from datadog_checks.vsphere.cache import InfrastructureCache, MetricsMetadataCache, VSphereCache
 from datadog_checks.vsphere.config import VSphereConfig
@@ -68,10 +67,10 @@ def test_metrics_metadata_cache():
     data = {k: object() for k in ALL_RESOURCES_WITH_METRICS}
 
     with cache.update():
-        for k, v in iteritems(data):
+        for k, v in data.items():
             cache.set_metadata(k, v)
 
-    for k, v in iteritems(data):
+    for k, v in data.items():
         assert cache.get_metadata(k) == v
 
 
@@ -83,14 +82,14 @@ def test_infrastructure_cache(realtime_instance):
     mors = {MagicMock(spec=k, _moId="foo"): object() for k in ALL_RESOURCES_WITH_METRICS * 2}
 
     with cache.update():
-        for k, v in iteritems(mors):
+        for k, v in mors.items():
             cache.set_mor_props(k, v)
         cache.set_all_tags(mock_api.get_resource_tags_for_mors(mors))
 
     for r in ALL_RESOURCES_WITH_METRICS:
         assert len(list(cache.get_mors(r))) == 2
 
-    for k, v in iteritems(mors):
+    for k, v in mors.items():
         assert cache.get_mor_props(k) == v
 
     vm_mor = vim.VirtualMachine(moId='VM4-4-1')
diff --git a/yarn/tests/conftest.py b/yarn/tests/conftest.py
index dedeb382b67f0..ac6efe6588ee8 100644
--- a/yarn/tests/conftest.py
+++ b/yarn/tests/conftest.py
@@ -4,11 +4,11 @@
 import os
 from copy import deepcopy
+from urllib.parse import urljoin
 
 import pytest
 from mock import patch
 from requests.exceptions import SSLError
-from six.moves.urllib.parse import urljoin
 
 from datadog_checks.dev import docker_run
 from datadog_checks.dev.conditions import CheckEndpoints
 
diff --git a/yarn/tests/test_yarn.py b/yarn/tests/test_yarn.py
index 63686ee4213c5..11eee87969ded 100644
--- a/yarn/tests/test_yarn.py
+++ b/yarn/tests/test_yarn.py
@@ -7,7 +7,6 @@
 
 import pytest
 from requests.exceptions import SSLError
-from six import iteritems
 
 from datadog_checks.yarn import YarnCheck
 from datadog_checks.yarn.yarn import (
@@ -84,25 +83,25 @@
     )
 
     # Check the YARN Cluster Metrics
-    for metric, value in iteritems(YARN_CLUSTER_METRICS_VALUES):
+    for metric, value in YARN_CLUSTER_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=EXPECTED_TAGS, count=1)
 
     # Check the YARN App Metrics
-    for metric, value in iteritems(YARN_APP_METRICS_VALUES):
+    for metric, value in YARN_APP_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=YARN_APP_METRICS_TAGS + CUSTOM_TAGS, count=1)
 
-    for metric, value in iteritems(DEPRECATED_YARN_APP_METRICS_VALUES):
+    for metric, value in DEPRECATED_YARN_APP_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=YARN_APP_METRICS_TAGS + CUSTOM_TAGS, count=1)
 
     # Check the YARN Node Metrics
-    for metric, value in iteritems(YARN_NODE_METRICS_VALUES):
+    for metric, value in YARN_NODE_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=YARN_NODE_METRICS_TAGS + CUSTOM_TAGS, count=1)
 
     # Check the YARN Root Queue Metrics
-    for metric, value in iteritems(YARN_ROOT_QUEUE_METRICS_VALUES):
+    for metric, value in YARN_ROOT_QUEUE_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=YARN_ROOT_QUEUE_METRICS_TAGS + CUSTOM_TAGS, count=1)
 
     # Check the YARN Custom Queue Metrics
-    for metric, value in iteritems(YARN_QUEUE_METRICS_VALUES):
+    for metric, value in YARN_QUEUE_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=YARN_QUEUE_METRICS_TAGS + CUSTOM_TAGS, count=1)
 
     # Check the YARN Queue Metrics from excluded queues are absent
@@ -110,7 +109,7 @@
         aggregator.assert_metric(metric, tags=YARN_QUEUE_NOFOLLOW_METRICS_TAGS + CUSTOM_TAGS, count=0)
 
     # Check the YARN Subqueue Metrics
-    for metric, value in iteritems(YARN_SUBQUEUE_METRICS_VALUES):
+    for metric, value in YARN_SUBQUEUE_METRICS_VALUES.items():
         aggregator.assert_metric(metric, value=value, tags=YARN_SUBQUEUE_METRICS_TAGS + CUSTOM_TAGS, count=1)
 
     aggregator.assert_all_metrics_covered()
@@ -329,7 +328,7 @@ def test_collect_apps_all_states(dd_run_check, aggregator, mocked_request):
     dd_run_check(yarn)
 
     for app in YARN_APPS_ALL_STATES:
-        for metric, value in iteritems(app['metric_values']):
+        for metric, value in app['metric_values'].items():
             aggregator.assert_metric(metric, value=value, tags=app['tags'] + EXPECTED_TAGS, count=1)
 
 
@@ -351,7 +350,7 @@ def test_collect_apps_states_list(dd_run_check, aggregator, mocked_request, conf
     state_tag_re = re.compile(r'state:.*')
 
     for app in YARN_APPS_ALL_STATES:
-        for metric, value in iteritems(app['metric_values']):
+        for metric, value in app['metric_values'].items():
             m = re.search(state_tag_re, app['tags'][2])
             if m:
                 state_tag = m.group(0)
@@ -368,7 +367,7 @@ def test_collect_apps_killed_instance_state(dd_run_check, aggregator, mocked_req
     dd_run_check(yarn)
 
     for app in YARN_APPS_ALL_STATES:
-        for metric, value in iteritems(app['metric_values']):
+        for metric, value in app['metric_values'].items():
             if app['tags'] == "KILLED":
                 aggregator.assert_metric(metric, value=value, tags=app['tags'] + EXPECTED_TAGS, count=1)
             else:
diff --git a/zk/tests/conftest.py b/zk/tests/conftest.py
index 929e7f781b764..9cf0fa6fe6f1a 100644
--- a/zk/tests/conftest.py
+++ b/zk/tests/conftest.py
@@ -5,9 +5,9 @@
 import sys
 import time
 from copy import deepcopy
+from io import StringIO
 
 import pytest
-from six import StringIO
 
 from datadog_checks.base.utils.common import get_docker_hostname
 from datadog_checks.dev import RetryError, docker_run
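
Every change in this patch applies the same mechanical six-to-Python-3 migration. As a summary, the following is a minimal, hypothetical sketch (not taken from any file in the diff above; the sample dict, URL, and strings are illustrative only) of each six idiom and its replacement:

import subprocess  # was guarded by `if PY3:` with a `subprocess32` fallback
import sys
from io import StringIO            # was: from six import StringIO
from urllib.parse import urlparse  # was: from six.moves.urllib.parse import urlparse

d = {'a': 1, 'b': 2}

# six.iteritems(d) / six.itervalues(d) become the plain dict methods
for key, value in d.items():
    print(key, value)

# six.string_types becomes the built-in str
assert isinstance('hello', str)

# PY2/PY3 branches collapse to the Python 3 arm unconditionally
print(urlparse('http://localhost:8500').netloc)
print(StringIO('buffered').read())
assert subprocess.call([sys.executable, '-c', 'pass']) == 0

The same pattern explains the diffstat: nearly every deletion is a `six` import or a PY2 branch, and nearly every insertion is the equivalent standard-library call.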