From 217211a34187a32afd8ac84080aad18ba301435f Mon Sep 17 00:00:00 2001 From: Praveen K Pandey Date: Tue, 29 Nov 2022 10:51:48 +0530 Subject: [PATCH] Fixed style issue Fixed pylint issue across misc test repo Signed-off-by: Praveen K Pandey --- cpu/cpupower_monitor.py | 32 ++++++++++++++------------ cpu/linsched.py | 3 ++- cpu/producer_consumer.py | 3 ++- dlpar/dlpar_main.py | 2 +- fs/blktests.py | 6 +++-- fs/xfstests.py | 3 ++- generic/interbench.py | 6 +++-- io/common/distro_tools.py | 3 ++- io/common/virtual_bind_unbind.py | 3 ++- io/disk/disk_info.py | 15 ++++++++---- io/disk/iozone.py | 3 ++- io/disk/lvsetup.py | 1 + io/disk/multipath_test.py | 4 ++-- io/disk/port_bounce.py | 1 + io/driver/module_unload_load.py | 1 + io/net/bonding.py | 31 +++++++++++++++---------- io/net/ethtool_test.py | 18 +++++++-------- io/net/htx_nic_devices.py | 34 ++++++++++++++++++---------- io/net/iperf_test.py | 8 +++---- io/net/multiport_stress.py | 12 ++++++---- io/net/net_tools.py | 6 +++-- io/net/network_test.py | 12 ++++++---- io/net/switch_test.py | 3 ++- io/net/tcpdump.py | 18 ++++++++++----- io/net/uperf_test.py | 9 +++++--- io/pci/pci_info_lscfg.py | 31 ++++++++++++------------- memory/eatmemory.py | 6 +++-- memory/libhugetlbfs.py | 3 ++- memory/page_table.py | 15 ++++++------ nx_gzip/nx_gzip.py | 14 ++++++------ perf/compilebench.py | 5 ++-- perf/hackbench.py | 3 ++- perf/perf_24x7_hardware_counters.py | 9 +++++--- perf/perf_c2c.py | 9 +++++--- perf/perf_core_imc_non_zero_event.py | 3 ++- perf/perf_events_test.py | 3 ++- perf/perf_genericevents.py | 10 ++++---- perf/perf_invalid_flag_test.py | 3 ++- perf/perf_metric.py | 2 +- perf/perf_nmem.py | 5 ++-- perf/perf_pmu.py | 18 ++++++++++----- perf/perf_sched.py | 6 +++-- perf/perf_script_bug.py | 5 ++-- perf/perf_sdt_probe.py | 3 ++- perf/tbench.py | 4 ++-- ras/sosreport.py | 9 ++++---- ras/supportconfig.py | 16 ++++++++----- security/annobin-tests.py | 1 + security/audit-tests.py | 2 +- security/evmctl-tests.py | 7 ++++-- security/ima-evm-utils-tests.py | 4 +++- security/ima-modsig-tests.py | 1 + security/kernel-hardening-tests.py | 1 + security/keyutils-tests.py | 1 + security/krb5-tests.py | 1 + security/libkmip-tests.py | 1 + security/openssh-tests.py | 1 + security/openssl-tests.py | 1 + security/pam-tests.py | 1 + security/secvarctl-tests.py | 1 + security/selinux-tests.py | 3 ++- security/vTPM-tests.py | 1 + security/xmlsec-tests.py | 1 + toolchain/binutils.py | 6 +++-- toolchain/papiTest.py | 2 +- 65 files changed, 281 insertions(+), 174 deletions(-) diff --git a/cpu/cpupower_monitor.py b/cpu/cpupower_monitor.py index 420fe0575..be05579e1 100755 --- a/cpu/cpupower_monitor.py +++ b/cpu/cpupower_monitor.py @@ -47,7 +47,7 @@ def setUp(self): for line in output.splitlines(): if 'Available idle states: ' in line: self.states_list = (line.split('Available idle states: ')[-1])\ - .split() + .split() break self.log.info("Idle states on the system are: %s" % self.states_list) @@ -79,7 +79,6 @@ def check_zero_nonzero(self, stop_state_index): return 0 def test_workload(self): - """ This test covers: 1. Collect cpupower monitor output. @@ -113,11 +112,10 @@ def test_workload(self): zero_nonzero = zero_nonzero + self.check_zero_nonzero(i + 1) if not zero_nonzero: self.log.info("cpus have not entered idle states after killing" - " ebizzy workload") + " ebizzy workload") self.log.info("cpus have entered idle states after killing work load") def test_disable_idlestate(self): - """ 1. Collect list of supported idle states. 2. 
Disable first idle statei, check cpus have not entered this state. @@ -137,7 +135,6 @@ def test_disable_idlestate(self): @skipIf("powerpc" not in cpu.get_arch(), "Skip, SMT specific tests") def test_idlestate_smt(self): - """ 1. Set smt mode to off. 2. Run test_workload. @@ -152,7 +149,6 @@ def test_idlestate_smt(self): process.run('ppc64_cpu --smt=on', shell=True) def test_idlestate_single_core(self): - """ 1. Set single core online. 2. Run test_workload. @@ -167,17 +163,17 @@ def test_idlestate_single_core(self): process.run('ppc64_cpu --smt=on', shell=True) def test_idle_info(self): - """ This test verifies cpupower idle-info with different smt states. Prints the duration for which CPU is in snooze and CEDE state. """ process.run('cpupower -c all idle-info', shell=True) - for i in [1,2,4]: + for i in [1, 2, 4]: process.run('ppc64_cpu --smt=%s' % i, shell=True) process.run('ppc64_cpu --smt', shell=True) - output = process.system_output('cpupower -c %s idle-info | grep offline' % i, shell=True).split() + output = process.system_output( + 'cpupower -c %s idle-info | grep offline' % i, shell=True).split() if "offline" not in str(output[1]): self.fail("cpupower tool verification with smt=%s failed" % i) process.run('ppc64_cpu --smt=on', shell=True) @@ -185,14 +181,20 @@ def test_idle_info(self): process.run('cpupower -c all idle-info', shell=True) process.run('ppc64_cpu --cores-on=all', shell=True) process.run('cpupower -c all idle-info', shell=True) - self.nr_cpus = process.system_output("lscpu | grep ^'CPU(s):'", shell=True).split() + self.nr_cpus = process.system_output( + "lscpu | grep ^'CPU(s):'", shell=True).split() for i in range(int(self.nr_cpus[1])): - duration_init = process.system_output('cpupower -c %s idle-info | grep Duration' % i, shell=True).split() + duration_init = process.system_output( + 'cpupower -c %s idle-info | grep Duration' % i, shell=True).split() time.sleep(5) - duration_final = process.system_output('cpupower -c %s idle-info | grep Duration' % i, shell=True).split() + duration_final = process.system_output( + 'cpupower -c %s idle-info | grep Duration' % i, shell=True).split() duration_snooze = int(duration_final[1]) - int(duration_init[1]) - self.log.info("CPU%s has entered snooze state for %s microseconds in 2 seconds" % (i, duration_snooze)) + self.log.info("CPU%s has entered snooze state for %s microseconds in 2 seconds" % ( + i, duration_snooze)) duration_CEDE = int(duration_final[3]) - int(duration_init[3]) - self.log.info("CPU%s has entered CEDE state for %s microseconds in 2 seconds" % (i, duration_CEDE)) + self.log.info("CPU%s has entered CEDE state for %s microseconds in 2 seconds" % ( + i, duration_CEDE)) if (duration_snooze == 0) and (duration_CEDE == 0): - self.fail("CPU%s has not entered snooze or CEDE state even in idle state" % i) + self.fail( + "CPU%s has not entered snooze or CEDE state even in idle state" % i) diff --git a/cpu/linsched.py b/cpu/linsched.py index 42babde1e..533ec48e0 100644 --- a/cpu/linsched.py +++ b/cpu/linsched.py @@ -49,7 +49,8 @@ def setUp(self): "archive/refs/heads/master.zip" tarball = self.fetch_asset("linsched.zip", locations=url, expire='7d') archive.extract(tarball, self.workdir) - self.sourcedir = os.path.join(self.workdir, 'linux-scheduler-testing-master') + self.sourcedir = os.path.join( + self.workdir, 'linux-scheduler-testing-master') os.chdir(self.sourcedir) fix_patch = 'patch -p1 < %s' % self.get_data('fix.patch') diff --git a/cpu/producer_consumer.py b/cpu/producer_consumer.py index 4ac9baf26..0c83ff6ac 100644 
--- a/cpu/producer_consumer.py +++ b/cpu/producer_consumer.py @@ -77,7 +77,8 @@ def test(self): cache_size = self.params.get('cache_size') if not cache_size: - iteration_length = self.params.get('iteration_length', default=1024) + iteration_length = self.params.get( + 'iteration_length', default=1024) args = '-p %s -c %s -r %s -l %s -t %s' % (pcpu, ccpu, random_seed, iteration_length, runtime) else: diff --git a/dlpar/dlpar_main.py b/dlpar/dlpar_main.py index aab54d562..7357e02f7 100644 --- a/dlpar/dlpar_main.py +++ b/dlpar/dlpar_main.py @@ -147,7 +147,7 @@ def test_dlpar(self): 'sha_cpu_fold_workload'): dlpar_type_flag = "cpu_fold" self.log.info( - "CPU folding Workload: Calling ./dlpar_workload_setup.py") + "CPU folding Workload: Calling ./dlpar_workload_setup.py") test_cmd = './dlpar_workload_setup.py' self.run_cmd(test_cmd, "cpu_fold") self.dlpar_engine() diff --git a/fs/blktests.py b/fs/blktests.py index b2f079677..05be20313 100644 --- a/fs/blktests.py +++ b/fs/blktests.py @@ -38,9 +38,11 @@ def setUp(self): smm = SoftwareManager() dist = distro.detect() if dist.name in ['Ubuntu', 'debian']: - packages = ['gcc', 'make', 'util-linux', 'fio', 'libdevmapper-dev', 'g++'] + packages = ['gcc', 'make', 'util-linux', + 'fio', 'libdevmapper-dev', 'g++'] else: - packages = ['gcc', 'make', 'util-linux', 'fio', 'device-mapper', 'gcc-c++'] + packages = ['gcc', 'make', 'util-linux', + 'fio', 'device-mapper', 'gcc-c++'] for package in packages: if not smm.check_installed(package) and not smm.install(package): diff --git a/fs/xfstests.py b/fs/xfstests.py index 9196e12c5..fbde26c1d 100644 --- a/fs/xfstests.py +++ b/fs/xfstests.py @@ -141,7 +141,8 @@ def setup_nvdimm(self): self.log_scratch = None self.plib.create_namespace(region=self.region, size=dev_size) self.plib.create_namespace(region=self.region, size=dev_size) - namespaces = self.plib.run_ndctl_list('-N -r %s -m fsdax' % self.region) + namespaces = self.plib.run_ndctl_list( + '-N -r %s -m fsdax' % self.region) pmem_dev = self.plib.run_ndctl_list_val(namespaces[0], 'blockdev') self.test_dev = "/dev/%s" % pmem_dev pmem_dev = self.plib.run_ndctl_list_val(namespaces[1], 'blockdev') diff --git a/generic/interbench.py b/generic/interbench.py index 4ab75d864..447780529 100644 --- a/generic/interbench.py +++ b/generic/interbench.py @@ -52,14 +52,16 @@ def setUp(self): if memory.meminfo.MemTotal.b > disk_free_b: self.cancel('Disk space is less than total memory. 
Skipping test') - tarball = self.fetch_asset('http://ck.kolivas.org/apps/interbench/interbench-0.31.tar.bz2') + tarball = self.fetch_asset( + 'http://ck.kolivas.org/apps/interbench/interbench-0.31.tar.bz2') archive.extract(tarball, self.workdir) version = os.path.basename(tarball.split('.tar.')[0]) self.sourcedir = os.path.join(self.workdir, version) # Patch for make file os.chdir(self.sourcedir) - makefile_patch = 'patch -p1 < %s ' % self.get_data('makefile_fix.patch') + makefile_patch = 'patch -p1 < %s ' % self.get_data( + 'makefile_fix.patch') process.run(makefile_patch, shell=True) build.make(self.sourcedir) diff --git a/io/common/distro_tools.py b/io/common/distro_tools.py index 7c41b9d31..026154080 100644 --- a/io/common/distro_tools.py +++ b/io/common/distro_tools.py @@ -25,10 +25,12 @@ release = "%s%s" % (distro.detect().name, distro.detect().version) + class DisrtoTool(Test): ''' to test different type of tool ''' + def setUp(self): ''' get all parameters @@ -67,7 +69,6 @@ def setUp(self): if not smm.check_installed(pkg) and not smm.install(pkg): self.cancel("%s package is need to test" % pkg) - def test(self): ''' test all distro tools diff --git a/io/common/virtual_bind_unbind.py b/io/common/virtual_bind_unbind.py index 9d189f3c3..29a759297 100644 --- a/io/common/virtual_bind_unbind.py +++ b/io/common/virtual_bind_unbind.py @@ -97,7 +97,8 @@ def test(self): if self.device_type in ["l-lan", "vnic"]: if self.networkinterface.ping_check(self.peer_ip, count=5) is not None: - self.cancel("Please make sure the network peer is configured ?") + self.cancel( + "Please make sure the network peer is configured ?") else: if self.is_exists_device(self.virtual_device) is False: self.cancel("failed to detect the test disk") diff --git a/io/disk/disk_info.py b/io/disk/disk_info.py index 9e5911d39..3112a604e 100755 --- a/io/disk/disk_info.py +++ b/io/disk/disk_info.py @@ -127,11 +127,13 @@ def test(self): msg = [] if process.system("ls /dev/disk/by-id -l| grep -i %s" % self.disk_abs, ignore_status=True, shell=True, sudo=True) != 0: - msg.append("Given disk %s is not in /dev/disk/by-id" % self.disk_abs) + msg.append("Given disk %s is not in /dev/disk/by-id" % + self.disk_abs) for disk_node in self.disk_nodes: if process.system("ls /dev/disk/by-path -l| grep -i %s" % disk_node, ignore_status=True, shell=True, sudo=True) != 0: - msg.append("Given disk %s is not in /dev/disk/by-path" % disk_node) + msg.append( + "Given disk %s is not in /dev/disk/by-path" % disk_node) # Verify disk listed in all tools if self.mpath: @@ -145,12 +147,14 @@ def test(self): cmd = cmd + " | grep -i %s" % self.disk_base if process.system(cmd, ignore_status=True, shell=True, sudo=True) != 0: - msg.append("Given disk %s is not present in %s" % (self.disk_base, cmd)) + msg.append("Given disk %s is not present in %s" % + (self.disk_base, cmd)) if self.mpath: for disk_node in self.disk_nodes: if process.system("lshw -c disk | grep -i %s" % disk_node, ignore_status=True, shell=True, sudo=True) != 0: - msg.append("Given disk %s is not in lshw -c disk" % disk_node) + msg.append("Given disk %s is not in lshw -c disk" % + disk_node) # Get the size and UUID of the disk cmd = "lsblk -l %s --output SIZE -b |sed -n 2p" % self.disk @@ -251,7 +255,8 @@ def test(self): self.disk, self.fstype, self.uuid) if process.system("grub2-probe %s" % self.dirs, ignore_status=True): - msg.append("Given disk %s's fs not detected by grub2" % self.disk_base) + msg.append("Given disk %s's fs not detected by grub2" % + self.disk_base) # Un-mount the 
directory self.log.info("Unmounting directory %s", self.dirs) diff --git a/io/disk/iozone.py b/io/disk/iozone.py index 38398e54d..545cc891c 100755 --- a/io/disk/iozone.py +++ b/io/disk/iozone.py @@ -444,7 +444,8 @@ def setUp(self): if distro.detect().name == 'Ubuntu': if not smm.check_installed("btrfs-tools") and not \ smm.install("btrfs-tools"): - self.cancel('btrfs-tools is needed for the test to be run') + self.cancel( + 'btrfs-tools is needed for the test to be run') tarball = self.fetch_asset(self.source_url) archive.extract(tarball, self.teststmpdir) diff --git a/io/disk/lvsetup.py b/io/disk/lvsetup.py index 000558e35..33802b5c0 100755 --- a/io/disk/lvsetup.py +++ b/io/disk/lvsetup.py @@ -44,6 +44,7 @@ class Lvsetup(Test): """ Test class for creating logical volumes. """ + def setUp(self): """ Check existence of input PV,VG, LV and snapshots prior to Test. diff --git a/io/disk/multipath_test.py b/io/disk/multipath_test.py index 1be3a52c2..934401b83 100755 --- a/io/disk/multipath_test.py +++ b/io/disk/multipath_test.py @@ -177,7 +177,7 @@ def is_mpath_available(): self.log.info("recovery of %s success" % path_dic["wwid"]) else: msg += "Recovery of %s fails after blocklist\n" \ - % path_dic["wwid"] + % path_dic["wwid"] def is_path_available(): if operation == 'block': @@ -210,7 +210,7 @@ def is_path_available(): self.log.info("recovery of %s success" % disk) else: msg += "Recovery of %s fails after blacklist %s\n" \ - % (disk, path_dic["wwid"]) + % (disk, path_dic["wwid"]) multipath.form_conf_mpath_file(defaults_extra=plcy) if msg: diff --git a/io/disk/port_bounce.py b/io/disk/port_bounce.py index db60c92b8..0bd6e4724 100755 --- a/io/disk/port_bounce.py +++ b/io/disk/port_bounce.py @@ -35,6 +35,7 @@ class CommandFailed(Exception): ''' exception class ''' + def __init__(self, command, output, exitcode): self.command = command self.output = output diff --git a/io/driver/module_unload_load.py b/io/driver/module_unload_load.py index 65e172080..d8a50fe57 100755 --- a/io/driver/module_unload_load.py +++ b/io/driver/module_unload_load.py @@ -32,6 +32,7 @@ class ModuleLoadUnload(Test): :param iteration: Number of time to unload and load the module :only_io True for single provide module and False for All pci modules """ + def setUp(self): """ get parameters. 
diff --git a/io/net/bonding.py b/io/net/bonding.py index 7c5aa33fd..94958c99b 100755 --- a/io/net/bonding.py +++ b/io/net/bonding.py @@ -82,7 +82,8 @@ def setUp(self): for self.host_interface in self.host_interfaces: if self.host_interface not in interfaces: self.cancel("interface is not available") - self.peer_first_ipinterface = self.params.get("peer_ips", default="").split(" ") + self.peer_first_ipinterface = self.params.get( + "peer_ips", default="").split(" ") if not self.peer_interfaces or self.peer_first_ipinterface == "": self.cancel("peer machine should available") self.ipaddr = self.params.get("host_ips", default="").split(" ") @@ -104,8 +105,8 @@ def setUp(self): self.peer_interfaces): if self.peer_bond_needed: self.remotehost = RemoteHost( - self.peer_public_ip, - self.user, password=self.password) + self.peer_public_ip, + self.user, password=self.password) peer_networkinterface = NetworkInterface(interface, self.remotehost) try: @@ -179,11 +180,13 @@ def setUp(self): if 'setup' in str(self.name.name): for interface in self.peer_interfaces: - peer_networkinterface = NetworkInterface(interface, self.remotehost) + peer_networkinterface = NetworkInterface( + interface, self.remotehost) if peer_networkinterface.set_mtu(self.mtu) is not None: self.cancel("Failed to set mtu in peer") for host_interface in self.host_interfaces: - self.networkinterface = NetworkInterface(host_interface, self.localhost) + self.networkinterface = NetworkInterface( + host_interface, self.localhost) if self.networkinterface.set_mtu(self.mtu) is not None: self.cancel("Failed to set mtu in host") @@ -361,9 +364,11 @@ def bond_fail(self, arg1): if peer_networkinterface.set_mtu(mtu) is not None: self.cancel("Failed to set mtu in peer") if not self.ping_check(): - self.fail("Ping fail in mode %s after MTU change to %s" % (self.mode, mtu)) + self.fail("Ping fail in mode %s after MTU change to %s" % + (self.mode, mtu)) else: - self.log.info("Ping success for mode %s bond with MTU %s" % (self.mode, mtu)) + self.log.info( + "Ping success for mode %s bond with MTU %s" % (self.mode, mtu)) if self.bond_networkinterface.set_mtu('1500'): self.cancel("Failed to set mtu back to 1500 in host") for interface in self.peer_interfaces: @@ -522,15 +527,16 @@ def test_cleanup(self): try: networkinterface.restore_from_backup() except Exception: - self.log.info("backup file not availbale, could not restore file.") + self.log.info( + "backup file not availbale, could not restore file.") if self.peer_bond_needed: self.bond_remove("peer") for ipaddr, interface in zip(self.peer_first_ipinterface, self.peer_interfaces): self.remotehost = RemoteHost( - self.peer_public_ip, self.user, - password=self.password) + self.peer_public_ip, self.user, + password=self.password) peer_networkinterface = NetworkInterface(interface, self.remotehost) try: @@ -541,7 +547,7 @@ def test_cleanup(self): peer_networkinterface.save(ipaddr, self.netmask) time.sleep(self.sleep_time) self.error_check() - + detected_distro = distro.detect() if detected_distro.name == "rhel": cmd = "systemctl restart NetworkManager.service" @@ -554,7 +560,8 @@ def test_cleanup(self): try: for interface in self.peer_interfaces: - peer_networkinterface = NetworkInterface(interface, self.remotehost) + peer_networkinterface = NetworkInterface( + interface, self.remotehost) peer_networkinterface.set_mtu("1500") self.remotehost.remote_session.quit() except Exception: diff --git a/io/net/ethtool_test.py b/io/net/ethtool_test.py index cd5e99288..4985f807c 100755 --- 
a/io/net/ethtool_test.py +++ b/io/net/ethtool_test.py @@ -160,8 +160,8 @@ def test_ethtool(self): for j in range(5): if value[i] != '': cmd = "ethtool %s %s %s %s" % ( - self.args, self.iface, - self.param[i], default[i][j]) + self.args, self.iface, + self.param[i], default[i][j]) result = process.run(cmd, shell=True, verbose=True, ignore_status=True) @@ -170,21 +170,21 @@ def test_ethtool(self): self.peer, count=5) is not None: self.cancel("ping fail value %s \ to %s parameter" % ( - default[i][j], - self.param[i])) + default[i][j], + self.param[i])) err_channel = "no RX or TX channel" err_count = "count exceeds maximum" if result.exit_status != 0: if err_channel in result.stderr_text: self.log.info("Cannot set %s \ value on %s parameter" % ( - default[i][j], - self.param[i])) + default[i][j], + self.param[i])) elif err_count in result.stderr_text: self.log.info("Cannot set %s \ value on %s parameter" % ( - default[i][j], - self.param[i])) + default[i][j], + self.param[i])) else: self.fail("%s %s" % ( self.args, result.stderr_text)) @@ -206,7 +206,7 @@ def test_ethtool(self): else: self.fail("%s failed" % self.args) if not wait.wait_for(lambda: self.networkinterface.are_packets_lost( - self.peer, options=['-c 10000', '-f']), timeout=30): + self.peer, options=['-c 10000', '-f']), timeout=30): self.cancel("Packet recieved in Ping flood is not 100 percent \ after waiting for 30sec") if self.priv_test: diff --git a/io/net/htx_nic_devices.py b/io/net/htx_nic_devices.py index 179ac451e..27f468644 100755 --- a/io/net/htx_nic_devices.py +++ b/io/net/htx_nic_devices.py @@ -107,7 +107,8 @@ def build_htx(self): packages.extend(['libncurses5', 'g++', 'ncurses-dev', 'libncurses-dev', 'tar', 'wget']) elif detected_distro.name == 'SuSE': - packages.extend(['libncurses5', 'gcc-c++', 'ncurses-devel', 'tar', 'wget']) + packages.extend(['libncurses5', 'gcc-c++', + 'ncurses-devel', 'tar', 'wget']) else: self.cancel("Test not supported in %s" % detected_distro.name) @@ -118,13 +119,15 @@ def build_htx(self): cmd = "%s install %s" % (smm.backend.base_command, pkg) output = self.session.cmd(cmd) if not output.exit_status == 0: - self.cancel("Unable to install the package %s on peer machine" % pkg) + self.cancel( + "Unable to install the package %s on peer machine" % pkg) if self.htx_url: htx = self.htx_url.split("/")[-1] htx_rpm = self.fetch_asset(self.htx_url) process.system("rpm -ivh --force %s" % htx_rpm) - cmd = "wget %s -O /tmp/%s ; cd /tmp ; rpm -ivh --force %s" % (self.htx_url, htx, htx) + cmd = "wget %s -O /tmp/%s ; cd /tmp ; rpm -ivh --force %s" % ( + self.htx_url, htx, htx) self.session.cmd(cmd) else: url = "https://github.com/open-power/HTX/archive/master.zip" @@ -264,11 +267,13 @@ def generate_bpt_file(self): self.log.info("Generating bpt file in both Host & Peer") cmd = "/usr/bin/build_net help n" self.session.cmd(cmd) - exit_code = process.run(cmd, shell=True, sudo=True, ignore_status=True).exit_status + exit_code = process.run( + cmd, shell=True, sudo=True, ignore_status=True).exit_status if exit_code == 0 or exit_code == 43: return True else: - self.fail("Command %s failed with exit status %s " % (cmd, exit_code)) + self.fail("Command %s failed with exit status %s " % + (cmd, exit_code)) def check_bpt_file_existence(self): """ @@ -375,7 +380,8 @@ def ip_config(self): for (peer_intf, net_id) in zip(self.peer_intfs, self.net_ids): ip_addr = "%s.1.1.%s" % (net_id, self.peer_ip.split('.')[-1]) - peer_networkinterface = NetworkInterface(peer_intf, self.remotehost) + peer_networkinterface = 
NetworkInterface( + peer_intf, self.remotehost) peer_networkinterface.add_ipaddr(ip_addr, self.netmask) peer_networkinterface.bring_up() @@ -396,7 +402,7 @@ def htx_configure_net(self): peer_obj = re.search("All networks ping Ok", peer_output) except Exception: self.log.info("build_net command failed in peer") - if host_obj != None: + if host_obj is not None: if self.peer_distro == "rhel": self.session.cmd("systemctl start NetworkManager") else: @@ -418,7 +424,7 @@ def htx_configure_net(self): self.log.info("Starting the N/W ping test for HTX in Peer") for count in range(11): - if peer_obj != None: + if peer_obj is not None: try: self.session.cmd("pingum") except Exception: @@ -576,7 +582,8 @@ def monitor_htx_run(self): def shutdown_active_mdt(self): self.log.info("Shutdown active mdt in host") cmd = "htxcmdline -shutdown" - process.run(cmd, timeout=120, ignore_status=True, shell=True, sudo=True) + process.run(cmd, timeout=120, ignore_status=True, + shell=True, sudo=True) self.log.info("Shutdown active mdt in peer") output = self.session.cmd(cmd) if not output.exit_status == 0: @@ -675,11 +682,13 @@ def clean_state(self): self.log.info("Resetting bpt file in both Host & Peer") cmd = "/usr/bin/build_net help n" self.session.cmd(cmd) - exit_code = process.run(cmd, shell=True, sudo=True, ignore_status=True).exit_status + exit_code = process.run( + cmd, shell=True, sudo=True, ignore_status=True).exit_status if exit_code == 0 or exit_code == 43: return True else: - self.fail("Command %s failed with exit status %s " % (cmd, exit_code)) + self.fail("Command %s failed with exit status %s " % + (cmd, exit_code)) if self.is_net_device_active_in_host(): self.suspend_all_net_devices_in_host() @@ -716,7 +725,8 @@ def ip_restore_peer(self): config ip for peer ''' for ip, interface in zip(self.peer_ips, self.peer_intfs): - peer_networkinterface = NetworkInterface(interface, self.remotehost) + peer_networkinterface = NetworkInterface( + interface, self.remotehost) try: cmd = "ip addr flush %s" % interface self.session.cmd(cmd) diff --git a/io/net/iperf_test.py b/io/net/iperf_test.py index 59c643895..3b4b45e8e 100755 --- a/io/net/iperf_test.py +++ b/io/net/iperf_test.py @@ -125,14 +125,14 @@ def setUp(self): self.remotehost = RemoteHost(self.peer_ip, self.peer_user, password=self.peer_password) self.peer_interface = self.remotehost.get_interface_by_ipaddr( - self.peer_ip).name + self.peer_ip).name self.peer_networkinterface = NetworkInterface(self.peer_interface, self.remotehost) self.remotehost_public = RemoteHost( - self.peer_public_ip, self.peer_user, - password=self.peer_password) + self.peer_public_ip, self.peer_user, + password=self.peer_password) self.peer_public_networkinterface = NetworkInterface( - self.peer_interface, self.remotehost_public) + self.peer_interface, self.remotehost_public) if self.peer_networkinterface.set_mtu(self.mtu) is not None: self.cancel("Failed to set mtu in peer") if self.networkinterface.set_mtu(self.mtu) is not None: diff --git a/io/net/multiport_stress.py b/io/net/multiport_stress.py index 6026dfb39..0fe37deec 100755 --- a/io/net/multiport_stress.py +++ b/io/net/multiport_stress.py @@ -72,13 +72,15 @@ def setUp(self): self.peer_user, password=self.peer_password) for peer_ip in self.peer_ips: - peer_interface = self.remotehost.get_interface_by_ipaddr(peer_ip).name + peer_interface = self.remotehost.get_interface_by_ipaddr( + peer_ip).name peer_networkinterface = NetworkInterface(peer_interface, self.remotehost) if peer_networkinterface.set_mtu(self.mtu) is not None: 
self.cancel("Failed to set mtu in peer") for host_interface in self.host_interfaces: - self.networkinterface = NetworkInterface(host_interface, self.local) + self.networkinterface = NetworkInterface( + host_interface, self.local) if self.networkinterface.set_mtu(self.mtu) is not None: self.cancel("Failed to set mtu in host") @@ -124,7 +126,8 @@ def tearDown(self): if networkinterface.set_mtu("1500") is not None: self.cancel("Failed to set mtu in host") for peer_ip in self.peer_ips: - peer_interface = self.remotehost.get_interface_by_ipaddr(peer_ip).name + peer_interface = self.remotehost.get_interface_by_ipaddr( + peer_ip).name try: peer_networkinterface = NetworkInterface(peer_interface, self.remotehost) @@ -139,6 +142,7 @@ def tearDown(self): try: networkinterface.restore_from_backup() except Exception: - self.log.info("backup file not availbale, could not restore file.") + self.log.info( + "backup file not availbale, could not restore file.") self.remotehost.remote_session.quit() self.remotehost_public.remote_session.quit() diff --git a/io/net/net_tools.py b/io/net/net_tools.py index 80a1c3ce4..694cd684f 100755 --- a/io/net/net_tools.py +++ b/io/net/net_tools.py @@ -53,7 +53,8 @@ def setUp(self): install_dependencies() self.restore_hostname = False # Get Hostname - hostname = process.system_output("hostname").decode("utf-8").strip("\n") + hostname = process.system_output( + "hostname").decode("utf-8").strip("\n") if not hostname: # set hostname if not set process.system("hostname localhost.localdomain", sudo=True) @@ -317,7 +318,8 @@ class Iptunnel(Test): @skipUnless("SuSE15" not in release, "iptunnel is deprecated") def setUp(self): self.tunnel = None - ret = process.system_output("ps -aef", env={"LANG": "C"}).decode("utf-8") + ret = process.system_output( + "ps -aef", env={"LANG": "C"}).decode("utf-8") if 'dhclient' in ret: self.cancel("Test not supported on systems running dhclient") install_dependencies() diff --git a/io/net/network_test.py b/io/net/network_test.py index 919ffd5fe..4953734df 100755 --- a/io/net/network_test.py +++ b/io/net/network_test.py @@ -67,7 +67,8 @@ def setUp(self): self.hbond = self.params.get("hbond", default=False) local = LocalHost() if self.hbond: - self.networkinterface = NetworkInterface(self.iface, local, if_type='Bond') + self.networkinterface = NetworkInterface( + self.iface, local, if_type='Bond') else: self.networkinterface = NetworkInterface(self.iface, local) if self.ip_config: @@ -95,7 +96,8 @@ def setUp(self): self.cancel("failed connecting to peer") self.remotehost = RemoteHost(self.peer, self.peer_user, password=self.peer_password) - self.peer_interface = self.remotehost.get_interface_by_ipaddr(self.peer).name + self.peer_interface = self.remotehost.get_interface_by_ipaddr( + self.peer).name self.peer_networkinterface = NetworkInterface(self.peer_interface, self.remotehost) self.remotehost_public = RemoteHost(self.peer_public_ip, self.peer_user, @@ -200,7 +202,8 @@ def test_ipv6_ping(self): if not peer_ipv6[0]: self.cancel("IPV6 addrress is not set for peer interface") except Exception: - self.cancel("Test failing while getting IPV6 address for peer interface") + self.cancel( + "Test failing while getting IPV6 address for peer interface") if self.networkinterface.ping_check(peer_ipv6[0], count=10) is not None: self.fail("IPV6 ping test failed") @@ -337,7 +340,8 @@ def tearDown(self): self.networkinterface.restore_from_backup() except Exception: self.networkinterface.remove_cfg_file() - self.log.info("backup file not availbale, could not 
restore file.") + self.log.info( + "backup file not availbale, could not restore file.") if self.hbond: self.networkinterface.restore_slave_cfg_file() self.remotehost.remote_session.quit() diff --git a/io/net/switch_test.py b/io/net/switch_test.py index 78dcb1faf..62664e10a 100755 --- a/io/net/switch_test.py +++ b/io/net/switch_test.py @@ -133,4 +133,5 @@ def tearDown(self): try: self.networkinterface.restore_from_backup() except Exception: - self.log.info("backup file not availbale, could not restore file.") + self.log.info( + "backup file not availbale, could not restore file.") diff --git a/io/net/tcpdump.py b/io/net/tcpdump.py index ca21f31fd..28e7138d0 100755 --- a/io/net/tcpdump.py +++ b/io/net/tcpdump.py @@ -72,7 +72,8 @@ def setUp(self): self.networkinterface.save(self.ipaddr, self.netmask) self.networkinterface.bring_up() if not wait.wait_for(self.networkinterface.is_link_up, timeout=120): - self.cancel("Link up of interface is taking longer than 120 seconds") + self.cancel( + "Link up of interface is taking longer than 120 seconds") self.peer_user = self.params.get("peer_user", default="root") self.peer_password = self.params.get("peer_password", '*', default="None") @@ -81,7 +82,8 @@ def setUp(self): self.mtu_timeout = self.params.get("mtu_timeout", default=30) self.remotehost = RemoteHost(self.peer_ip, self.peer_user, password=self.peer_password) - self.peer_interface = self.remotehost.get_interface_by_ipaddr(self.peer_ip).name + self.peer_interface = self.remotehost.get_interface_by_ipaddr( + self.peer_ip).name self.peer_networkinterface = NetworkInterface(self.peer_interface, self.remotehost) self.remotehost_public = RemoteHost(self.peer_public_ip, self.peer_user, @@ -126,7 +128,8 @@ def test(self): else: obj = process.SubProcess(cmd, verbose=False, shell=True) obj.start() - cmd = "timeout %s tcpdump -i %s -n -c %s" % (self.timeout, self.iface, self.count) + cmd = "timeout %s tcpdump -i %s -n -c %s" % ( + self.timeout, self.iface, self.count) if self.option in ('host', 'src'): cmd = "%s %s %s" % (cmd, self.option, self.host_ip) elif self.option == "dst": @@ -168,15 +171,18 @@ def tearDown(self): if self.networkinterface.set_mtu('1500', timeout=self.mtu_timeout) is not None: self.cancel("Failed to set mtu in host") try: - self.peer_networkinterface.set_mtu('1500', timeout=self.mtu_timeout) + self.peer_networkinterface.set_mtu( + '1500', timeout=self.mtu_timeout) except Exception: - self.peer_public_networkinterface.set_mtu('1500', timeout=self.mtu_timeout) + self.peer_public_networkinterface.set_mtu( + '1500', timeout=self.mtu_timeout) self.networkinterface.remove_ipaddr(self.ipaddr, self.netmask) try: self.networkinterface.restore_from_backup() except Exception: self.networkinterface.remove_cfg_file() - self.log.info("backup file not availbale, could not restore file.") + self.log.info( + "backup file not availbale, could not restore file.") if self.hbond: self.networkinterface.restore_slave_cfg_file() self.remotehost.remote_session.quit() diff --git a/io/net/uperf_test.py b/io/net/uperf_test.py index 1aff672d4..9ad700e93 100755 --- a/io/net/uperf_test.py +++ b/io/net/uperf_test.py @@ -72,7 +72,8 @@ def setUp(self): self.cancel("failed connecting to peer") smm = SoftwareManager() detected_distro = distro.detect() - pkgs = ["gcc", "gcc-c++", "autoconf", "perl", "m4", "git-core", "automake"] + pkgs = ["gcc", "gcc-c++", "autoconf", + "perl", "m4", "git-core", "automake"] if detected_distro.name == "Ubuntu": pkgs.extend(["libsctp1", "libsctp-dev", "lksctp-tools"]) elif 
detected_distro.name == "rhel": @@ -106,7 +107,8 @@ def setUp(self): self.mtu = self.params.get("mtu", default=1500) self.remotehost = RemoteHost(self.peer_ip, self.peer_user, password=self.peer_password) - self.peer_interface = self.remotehost.get_interface_by_ipaddr(self.peer_ip).name + self.peer_interface = self.remotehost.get_interface_by_ipaddr( + self.peer_ip).name self.peer_networkinterface = NetworkInterface(self.peer_interface, self.remotehost) self.remotehost_public = RemoteHost(self.peer_public_ip, self.peer_user, @@ -212,7 +214,8 @@ def tearDown(self): try: self.networkinterface.restore_from_backup() except Exception: - self.log.info("backup file not availbale, could not restore file.") + self.log.info( + "backup file not availbale, could not restore file.") self.remotehost.remote_session.quit() self.remotehost_public.remote_session.quit() self.session.quit() diff --git a/io/pci/pci_info_lscfg.py b/io/pci/pci_info_lscfg.py index 30aa83b1b..434deaf0e 100755 --- a/io/pci/pci_info_lscfg.py +++ b/io/pci/pci_info_lscfg.py @@ -52,46 +52,46 @@ def test(self): self.log.info(cfg_output) if cfg_output and pci_info_dict: if 'YL' in cfg_output and 'PhySlot' in pci_info_dict: - # Physical Slot Match + # Physical Slot Match self.log.info("Physical Slot from lscfg is %s" " and lspci is %s", - cfg_output['YL'], pci_info_dict['PhySlot']) + cfg_output['YL'], pci_info_dict['PhySlot']) cfg_output['YL'] = \ - cfg_output['YL'][:cfg_output['YL'].rfind('-')] + cfg_output['YL'][:cfg_output['YL'].rfind('-')] if(cfg_output['YL'] == pci_info_dict['PhySlot']): self.log.info("Physical Slot matched") else: error.append("Physical slot info didn't match") # Sub Device ID match - if ('subvendor_device' in cfg_output and - 'SDevice' in pci_info_dict): + if ('subvendor_device' in cfg_output and + 'SDevice' in pci_info_dict): self.log.info("Device iD from lscfg is %s" " and lspci is %s", - cfg_output['subvendor_device'][4:], - pci_info_dict['SDevice']) + cfg_output['subvendor_device'][4:], + pci_info_dict['SDevice']) if(cfg_output['subvendor_device'][4:] - == pci_info_dict['SDevice']): + == pci_info_dict['SDevice']): self.log.info("Sub Device ID matched") else: error.append("Device ID info didn't match") # Subvendor ID Match - if ('subvendor_device' in cfg_output and - 'SVendor' in pci_info_dict): + if ('subvendor_device' in cfg_output and + 'SVendor' in pci_info_dict): self.log.info("Subvendor ID from lscfg is %s" "and lspci is %s", - cfg_output['subvendor_device'], - pci_info_dict['SVendor']) + cfg_output['subvendor_device'], + pci_info_dict['SVendor']) if(cfg_output['subvendor_device'][0:4] == - pci_info_dict['SVendor']): + pci_info_dict['SVendor']): self.log.info("Sub vendor ID matched") else: error.append("Sub vendor ID didn't match") # PCI Slot ID Match if 'pci_id' in cfg_output and 'Slot' in pci_info_dict: self.log.info("PCI ID from lscfg is %s and lspci is %s", - cfg_output['pci_id'], pci_info_dict['Slot']) + cfg_output['pci_id'], pci_info_dict['Slot']) if(cfg_output['pci_id'] == - pci_info_dict['Slot']): + pci_info_dict['Slot']): self.log.info("PCI Slot ID matched") else: error.append("PCI slot ID didn't match") @@ -101,4 +101,3 @@ def test(self): error.append(pci_addr + " : pci_config_space") if error: self.fail(f"Errors for above pci addresses: {error}") - diff --git a/memory/eatmemory.py b/memory/eatmemory.py index de74964c9..ff09b7bd3 100644 --- a/memory/eatmemory.py +++ b/memory/eatmemory.py @@ -37,7 +37,8 @@ def setUp(self): if not smm.check_installed(package) and not smm.install(package): 
self.cancel(package + ' is needed for the test to be run') url = 'https://github.com/julman99/eatmemory/archive/master.zip' - tarball = self.fetch_asset("eatmemory.zip", locations=[url], expire='7d') + tarball = self.fetch_asset( + "eatmemory.zip", locations=[url], expire='7d') archive.extract(tarball, self.workdir) self.sourcedir = os.path.join(self.workdir, "eatmemory-master") # patch for getch remove @@ -45,7 +46,8 @@ def setUp(self): os.chdir(self.sourcedir) process.run(getch_patch, shell=True) build.make(self.sourcedir) - mem = self.params.get('memory_to_test', default=int(0.95 * memory.meminfo.MemFree.k)) + mem = self.params.get('memory_to_test', default=int( + 0.95 * memory.meminfo.MemFree.k)) self.mem_to_eat = self._mem_to_mbytes(mem) if self.mem_to_eat is None: self.cancel("Memory '%s' not valid." % mem) diff --git a/memory/libhugetlbfs.py b/memory/libhugetlbfs.py index 535438e57..acb4847f4 100644 --- a/memory/libhugetlbfs.py +++ b/memory/libhugetlbfs.py @@ -113,7 +113,8 @@ def setUp(self): {hp_size: tempfile.mkdtemp(dir=self.teststmpdir, prefix='avocado_' + __name__)}) if process.system('mount -t hugetlbfs -o pagesize=%sM none %s' % - (hp_size, self.hugetlbfs_dir[hp_size]), sudo=True, + (hp_size, + self.hugetlbfs_dir[hp_size]), sudo=True, ignore_status=True): self.cancel("hugetlbfs mount failed") self.configured_page_sizes.append(hp_size) diff --git a/memory/page_table.py b/memory/page_table.py index 1356608db..d00755650 100644 --- a/memory/page_table.py +++ b/memory/page_table.py @@ -11,7 +11,7 @@ # # See LICENSE for more details. # -# Copyright: 2022 AMD +# Copyright: 2022 AMD # Author: Kalpana Shetty # @@ -28,7 +28,7 @@ class PageTable(Test): * Test will detect CPU feature, 5 Level page table. * Check for kernel support * Run series of 5-level page tests from "pg-table_tests.git" that covers - - heap, mmap, shmat tests. + - heap, mmap, shmat tests. :avocado: tags=memory """ @@ -38,7 +38,7 @@ class PageTable(Test): this condition can be removed") def setUp(self): ''' - Install pre-requisites packages. + Install pre-requisites packages. Setup pa-table_tests.git ''' smm = SoftwareManager() @@ -57,9 +57,9 @@ def test_detect_5lvl(self): ''' cpu_info = genio.read_file("/proc/cpuinfo") if 'la57' in cpu_info: - self.log.info("Detected 5-Level page table cpu support") + self.log.info("Detected 5-Level page table cpu support") else: - self.fail("5-Level page table - Unsupported platform") + self.fail("5-Level page table - Unsupported platform") def test_kernel(self): ''' @@ -68,9 +68,10 @@ def test_kernel(self): cfg_param = "CONFIG_X86_5LEVEL" result = linux_modules.check_kernel_config(cfg_param) if result == linux_modules.ModuleConfig.NOT_SET: - self.fail("%s is not set in the kernel." % cfg_param) + self.fail("%s is not set in the kernel." 
% cfg_param) else: - self.log.info("Detected 5-Level page table config - CONFIG_X86_5LEVEL set in the kernel") + self.log.info( + "Detected 5-Level page table config - CONFIG_X86_5LEVEL set in the kernel") def test_pg_table_tests(self): ''' diff --git a/nx_gzip/nx_gzip.py b/nx_gzip/nx_gzip.py index ed5ba7dc4..f1ef4e664 100644 --- a/nx_gzip/nx_gzip.py +++ b/nx_gzip/nx_gzip.py @@ -64,12 +64,12 @@ def build_tests(self, testdir_name): test_dir = os.path.join(self.teststmpdir, testdir_name) os.chdir(test_dir) testdir_dict = { - "": "check", - "selftest": "run_tests", - "test": "unsafe-check", - "samples": "bench", - "oct": "-j16", - "tools/testing/selftests/powerpc/nx-gzip": "run_tests" + "": "check", + "selftest": "run_tests", + "test": "unsafe-check", + "samples": "bench", + "oct": "-j16", + "tools/testing/selftests/powerpc/nx-gzip": "run_tests" } failed_tests = [] @@ -311,7 +311,7 @@ def test_kself_nxgzip(self): linux_src = 'https://github.com/torvalds/linux/archive/master.zip' self.output = "linux-master" match = next( - (ext for ext in [".zip", ".tar"] if ext in linux_src), None) + (ext for ext in [".zip", ".tar"] if ext in linux_src), None) if match: tarball = self.fetch_asset("kselftest%s" % match, locations=[linux_src], expire='1d') diff --git a/perf/compilebench.py b/perf/compilebench.py index fd4746161..590403805 100644 --- a/perf/compilebench.py +++ b/perf/compilebench.py @@ -40,13 +40,14 @@ def setUp(self): Source: https://oss.oracle.com/~mason/compilebench/compilebench-0.6.tar.bz2 """ - tarball = self.fetch_asset('https://oss.oracle.com/~mason/compilebench/compilebench-0.6.tar.bz2') + tarball = self.fetch_asset( + 'https://oss.oracle.com/~mason/compilebench/compilebench-0.6.tar.bz2') archive.extract(tarball, self.workdir) cb_version = os.path.basename(tarball.split('.tar.')[0]) self.sourcedir = os.path.join(self.workdir, cb_version) os.chdir(self.sourcedir) compilebench_fix_patch = 'patch -p1 < %s' % self.get_data( - 'fix_compilebench') + 'fix_compilebench') process.run(compilebench_fix_patch, shell=True) def test(self): diff --git a/perf/hackbench.py b/perf/hackbench.py index f3eb323c6..3d96b264d 100644 --- a/perf/hackbench.py +++ b/perf/hackbench.py @@ -69,7 +69,8 @@ def test(self): perf_json = {} for run in range(self._iterations): self.log.info("Iteration " + str(run+1)) - self.results = process.system_output(cmd, shell=True).decode("utf-8") + self.results = process.system_output( + cmd, shell=True).decode("utf-8") for line in self.results.split('\n'): if line.startswith('Time:'): time_spent += float(line.split()[1]) diff --git a/perf/perf_24x7_hardware_counters.py b/perf/perf_24x7_hardware_counters.py index 06aafbe81..bc9253263 100755 --- a/perf/perf_24x7_hardware_counters.py +++ b/perf/perf_24x7_hardware_counters.py @@ -46,7 +46,8 @@ def setUp(self): """ smm = SoftwareManager() detected_distro = distro.detect() - processor = process.system_output("uname -m", ignore_status=True).decode("utf-8") + processor = process.system_output( + "uname -m", ignore_status=True).decode("utf-8") if 'ppc' not in processor: if 'unknown' in processor and 'ppc' not in os.uname(): self.cancel("Processor is not ppc64") @@ -153,9 +154,11 @@ def test_check_all_domains(self): def test_event_w_chip_param(self): if self.cpu_family in ['power8', 'power9']: - event_out = genio.read_file("%s/events/PM_PB_CYC" % self.event_sysfs).rstrip('\t\r\n\0') + event_out = genio.read_file( + "%s/events/PM_PB_CYC" % self.event_sysfs).rstrip('\t\r\n\0') if self.cpu_family == 'power10': - event_out = 
genio.read_file("%s/events/PM_PHB0_0_CYC" % self.event_sysfs).rstrip('\t\r\n\0') + event_out = genio.read_file( + "%s/events/PM_PHB0_0_CYC" % self.event_sysfs).rstrip('\t\r\n\0') if "chip=?" in event_out: self.log.info('sysfs entry has chip entry') else: diff --git a/perf/perf_c2c.py b/perf/perf_c2c.py index 284031236..010ff25ca 100644 --- a/perf/perf_c2c.py +++ b/perf/perf_c2c.py @@ -87,12 +87,15 @@ def test_c2c(self): self.report = "--input=%s" % output_file elif self.report in ['-k', '--vmlinux']: if self.distro_name in ['rhel', 'fedora', 'centos']: - self.report = self.report + " /boot/vmlinuz-" + platform.uname()[2] + self.report = self.report + \ + " /boot/vmlinuz-" + platform.uname()[2] elif self.distro_name in ['SuSE', 'Ubuntu']: - self.report = self.report + " /boot/vmlinux-" + platform.uname()[2] + self.report = self.report + \ + " /boot/vmlinux-" + platform.uname()[2] # Record command - record_cmd = "perf c2c record -o %s %s -- ls" % (output_file, self.record) + record_cmd = "perf c2c record -o %s %s -- ls" % ( + output_file, self.record) self.run_cmd(record_cmd) # Report command report_cmd = "perf c2c report %s" % self.report diff --git a/perf/perf_core_imc_non_zero_event.py b/perf/perf_core_imc_non_zero_event.py index 6131f1492..796f2c41e 100644 --- a/perf/perf_core_imc_non_zero_event.py +++ b/perf/perf_core_imc_non_zero_event.py @@ -74,7 +74,8 @@ def test_perf_cpm_cyc(self): self.parse_op('perf stat -e core_imc/CPM_CCYC/ -C 0 -I 1000 sleep 5') def test_perf_cpm_32mhz_cyc(self): - self.parse_op('perf stat -e core_imc/CPM_32MHZ_CYC/ -C 0 -I 1000 sleep 5') + self.parse_op( + 'perf stat -e core_imc/CPM_32MHZ_CYC/ -C 0 -I 1000 sleep 5') def tearDown(self): process.system('pkill ppc64_cpu', ignore_status=True) diff --git a/perf/perf_events_test.py b/perf/perf_events_test.py index 5cc4218b4..9565facb0 100755 --- a/perf/perf_events_test.py +++ b/perf/perf_events_test.py @@ -81,7 +81,8 @@ def analyse_perf_output(self, output): self.is_fail += 1 if self.is_fail: - self.fail("There are %d test(s) failure, please check the job.log" % self.is_fail) + self.fail( + "There are %d test(s) failure, please check the job.log" % self.is_fail) def execute_perf_test(self): os.chdir(self.sourcedir) diff --git a/perf/perf_genericevents.py b/perf/perf_genericevents.py index d295f5fec..4ef5235fc 100755 --- a/perf/perf_genericevents.py +++ b/perf/perf_genericevents.py @@ -51,13 +51,13 @@ def read_generic_events(self): if 'cpu family' in line: self.family = int(line.split(':')[1]) if self.family == 0x16: - self.log.info("AMD Family: 16h") - self.generic_events = dict(parser.items('AMD16h')) + self.log.info("AMD Family: 16h") + self.generic_events = dict(parser.items('AMD16h')) elif self.family >= 0x17: - self.log.info("AMD Family: 17h") - self.generic_events = dict(parser.items('AMD17h')) + self.log.info("AMD Family: 17h") + self.generic_events = dict(parser.items('AMD17h')) else: - self.cancel("Unsupported AMD Family") + self.cancel("Unsupported AMD Family") def test(self): nfail = 0 diff --git a/perf/perf_invalid_flag_test.py b/perf/perf_invalid_flag_test.py index d9c43eec4..c57c36d38 100644 --- a/perf/perf_invalid_flag_test.py +++ b/perf/perf_invalid_flag_test.py @@ -46,6 +46,7 @@ def setUp(self): def test_perf_invalid_flag(self): cmd = "perf --version -test" - output = process.run(cmd, ignore_status="True", sudo="True", shell="True") + output = process.run(cmd, ignore_status="True", + sudo="True", shell="True") if output.exit_status == -11: self.fail("perf: failed to execute command %s" % cmd) diff 
--git a/perf/perf_metric.py b/perf/perf_metric.py index 20288c2db..8ced8460b 100755 --- a/perf/perf_metric.py +++ b/perf/perf_metric.py @@ -84,7 +84,7 @@ def _run_cmd(self, option): for line in self.list_of_metric_events: cmd = "perf stat %s %s sleep 1" % (option, line) rc, op = process.getstatusoutput(cmd, ignore_status=True, - shell=True, verbose=True) + shell=True, verbose=True) # When the command failed, checking for expected failure or not. if rc: output = op.stdout.decode() + op.stderr.decode() diff --git a/perf/perf_nmem.py b/perf/perf_nmem.py index 3d11fa456..abcfec57a 100644 --- a/perf/perf_nmem.py +++ b/perf/perf_nmem.py @@ -99,13 +99,14 @@ def test_pmu_register_dmesg(self): # This function tests whether performance monitor hardware support # registered or not. If not found any registered messages in dmesg # output this test will fail. - output = dmesg.collect_errors_dmesg('NVDIMM performance monitor support registered') + output = dmesg.collect_errors_dmesg( + 'NVDIMM performance monitor support registered') if not output: self.fail("NVDIMM PMUs not found in dmesg.") else: for line in output: # Looking for - #nvdimm_pmu: nmem0 NVDIMM performance monitor support registered + # nvdimm_pmu: nmem0 NVDIMM performance monitor support registered matchFound = re.search(r"nvdimm_pmu: (.*) NVDIMM", line) if matchFound: pmu = matchFound.group(1) diff --git a/perf/perf_pmu.py b/perf/perf_pmu.py index 6a6522f87..cde43f643 100644 --- a/perf/perf_pmu.py +++ b/perf/perf_pmu.py @@ -55,7 +55,8 @@ def _process_lscpu(self): if 'node' in nodefile: filename = os.path.join(nodesysfs, nodefile, 'cpulist') self.node_cpu_dict[nodefile] = genio.read_file(filename) - self.log.info("Nodes and CPU list: %s" % self.node_cpu_dict) + self.log.info("Nodes and CPU list: %s" % + self.node_cpu_dict) def setUp(self): self.pmu_list = [] @@ -110,7 +111,8 @@ def test_config_PMU_sysfs(self): "power10": ['mmcr0', 'mmcr1', 'mmcr3', 'mmcra']} # Check for any missing files according to the model - self._check_file_existence(sysfs_dict[self.model], os.listdir(sysfs_file)) + self._check_file_existence( + sysfs_dict[self.model], os.listdir(sysfs_file)) try: for filename in glob.glob("%smmcr*" % sysfs_file): @@ -155,7 +157,8 @@ def test_config_PPC_RTAS(self): self._verify_lscpu_sysfs('%ssockets' % sysfs_file, self.psockets) self._verify_lscpu_sysfs('%schipspersocket' % sysfs_file, self.pchips) - self._verify_lscpu_sysfs('%scoresperchip' % sysfs_file, self.pcorechips) + self._verify_lscpu_sysfs('%scoresperchip' % + sysfs_file, self.pcorechips) self._create_temp_user() user_cmd = "su - test_pmu -c" @@ -168,7 +171,8 @@ def test_config_PPC_RTAS(self): self._remove_temp_user() if self.pchips == "1" and self.psockets == "1" and self.pcorechips == "1": - output = dmesg.collect_errors_dmesg('rtas error: Error calling get-system-parameter') + output = dmesg.collect_errors_dmesg( + 'rtas error: Error calling get-system-parameter') if len(output): self.fail("RTAS error occured") @@ -180,11 +184,13 @@ def _check_count(self, event_type): if not os.path.isdir(base_dir + event_type): self.cancel("sysfs %s folder not found" % event_type) - sys_fs_events = os.listdir(os.path.join(base_dir, event_type, 'events')) + sys_fs_events = os.listdir( + os.path.join(base_dir, event_type, 'events')) if len(sys_fs_events) < 21: self.fail("%s events folder contains less than 21 entries" % event_type) - self.log.info("%s events count = %s" % (event_type, len(sys_fs_events))) + self.log.info("%s events count = %s" % + (event_type, len(sys_fs_events))) def 
test_cpu_event_count(self):
         # This test checks for the sysfs event_source directory and checks for
diff --git a/perf/perf_sched.py b/perf/perf_sched.py
index 3aa468173..3dda27a5a 100644
--- a/perf/perf_sched.py
+++ b/perf/perf_sched.py
@@ -75,9 +75,11 @@ def test_sched(self):
         if self.optname == "timehist":
             if self.option in ["-k", "--vmlinux", "--kallsyms"]:
                 if 'rhel' in self.distro_name:
-                    self.option = self.option + " /boot/vmlinuz-" + platform.uname()[2]
+                    self.option = self.option + \
+                        " /boot/vmlinuz-" + platform.uname()[2]
                 elif 'SuSE' in self.distro_name:
-                    self.option = self.option + " /boot/vmlinux-" + platform.uname()[2]
+                    self.option = self.option + \
+                        " /boot/vmlinux-" + platform.uname()[2]

         record_cmd = "perf sched record -o %s ls" % self.temp_file
         self.run_cmd(record_cmd)
diff --git a/perf/perf_script_bug.py b/perf/perf_script_bug.py
index 41b196673..4202e5783 100644
--- a/perf/perf_script_bug.py
+++ b/perf/perf_script_bug.py
@@ -15,7 +15,7 @@
 # Author: Shirisha

 import os
-import platform 
+import platform
 import tempfile
 import shutil
 import configparser
@@ -63,7 +63,8 @@ def test_probe(self):
         process.run(probe, sudo=True, shell=True)
         record = "perf record -e \'{cpu/cpu-cycles,period=10000/,probe_perf_test:main}:S\' -o %s ./perf_test" % self.temp_file
         process.run(record, sudo=True, shell=True)
-        output = process.run("perf script -i %s" % self.temp_file, ignore_status=True, sudo=True, shell=True)
+        output = process.run("perf script -i %s" % self.temp_file,
+                             ignore_status=True, sudo=True, shell=True)
         probe_del = "perf probe -d probe_perf_test:main"
         process.run(probe_del)
         if output.exit_status == -11:
diff --git a/perf/perf_sdt_probe.py b/perf/perf_sdt_probe.py
index 30fcf7965..61cb4097b 100755
--- a/perf/perf_sdt_probe.py
+++ b/perf/perf_sdt_probe.py
@@ -116,7 +116,8 @@ def disable_sdt_marker_probe(self):
                      % self.sdt_marker)

     def record_sdt_marker_probe(self):
-        record_sdt_probe = "perf record -o %s -e %s -aR sleep 1" % (self.temp_file, self.sdt_marker)
+        record_sdt_probe = "perf record -o %s -e %s -aR sleep 1" % (
+            self.temp_file, self.sdt_marker)
         self.is_fail = 0
         self.run_cmd(record_sdt_probe)
         if self.is_fail or not os.path.exists(self.temp_file):
diff --git a/perf/tbench.py b/perf/tbench.py
index 1a58cf2c9..bfe536778 100755
--- a/perf/tbench.py
+++ b/perf/tbench.py
@@ -46,8 +46,8 @@ def setUp(self):
             if not sm.check_installed(package) and not sm.install(package):
                 self.cancel('%s is needed for the test to be run' % package)
         tarball = self.fetch_asset(
-                  "https://www.samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz",
-                  expire='7d')
+            "https://www.samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz",
+            expire='7d')
         archive.extract(tarball, self.workdir)
         version = os.path.basename(tarball.split('.tar.')[0])
         self.sourcedir = os.path.join(self.workdir, version)
diff --git a/ras/sosreport.py b/ras/sosreport.py
index 0ea7974db..73bdb5275 100755
--- a/ras/sosreport.py
+++ b/ras/sosreport.py
@@ -284,9 +284,10 @@ def test_smtchanges(self):
         directory_name = tempfile.mkdtemp()
         for i in [2, 4, 8, "off"]:
             self.run_cmd("ppc64_cpu --smt=%s" % i)
-            smt_initial = re.split(r'=| is ', self.run_cmd_out("ppc64_cpu --smt"))[1]
+            smt_initial = re.split(
+                r'=| is ', self.run_cmd_out("ppc64_cpu --smt"))[1]
             if smt_initial == str(i):
-                self.run_cmd("%s --batch --tmp-dir=%s --all-logs" % 
+                self.run_cmd("%s --batch --tmp-dir=%s --all-logs" %
                              (self.sos_cmd, directory_name))
             else:
                 self.is_fail += 1
@@ -332,8 +333,8 @@ def test_dlpar_mem_hotplug(self):
                                       ignore_status=True,
                                       shell=True).decode("utf-8"):
             mem_value = self.run_cmd_out("lparstat -i | "
-                                     "grep \"Online Memory\" | "
-                                     "cut -d':' -f2")
+                                         "grep \"Online Memory\" | "
+                                         "cut -d':' -f2")
             mem_count = re.split(r'\s', mem_value)[1]
             if mem_count:
                 mem_count = int(mem_count)
diff --git a/ras/supportconfig.py b/ras/supportconfig.py
index 32558489a..1ede2659c 100755
--- a/ras/supportconfig.py
+++ b/ras/supportconfig.py
@@ -51,7 +51,8 @@ def setUp(self):

         for pkg in pkgs:
             if not sm.check_installed(pkg) and not sm.install(pkg):
-                self.cancel("Package %s is missing/could not be installed" % pkg)
+                self.cancel(
+                    "Package %s is missing/could not be installed" % pkg)

     def test_supportconfig_options(self):
         """
@@ -65,7 +66,8 @@ def test_supportconfig_options(self):
         ret = process.run("supportconfig",
                           sudo=True,
                           ignore_status=True)
-        logfile = re.search(r"Log file tar ball: (\S+)\n", ret.stdout.decode("utf-8")).group(1)
+        logfile = re.search(r"Log file tar ball: (\S+)\n",
+                            ret.stdout.decode("utf-8")).group(1)

         if not os.path.exists(logfile) or ret.exit_status:
             self.fail("supportconfig failed to create log file")
@@ -121,7 +123,8 @@ def test_enable_disable_plugins(self):
         ret = process.run("supportconfig",
                           sudo=True,
                           ignore_status=True)
-        logfile = re.search(r"Log file tar ball: (\S+)\n", ret.stdout.decode("utf-8")).group(1)
+        logfile = re.search(r"Log file tar ball: (\S+)\n",
+                            ret.stdout.decode("utf-8")).group(1)
         res = process.system("tar -tvf %s | grep 'plugin-pstree.txt'"
                              % logfile,
                              ignore_status=True,
@@ -132,7 +135,8 @@
         ret = process.run("supportconfig -p",
                           sudo=True,
                           ignore_status=True)
-        logfile = re.search(r"Log file tar ball: (\S+)\n", ret.stdout.decode("utf-8")).group(1)
+        logfile = re.search(r"Log file tar ball: (\S+)\n",
+                            ret.stdout.decode("utf-8")).group(1)
         res = process.system("tar -tvf %s | grep 'plugin-pstree.txt'"
                              % logfile,
                              ignore_status=True,
@@ -150,7 +154,7 @@ def test_smtchanges(self):
         """
         self.is_fail = 0
         for i in [2, 4, 8, "off"]:
-            process.run("ppc64_cpu --smt=%s" %i)
+            process.run("ppc64_cpu --smt=%s" % i)
             smt_initial = re.split(r'=| is ', process.system_output("ppc64_cpu --smt")
                                    .decode('utf-8'))[1]
             if smt_initial == str(i):
@@ -173,7 +177,7 @@ def test_dlpar_cpu_hotplug(self):
                                                shell=True).decode("utf-8"):
             lcpu_count = process.system_output("lparstat -i | "
                                                "grep \"Online Virtual CPUs\" | "
-                                               "cut -d':' -f2", 
+                                               "cut -d':' -f2",
                                                ignore_status=True,
                                                shell=True).decode("utf-8")
             if lcpu_count:
diff --git a/security/annobin-tests.py b/security/annobin-tests.py
index 5749dbe21..bab317232 100644
--- a/security/annobin-tests.py
+++ b/security/annobin-tests.py
@@ -25,6 +25,7 @@ class annobin(Test):
     annobin-testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support annobin
diff --git a/security/audit-tests.py b/security/audit-tests.py
index 3e62e8c83..cc8284c88 100644
--- a/security/audit-tests.py
+++ b/security/audit-tests.py
@@ -52,7 +52,7 @@ def setUp(self):
         run_type = self.params.get('type', default='upstream')
         if run_type == "upstream":
             default_url = ("https://github.com/linux-audit/audit-userspace/"
-                            "archive/master.zip")
+                           "archive/master.zip")
             url = self.params.get('url', default=default_url)
             tarball = self.fetch_asset(url, expire='7d')
             archive.extract(tarball, self.workdir)
diff --git a/security/evmctl-tests.py b/security/evmctl-tests.py
index a9020ddfd..bb3961ffa 100644
--- a/security/evmctl-tests.py
+++ b/security/evmctl-tests.py
@@ -25,6 +25,7 @@ class EvmCtl(Test):
     evmctl-testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support evmctl
@@ -43,7 +44,8 @@ def setUp(self):
         tarball = self.fetch_asset(name="download.tar.gz", locations=url,
                                    expire='7d')
         archive.extract(tarball, self.workdir)
-        self.srcdir = os.path.join(self.workdir, os.listdir(self.workdir)[0])
+        self.srcdir = os.path.join(
+            self.workdir, os.listdir(self.workdir)[0])
         self.log.info("sourcedir - %s" % self.srcdir)
         os.chdir(self.srcdir)
         output = process.run('./autogen.sh', ignore_status=True)
@@ -61,7 +63,8 @@ def test(self):
         Running tests from evmctl
         '''
         count = 0
-        output = process.system_output('./build.sh', ignore_status=True).decode()
+        output = process.system_output(
+            './build.sh', ignore_status=True).decode()
         for line in reversed(output.splitlines()):
             if '# FAIL' in line:
                 count = int(line.split(":")[1].strip())
diff --git a/security/ima-evm-utils-tests.py b/security/ima-evm-utils-tests.py
index 20015994a..938d9b6a2 100644
--- a/security/ima-evm-utils-tests.py
+++ b/security/ima-evm-utils-tests.py
@@ -25,6 +25,7 @@ class IMAEVMUtils(Test):
     ima-evm-utils testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support ima-evm-utils
@@ -41,7 +42,8 @@ def setUp(self):
                          'libattr-devel', 'libxslt-tools', 'openssl-devel',
                          'tpm2-0-tss-devel'])
         else:
-            self.cancel("%s not supported for this test" % detected_distro.name)
+            self.cancel("%s not supported for this test" %
+                        detected_distro.name)
         for package in deps:
             if not smm.check_installed(package) and not smm.install(package):
                 self.cancel('%s is needed for the test to be run' % package)
diff --git a/security/ima-modsig-tests.py b/security/ima-modsig-tests.py
index 871d9258a..920344a91 100644
--- a/security/ima-modsig-tests.py
+++ b/security/ima-modsig-tests.py
@@ -23,6 +23,7 @@ class IMAmodsig(Test):
     ima-modsig tests for Linux
     :avocado: tags=privileged,security,ima
     """
+
     def _check_kernel_config(self, config_option):
         ret = linux_modules.check_kernel_config(config_option)
         if ret == linux_modules.ModuleConfig.NOT_SET:
diff --git a/security/kernel-hardening-tests.py b/security/kernel-hardening-tests.py
index c05171f40..ff0b9e7fd 100644
--- a/security/kernel-hardening-tests.py
+++ b/security/kernel-hardening-tests.py
@@ -22,6 +22,7 @@ class KerelHardConfig(Test):
     Kernel Hardening config options for Linux
     :avocado: tags=privileged,security,hardening
     """
+
     def _check_kernel_config(self, config_option):
         ret = linux_modules.check_kernel_config(config_option)
         if ret == linux_modules.ModuleConfig.NOT_SET:
diff --git a/security/keyutils-tests.py b/security/keyutils-tests.py
index a2b42ef64..a43171cc6 100644
--- a/security/keyutils-tests.py
+++ b/security/keyutils-tests.py
@@ -25,6 +25,7 @@ class KeyUtils(Test):
     keyutils-testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support keyutils
diff --git a/security/krb5-tests.py b/security/krb5-tests.py
index 1684ce41e..5e8e413a1 100644
--- a/security/krb5-tests.py
+++ b/security/krb5-tests.py
@@ -25,6 +25,7 @@ class Krb5(Test):
     krb5-testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support krb5
diff --git a/security/libkmip-tests.py b/security/libkmip-tests.py
index dcd4c2c3a..6e936a078 100644
--- a/security/libkmip-tests.py
+++ b/security/libkmip-tests.py
@@ -25,6 +25,7 @@ class libkmip(Test):
     libkmip-testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support libkmip
diff --git a/security/openssh-tests.py b/security/openssh-tests.py
index a988dca9f..d17fa54f3 100644
--- a/security/openssh-tests.py
+++ b/security/openssh-tests.py
@@ -25,6 +25,7 @@ class OpenSSH(Test):
     openssh-testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support openssh
diff --git a/security/openssl-tests.py b/security/openssl-tests.py
index 02daa91c3..d764f7ff8 100644
--- a/security/openssl-tests.py
+++ b/security/openssl-tests.py
@@ -25,6 +25,7 @@ class OpenSSL(Test):
     openssl-testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support openssl
diff --git a/security/pam-tests.py b/security/pam-tests.py
index 6bb4d2e95..fe6506275 100644
--- a/security/pam-tests.py
+++ b/security/pam-tests.py
@@ -25,6 +25,7 @@ class PAM(Test):
     PAM-testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support PAM
diff --git a/security/secvarctl-tests.py b/security/secvarctl-tests.py
index d9077e0bb..69a283870 100644
--- a/security/secvarctl-tests.py
+++ b/security/secvarctl-tests.py
@@ -25,6 +25,7 @@ class secvarctl(Test):
     secvarctl testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support secvarctl
diff --git a/security/selinux-tests.py b/security/selinux-tests.py
index 3377ee205..f6fb35ed0 100644
--- a/security/selinux-tests.py
+++ b/security/selinux-tests.py
@@ -59,7 +59,8 @@ def setUp(self):
             url = self.params.get('url', default=default_url)
             tarball = self.fetch_asset(url, expire='7d')
             archive.extract(tarball, self.workdir)
-            self.srcdir = os.path.join(self.workdir, 'selinux-testsuite-master')
+            self.srcdir = os.path.join(
+                self.workdir, 'selinux-testsuite-master')
         elif run_type == "distro":
             self.srcdir = os.path.join(self.workdir, "selinux-distro")
             if not os.path.exists(self.srcdir):
diff --git a/security/vTPM-tests.py b/security/vTPM-tests.py
index 3970ef516..5ec602a88 100644
--- a/security/vTPM-tests.py
+++ b/security/vTPM-tests.py
@@ -23,6 +23,7 @@ class vTPM(Test):
     vTPM tests for Linux
     :avocado: tags=privileged,security,tpm
     """
+
     def setUp(self):
         device_tree_path = "/proc/device-tree/vdevice/"
         d_list = os.listdir(device_tree_path)
diff --git a/security/xmlsec-tests.py b/security/xmlsec-tests.py
index f6882ed4c..aba79b808 100644
--- a/security/xmlsec-tests.py
+++ b/security/xmlsec-tests.py
@@ -25,6 +25,7 @@ class XMLSec(Test):
     xmlsec-testsuite
     :avocado: tags=security,testsuite
     """
+
     def setUp(self):
         '''
         Install the basic packages to support xmlsec
diff --git a/toolchain/binutils.py b/toolchain/binutils.py
index e472b8340..c5d1e75a5 100755
--- a/toolchain/binutils.py
+++ b/toolchain/binutils.py
@@ -74,7 +74,8 @@ def setUp(self):
         elif run_type == "distro":
             self.sourcedir = os.path.join(self.workdir, "binutils-distro")
             if not os.path.exists(self.sourcedir):
-                self.sourcedir = self._sm.get_source("binutils", self.sourcedir)
+                self.sourcedir = self._sm.get_source(
+                    "binutils", self.sourcedir)

         # Compile the binutils
         os.chdir(self.sourcedir)
@@ -85,7 +86,8 @@ def test(self):
         """
         Runs the binutils `make check`
         """
-        ret = build.make(self.sourcedir, extra_args='check', ignore_status=True)
+        ret = build.make(self.sourcedir, extra_args='check',
+                         ignore_status=True)

         errors = 0
         for root, _, filenames in os.walk(self.sourcedir):
diff --git a/toolchain/papiTest.py b/toolchain/papiTest.py
index d7eda5c7e..89fcc5207 100644
--- a/toolchain/papiTest.py
+++ b/toolchain/papiTest.py
@@ -37,7 +37,7 @@ def setUp(self):
             if not softm.check_installed(package) and not softm.install(package):
                 self.cancel("%s is needed for the test to be run" % package)
         test_type = self.params.get('type', default='upstream')
-        
+
         if test_type == 'upstream':
             papi_url = self.params.get(
                 'url', default="https://bitbucket.org/icl/papi.git")