Skip to content

Commit

Permalink
spytest pre-commit warnings fix in framework and use caching (sonic-n…
Browse files Browse the repository at this point in the history
…et#9430)

Co-authored-by: Rama Sasthri, Kristipati <[email protected]>
  • Loading branch information
ramakristipati and ramakristipatibrcm authored Aug 14, 2023
1 parent 95e6435 commit d10a36f
Show file tree
Hide file tree
Showing 10 changed files with 133 additions and 47 deletions.
Empty file removed spytest/bin/.pylintrc
Empty file.
11 changes: 5 additions & 6 deletions spytest/datastore/prompts/Prompts_Support.md
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
# Prompts Support:
We have placed a file "sonic_prompts.yaml" under "datastore/prompts" directory in spytest clone.
# Prompts Support:

We have placed a file "sonic_prompts.yaml" under "datastore/prompts" directory in spytest clone.
A small amount of help text and comments is also provided in that file.

This file contains 3 sections:
This file contains 3 sections:
#### **patterns:**
Where users will define each pattern with a unique name. Each pattern name and value should be unique.
For sonic management framework, "**--sonic-mgmt--**" is a default value used by framework,
Expand All @@ -20,7 +20,7 @@ In the above section, for some commands, we added place holders. Names for those
We have written an example script "**tests/infra_ut/test_ut_modes.py**" as part of our unit testing.
Look for functions which match "**test_mgmt_cli_mode_\***", "**test_vtysh_prompt_modes_\***", "**test_vtysh_mgmt_prompt_modes_\***" and "**test_all_modes_\***"

# Example:
# Example:

To add support for acl prompts, following is the way:

Expand All @@ -35,4 +35,3 @@ And in modes section:

To enter that mode, ACL_NAME must be provided as input, so the following needs to be added to the required args:
**`mgmt-ipv4-acl-config: ['aclname']`**

35 changes: 27 additions & 8 deletions spytest/spytest/batch.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@
from spytest.version import get_git_ver
import utilities.common as utils
import utilities.parallel as putils
from utilities.profile import cprofile_start
from utilities.profile import cprofile_stop
from utilities.profile import dbg_cache
# from utilities.tracer import Tracer


Expand All @@ -38,6 +41,7 @@ def batch_init_env(wa):
def batch_init():
# Tracer.register(trace_calls, "batch", include=os.path.abspath(__file__))
wa = SpyTestDict()
wa.start_time = get_timenow()
wa.j2dict = SpyTestDict()
wa.context = None
wa.get_gw_name = {}
Expand Down Expand Up @@ -153,15 +157,22 @@ def is_deadnode_recovery():


def ftrace(msg):
if env.match("SPYTEST_LOGS_TIME_FMT_ELAPSED", "1", "0"):
prefix = "{}: ".format(get_elapsed(wa.start_time, True))
else:
prefix = "{}: ".format(get_timestamp())
# prefix = "{} {}".format(get_worker_id(), prefix)
# prefix = "{} {}".format(os.getpid(), prefix)
if msg:
msg = utils.augment_lines(msg, prefix)
if wa.logs_path:
if not wa.trace_file:
wa.trace_file = os.path.join(wa.logs_path, "batch_debug.log")
utils.write_file(wa.trace_file, "")
if msg:
prefix = "{}: ".format(get_timestamp())
# prefix = "{} {}".format(get_worker_id(), prefix)
# prefix = "{} {}".format(os.getpid(), prefix)
utils.write_file(wa.trace_file, "{}{}\n".format(prefix, msg), "a")
utils.write_file(wa.trace_file, msg, "a", "\n")
if msg and wa.debug_level >= 100:
print(msg)


def debug(*args, **kwargs):
Expand Down Expand Up @@ -220,7 +231,7 @@ def check_worker_status(node_modules, collection):
if not worker.started:
continue
if worker.last_report:
elapsed = get_elapsed(worker.last_report, False)
elapsed = get_elapsed(worker.last_report)
else:
elapsed = 0
try:
Expand Down Expand Up @@ -1206,14 +1217,16 @@ def _update_applicable(self):


def _show_testbed_topo(show=True):
header = ["Node", "Topology"]
rows = []
header, rows = ["Node", "Topology"], []
lines = utils.banner("TOPOLOGY", func="").strip().split("\n")
for worker in wa.workers.values():
topo = worker.tb_obj.get_topo()
rows.append([worker.name, topo])
lines.append("{:5s} = {}".format(worker.name, topo))
retval = utils.sprint_vtable(header, rows)
if show:
trace(retval)
# trace(retval)
trace("\n".join(lines))
return retval


Expand Down Expand Up @@ -2305,6 +2318,7 @@ def parse_buckets(count, testbeds, buckets_csv, logs_path):

# create testbed objects for testbed files
trace("============> Parsing Testbed files")
cprofile_start()
for testbed in testbeds:
# wa.logger = wa.logger or logging.getLogger()
tb = Testbed(testbed, logger=wa.logger, flex_dut=True)
Expand All @@ -2315,6 +2329,7 @@ def parse_buckets(count, testbeds, buckets_csv, logs_path):
# trace("Testbed: {}".format(testbed))
# trace(" Devices: {}".format(tb.get_device_names("DUT")))
tb_objs.append(tb)
cprofile_stop(os.path.join(logs_path, "batch_profile_parse_testbed"))

# initialize collected lists
for key in ["testbeds", "buckets", "min_buckets", "parent_testbeds", "testbed_objs"]:
Expand All @@ -2334,6 +2349,7 @@ def parse_buckets(count, testbeds, buckets_csv, logs_path):
wa.parent_testbeds.append(parent_testbed)

trace("============> Parse Bucket Testbed files")
cprofile_start()
for testbed in wa.testbeds:
fname = os.path.basename(testbed)
tb = Testbed(testbed, logger=wa.logger, flex_dut=True)
Expand All @@ -2344,6 +2360,9 @@ def parse_buckets(count, testbeds, buckets_csv, logs_path):
msg = "Topology({}): {}"
trace(msg.format(fname, tb.get_topo()))
wa.testbed_objs.append(tb)
cprofile_stop(os.path.join(logs_path, "batch_profile_buckets"))
if wa.debug_level >= 100:
dbg_cache()

wa.workers.clear()
wa.testbed_count = len(wa.testbeds)
Expand Down
6 changes: 3 additions & 3 deletions spytest/spytest/cmdargs.py
Original file line number Diff line number Diff line change
Expand Up @@ -303,13 +303,13 @@ class HelpFormatter(argparse.HelpFormatter):
def _format_action(self, action):
if type(action).__name__ in ["ArgValidateEnv", "ArgValidateMaxTime"]:
action.nargs = 1
if type(action) == argparse._SubParsersAction:
if isinstance(action, argparse._SubParsersAction):
# inject new class variable for sub command formatting
subactions = action._get_subactions()
invocations = [self._format_action_invocation(a) for a in subactions]
self._subcommand_max_length = max(len(i) for i in invocations)

if type(action) == argparse._SubParsersAction._ChoicesPseudoAction:
if isinstance(action, argparse._SubParsersAction._ChoicesPseudoAction):
# format sub command help line
subcommand = self._format_action_invocation(action) # type: str
width = self._subcommand_max_length
Expand All @@ -318,7 +318,7 @@ def _format_action(self, action):
help_text = self._expand_help(action)
return " {:{width}} - {}\n".format(subcommand, help_text, width=width)

elif type(action) == argparse._SubParsersAction:
elif isinstance(action, argparse._SubParsersAction):
# process sub command help section
msg = '\n'
for subaction in action._get_subactions():
Expand Down
58 changes: 42 additions & 16 deletions spytest/spytest/ordyaml.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
import re
import os
import copy
import yaml

from spytest.dicts import SpyTestDict
import utilities.common as utils
from utilities.profile import get_cache, set_cache


class NoAliasDumper(yaml.SafeDumper):
Expand All @@ -25,17 +27,23 @@ def _locate(self, filename):
def _load(self, stream, file_dict=dict(), Loader=yaml.Loader,
object_pairs_hook=SpyTestDict):
def _yaml_include(loader, node):
filename = self._locate(node.value)
if not filename:
file_path = self._locate(node.value)
if not file_path:
msg = "Failed to locate included file '{}'".format(node.value)
self.errs.append(msg)
return None
file_dict[filename] = 1
with utils.open_file(filename) as inputfile:
rv = yaml.load(inputfile, Loader)
file_dict[file_path] = 1
rv = get_cache("ordyaml.include", file_path, None)
if rv:
if not self.expand_include:
return self._add_include_map(node, rv)
rv = self._add_include_map(node, rv)
return rv
text = self.read_file(file_path)
rv = yaml.load(text, Loader)
set_cache("ordyaml.include", file_path, rv)
if not self.expand_include:
rv = self._add_include_map(node, rv)
return rv

def _construct_mapping(loader, node):
loader.flatten_mapping(node)
Expand Down Expand Up @@ -106,32 +114,50 @@ def __init__(self, filename, paths=[], content=""):
else:
self.init_content(content)

def init_content(self, content):
def init_content(self, content, for_file=None):
all_files = dict()
try:
self.text0 = content
self.obj = self._load(self.text0, all_files, yaml.SafeLoader)
self.text1 = self._dump(self.obj)
rv1 = get_cache("ordyaml.init_content.load", for_file, None) if for_file else None
rv2 = get_cache("ordyaml.init_content.dump", for_file, None) if for_file else None
if None not in [rv1, rv2]:
self.obj = copy.deepcopy(rv1)
self.text1 = copy.deepcopy(rv2)
else:
self.obj = self._load(self.text0, all_files, yaml.SafeLoader)
self.text1 = self._dump(self.obj)
set_cache("ordyaml.init_content.load", for_file, self.obj)
set_cache("ordyaml.init_content.dump", for_file, self.text1)
self.valid = True
return all_files
except Exception as e:
self.errs.append(e)
raise (e)

def read_file(self, file_path):
    """Return the text of *file_path* with surrounding quotes stripped
    from ``!include <name>.yaml`` directives, or ``None`` when the file
    cannot be opened.
    """
    handle = utils.open_file(file_path)
    if not handle:
        return None
    raw = handle.read()
    # Unquote include directives so the custom "!include" YAML constructor
    # sees a bare tag instead of a plain quoted string.
    cleaned = re.sub(r"[\"|']!include (.*).yaml[\"|']", r"!include \1.yaml", raw)
    handle.close()
    return cleaned

def init_file(self, filename, paths=[]):
self._init_paths(filename, paths)
file_path = self._locate(filename)
if not file_path:
self.errs.append("File {} not found".format(filename))
return None
fh = utils.open_file(file_path)
if not fh:
self.errs.append("Failed to open {}".format(filename))
return None
try:
text0 = fh.read()
fh.close()
self.all_files = self.init_content(text0)
text = get_cache("ordyaml.init", file_path, None)
if text is None:
text = self.read_file(file_path)
if text is None:
self.errs.append("Failed to open {}".format(filename))
return None
set_cache("ordyaml.init", file_path, text)
self.all_files = self.init_content(text, file_path)
self.all_files[file_path] = 1
return file_path
except Exception as e:
Expand Down
36 changes: 23 additions & 13 deletions spytest/spytest/template.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,14 @@
vendor_dir = os.path.join(os.path.dirname(__file__), '..', "vendor")
sys.path.insert(0, os.path.abspath(vendor_dir))

import textfsm # noqa: E402
import textfsm # noqa: E402
try:
import clitable
except Exception:
from textfsm import clitable

from spytest import env # noqa: E402
import utilities.common as utils # noqa: E402
from spytest import env # noqa: E402
import utilities.common as utils # noqa: E402


class Template(object):
Expand Down Expand Up @@ -51,6 +51,14 @@ def get_tmpl(self, cmd):
return cli_table.index.index[row_idx]['Template']
return None

def get_table(self, cmd):
    """Return the first CLI table whose index matches *cmd*, else None.

    Mirrors get_tmpl's lookup: a table matches when GetRowMatch on its
    index returns a non-zero row for the given command.
    """
    lookup = dict(Command=cmd)
    for table in self.cli_tables.values():
        # GetRowMatch returns 0 when no index row matches the command.
        if table.index.GetRowMatch(lookup) != 0:
            return table
    return None

# retrieve template and sample file given the command
def read_sample(self, cmd):
tmpl_file = self.get_tmpl(cmd)
Expand Down Expand Up @@ -81,16 +89,18 @@ def apply(self, output, cmd):
attrs["Platform"] = self.platform
if self.cli:
attrs["cli"] = self.cli
try:
tmpl_file = self.get_tmpl(cmd)
if not tmpl_file:
raise ValueError('Unknown command "%s"' % (cmd))
for cli_table in self.cli_tables.values():
cli_table.ParseCmd(output, attrs)
objs = self.result(cli_table.header, cli_table)
return [tmpl_file, objs]
except clitable.CliTableError as e:
raise ValueError('Unable to parse command "%s" - %s' % (cmd, str(e)))

tmpl_file = self.get_tmpl(cmd)
if not tmpl_file:
raise ValueError('Unknown command "%s"' % (cmd))

cli_table = self.get_table(cmd)
if not cli_table:
raise ValueError('Unable to parse command "%s"' % (cmd))

cli_table.ParseCmd(output, attrs)
objs = self.result(cli_table.header, cli_table)
return [tmpl_file, objs]

def result(self, header, rows):
objs = []
Expand Down
8 changes: 7 additions & 1 deletion spytest/spytest/testbed.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from spytest.st_time import get_elapsed

import utilities.common as utils
from utilities.profile import get_cache, set_cache

testbeds_root = os.path.join(os.path.dirname(__file__), '..')
testbeds_root = os.path.join(os.path.abspath(testbeds_root), "testbeds")
Expand Down Expand Up @@ -733,10 +734,15 @@ def _load_yaml(self, filename):
errs = []
try:
user_root = env.get("SPYTEST_USER_ROOT")
if user_root:
rv = get_cache("testbed.load.yaml", filename, None)
if rv:
self.oyaml = copy.deepcopy(rv)
elif user_root:
self.oyaml = OrderedYaml(filename, [user_root, testbeds_root])
set_cache("testbed.load.yaml", filename, self.oyaml)
else:
self.oyaml = OrderedYaml(filename, [testbeds_root])
set_cache("testbed.load.yaml", filename, self.oyaml)
if not self.oyaml.is_valid():
errs = self.oyaml.get_errors()
self.logger.error(errs)
Expand Down
8 changes: 8 additions & 0 deletions spytest/spytest/tgen/tgen_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,12 @@
from spytest.tgen_api import get_chassis
from spytest.tgen_api import is_soft_tgen

# Most recent validation message recorded by _log_validation; empty until
# the first validation has run.
_latest_log_msg = ""


def get_latest_log_msg():
    """Return the last traffic-validation log message that was recorded."""
    msg = _latest_log_msg
    return msg


def _log_call(fname, **kwargs):
args_list = []
Expand Down Expand Up @@ -238,6 +244,8 @@ def _log_validation(cmsg, result, exp_val, real_rx_val, diff, strelem=None, fpel
if fpelem:
msg = msg + " filter param: {} value: {}".format(fpelem, fvelem)
st.log(msg)
global _latest_log_msg
_latest_log_msg = msg


def _verify_aggregate_stats(tr_details, **kwargs):
Expand Down
13 changes: 13 additions & 0 deletions spytest/spytest/tgen_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@ def get_counter_name(mode, tg_type, comp_type, direction):
return tgen_utils.get_counter_name(mode, tg_type, comp_type, direction)


def get_latest_log_msg():
    """Return the most recent tgen traffic-validation log message.

    Delegates to spytest.tgen.tgen_utils.get_latest_log_msg, importing it
    lazily inside the function to match the other wrappers in this module.
    """
    from spytest.tgen import tgen_utils
    return tgen_utils.get_latest_log_msg()


def validate_tgen_traffic(**kwargs):
from spytest.tgen import tgen_utils
return tgen_utils.validate_tgen_traffic(**kwargs)
Expand Down Expand Up @@ -177,6 +182,14 @@ def send_verify_traffic(tg, stream, ph_rx, ph_tx, tolerance, stop_time=5, run_ti
return None


def report_traffic_verification_fail(msgid=None, line=None):
    """Fail the current test through st.report_fail, attaching the latest
    tgen validation message and the reporting line number.

    :param msgid: report message id; defaults to "failed_traffic_verification"
    :param line: source line to tag the failure with; defaults to the
        immediate caller's line number.
    """
    from spytest import st
    from utilities.common import get_line_number
    if not msgid:
        msgid = "failed_traffic_verification"
    if not line:
        line = get_line_number(1)
    detail = "{} @{}".format(get_latest_log_msg(), line)
    st.report_fail(msgid, detail)


def get_min(v1, v2):
    """Return the smaller of *v1* and *v2* (returns v2 when they are equal)."""
    if v1 < v2:
        return v1
    return v2

Expand Down
Loading

0 comments on commit d10a36f

Please sign in to comment.