diff --git a/0.6.0/404.html b/0.6.0/404.html new file mode 100644 index 000000000..7f626e3ed --- /dev/null +++ b/0.6.0/404.html @@ -0,0 +1,1391 @@ + + + + + + + + + + + + + + + + + + + + + + Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.6.0/advanced_usages/as-python-lib/index.html b/0.6.0/advanced_usages/as-python-lib/index.html new file mode 100644 index 000000000..b3301c0c2 --- /dev/null +++ b/0.6.0/advanced_usages/as-python-lib/index.html @@ -0,0 +1,1888 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + ANTA as a Python Library - Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

ANTA as a Python Library

+ +

ANTA is a Python library that can be used in user applications. This section describes how you can leverage ANTA Python modules to help you create your own NRFU solution.

+
+

Tip

+

If you are unfamiliar with asyncio, refer to the Python documentation relevant to your Python version - https://docs.python.org/3/library/asyncio.html

+
+

AntaDevice Abstract Class

+

A device is represented in ANTA as an instance of a subclass of the AntaDevice abstract class. +There are a few abstract methods that need to be implemented by child classes:

+
    +
  • The collect() coroutine is in charge of collecting outputs of AntaCommand instances.
  • +
  • The refresh() coroutine is in charge of updating attributes of the AntaDevice instance. These attributes are used by AntaInventory to filter out unreachable devices or by AntaTest to skip devices based on their hardware models.
  • +
+

The copy() coroutine is used to copy files to and from the device. It does not need to be implemented if tests are not using it.

+
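For illustration, below is a minimal sketch of a custom AntaDevice subclass; the transport call in collect() is a placeholder, only the overridden coroutines and the attributes they populate come from the abstract class described above.
from anta.device import AntaDevice
+from anta.models import AntaCommand
+
+
+class MyDevice(AntaDevice):
+    """Sketch of an AntaDevice implementation over an imaginary transport"""
+
+    async def collect(self, command: AntaCommand) -> None:
+        # Populate the `output` attribute of the AntaCommand instance.
+        # `my_transport_run` is a placeholder for your own collection logic.
+        command.output = await my_transport_run(command.command)
+
+    async def refresh(self) -> None:
+        # Update the attributes used by AntaInventory and AntaTest
+        self.is_online = True      # the device IP is reachable and a port can be open
+        self.established = True    # remote command execution succeeds
+        self.hw_model = "unknown"  # hardware model of the device
+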
AsyncEOSDevice Class
+

The AsyncEOSDevice class is an implementation of AntaDevice for Arista EOS. +It uses the aio-eapi eAPI client and the AsyncSSH library.

+
    +
  • The collect() coroutine collects AntaCommand outputs using eAPI.
  • +
The refresh() coroutine tries to open a TCP connection on the eAPI port and updates the is_online attribute accordingly. If the TCP connection succeeds, it sends a show version command to gather the hardware model of the device and updates the established and hw_model attributes.
  • +
  • The copy() coroutine copies files to and from the device using the SCP protocol.
  • +
+
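As a reference, here is a minimal sketch instantiating an AsyncEOSDevice and refreshing its status; the host, credentials and tags are placeholder values.
import asyncio
+
+from anta.device import AsyncEOSDevice
+
+device = AsyncEOSDevice(
+    host="10.0.0.1",
+    username="arista",
+    password="@rista123",
+    name="leaf01",
+    tags=["leaf"],
+)
+asyncio.run(device.refresh())
+print(device.is_online, device.established, device.hw_model)
+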

AntaInventory Class

+

The AntaInventory class is a subclass of the standard Python type dict. The keys of this dictionary are the device names, the values are AntaDevice instances.

+
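Because AntaInventory is a dict, the usual dictionary operations apply; a short sketch, assuming a populated inventory keyed by device names:
device = inventory["leaf01"]  # lookup by device name
+for name, dev in inventory.items():
+    print(name, dev.tags)
+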

AntaInventory provides methods to interact with the ANTA inventory:

+ +

To parse a YAML inventory file and print the devices connection status:

+
"""
+Example
+"""
+import asyncio
+
+from anta.inventory import AntaInventory
+
+
+async def main(inv: AntaInventory) -> None:
+    """
+    Take an AntaInventory and:
+    1. try to connect to every device in the inventory
+    2. print a message for every device connection status
+    """
+    await inv.connect_inventory()
+
+    for device in inv.values():
+        if device.established:
+            print(f"Device {device.name} is online")
+        else:
+            print(f"Could not connect to device {device.name}")
+
+if __name__ == "__main__":
+    # Create the AntaInventory instance
+    inventory = AntaInventory.parse(
+        inventory_file="inv.yml",
+        username="arista",
+        password="@rista123",
+        timeout=15,
+    )
+
+    # Run the main coroutine
+    res = asyncio.run(main(inventory))
+
+
+How to create your inventory file +

Please visit this dedicated section for how to use inventory and catalog files.

+
+

To run a list of EOS commands on the reachable devices from the inventory: +

"""
+Example
+"""
+# This is needed to run the script for python < 3.10 for typing annotations
+from __future__ import annotations
+
+import asyncio
+from pprint import pprint
+
+from anta.inventory import AntaInventory
+from anta.models import AntaCommand
+
+
+async def main(inv: AntaInventory, commands: list[str]) -> dict[str, list[AntaCommand]]:
+    """
+    Take an AntaInventory and a list of commands as strings and:
+    1. try to connect to every device in the inventory
+    2. collect the results of the commands from each device
+
+    Returns:
+      a dictionary where the key is the device name and the value is the list of AntaCommand run towards the device
+    """
+    await inv.connect_inventory()
+
+    # Make a list of coroutines to run commands towards each connected device
+    coros = []
+    # dict to keep track of the commands per device
+    result_dict = {}
+    for name, device in inv.get_inventory(established_only=True).items():
+        anta_commands = [AntaCommand(command=command, ofmt="json") for command in commands]
+        result_dict[name] = anta_commands
+        coros.append(device.collect_commands(anta_commands))
+
+    # Run the coroutines
+    await asyncio.gather(*coros)
+
+    return result_dict
+
+
+if __name__ == "__main__":
+    # Create the AntaInventory instance
+    inventory = AntaInventory.parse(
+        inventory_file="inv.yml",
+        username="arista",
+        password="@rista123",
+        timeout=15,
+    )
+
+    # Create a list of commands with json output
+    commands = ["show version", "show ip bgp summary"]
+
+    # Run the main asyncio entry point
+    res = asyncio.run(main(inventory, commands))
+
+    pprint(res)
+

+

Use tests from ANTA

+

All the test classes inherit from the same abstract base class AntaTest. The class definition indicates which commands are required for the test, and the user should focus only on writing the test function with optional keyword arguments. Upon creation, an instance of the class instantiates a TestResult object that can be accessed later on to check the status of the test ([unset, skipped, success, failure, error]).

+
Test structure
+

All tests are built on a class named AntaTest which provides a complete toolset for a test:

+
    +
  • Object creation
  • +
  • Test definition
  • +
  • TestResult definition
  • +
  • Abstracted method to collect data
  • +
+

This approach means that each time you create a test, it will be based on this AntaTest class. Besides that, you will have to provide some elements:

+
    +
  • name: Name of the test
  • +
• description: A human-readable description of your test
  • +
• categories: a list of categories used to sort the test.
  • +
• commands: a list of commands to run. This list must be a list of AntaCommand objects, which are described in the next part of this document.
  • +
+

Here is an example of a hardware test related to device temperature:

+
from __future__ import annotations
+
+import logging
+from typing import Any, Dict, List, Optional, cast
+
+from anta.models import AntaTest, AntaCommand
+
+
+class VerifyTemperature(AntaTest):
+    """
+    Verifies device temperature is currently OK.
+    """
+
+    # The test name
+    name = "VerifyTemperature"
+    # A small description of the test, usually the first line of the class docstring
+    description = "Verifies device temparture is currently OK"
+    # The category of the test, usually the module name
+    categories = ["hardware"]
+    # The command(s) used for the test. Could be a template instead
+    commands = [AntaCommand(command="show system environment temperature", ofmt="json")]
+
+    # Decorator
+    @AntaTest.anta_test
+    # abstract method that must be defined by the child Test class
+    def test(self) -> None:
+        """Run VerifyTemperature validation"""
+        command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)
+        temperature_status = command_output["systemStatus"] if "systemStatus" in command_output.keys() else ""
+        if temperature_status == "temperatureOk":
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Device temperature is not OK, systemStatus: {temperature_status }")
+
+

When you run the test, the object will automatically call its anta.models.AntaTest.collect() method to get the device output for each command if no pre-collected data was given to the test. This method loops over the commands and calls the anta.inventory.models.InventoryDevice.collect() method, which is in charge of managing the device connection and retrieving the data.

+
+run test offline +

You can also pass EOS data directly to your test if you want to validate data collected in a different workflow. An example is provided below just for information:

+
test = VerifyTemperature(mocked_device, eos_data=test_data["eos_data"])
+asyncio.run(test.test())
+
+
+

The test function is always the same and must be defined with the @AntaTest.anta_test decorator. This function takes at least one argument which is an anta.inventory.models.InventoryDevice object. +In some cases a test relies on additional inputs from the user, for instance the number of expected peers. All parameters must come with a default value and the test function should validate the parameter values (at this stage this is the only place where validation can be done, but there are future plans to make this better).

+
class VerifyTemperature(AntaTest):
+    ...
+    @AntaTest.anta_test
+    def test(self) -> None:
+        pass
+
+class VerifyTransceiversManufacturers(AntaTest):
+    ...
+    @AntaTest.anta_test
+    def test(self, manufacturers: Optional[List[str]] = None) -> None:
+        # validate the manufacturers parameter
+        pass
+
+

The test itself does not return any value, but the result is directly available from your AntaTest object and exposes an anta.result_manager.models.TestResult object with the result, the name of the test and optional messages:

+
    +
  • name (str): Device name where the test has run.
  • +
• test (str): Name of the test run on the device.
  • +
  • test_category (List[str]): List of test categories the test belongs to.
  • +
  • test_description (str): Test description.
  • +
• result (str): Result of the test. Can be one of [“unset”, “success”, “failure”, “error”, “skipped”].
  • +
  • messages (List[str], optional): Messages to report after the test if any.
  • +
+
from anta.tests.hardware import VerifyTemperature
+
+test = VerifyTemperature(mocked_device, eos_data=test_data["eos_data"])
+asyncio.run(test.test())
+assert test.result.result == "success"
+
+
Commands for test
+

To make it easier to get data, ANTA defines 2 different classes to manage the commands to send to devices:

+
anta.models.AntaCommand
+

Abstracts a command with the following information:

+
    +
• Command to run
  • +
• Output format expected
  • +
  • eAPI version
  • +
  • Output of the command
  • +
+

Usage example:

+
from anta.models import AntaCommand
+
+cmd1 = AntaCommand(command="show zerotouch")
+cmd2 = AntaCommand(command="show running-config diffs", ofmt="text")
+
+
+
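Once an AntaDevice has collected a command (see the collect() coroutine described earlier), the result is available in its output attribute; a short sketch, assuming an established AntaDevice instance named device:
import asyncio
+
+asyncio.run(device.collect(cmd1))
+print(cmd1.output)  # collected output of "show zerotouch"
+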

Command revision and version

+
    +
• Most EOS commands return a JSON structure according to a model (some commands may not be modeled, hence the need to sometimes use the text output format).
  • +
• The model can change over time (adding features, …) and when the model is changed in a non-backward-compatible way, the revision number is bumped. The initial model starts with revision 1.
  • +
• A revision applies to a particular CLI command whereas a version is global to an eAPI call. The version is internally translated to a specific revision for each CLI command in the RPC call. The currently supported version values are 1 and latest.
  • +
  • A revision takes precedence over a version (e.g. if a command is run with version=”latest” and revision=1, the first revision of the model is returned)
  • +
• By default eAPI returns the first revision of each model to ensure that, when upgrading, integration with existing tools is not broken. This is done by using version=1 by default in eAPI calls.
  • +
+

ANTA uses by default version="latest" in AntaCommand. For some commands, you may want to run them with a different revision or version.

+

For instance the VerifyRoutingTableSize test leverages the first revision of show bfd peers:

+
# revision 1 as later revision introduce additional nesting for type
+commands = [AntaCommand(command="show bfd peers", revision=1)]
+
+
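Similarly, a command can be pinned to a global eAPI version instead of a per-command revision; a small sketch using the version values listed above:
# version=1 asks eAPI for the first revision of every model in the call
+commands = [AntaCommand(command="show version", version=1)]
+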
+
anta.models.AntaTemplate
+

Because some commands require more dynamic input than a static command with no parameters, ANTA supports command templates: you define a template in your test class and the user provides the parameters when creating the test object.

+
+

Warning on AntaTemplate

+
    +
• In its current version, an AntaTest class supports only ONE AntaTemplate.
  • +
• The current interface to pass template parameters to a template is an area of future improvement. Feedback is welcome.
  • +
+
+
class RunArbitraryTemplateCommand(AntaTest):
+    """
+    Run an EOS command and return result
+    Based on AntaTest to build relevant output for pytest
+    """
+
+    name = "Run aributrary EOS command"
+    description = "To be used only with anta debug commands"
+    template = AntaTemplate(template="show interfaces {ifd}")
+    categories = ["debug"]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        # The command outputs rendered from the template are stored in self.instance_commands
+        response = self.instance_commands[0].output
+        errdisabled_interfaces = [interface for interface, value in response["interfaceStatuses"].items() if value["linkStatus"] == "errdisabled"]
+        ...
+
+
+params = [{"ifd": "Ethernet2"}, {"ifd": "Ethernet49/1"}]
+run_command1 = RunArbitraryTemplateCommand(device_anta, params)
+
+

In this example, the test takes the interfaces to check from the user setup and will only check the interfaces given in params.

+ +
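For reference, rendering a parameter dict against the template is what the framework does internally to build the instance commands; a minimal sketch:
from anta.models import AntaTemplate
+
+tpl = AntaTemplate(template="show interfaces {ifd}")
+cmd = tpl.render({"ifd": "Ethernet2"})  # returns an AntaCommand for "show interfaces Ethernet2"
+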
+
+ + + Last update: + July 19, 2023 + + + +
+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.6.0/advanced_usages/custom-tests/index.html b/0.6.0/advanced_usages/custom-tests/index.html new file mode 100644 index 000000000..e9c2905ce --- /dev/null +++ b/0.6.0/advanced_usages/custom-tests/index.html @@ -0,0 +1,1877 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Create your own Library - Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Create your own Library

+ +

Create your own custom tests

+
+

This documentation applies both to creating tests in the ANTA package and in your own custom package.

+
+

ANTA is not only a CLI with a collection of built-in tests, it is also a framework you can extend by building your own tests library.

+

For that, you need to create your own Python package as described in this hitchhiker’s guide to packaging Python code. We assume it is well known and we won’t focus on this aspect. Thus, your package must be importable by ANTA, hence available in $PYTHONPATH by any method.

+

Generic approach

+

ANTA comes with a class to use to build tests. This class provides all the toolset required to define, collect and test data. The code below is an example of how to use ANTA to build a test:

+
from __future__ import annotations
+
+import logging
+from typing import Any, Dict, List, Optional, cast
+
+from anta.models import AntaTest, AntaCommand
+
+
+class VerifyTemperature(AntaTest):
+    """
+    Verifies device temperature is currently OK.
+    """
+
+    name = "VerifyTemperature"
+    description = "Verifies device temparture is currently OK"
+    categories = ["hardware"]
+    commands = [AntaCommand(command="show system environment temperature", ofmt="json")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyTemperature validation"""
+        command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)
+        temperature_status = command_output["systemStatus"] if "systemStatus" in command_output.keys() else ""
+        if temperature_status == "temperatureOk":
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Device temperature is not OK, systemStatus: {temperature_status }")
+
+

Python imports

+
Mandatory imports
+

The following elements have to be imported:

+
    +
• InventoryDevice: Where the eAPI session lives. It is used to send the commands defined in your test over HTTP/HTTPS.
  • +
  • anta.models.AntaTest: class that gives you all the tooling for your test
  • +
  • anta.models.AntaCommand: A class to abstract an Arista EOS command
  • +
+
from anta.models import AntaTest, AntaCommand
+
+
+class VerifyTemperature(AntaTest):
+    """
+    Verifies device temparture is currently OK.
+    """
+    ...
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        pass
+
+
Optional ANTA imports
+

Besides these 3 main imports, ANTA provides some additional and optional decorators:

+
    +
• anta.decorators.skip_on_platforms: To skip a test on platforms where the tested feature is not available
  • +
• anta.decorators.check_bgp_family_enable: To run the test only if a specific BGP address family is active.
  • +
+
from anta.decorators import skip_on_platforms
+
+
+class VerifyTransceiversManufacturers(AntaTest):
+    ...
+    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
+    @AntaTest.anta_test
+    def test(self, manufacturers: Optional[List[str]] = None) -> None:
+        pass
+
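A similar sketch for check_bgp_family_enable; the "ipv4" family string is an assumption here, check the decorator documentation for the supported values:
from anta.decorators import check_bgp_family_enable
+
+
+class VerifyBGPPeers(AntaTest):
+    ...
+    @check_bgp_family_enable("ipv4")  # assumed family value
+    @AntaTest.anta_test
+    def test(self) -> None:
+        pass
+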
+
Optional python imports
+

And finally, you are free to import any other python library you may want to use in your package.

+
+

logging function

+

It is strongly recommended to import logging to help the development process and to be able to log outputs useful for test development.

+
+

If your test development is part of a pull request for ANTA, it is strongly advised to also import typing since our code testing requires compatibility with Mypy.

+

Code for a test

+

A test is a Python class where a test function is defined and will be run by the framework. So first you need to declare your class and then define your test function.

+
Create Test Class
+

To create the class, you have to provide 4 elements:

+

Metadata information

+
    +
  • name: Name of the test
  • +
• description: A human-readable description of your test
  • +
• categories: a list of categories used to sort the test.
  • +
+

Commands to run

+
    +
• commands: a list of commands to run. This list must be a list of AntaCommand objects, which are described in the next part of this document.
  • +
  • template: a command template (AntaTemplate) to run where variables are provided during test execution.
  • +
+
from __future__ import annotations
+
+import logging
+from typing import Any, Dict, List, Optional, cast
+
+from anta.models import AntaTest, AntaCommand
+
+
+class <YourTestName>(AntaTest):
+    """
+    <a docstring description of your test>
+    """
+
+    name = "YourTestName"                                           # should be your class name
+    description = "<test description in human reading format>"
+    categories = ["<a list of arbitrary categories>"]
+    commands = [
+        AntaCommand(
+            command="<eos command to run>",
+            ofmt="<command format output>",
+            version="<eapi version to use>",
+            revision="<revision to use for the command>",           # revision has precedence over version
+        )
+    ]
+
+

This class will inherit methods from AntaTest and specifically the __init__(self,...) method to build your object. This function takes the following arguments when you instantiate an object:

+
    +
• device (InventoryDevice): Device object where the test happens.
  • +
• template_params: If a template is used in the test definition, then we provide the data to build the list of commands.
  • +
• eos_data: Potential EOS data to pass if we don’t want to connect to the device to grab the data.
  • +
• labels: a list of labels. It is not used yet and is reserved for future use.
  • +
+
Function definition
+

The code here can be very simple as well as very complex and will depend on what you expect to do. But in all situations, the same baseline can be leveraged:

+
class <YourTestName>(AntaTest):
+    ...
+    @AntaTest.anta_test
+    def test(self) -> None:
+        pass
+
+

If you want to support options in your test, just declare them in your test method:

+
class <YourTestName>(AntaTest):
+    ...
+    @AntaTest.anta_test
+    def test(self, my_param1: Optional[str] = None) -> None:
+        pass
+
+

The options must be optional keyword arguments.

+
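When running such a test, the options are passed as keyword arguments to the test coroutine; a sketch, with a hypothetical test class name:
test = VerifyDynamicVlan(device)            # hypothetical test class
+result = asyncio.run(test.test(minimum=2))  # options passed as keyword arguments
+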
Check inputs
+

If your test has some user inputs, you first have to validate that the supplied values are valid. If they are not, we expect TestResult to return skipped with a custom message.

+
class <YourTestName>(AntaTest):
+    ...
+    @AntaTest.anta_test
+    def test(self, minimum: Optional[int] = None) -> None:
+        # Check if test option is correct
+        if not minimum:
+            self.result.is_skipped("verify_dynamic_vlan was run without minimum value set")
+            return
+        # continue test..
+        ...
+
+
Implement your logic
+

Here you implement your own logic. In general, the first action is to send commands to the devices and capture their responses.

+

In the example below, we request the list of VLANs configured on the device and then count all the VLANs marked as dynamic.

+
class <YourTestName>(AntaTest):
+    ...
+    @AntaTest.anta_test
+    def test(self, minimum: Optional[int] = None) -> None:
+        # Check if test option is correct
+        if not minimum:
+            self.result.is_skipped("verify_dynamic_vlan was run without minimum value set")
+            return
+
+        # Grab data for your command
+        command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)
+
+        # Do your test: In this example we count number of vlans with field dynamic set to true
+        num_dyn_vlan = len([ vlan for vlan,data in command_output['vlans'].items() if command_output['dynamic'] is True])
+        if num_dyn_vlan >= minimum:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Device has {num_dyn_vlan} configured, we expect at least {minimum}")
+
+

As you can see there is no error management to do in your code. Everything is handled by the anta_test decorator, and below is a simple example of an error captured with an incorrect JSON key in the code above:

+
ERROR    Exception raised for test verify_dynamic_vlan (on device 192.168.0.10) - KeyError ('vlans')
+
+
+

Get stack trace for debugging

+

If you want to access the full exception stack trace, you can run your test with the logging level set to DEBUG. With the ANTA CLI, it is available with the following option: +

$ ANTA_DEBUG=True anta nrfu text --catalog test_custom.yml --log-level debug
+

+
+

Create your catalog

+
+

This section is required only if you are not merging your development into ANTA. Otherwise, just follow the contribution guide.

+
+

It is very similar to what is documented in the catalog section, but you have to use your own package name.

+

Let’s say the custom package is anta_titom73 and the test is configured in anta_titom73.dc_project; the test catalog would look like:

+

anta_titom73.dc_project:
+  - VerifyFeatureX:
+      minimum: 1
+
+And now you can run your NRFU tests with the CLI:

+
anta nrfu text --catalog test_custom.yml
+spine01 :: verify_dynamic_vlan :: FAILURE (Device has 0 configured, we expect at least 1)
+spine02 :: verify_dynamic_vlan :: FAILURE (Device has 0 configured, we expect at least 1)
+leaf01 :: verify_dynamic_vlan :: SUCCESS
+leaf02 :: verify_dynamic_vlan :: SUCCESS
+leaf03 :: verify_dynamic_vlan :: SUCCESS
+leaf04 :: verify_dynamic_vlan :: SUCCESS
+
+
+

Install your python package

+

ANTA uses the Python path to access your tests. So it is critical to have your test library installed correctly, as explained at the beginning of this page (in short, your module should be in your PYTHONPATH to be able to be loaded).

+
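One common way to achieve this, assuming your package has a standard setup.py or pyproject.toml, is an editable install from the package root; exporting PYTHONPATH is an alternative:
pip install -e .
+# or, without installing the package:
+export PYTHONPATH="${PYTHONPATH}:/path/to/your/package"
+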
+ +
+
+ + + Last update: + July 19, 2023 + + + +
+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.6.0/api/device/index.html b/0.6.0/api/device/index.html new file mode 100644 index 000000000..e09816d16 --- /dev/null +++ b/0.6.0/api/device/index.html @@ -0,0 +1,2934 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Device models - Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Device models

+ +
+ + + +

+ AntaDevice + + +

+
AntaDevice(name: str, tags: Optional[List[str]] = None)
+
+ +
+

+ Bases: ABC

+ + +

Abstract class representing a device in ANTA. +An implementation of this class must override the abstract coroutines collect() and +refresh().

+ + + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
name + str + +
+

Device name

+
+
is_online + bool + +
+

True if the device IP is reachable and a port can be open

+
+
established + bool + +
+

True if remote command execution succeeds

+
+
hw_model + Optional[str] + +
+

Hardware model of the device

+
+
tags + List[str] + +
+

List of tags for this device

+
+
+ + + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
name + str + +
+

Device name

+
+
+ required +
tags + Optional[List[str]] + +
+

List of tags for this device

+
+
+ None +
+ +
+ Source code in anta/device.py +
71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
def __init__(self, name: str, tags: Optional[List[str]] = None) -> None:
+    """
+    Constructor of AntaDevice
+
+    Args:
+        name: Device name
+        tags: List of tags for this device
+    """
+    self.name: str = name
+    self.hw_model: Optional[str] = None
+    self.tags: List[str] = tags if tags is not None else []
+    self.is_online: bool = False
+    self.established: bool = False
+
+    # Ensure tag 'all' is always set
+    if DEFAULT_TAG not in self.tags:
+        self.tags.append(DEFAULT_TAG)
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ collect + + + + abstractmethod + async + + +

+
collect(command: AntaCommand) -> None
+
+ +
+ +

Collect device command output. +This abstract coroutine can be used to implement any command collection method +for a device in ANTA.

+

The collect() implementation needs to populate the output attribute +of the AntaCommand object passed as argument.

+

If a failure occurs, the collect() implementation is expected to catch the +exception and implement proper logging, the output attribute of the +AntaCommand object passed as argument would be None in this case.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
command + AntaCommand + +
+

the command to collect

+
+
+ required +
+ +
+ Source code in anta/device.py +
106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
@abstractmethod
+async def collect(self, command: AntaCommand) -> None:
+    """
+    Collect device command output.
+    This abstract coroutine can be used to implement any command collection method
+    for a device in ANTA.
+
+    The `collect()` implementation needs to populate the `output` attribute
+    of the `AntaCommand` object passed as argument.
+
+    If a failure occurs, the `collect()` implementation is expected to catch the
+    exception and implement proper logging, the `output` attribute of the
+    `AntaCommand` object passed as argument would be `None` in this case.
+
+    Args:
+        command: the command to collect
+    """
+
+
+
+ +
+ + +
+ + + +

+ collect_commands + + + + async + + +

+
collect_commands(commands: List[AntaCommand]) -> None
+
+ +
+ +

Collect multiple commands.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
commands + List[AntaCommand] + +
+

the commands to collect

+
+
+ required +
+ +
+ Source code in anta/device.py +
124
+125
+126
+127
+128
+129
+130
+131
async def collect_commands(self, commands: List[AntaCommand]) -> None:
+    """
+    Collect multiple commands.
+
+    Args:
+        commands: the commands to collect
+    """
+    await asyncio.gather(*(self.collect(command=command) for command in commands))
+
+
+
+ +
+ + +
+ + + +

+ copy + + + + async + + +

+
copy(
+    sources: List[Path],
+    destination: Path,
+    direction: Literal["to", "from"] = "from",
+) -> None
+
+ +
+ +

Copy files to and from the device, usually through SCP. +It is not mandatory to implement this for a valid AntaDevice subclass.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
sources + List[Path] + +
+

List of files to copy to or from the device.

+
+
+ required +
destination + Path + +
+

Local or remote destination when copying the files. Can be a folder.

+
+
+ required +
direction + Literal['to', 'from'] + +
+

Defines if this coroutine copies files to or from the device.

+
+
+ 'from' +
+ +
+ Source code in anta/device.py +
144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
async def copy(self, sources: List[Path], destination: Path, direction: Literal["to", "from"] = "from") -> None:
+    """
+    Copy files to and from the device, usually through SCP.
+    It is not mandatory to implement this for a valid AntaDevice subclass.
+
+    Args:
+        sources: List of files to copy to or from the device.
+        destination: Local or remote destination when copying the files. Can be a folder.
+        direction: Defines if this coroutine copies files to or from the device.
+    """
+    raise NotImplementedError(f"copy() method has not been implemented in {self.__class__.__name__} definition")
+
+
+
+ +
+ + +
+ + + +

+ refresh + + + + abstractmethod + async + + +

+
refresh() -> None
+
+ +
+ +

Update attributes of an AntaDevice instance.

+ +
+ This coroutine must update the following attributes of AntaDevice +
    +
  • is_online: When the device IP is reachable and a port can be open
  • +
  • established: When a command execution succeeds
  • +
  • hw_model: The hardware model of the device
  • +
+
+
+ Source code in anta/device.py +
133
+134
+135
+136
+137
+138
+139
+140
+141
+142
@abstractmethod
+async def refresh(self) -> None:
+    """
+    Update attributes of an AntaDevice instance.
+
+    This coroutine must update the following attributes of AntaDevice:
+        - `is_online`: When the device IP is reachable and a port can be open
+        - `established`: When a command execution succeeds
+        - `hw_model`: The hardware model of the device
+    """
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ AsyncEOSDevice + + +

+
AsyncEOSDevice(
+    host: str,
+    username: str,
+    password: str,
+    name: Optional[str] = None,
+    enable_password: Optional[str] = None,
+    port: Optional[int] = None,
+    ssh_port: Optional[int] = 22,
+    tags: Optional[List[str]] = None,
+    timeout: Optional[float] = None,
+    insecure: bool = False,
+    proto: Literal["http", "https"] = "https",
+)
+
+ +
+

+ Bases: AntaDevice

+ + +

Implementation of AntaDevice for EOS using aio-eapi.

+ + + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
name + +
+

Device name

+
+
is_online + +
+

True if the device IP is reachable and a port can be open

+
+
established + +
+

True if remote command execution succeeds

+
+
hw_model + +
+

Hardware model of the device

+
+
tags + +
+

List of tags for this device

+
+
+ + + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
host + str + +
+

Device FQDN or IP

+
+
+ required +
username + str + +
+

Username to connect to eAPI and SSH

+
+
+ required +
password + str + +
+

Password to connect to eAPI and SSH

+
+
+ required +
name + Optional[str] + +
+

Device name

+
+
+ None +
enable_password + Optional[str] + +
+

Password used to gain privileged access on EOS

+
+
+ None +
port + Optional[int] + +
+

eAPI port. Defaults to 80 if proto is ‘http’ or 443 if proto is ‘https’.

+
+
+ None +
ssh_port + Optional[int] + +
+

SSH port

+
+
+ 22 +
tags + Optional[List[str]] + +
+

List of tags for this device

+
+
+ None +
timeout + Optional[float] + +
+

Timeout value in seconds for outgoing connections. Defaults to 10 seconds.

+
+
+ None +
insecure + bool + +
+

Disable SSH Host Key validation

+
+
+ False +
proto + Literal['http', 'https'] + +
+

eAPI protocol. Value can be ‘http’ or ‘https’

+
+
+ 'https' +
+ +
+ Source code in anta/device.py +
169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
def __init__(  # pylint: disable=R0913
+    self,
+    host: str,
+    username: str,
+    password: str,
+    name: Optional[str] = None,
+    enable_password: Optional[str] = None,
+    port: Optional[int] = None,
+    ssh_port: Optional[int] = 22,
+    tags: Optional[List[str]] = None,
+    timeout: Optional[float] = None,
+    insecure: bool = False,
+    proto: Literal["http", "https"] = "https",
+) -> None:
+    """
+    Constructor of AsyncEOSDevice
+
+    Args:
+        host: Device FQDN or IP
+        username: Username to connect to eAPI and SSH
+        password: Password to connect to eAPI and SSH
+        name: Device name
+        enable_password: Password used to gain privileged access on EOS
+        port: eAPI port. Defaults to 80 is proto is 'http' or 443 if proto is 'https'.
+        ssh_port: SSH port
+        tags: List of tags for this device
+        timeout: Timeout value in seconds for outgoing connections. Default to 10 secs.
+        insecure: Disable SSH Host Key validation
+        proto: eAPI protocol. Value can be 'http' or 'https'
+    """
+    if name is None:
+        name = f"{host}:{port}"
+    super().__init__(name, tags)
+    self._enable_password = enable_password
+    self._session: Device = Device(host=host, port=port, username=username, password=password, proto=proto, timeout=timeout)
+    ssh_params: Dict[str, Any] = {}
+    if insecure:
+        ssh_params.update({"known_hosts": None})
+    self._ssh_opts: SSHClientConnectionOptions = SSHClientConnectionOptions(host=host, port=ssh_port, username=username, password=password, **ssh_params)
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ collect + + + + async + + +

+
collect(command: AntaCommand) -> None
+
+ +
+ +

Collect device command output from EOS using aio-eapi.

+

Supports outformat json and text as output structure. +Gain privileged access using the enable_password attribute +of the AntaDevice instance if populated.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
command + AntaCommand + +
+

the command to collect

+
+
+ required +
+ +
+ Source code in anta/device.py +
234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
async def collect(self, command: AntaCommand) -> None:
+    """
+    Collect device command output from EOS using aio-eapi.
+
+    Supports outformat `json` and `text` as output structure.
+    Gain privileged access using the `enable_password` attribute
+    of the `AntaDevice` instance if populated.
+
+    Args:
+        command: the command to collect
+    """
+    try:
+        commands = []
+        if self._enable_password is not None:
+            commands.append(
+                {
+                    "cmd": "enable",
+                    "input": str(self._enable_password),
+                }
+            )
+        else:
+            commands.append({"cmd": "enable"})
+        if command.revision:
+            commands.append({"cmd": command.command, "revision": command.revision})
+        else:
+            commands.append({"cmd": command.command})
+        response = await self._session.cli(
+            commands=commands,
+            ofmt=command.ofmt,
+            version=command.version,
+        )
+        # remove first dict related to enable command
+        # only applicable to json output
+        if command.ofmt in ["json", "text"]:
+            # selecting only our command output
+            response = response[1]
+        command.output = response
+        logger.debug(f"{self.name}: {command}")
+
+    except EapiCommandError as e:
+        message = f"Command '{command.command}' failed on {self.name}"
+        anta_log_exception(e, message, logger)
+        command.failed = e
+    except (HTTPError, ConnectError) as e:
+        message = f"Cannot connect to device {self.name}"
+        anta_log_exception(e, message, logger)
+        command.failed = e
+    except Exception as e:  # pylint: disable=broad-exception-caught
+        message = f"Exception raised while collecting command '{command.command}' on device {self.name}"
+        anta_log_exception(e, message, logger)
+        command.failed = e
+        logger.debug(command)
+
+
+
+ +
+ + +
+ + + +

+ copy + + + + async + + +

+
copy(
+    sources: List[Path],
+    destination: Path,
+    direction: Literal["to", "from"] = "from",
+) -> None
+
+ +
+ +

Copy files to and from the device using asyncssh.scp().

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
sources + List[Path] + +
+

List of files to copy to or from the device.

+
+
+ required +
destination + Path + +
+

Local or remote destination when copying the files. Can be a folder.

+
+
+ required +
direction + Literal['to', 'from'] + +
+

Defines if this coroutine copies files to or from the device.

+
+
+ 'from' +
+ +
+ Source code in anta/device.py +
318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
async def copy(self, sources: List[Path], destination: Path, direction: Literal["to", "from"] = "from") -> None:
+    """
+    Copy files to and from the device using asyncssh.scp().
+
+    Args:
+        sources: List of files to copy to or from the device.
+        destination: Local or remote destination when copying the files. Can be a folder.
+        direction: Defines if this coroutine copies files to or from the device.
+    """
+    async with asyncssh.connect(
+        host=self._ssh_opts.host,
+        port=self._ssh_opts.port,
+        tunnel=self._ssh_opts.tunnel,
+        family=self._ssh_opts.family,
+        local_addr=self._ssh_opts.local_addr,
+        options=self._ssh_opts,
+    ) as conn:
+        src: Union[List[Tuple[SSHClientConnection, Path]], List[Path]]
+        dst: Union[Tuple[SSHClientConnection, Path], Path]
+        if direction == "from":
+            src = [(conn, file) for file in sources]
+            dst = destination
+            for file in sources:
+                logger.info(f"Copying '{file}' from device {self.name} to '{destination}' locally")
+        elif direction == "to":
+            src = sources
+            dst = (conn, destination)
+            for file in sources:
+                logger.info(f"Copying '{file}' to device {self.name} to '{destination}' remotely")
+        else:
+            logger.critical(f"'direction' argument to copy() fonction is invalid: {direction}")
+            return
+        await asyncssh.scp(src, dst)
+
+
+
+ +
+ + +
+ + + +

+ refresh + + + + async + + +

+
refresh() -> None
+
+ +
+ +

Update attributes of an AsyncEOSDevice instance.

+

This coroutine must update the following attributes of AsyncEOSDevice: +- is_online: When a device IP is reachable and a port can be open +- established: When a command execution succeeds +- hw_model: The hardware model of the device

+ +
+ Source code in anta/device.py +
287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
async def refresh(self) -> None:
+    """
+    Update attributes of an AsyncEOSDevice instance.
+
+    This coroutine must update the following attributes of AsyncEOSDevice:
+    - is_online: When a device IP is reachable and a port can be open
+    - established: When a command execution succeeds
+    - hw_model: The hardware model of the device
+    """
+    # Refresh command
+    COMMAND: str = "show version"
+    # Hardware model definition in show version
+    HW_MODEL_KEY: str = "modelName"
+    logger.debug(f"Refreshing device {self.name}")
+    self.is_online = await self._session.check_connection()
+    if self.is_online:
+        try:
+            response = await self._session.cli(command=COMMAND)
+        except EapiCommandError as e:
+            logger.warning(f"Cannot get hardware information from device {self.name}: {e.errmsg}")
+        except (HTTPError, ConnectError) as e:
+            logger.warning(f"Cannot get hardware information from device {self.name}: {exc_to_str(e)}")
+        else:
+            if HW_MODEL_KEY in response:
+                self.hw_model = response[HW_MODEL_KEY]
+            else:
+                logger.warning(f"Cannot get hardware information from device {self.name}: cannot parse '{COMMAND}'")
+    else:
+        logger.warning(f"Could not connect to device {self.name}: cannot open eAPI port")
+    self.established = bool(self.is_online and self.hw_model)
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+
+ + + Last update: + July 19, 2023 + + + +
+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.6.0/api/inventory.models.input/index.html b/0.6.0/api/inventory.models.input/index.html new file mode 100644 index 000000000..14c95a4ed --- /dev/null +++ b/0.6.0/api/inventory.models.input/index.html @@ -0,0 +1,1868 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Inventory models - Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Inventory models

+ +
+ + + +

+ AntaInventoryInput + + +

+ + +
+

+ Bases: BaseModel

+ + +

User’s inventory model.

+ + + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
networks + List[AntaInventoryNetwork], Optional + +
+

List of AntaInventoryNetwork objects for networks.

+
+
hosts + List[AntaInventoryHost], Optional + +
+

List of AntaInventoryHost objects for hosts.

+
+
range + List[AntaInventoryRange], Optional + +
+

List of AntaInventoryRange objects for ranges.

+
+
+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ AntaInventoryHost + + +

+ + +
+

+ Bases: BaseModel

+ + +

Host definition for user’s inventory.

+ + + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
host + IPvAnyAddress + +
+

IPv4 or IPv6 address of the device

+
+
port + int + +
+

(Optional) eAPI port to use Default is 443.

+
+
name + str + +
+

(Optional) Name to display during tests report. Default is hostname:port

+
+
tags + List[str] + +
+

List of attached tags read from inventory file.

+
+
+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ AntaInventoryNetwork + + +

+ + +
+

+ Bases: BaseModel

+ + +

Network definition for user’s inventory.

+ + + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
network + IPvAnyNetwork + +
+

Subnet to use for testing.

+
+
tags + List[str] + +
+

List of attached tags read from inventory file.

+
+
+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ AntaInventoryRange + + +

+ + +
+

+ Bases: BaseModel

+ + +

IP Range definition for user’s inventory.

+ + + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
start + IPvAnyAddress + +
+

IPv4 or IPv6 address for the beginning of the range.

+
+
stop + IPvAnyAddress + +
+

IPv4 or IPv6 address for the end of the range.

+
+
tags + List[str] + +
+

List of attached tags read from inventory file.

+
+
+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+
+ + + Last update: + July 19, 2023 + + + +
+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.6.0/api/inventory/index.html b/0.6.0/api/inventory/index.html new file mode 100644 index 000000000..f964ec60e --- /dev/null +++ b/0.6.0/api/inventory/index.html @@ -0,0 +1,2224 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Inventory module - Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Inventory module

+ +
+ + + +

+ AntaInventory + + +

+ + +
+

+ Bases: dict

+ + +

Inventory abstraction for ANTA framework.

+ + + + +
+ + + + + + + + + + +
+ + + +

+ add_device + + +

+
add_device(device: AntaDevice) -> None
+
+ +
+ +

Add a device to final inventory.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
device + AntaDevice + +
+

Device object to be added

+
+
+ required +
+ +
+ Source code in anta/inventory/__init__.py +
208
+209
+210
+211
+212
+213
+214
def add_device(self, device: AntaDevice) -> None:
+    """Add a device to final inventory.
+
+    Args:
+        device: Device object to be added
+    """
+    self[device.name] = device
+
+
+
+ +
+ + +
+ + + +

+ connect_inventory + + + + async + + +

+
connect_inventory() -> None
+
+ +
+ +

Run refresh() coroutines for all AntaDevice objects in this inventory.

+ +
+ Source code in anta/inventory/__init__.py +
220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
async def connect_inventory(self) -> None:
+    """Run `refresh()` coroutines for all AntaDevice objects in this inventory."""
+    logger.debug("Refreshing devices...")
+    results = await asyncio.gather(
+        *(device.refresh() for device in self.values()),
+        return_exceptions=True,
+    )
+    for r in results:
+        if isinstance(r, Exception):
+            message = "Error when refreshing inventory"
+            anta_log_exception(r, message, logger)
+
+
+
+ +
+ + +
+ + + +

+ get_inventory + + +

+
get_inventory(
+    established_only: bool = False,
+    tags: Optional[List[str]] = None,
+) -> AntaInventory
+
+ +
+ +

Returns a filtered inventory.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
established_only + bool + +
+

Whether or not to include only established devices. Default False.

+
+
+ False +
tags + Optional[List[str]] + +
+

List of tags to filter devices.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
AntaInventory + AntaInventory + +
+

An inventory with filtered AntaDevice objects.

+
+
+ +
+ Source code in anta/inventory/__init__.py +
172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
def get_inventory(self, established_only: bool = False, tags: Optional[List[str]] = None) -> AntaInventory:
+    """
+    Returns a filtered inventory.
+
+    Args:
+        established_only: Whether or not to include only established devices. Default False.
+        tags: List of tags to filter devices.
+
+    Returns:
+        AntaInventory: An inventory with filtered AntaDevice objects.
+    """
+
+    def _filter_devices(device: AntaDevice) -> bool:
+        """
+        Helper function to select the devices based on the input tags
+        and the requirement for an established connection.
+        """
+        if tags is not None and all(tag not in tags for tag in device.tags):
+            return False
+        return bool(not established_only or device.established)
+
+    devices: List[AntaDevice] = list(filter(_filter_devices, self.values()))
+    result = AntaInventory()
+    for device in devices:
+        result.add_device(device)
+    return result
+
+
+
+ +
+ + +
+ + + +

+ parse + + + + staticmethod + + +

+
parse(
+    inventory_file: str,
+    username: str,
+    password: str,
+    enable_password: Optional[str] = None,
+    timeout: Optional[float] = None,
+    insecure: bool = False,
+) -> AntaInventory
+
+ +
+ +

Create an AntaInventory instance from an inventory file. +The inventory devices are AsyncEOSDevice instances.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
inventory_file + str + +
+

Path to inventory YAML file where user has described his inputs

+
+
+ required +
username + str + +
+

Username to use to connect to devices

+
+
+ required +
password + str + +
+

Password to use to connect to devices

+
+
+ required +
timeout + float + +
+

timeout in seconds for every API call.

+
+
+ None +
+ + + +

Raises:

+ + + + + + + + + + + + + + + + + + + + + +
TypeDescription
+ InventoryRootKeyError + +
+

Root key of inventory is missing.

+
+
+ InventoryIncorrectSchema + +
+

Inventory file is not following AntaInventory Schema.

+
+
+ InventoryUnknownFormat + +
+

Output format is not supported.

+
+
+ +
+ Source code in anta/inventory/__init__.py +
119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
@staticmethod
+def parse(
+    inventory_file: str, username: str, password: str, enable_password: Optional[str] = None, timeout: Optional[float] = None, insecure: bool = False
+) -> AntaInventory:
+    # pylint: disable=too-many-arguments
+    """
+    Create an AntaInventory instance from an inventory file.
+    The inventory devices are AsyncEOSDevice instances.
+
+    Args:
+        inventory_file (str): Path to inventory YAML file where user has described his inputs
+        username (str): Username to use to connect to devices
+        password (str): Password to use to connect to devices
+        timeout (float, optional): timeout in seconds for every API call.
+
+    Raises:
+        InventoryRootKeyError: Root key of inventory is missing.
+        InventoryIncorrectSchema: Inventory file is not following AntaInventory Schema.
+        InventoryUnknownFormat: Output format is not supported.
+    """
+
+    inventory = AntaInventory()
+    kwargs: Dict[str, Any] = {"username": username, "password": password, "enable_password": enable_password, "timeout": timeout, "insecure": insecure}
+    kwargs = {k: v for k, v in kwargs.items() if v is not None}
+
+    with open(inventory_file, "r", encoding="UTF-8") as file:
+        data = safe_load(file)
+
+    # Load data using Pydantic
+    try:
+        inventory_input = AntaInventoryInput(**data[AntaInventory.INVENTORY_ROOT_KEY])
+    except KeyError as exc:
+        logger.error(f"Inventory root key is missing: {AntaInventory.INVENTORY_ROOT_KEY}")
+        raise InventoryRootKeyError(f"Inventory root key ({AntaInventory.INVENTORY_ROOT_KEY}) is not defined in your inventory") from exc
+    except ValidationError as exc:
+        logger.error("Inventory data are not compliant with inventory models")
+        raise InventoryIncorrectSchema(f"Inventory is not following the schema: {str(exc)}") from exc
+
+    # Read data from input
+    AntaInventory._parse_hosts(inventory_input, inventory, **kwargs)
+    AntaInventory._parse_networks(inventory_input, inventory, **kwargs)
+    AntaInventory._parse_ranges(inventory_input, inventory, **kwargs)
+
+    return inventory
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ exceptions + + +

+ +
+ +

Manage Exception in Inventory module.

+ + + +
+ + + + + + + + +
+ + + +

+ InventoryIncorrectSchema + + +

+ + +
+

+ Bases: Exception

+ + +

Error when user data does not follow ANTA schema.

+ + +
+ +
+ +
+ + + +

+ InventoryRootKeyError + + +

+ + +
+

+ Bases: Exception

+ + +

Error raised when inventory root key is not found.

+ + +
+ +
+ + + + +
+ +
+ +
+ +
+
+ + + Last update: + July 19, 2023 + + + +
+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.6.0/api/models/index.html b/0.6.0/api/models/index.html new file mode 100644 index 000000000..43139752c --- /dev/null +++ b/0.6.0/api/models/index.html @@ -0,0 +1,2565 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Test models - Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Test models

+ +
+ + + +

+ AntaTest + + +

+
AntaTest(
+    device: AntaDevice,
+    template_params: list[dict[str, Any]] | None = None,
+    eos_data: list[dict[Any, Any] | str] | None = None,
+    labels: list[str] | None = None,
+)
+
+ +
+

+ Bases: ABC

+ + +

Abstract class defining a test for Anta

+

The goal of this class is to handle the heavy lifting and make +writing a test as simple as possible.

+

TODO - complete doctstring with example

+ +
+ Source code in anta/models.py +
160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
def __init__(
+    self,
+    device: AntaDevice,
+    template_params: list[dict[str, Any]] | None = None,
+    # TODO document very well the order of eos_data
+    eos_data: list[dict[Any, Any] | str] | None = None,
+    labels: list[str] | None = None,
+):
+    """Class constructor"""
+    # Accept 6 input arguments
+    # pylint: disable=R0913
+    self.logger: logging.Logger = logging.getLogger(f"{self.__module__}.{self.__class__.__name__}")
+    self.device: AntaDevice = device
+    self.result: TestResult = TestResult(name=device.name, test=self.name, test_category=self.categories, test_description=self.description)
+    self.labels: List[str] = labels or []
+    self.instance_commands: List[AntaCommand] = []
+
+    # TODO - check optimization for deepcopy
+    # Generating instance_commands from list of commands and template
+    if hasattr(self.__class__, "commands") and (cmds := self.__class__.commands) is not None:
+        self.instance_commands.extend(deepcopy(cmds))
+    if hasattr(self.__class__, "template") and (tpl := self.__class__.template) is not None:
+        if template_params is None:
+            self.result.is_error("Command has template but no params were given")
+            return
+        self.template_params = template_params
+        for param in template_params:
+            try:
+                self.instance_commands.append(tpl.render(param))
+            except KeyError:
+                self.result.is_error(f"Cannot render template '{tpl.template}': wrong parameters")
+                return
+
+    if eos_data is not None:
+        self.logger.debug("Test initialized with input data")
+        self.save_commands_data(eos_data)
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ all_data_collected + + +

+
all_data_collected() -> bool
+
+ +
+ +

returns True if output is populated for every command

+ +
+ Source code in anta/models.py +
205
+206
+207
def all_data_collected(self) -> bool:
+    """returns True if output is populated for every command"""
+    return all(command.collected for command in self.instance_commands)
+
+
+
+ +
+ + +
+ + + +

+ anta_test + + + + staticmethod + + +

+
anta_test(
+    function: F,
+) -> Callable[..., Coroutine[Any, Any, TestResult]]
+
+ +
+ +

Decorator for anta_test that handles injecting test data if given and collecting it using asyncio if missing

+ +
+ Source code in anta/models.py +
@staticmethod
+def anta_test(function: F) -> Callable[..., Coroutine[Any, Any, TestResult]]:
+    """
+    Decorator for anta_test that handles injecting test data if given and collecting it using asyncio if missing
+    """
+
+    @wraps(function)
+    async def wrapper(
+        self: AntaTest,
+        eos_data: list[dict[Any, Any] | str] | None = None,
+        **kwargs: Any,
+    ) -> TestResult:
+        """
+        Wraps the test function and implement (in this order):
+        1. Instantiate the command outputs if `eos_data` is provided
+        2. Collect missing command outputs from the device
+        3. Run the test function
+        4. Catches and set the result if the test function raises an exception
+
+        Returns:
+            TestResult: self.result, populated with the correct exit status
+        """
+        if self.result.result != "unset":
+            return self.result
+
+        # TODO maybe_skip decorators
+
+        # Data
+        if eos_data is not None:
+            self.save_commands_data(eos_data)
+            self.logger.debug(f"Test {self.name} initialized with input data {eos_data}")
+
+        # If some data is missing, try to collect
+        if not self.all_data_collected():
+            await self.collect()
+            if self.result.result != "unset":
+                return self.result
+
+        try:
+            if cmds := self.get_failed_commands():
+                self.result.is_error(
+                    "\n".join([f"{cmd.command} has failed: {exc_to_str(cmd.failed)}" if cmd.failed else f"{cmd.command} has failed" for cmd in cmds])
+                )
+                return self.result
+            function(self, **kwargs)
+        except Exception as e:  # pylint: disable=broad-exception-caught
+            message = f"Exception raised for test {self.name} (on device {self.device.name})"
+            anta_log_exception(e, message, self.logger)
+            self.result.is_error(exc_to_str(e))
+
+        AntaTest.update_progress()
+        return self.result
+
+    return wrapper
+
+
+
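Because the decorator turns test() into a coroutine returning a TestResult, running a test from user code is a single awaitable call. A sketch, reusing the hypothetical VerifyUptime above and assuming device is a connected AntaDevice:

result = await VerifyUptime(device).test()
print(result.result)  # one of "unset", "success", "failure", "error", "skipped"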
+ +
+ + +
+ + + +

+ collect + + + + async + + +

+
collect() -> None
+
+ +
+ +

Method used to collect outputs of all commands of this test class from the device of this test instance.

+ +
+ Source code in anta/models.py +
async def collect(self) -> None:
+    """
+    Method used to collect outputs of all commands of this test class from the device of this test instance.
+    """
+    try:
+        await self.device.collect_commands(self.instance_commands)
+    except Exception as e:  # pylint: disable=broad-exception-caught
+        message = f"Exception raised while collecting commands for test {self.name} (on device {self.device.name})"
+        anta_log_exception(e, message, self.logger)
+        self.result.is_error(exc_to_str(e))
+
+
+
+ +
+ + +
+ + + +

+ get_failed_commands + + +

+
get_failed_commands() -> List[AntaCommand]
+
+ +
+ +

Returns a list of all the commands that have a populated failed field.

+ +
+ Source code in anta/models.py +
def get_failed_commands(self) -> List[AntaCommand]:
+    """returns a list of all the commands that have a populated failed field"""
+    return [command for command in self.instance_commands if command.failed is not None]
+
+
+
+ +
+ + +
+ + + +

+ save_commands_data + + +

+
save_commands_data(
+    eos_data: list[dict[Any, Any] | str]
+) -> None
+
+ +
+ +

Called at init or at test execution time

+ +
+ Source code in anta/models.py +
def save_commands_data(self, eos_data: list[dict[Any, Any] | str]) -> None:
+    """Called at init or at test execution time"""
+    if len(eos_data) != len(self.instance_commands):
+        self.result.is_error("Test initialization error: Trying to save more data than there are commands for the test")
+        return
+    for index, data in enumerate(eos_data or []):
+        self.instance_commands[index].output = data
+
+
+
+ +
+ + +
+ + + +

+ test + + + + abstractmethod + + +

+
test() -> Coroutine[Any, Any, TestResult]
+
+ +
+ +

This abstract method is the core of the test. It MUST set the correct status of self.result with the appropriate error messages.

+

It must be implemented as follows:

+

@AntaTest.anta_test
def test(self) -> None:
    '''
    assert code
    '''

+ +
+ Source code in anta/models.py +
@abstractmethod
+def test(self) -> Coroutine[Any, Any, TestResult]:
+    """
+    This abstract method is the core of the test.
+    It MUST set the correct status of self.result with the appropriate error messages
+
+    it must be implemented as follow
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+       '''
+       assert code
+       '''
+    """
+
+
+
+ +
+ + +
+ + + +

+ update_progress + + + + classmethod + + +

+
update_progress() -> None
+
+ +
+ +

Update progress bar for all AntaTest objects if it exists

+ +
+ Source code in anta/models.py +
@classmethod
+def update_progress(cls) -> None:
+    """
+    Update progress bar for all AntaTest objects if it exists
+    """
+    if cls.progress and (cls.nrfu_task is not None):
+        cls.progress.update(cls.nrfu_task, advance=1)
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ AntaCommand + + +

+ + +
+

+ Bases: BaseModel

+ + +

Class to define a test command with its API version

+ + + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description
command + str + +
+

Device command

+
+
version + Literal[1, 'latest'] + +
+

eAPI version - valid values are 1 or “latest” - default is “latest”

+
+
revision + Optional[conint(ge=1, le=99)] + +
+

Revision of the command. Valid values are 1 to 99. Revision has precedence over version.

+
+
ofmt + Literal['json', 'text'] + +
+

eAPI output - json or text - default is json

+
+
template + Optional[AntaTemplate] + +
+

AntaTemplate object used to render this command

+
+
params + Optional[Dict[str, Any]] + +
+

dictionary of variables with string values to render the template

+
+
failed + Optional[Exception] + +
+

If the command execution fails, the Exception object is stored in this field

+
+
+ + + + +
+ + + + + + + +
+ + + +

+ collected + + + + property + + +

+
collected: bool
+
+ +
+ +

Return True if the command has been collected

+
+ +
+ +
+ + + +

+ json_output + + + + property + + +

+
json_output: Dict[str, Any]
+
+ +
+ +

Get the command output as JSON

+
+ +
+ +
+ + + +

+ text_output + + + + property + + +

+
text_output: str
+
+ +
+ +

Get the command output as a string

+
+ +
+ + + + + +
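A short sketch of declaring an AntaCommand and reading its properties once a device has collected the output; the modelName key is shown only as an illustration of show version JSON output:

from anta.models import AntaCommand

cmd = AntaCommand(command="show version", version="latest", ofmt="json")
# after AntaDevice.collect() has populated cmd.output:
if cmd.collected:
    model = cmd.json_output["modelName"]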
+ +
+ +
+ +
+ + + +

+ AntaTemplate + + +

+ + +
+

+ Bases: BaseModel

+ + +

Class to define a test command template with its API version

+ + + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description
template + str + +
+

Python f-string. Example: ‘show vlan {vlan_id}’

+
+
version + Literal[1, 'latest'] + +
+

eAPI version - valid values are 1 or “latest” - default is “latest”

+
+
revision + Optional[conint(ge=1, le=99)] + +
+

Revision of the command. Valid values are 1 to 99. Revision has precedence over version.

+
+
ofmt + Literal['json', 'text'] + +
+

eAPI output - json or text - default is json

+
+
+ + + + +
+ + + + + + + + + + +
+ + + +

+ render + + +

+
render(params: Dict[str, Any]) -> AntaCommand
+
+ +
+ +

Render an AntaCommand from an AntaTemplate instance. Keep the parameters used in the AntaTemplate instance.

+

Args:
    params: dictionary of variables with string values to render the Python f-string

+

Returns:
    AntaCommand: The rendered AntaCommand. This AntaCommand instance has a template attribute that references this AntaTemplate instance.

+ +
+ Source code in anta/models.py +
def render(self, params: Dict[str, Any]) -> AntaCommand:
+    """Render an AntaCommand from an AntaTemplate instance.
+    Keep the parameters used in the AntaTemplate instance.
+
+     Args:
+         params: dictionary of variables with string values to render the Python f-string
+
+     Returns:
+         AntaCommand: The rendered AntaCommand.
+                      This AntaCommand instance have a template attribute that references this
+                      AntaTemplate instance.
+    """
+    return AntaCommand(command=self.template.format(**params), ofmt=self.ofmt, version=self.version, revision=self.revision, template=self, params=params)
+
+
+
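For example, rendering the f-string template from the attributes table above (a sketch):

from anta.models import AntaTemplate

tpl = AntaTemplate(template="show vlan {vlan_id}")
cmd = tpl.render({"vlan_id": 10})
# cmd.command == "show vlan 10"
# cmd.template references tpl and cmd.params == {"vlan_id": 10}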
+ +
+ + + +
+ +
+ +
+ +
+
\ No newline at end of file
diff --git a/0.6.0/api/report_manager/index.html b/0.6.0/api/report_manager/index.html
new file mode 100644
index 000000000..0cb957870
--- /dev/null
+++ b/0.6.0/api/report_manager/index.html
@@ -0,0 +1,2195 @@
Report Manager module

+ +
+ + + +

+ ReportTable + + +

+
ReportTable()
+
+ +
+ + +

TableReport generates a rich Table based on TestResult objects.

+ +
+ Source code in anta/reporter/__init__.py +
def __init__(self) -> None:
+    """
+    __init__ Class constructor
+    """
+    self.colors = []
+    self.colors.append(ColorManager(level="success", color=RICH_COLOR_PALETTE.SUCCESS))
+    self.colors.append(ColorManager(level="failure", color=RICH_COLOR_PALETTE.FAILURE))
+    self.colors.append(ColorManager(level="error", color=RICH_COLOR_PALETTE.ERROR))
+    self.colors.append(ColorManager(level="skipped", color=RICH_COLOR_PALETTE.SKIPPED))
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ report_all + + +

+
report_all(
+    result_manager: ResultManager,
+    host: Optional[str] = None,
+    testcase: Optional[str] = None,
+    title: str = "All tests results",
+) -> Table
+
+ +
+ +

Create a table report with all tests for one or all devices.

+

Create table with full output: Host / Test / Status / Message

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
result_manager + ResultManager + +
+

A manager with a list of tests.

+
+
+ required +
host + str + +
+

IP Address of a host to search for. Defaults to None.

+
+
+ None +
testcase + str + +
+

A test name to search for. Defaults to None.

+
+
+ None +
title + str + +
+

Title for the report. Defaults to ‘All tests results’.

+
+
+ 'All tests results' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
Table + Table + +
+

A fully populated rich Table

+
+
+ +
+ Source code in anta/reporter/__init__.py +
def report_all(
+    self,
+    result_manager: ResultManager,
+    host: Optional[str] = None,
+    testcase: Optional[str] = None,
+    title: str = "All tests results",
+) -> Table:
+    """
+    Create a table report with all tests for one or all devices.
+
+    Create table with full output: Host / Test / Status / Message
+
+    Args:
+        result_manager (ResultManager): A manager with a list of tests.
+        host (str, optional): IP Address of a host to search for. Defaults to None.
+        testcase (str, optional): A test name to search for. Defaults to None.
+        title (str, optional): Title for the report. Defaults to 'All tests results'.
+
+    Returns:
+        Table: A fully populated rich Table
+    """
+    table = Table(title=title)
+    headers = ["Device IP", "Test Name", "Test Status", "Message(s)", "Test description", "Test category"]
+    table = self._build_headers(headers=headers, table=table)
+
+    for result in result_manager.get_results(output_format="list"):
+        # pylint: disable=R0916
+        if (host is None and testcase is None) or (host is not None and str(result.name) == host) or (testcase is not None and testcase == str(result.test)):
+            state = self._color_result(status=str(result.result), output_type="str")
+            message = self._split_list_to_txt_list(result.messages) if len(result.messages) > 0 else ""
+            test_categories = ", ".join(result.test_category)
+            table.add_row(str(result.name), result.test, state, message, result.test_description, test_categories)
+    return table
+
+
+
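Assuming manager is a populated ResultManager (see the Result Manager module), producing and printing the full report could look like this sketch:

from rich.console import Console

from anta.reporter import ReportTable

reporter = ReportTable()
table = reporter.report_all(result_manager=manager, title="All tests results")
Console().print(table)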
+ +
+ + +
+ + + +

+ report_summary_hosts + + +

+
report_summary_hosts(
+    result_manager: ResultManager,
+    host: Optional[str] = None,
+    title: str = "Summary per host",
+) -> Table
+
+ +
+ +

Create a table report with results aggregated per host.

+

Create table with full output: Host / Number of success / Number of failure / Number of error / List of nodes in error or failure

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
result_manager + ResultManager + +
+

A manager with a list of tests.

+
+
+ required +
host + str + +
+

IP Address of a host to search for. Defaults to None.

+
+
+ None +
title + str + +
+

Title for the report. Defaults to ‘Summary per host’.

+
+
+ 'Summary per host' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
Table + Table + +
+

A fully populated rich Table

+
+
+ +
+ Source code in anta/reporter/__init__.py +
def report_summary_hosts(
+    self,
+    result_manager: ResultManager,
+    host: Optional[str] = None,
+    title: str = "Summary per host",
+) -> Table:
+    """
+    Create a table report with result agregated per host.
+
+    Create table with full output: Host / Number of success / Number of failure / Number of error / List of nodes in error or failure
+
+    Args:
+        result_manager (ResultManager): A manager with a list of tests.
+        host (str, optional): IP Address of a host to search for. Defaults to None.
+        title (str, optional): Title for the report. Defaults to 'All tests results'.
+
+    Returns:
+        Table: A fully populated rich Table
+    """
+    table = Table(title=title)
+    headers = [
+        "Host IP",
+        "# of success",
+        "# of skipped",
+        "# of failure",
+        "# of errors",
+        "List of failed or error test cases",
+    ]
+    table = self._build_headers(headers=headers, table=table)
+    for host_read in result_manager.get_hosts():
+        if host is None or str(host_read) == host:
+            results = result_manager.get_result_by_host(host_read)
+            logger.debug("data to use for computation")
+            logger.debug(f"{host}: {results}")
+            nb_failure = len([result for result in results if result.result == "failure"])
+            nb_error = len([result for result in results if result.result == "error"])
+            list_failure = [str(result.test) for result in results if result.result in ["failure", "error"]]
+            nb_success = len([result for result in results if result.result == "success"])
+            nb_skipped = len([result for result in results if result.result == "skipped"])
+            table.add_row(
+                str(host_read),
+                str(nb_success),
+                str(nb_skipped),
+                str(nb_failure),
+                str(nb_error),
+                str(list_failure),
+            )
+    return table
+
+
+
+ +
+ + +
+ + + +

+ report_summary_tests + + +

+
report_summary_tests(
+    result_manager: ResultManager,
+    testcase: Optional[str] = None,
+    title: str = "Summary per test case",
+) -> Table
+
+ +
+ +

Create a table report with results aggregated per test.

+

Create table with full output: Test / Number of success / Number of failure / Number of error / List of nodes in error or failure

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
result_manager + ResultManager + +
+

A manager with a list of tests.

+
+
+ required +
testcase + str + +
+

A test name to search for. Defaults to None.

+
+
+ None +
title + str + +
+

Title for the report. Defaults to ‘Summary per test case’.

+
+
+ 'Summary per test case' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
Table + Table + +
+

A fully populated rich Table

+
+
+ +
+ Source code in anta/reporter/__init__.py +
def report_summary_tests(
+    self,
+    result_manager: ResultManager,
+    testcase: Optional[str] = None,
+    title: str = "Summary per test case",
+) -> Table:
+    """
+    Create a table report with result agregated per test.
+
+    Create table with full output: Test / Number of success / Number of failure / Number of error / List of nodes in error or failure
+
+    Args:
+        result_manager (ResultManager): A manager with a list of tests.
+        testcase (str, optional): A test name to search for. Defaults to None.
+        title (str, optional): Title for the report. Defaults to 'All tests results'.
+
+    Returns:
+        Table: A fully populated rich Table
+    """
+    # sourcery skip: class-extract-method
+    table = Table(title=title)
+    headers = [
+        "Test Case",
+        "# of success",
+        "# of skipped",
+        "# of failure",
+        "# of errors",
+        "List of failed or error nodes",
+    ]
+    table = self._build_headers(headers=headers, table=table)
+    for testcase_read in result_manager.get_testcases():
+        if testcase is None or str(testcase_read) == testcase:
+            results = result_manager.get_result_by_test(testcase_read)
+            nb_failure = len([result for result in results if result.result == "failure"])
+            nb_error = len([result for result in results if result.result == "error"])
+            list_failure = [str(result.name) for result in results if result.result in ["failure", "error"]]
+            nb_success = len([result for result in results if result.result == "success"])
+            nb_skipped = len([result for result in results if result.result == "skipped"])
+            table.add_row(
+                testcase_read,
+                str(nb_success),
+                str(nb_skipped),
+                str(nb_failure),
+                str(nb_error),
+                str(list_failure),
+            )
+    return table
+
+
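The two summary variants are used the same way; a sketch reusing the reporter above, with a hypothetical test name as filter:

hosts_table = reporter.report_summary_hosts(result_manager=manager)
tests_table = reporter.report_summary_tests(result_manager=manager, testcase="VerifyUptime")
Console().print(hosts_table)
Console().print(tests_table)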
+
+ +
+ + + +
+ +
+ +
+ +
+
\ No newline at end of file
diff --git a/0.6.0/api/report_manager_models/index.html b/0.6.0/api/report_manager_models/index.html
new file mode 100644
index 000000000..8283f1005
--- /dev/null
+++ b/0.6.0/api/report_manager_models/index.html
@@ -0,0 +1,1877 @@
Report Manager models

+ +
+ + + +

+ ColorManager + + +

+ + +
+

+ Bases: BaseModel

+ + +

Color management for status report.

+ + + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + +
Name | Type | Description
level + str + +
+

Test result value.

+
+
color + str + +
+

Associated color.

+
+
+ + + + +
+ + + + + + + + + + +
+ + + +

+ name_must_be_in + + +

+
name_must_be_in(v: str) -> str
+
+ +
+ +

Status validator

+

Validate status is a supported one

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
v + str + +
+

User defined level

+
+
+ required +
+ + + +

Raises:

+ + + + + + + + + + + + + +
Type | Description
+ ValueError + +
+

If level is unsupported

+
+
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
str + str + +
+

level value

+
+
+ +
+ Source code in anta/reporter/models.py +
@validator("level", allow_reuse=True)
+def name_must_be_in(cls, v: str) -> str:
+    """
+    Status validator
+
+    Validate status is a supported one
+
+    Args:
+        v (str): User defined level
+
+    Raises:
+        ValueError: If level is unsupported
+
+    Returns:
+        str: level value
+    """
+    if v not in RESULT_OPTIONS:
+        raise ValueError(f"must be one of {RESULT_OPTIONS}")
+    return v
+
+
+
+ +
+ + +
+ + + +

+ string + + +

+
string() -> str
+
+ +
+ +

Build an str with color code

+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
str + str + +
+

String with level and its associated color

+
+
+ +
+ Source code in anta/reporter/models.py +
def string(self) -> str:
+    """
+    Build an str with color code
+
+    Returns:
+        str: String with level and its associated color
+    """
+    return f"[{self.color}]{self.level}"
+
+
+
+ +
+ + +
+ + + +

+ style_rich + + +

+
style_rich() -> Text
+
+ +
+ +

Build a rich Text syntax with color

+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
Text + Text + +
+

object with level string and its associated color.

+
+
+ +
+ Source code in anta/reporter/models.py +
def style_rich(self) -> Text:
+    """
+    Build a rich Text syntax with color
+
+    Returns:
+        Text: object with level string and its associated color.
+    """
+    return Text(self.level, style=self.color)
+
+
+
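A sketch of how a ColorManager entry maps a result level to rich markup; the color value is an illustrative rich color name, not necessarily the one defined in RICH_COLOR_PALETTE:

from anta.reporter.models import ColorManager

color = ColorManager(level="success", color="green4")
print(color.string())      # "[green4]success"
text = color.style_rich()  # rich Text("success") styled with "green4"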
+ +
+ + + +
+ +
+ +
+ +
+
\ No newline at end of file
diff --git a/0.6.0/api/result_manager/index.html b/0.6.0/api/result_manager/index.html
new file mode 100644
index 000000000..7ab6e70b9
--- /dev/null
+++ b/0.6.0/api/result_manager/index.html
@@ -0,0 +1,2579 @@
Result Manager module

+ +
+ + + +

+ ResultManager + + +

+
ResultManager()
+
+ +
+ + +

Helper to manage Test Results and generate reports.

+ + + +

Examples:

+

Create Inventory:

+
inventory_anta = AntaInventory.parse(
+    inventory_file='examples/inventory.yml',
+    username='ansible',
+    password='ansible',
+    timeout=0.5
+)
+
+ +

Create Result Manager:

+
manager = ResultManager()
+
+ +

Run tests for all connected devices:

+
for device in inventory_anta.get_inventory():
+    manager.add_test_result(
+        VerifyNTP(device=device).test()
+    )
+    manager.add_test_result(
+        VerifyEOSVersion(device=device).test(version='4.28.3M')
+    )
+
+ +

Print result in native format:

+
manager.get_results()
+[
+    TestResult(
+        host=IPv4Address('192.168.0.10'),
+        test='VerifyNTP',
+        result='failure',
+        message="device is not running NTP correctly"
+    ),
+    TestResult(
+        host=IPv4Address('192.168.0.10'),
+        test='VerifyEOSVersion',
+        result='success',
+        message=None
+    ),
+]
+
+ +

The status of the class is initialized to “unset”

+

Then, when adding a test with a status that is NOT ‘error’, the following table shows the updated status:

| Current Status | Added test Status                | Updated Status |
| -------------- | -------------------------------- | -------------- |
| unset          | Any                              | Any            |
| skipped        | unset, skipped                   | skipped        |
| skipped        | success                          | success        |
| skipped        | failure                          | failure        |
| success        | unset, skipped, success          | success        |
| success        | failure                          | failure        |
| failure        | unset, skipped, success, failure | failure        |
+

If the status of the added test is error, the status is untouched and the error_status is set to True.

+ +
+ Source code in anta/result_manager/__init__.py +
def __init__(self) -> None:
+    """
+    Class constructor.
+
+    The status of the class is initialized to "unset"
+
+    Then when adding a test with a status that is NOT 'error' the following
+    table shows the updated status:
+
+    | Current Status |         Added test Status       | Updated Status |
+    | -------------- | ------------------------------- | -------------- |
+    |      unset     |              Any                |       Any      |
+    |     skipped    |         unset, skipped          |     skipped    |
+    |     skipped    |            success              |     success    |
+    |     skipped    |            failure              |     failure    |
+    |     success    |     unset, skipped, success     |     success    |
+    |     success    |            failure              |     failure    |
+    |     failure    | unset, skipped success, failure |     failure    |
+
+    If the status of the added test is error, the status is untouched and the
+    error_status is set to True.
+    """
+    logger.debug("Instantiate result-manager")
+    self._result_entries = ListResult()
+    # Initialize status
+    self.status = "unset"
+    self.error_status = False
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ __update_status + + +

+
__update_status(test_status: str) -> None
+
+ +
+ +

Update ResultManager status based on the table above.

+ +
+ Source code in anta/result_manager/__init__.py +
def __update_status(self, test_status: str) -> None:
+    """
+    Update ResultManager status based on the table above.
+    """
+    if test_status not in RESULT_OPTIONS:
+        raise ValueError(f"{test_status} is not a valid result option")
+    if test_status == "error":
+        self.error_status = True
+        return
+
+    if self.status == "unset":
+        self.status = test_status
+    elif self.status == "skipped" and test_status in {"success", "failure"}:
+        self.status = test_status
+    elif self.status == "success" and test_status == "failure":
+        self.status = "failure"
+
+
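Reading the table above on a concrete sequence (a sketch):

manager = ResultManager()
print(manager.get_status())  # "unset"
# adding a skipped result       -> status becomes "skipped"
# then adding a failure result  -> status becomes "failure"
# then adding a success result  -> status stays "failure"
# an error result leaves the status untouched but sets error_status to True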
+
+ +
+ + +
+ + + +

+ add_test_result + + +

+
add_test_result(entry: TestResult) -> None
+
+ +
+ +

Add a result to the list

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
entry + TestResult + +
+

TestResult data to add to the report

+
+
+ required +
+ +
+ Source code in anta/result_manager/__init__.py +
def add_test_result(self, entry: TestResult) -> None:
+    """Add a result to the list
+
+    Args:
+        entry (TestResult): TestResult data to add to the report
+    """
+    logger.debug(entry)
+    self._result_entries.append(entry)
+    self.__update_status(entry.result)
+
+
+
+ +
+ + +
+ + + +

+ add_test_results + + +

+
add_test_results(entries: List[TestResult]) -> None
+
+ +
+ +

Add a list of results to the list

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
entries + List[TestResult] + +
+

list of TestResult data to add to the report

+
+
+ required +
+ +
+ Source code in anta/result_manager/__init__.py +
def add_test_results(self, entries: List[TestResult]) -> None:
+    """Add a list of results to the list
+
+    Args:
+        entries (List[TestResult]): list of TestResult data to add to the report
+    """
+    for e in entries:
+        self.add_test_result(e)
+
+
+
+ +
+ + +
+ + + +

+ get_hosts + + +

+
get_hosts() -> List[str]
+
+ +
+ +

Get the list of IP addresses in the current manager.

+ + + +

Returns:

+ + + + + + + + + + + + + +
Type | Description
+ List[str] + +
+

List[str]: List of IP addresses.

+
+
+ +
+ Source code in anta/result_manager/__init__.py +
def get_hosts(self) -> List[str]:
+    """
+    Get list of IP addresses in current manager.
+
+    Returns:
+        List[str]: List of IP addresses.
+    """
+    result_list = []
+    for testcase in self._result_entries:
+        if str(testcase.name) not in result_list:
+            result_list.append(str(testcase.name))
+    return result_list
+
+
+
+ +
+ + +
+ + + +

+ get_result_by_host + + +

+
get_result_by_host(
+    host_ip: str, output_format: str = "native"
+) -> Any
+
+ +
+ +

Get the list of test results for a given host.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
host_ip + str + +
+

IP Address of the host to use to filter results.

+
+
+ required +
output_format + str + +
+

format selector. Can be either native/list. Defaults to ‘native’.

+
+
+ 'native' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
Any + Any + +
+

List of results related to the host.

+
+
+ +
+ Source code in anta/result_manager/__init__.py +
def get_result_by_host(self, host_ip: str, output_format: str = "native") -> Any:
+    """
+    Get list of test result for a given host.
+
+    Args:
+        host_ip (str): IP Address of the host to use to filter results.
+        output_format (str, optional): format selector. Can be either native/list. Defaults to 'native'.
+
+    Returns:
+        Any: List of results related to the host.
+    """
+    if output_format == "list":
+        return [result for result in self._result_entries if str(result.name) == host_ip]
+
+    result_manager_filtered = ListResult()
+    for result in self._result_entries:
+        if str(result.name) == host_ip:
+            result_manager_filtered.append(result)
+    return result_manager_filtered
+
+
+
+ +
+ + +
+ + + +

+ get_result_by_test + + +

+
get_result_by_test(
+    test_name: str, output_format: str = "native"
+) -> Any
+
+ +
+ +

Get the list of test results for a given test.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
test_name + str + +
+

Test name to use to filter results

+
+
+ required +
output_format + str + +
+

format selector. Can be either native/list. Defaults to ‘native’.

+
+
+ 'native' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Type | Description
+ Any + +
+

list[TestResult]: List of results related to the test.

+
+
+ +
+ Source code in anta/result_manager/__init__.py +
def get_result_by_test(self, test_name: str, output_format: str = "native") -> Any:
+    """
+    Get list of test result for a given test.
+
+    Args:
+        test_name (str): Test name to use to filter results
+        output_format (str, optional): format selector. Can be either native/list. Defaults to 'native'.
+
+    Returns:
+        list[TestResult]: List of results related to the test.
+    """
+    if output_format == "list":
+        return [result for result in self._result_entries if str(result.test) == test_name]
+
+    result_manager_filtered = ListResult()
+    for result in self._result_entries:
+        if result.test == test_name:
+            result_manager_filtered.append(result)
+    return result_manager_filtered
+
+
+
+ +
+ + +
+ + + +

+ get_results + + +

+
get_results(output_format: str = 'native') -> Any
+
+ +
+ +

Expose the list of all test results in different formats.

+ +
Support multiple formats:
  - native: ListResult format
  - list: a list of TestResult
  - json: a native JSON format
+
+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
output_format + str + +
+

format selector. Can be either native/list/json. Defaults to ‘native’.

+
+
+ 'native' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
any + Any + +
+

List of results.

+
+
+ +
+ Source code in anta/result_manager/__init__.py +
def get_results(self, output_format: str = "native") -> Any:
+    """
+    Expose list of all test results in different format
+
+    Support multiple format:
+      - native: ListResults format
+      - list: a list of TestResult
+      - json: a native JSON format
+
+    Args:
+        output_format (str, optional): format selector. Can be either native/list/json. Defaults to 'native'.
+
+    Returns:
+        any: List of results.
+    """
+    if output_format == "list":
+        return list(self._result_entries)
+
+    if output_format == "json":
+        return json.dumps(pydantic_to_dict(self._result_entries), indent=4)
+
+    # Default return for native format.
+    return self._result_entries
+
+
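For example, with the manager from the Examples section above (a sketch):

native = manager.get_results()                       # ListResult
as_list = manager.get_results(output_format="list")  # list of TestResult
as_json = manager.get_results(output_format="json")  # JSON string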
+
+ +
+ + +
+ + + +

+ get_status + + +

+
get_status(ignore_error: bool = False) -> str
+
+ +
+ +

Returns the current status including error_status if ignore_error is False

+ +
+ Source code in anta/result_manager/__init__.py +
def get_status(self, ignore_error: bool = False) -> str:
+    """
+    Returns the current status including error_status if ignore_error is False
+    """
+    return "error" if self.error_status and not ignore_error else self.status
+
+
+
+ +
+ + +
+ + + +

+ get_testcases + + +

+
get_testcases() -> List[str]
+
+ +
+ +

Get the list of names of all test cases in the current manager.

+ + + +

Returns:

+ + + + + + + + + + + + + +
Type | Description
+ List[str] + +
+

List[str]: List of names for all tests.

+
+
+ +
+ Source code in anta/result_manager/__init__.py +
def get_testcases(self) -> List[str]:
+    """
+    Get list of name of all test cases in current manager.
+
+    Returns:
+        List[str]: List of names for all tests.
+    """
+    result_list = []
+    for testcase in self._result_entries:
+        if str(testcase.test) not in result_list:
+            result_list.append(str(testcase.test))
+    return result_list
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+
\ No newline at end of file
diff --git a/0.6.0/api/result_manager_models/index.html b/0.6.0/api/result_manager_models/index.html
new file mode 100644
index 000000000..95815797a
--- /dev/null
+++ b/0.6.0/api/result_manager_models/index.html
@@ -0,0 +1,2406 @@
Result Manager models

+ +
+ + + +

+ TestResult + + +

+ + +
+

+ Bases: BaseModel

+ + +

Describe the result of a test from a single device.

+ + + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description
name + str + +
+

Device name where the test has run.

+
+
test + str + +
+

Test name runs on the device.

+
+
test_category + List[str] + +
+

List of test categories the test belongs to.

+
+
test_description + str + +
+

Test description.

+
+
result + str + +
+

Result of the test. Can be one of [“unset”, “success”, “failure”, “error”, “skipped”].

+
+
message + str + +
+

Message to report after the test if any.

+
+
+ + + + +
+ + + + + + + + + + +
+ + + +

+ is_error + + +

+
is_error(message: str = '') -> bool
+
+ +
+ +

Helper to set status to error

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
message + str + +
+

Optional message related to the test

+
+
+ '' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
bool + bool + +
+

Always true

+
+
+ +
+ Source code in anta/result_manager/models.py +
def is_error(self, message: str = "") -> bool:
+    """
+    Helper to set status to error
+
+    Args:
+        message (str): Optional message related to the test
+
+    Returns:
+        bool: Always true
+    """
+    return self._set_status("error", message)
+
+
+
+ +
+ + +
+ + + +

+ is_failure + + +

+
is_failure(message: str = '') -> bool
+
+ +
+ +

Helper to set status to failure

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
message + str + +
+

Optional message related to the test

+
+
+ '' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
bool + bool + +
+

Always true

+
+
+ +
+ Source code in anta/result_manager/models.py +
def is_failure(self, message: str = "") -> bool:
+    """
+    Helper to set status to failure
+
+    Args:
+        message (str): Optional message related to the test
+
+    Returns:
+        bool: Always true
+    """
+    return self._set_status("failure", message)
+
+
+
+ +
+ + +
+ + + +

+ is_skipped + + +

+
is_skipped(message: str = '') -> bool
+
+ +
+ +

Helper to set status to skipped

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
message + str + +
+

Optional message related to the test

+
+
+ '' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
bool + bool + +
+

Always true

+
+
+ +
+ Source code in anta/result_manager/models.py +
def is_skipped(self, message: str = "") -> bool:
+    """
+    Helper to set status to skipped
+
+    Args:
+        message (str): Optional message related to the test
+
+    Returns:
+        bool: Always true
+    """
+    return self._set_status("skipped", message)
+
+
+
+ +
+ + +
+ + + +

+ is_success + + +

+
is_success(message: str = '') -> bool
+
+ +
+ +

Helper to set status to success

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
message + str + +
+

Optional message related to the test

+
+
+ '' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
bool + bool + +
+

Always true

+
+
+ +
+ Source code in anta/result_manager/models.py +
def is_success(self, message: str = "") -> bool:
+    """
+    Helper to set status to success
+
+    Args:
+        message (str): Optional message related to the test
+
+    Returns:
+        bool: Always true
+    """
+    return self._set_status("success", message)
+
+
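All four helpers delegate to the same private status setter and always return True, so a test can end on one of them. A sketch; the field values are illustrative:

from anta.result_manager.models import TestResult

result = TestResult(
    name="leaf1",
    test="VerifyUptime",
    test_category=["system"],
    test_description="Verifies the device uptime",
)
result.is_failure("uptime below threshold")
# result.result == "failure"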
+
+ +
+ + +
+ + + +

+ name_must_be_in + + + + classmethod + + +

+
name_must_be_in(v: str) -> str
+
+ +
+ +

Status validator

+

Validate status is a supported one

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
v + str + +
+

User defined status

+
+
+ required +
+ + + +

Raises:

+ + + + + + + + + + + + + +
Type | Description
+ ValueError + +
+

If status is unsupported

+
+
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name | Type | Description
str + str + +
+

status value

+
+
+ +
+ Source code in anta/result_manager/models.py +
@classmethod
+@field_validator("result")
+def name_must_be_in(cls, v: str) -> str:
+    """
+    Status validator
+
+    Validate status is a supported one
+
+    Args:
+        v (str): User defined status
+
+    Raises:
+        ValueError: If status is unsupported
+
+    Returns:
+        str: status value
+    """
+    if v not in RESULT_OPTIONS:
+        raise ValueError(f"must be one of {RESULT_OPTIONS}")
+    return v
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ ListResult + + +

+ + +
+

+ Bases: RootModel[List[TestResult]]

+ + +

List result for all tests on all devices.

+ + + +

Attributes:

+ + + + + + + + + + + + + + + +
Name | Type | Description
__root__ + List[TestResult] + +
+

A list of TestResult objects.

+
+
+ + + + +
+ + + + + + + + + + +
+ + + +

+ append + + +

+
append(value: TestResult) -> None
+
+ +
+ +

Add support for append method.

+ +
+ Source code in anta/result_manager/models.py +
def append(self, value: TestResult) -> None:
+    """Add support for append method."""
+    self.root.append(value)
+
+
+
+ +
+ + +
+ + + +

+ extend + + +

+
extend(values: List[TestResult]) -> None
+
+ +
+ +

Add support for extend method.

+ +
+ Source code in anta/result_manager/models.py +
def extend(self, values: List[TestResult]) -> None:
+    """Add support for extend method."""
+    self.root.extend(values)
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+
\ No newline at end of file
diff --git a/0.6.0/api/tests.aaa/index.html b/0.6.0/api/tests.aaa/index.html
new file mode 100644
index 000000000..d09816f67
--- /dev/null
+++ b/0.6.0/api/tests.aaa/index.html
@@ -0,0 +1,3778 @@
AAA

+ +

ANTA catalog for AAA tests

+ + +
+ + + +
+ +

Test functions related to various EOS AAA settings

+ + + +
+ + + + + + + + +
+ + + +

+ VerifyAcctConsoleMethods + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x).

+ +
Expected Results:
  * success: The test will pass if the provided AAA accounting console method list matches the configured accounting types.
  * failure: The test will fail if the provided AAA accounting console method list does not match the configured accounting types.
  * skipped: The test will be skipped if the AAA accounting console method list or the accounting type list is not provided.
+
+
+ Source code in anta/tests/aaa.py +
class VerifyAcctConsoleMethods(AntaTest):
+    """
+    Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x).
+
+    Expected Results:
+        * success: The test will pass if the provided AAA accounting console method list is matching in the configured accounting types.
+        * failure: The test will fail if the provided AAA accounting console method list is NOT matching in the configured accounting types.
+        * skipped: The test will be skipped if the AAA accounting console method list or accounting type list are not provided.
+    """
+
+    name = "VerifyAcctConsoleMethods"
+    description = "Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x)."
+    categories = ["aaa"]
+    commands = [AntaCommand(command="show aaa methods accounting")]
+
+    @AntaTest.anta_test
+    def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
+        """
+        Run VerifyAcctConsoleMethods validation.
+
+        Args:
+            methods: List of AAA accounting console methods. Methods should be in the right order.
+            auth_types: List of accounting types to verify. List elements must be: commands, exec, system, dot1x.
+        """
+        if not methods or not auth_types:
+            self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
+            return
+
+        methods_with_group = _check_group_methods(methods)
+
+        _check_auth_type(auth_types, ["system", "exec", "commands", "dot1x"])
+
+        command_output = self.instance_commands[0].json_output
+
+        not_matching = []
+        not_configured = []
+
+        for auth_type in auth_types:
+            auth_type_key = f"{auth_type}AcctMethods"
+
+            method_key = list(command_output[auth_type_key].keys())[0]
+
+            if not command_output[auth_type_key][method_key].get("consoleAction"):
+                not_configured.append(auth_type)
+
+            if command_output[auth_type_key][method_key]["consoleMethods"] != methods_with_group:
+                not_matching.append(auth_type)
+
+        if not_configured:
+            self.result.is_failure(f"AAA console accounting is not configured for {not_configured}")
+            return
+
+        if not not_matching:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"AAA accounting console methods {methods} are not matching for {not_matching}")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(
+    methods: Optional[List[str]] = None,
+    auth_types: Optional[List[str]] = None,
+) -> None
+
+ +
+ +

Run VerifyAcctConsoleMethods validation.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
methods + Optional[List[str]] + +
+

List of AAA accounting console methods. Methods should be in the right order.

+
+
+ None +
auth_types + Optional[List[str]] + +
+

List of accounting types to verify. List elements must be: commands, exec, system, dot1x.

+
+
+ None +
+ +
+ Source code in anta/tests/aaa.py +
@AntaTest.anta_test
+def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
+    """
+    Run VerifyAcctConsoleMethods validation.
+
+    Args:
+        methods: List of AAA accounting console methods. Methods should be in the right order.
+        auth_types: List of accounting types to verify. List elements must be: commands, exec, system, dot1x.
+    """
+    if not methods or not auth_types:
+        self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
+        return
+
+    methods_with_group = _check_group_methods(methods)
+
+    _check_auth_type(auth_types, ["system", "exec", "commands", "dot1x"])
+
+    command_output = self.instance_commands[0].json_output
+
+    not_matching = []
+    not_configured = []
+
+    for auth_type in auth_types:
+        auth_type_key = f"{auth_type}AcctMethods"
+
+        method_key = list(command_output[auth_type_key].keys())[0]
+
+        if not command_output[auth_type_key][method_key].get("consoleAction"):
+            not_configured.append(auth_type)
+
+        if command_output[auth_type_key][method_key]["consoleMethods"] != methods_with_group:
+            not_matching.append(auth_type)
+
+    if not_configured:
+        self.result.is_failure(f"AAA console accounting is not configured for {not_configured}")
+        return
+
+    if not not_matching:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"AAA accounting console methods {methods} are not matching for {not_matching}")
+
+
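A sketch of invoking this test directly from Python; device is assumed to be a connected AntaDevice and the method list is illustrative:

result = await VerifyAcctConsoleMethods(device).test(
    methods=["tacacs+", "logging"],
    auth_types=["exec", "commands"],
)
print(result.result)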
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyAcctDefaultMethods + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x).

+ +
Expected Results:
  * success: The test will pass if the provided AAA accounting default method list matches the configured accounting types.
  * failure: The test will fail if the provided AAA accounting default method list does not match the configured accounting types.
  * skipped: The test will be skipped if the AAA accounting default method list or the accounting type list is not provided.
+
+
+ Source code in anta/tests/aaa.py +
class VerifyAcctDefaultMethods(AntaTest):
+    """
+    Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x).
+
+    Expected Results:
+        * success: The test will pass if the provided AAA accounting default method list is matching in the configured accounting types.
+        * failure: The test will fail if the provided AAA accounting default method list is NOT matching in the configured accounting types.
+        * skipped: The test will be skipped if the AAA accounting default method list or accounting type list are not provided.
+    """
+
+    name = "VerifyAcctDefaultMethods"
+    description = "Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x)."
+    categories = ["aaa"]
+    commands = [AntaCommand(command="show aaa methods accounting")]
+
+    @AntaTest.anta_test
+    def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
+        """
+        Run VerifyAcctDefaultMethods validation.
+
+        Args:
+            methods: List of AAA accounting default methods. Methods should be in the right order.
+            auth_types: List of accounting types to verify. List elements must be: commands, exec, system, dot1x.
+        """
+        if not methods or not auth_types:
+            self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
+            return
+
+        methods_with_group = _check_group_methods(methods)
+
+        _check_auth_type(auth_types, ["system", "exec", "commands", "dot1x"])
+
+        command_output = self.instance_commands[0].json_output
+
+        not_matching = []
+        not_configured = []
+
+        for auth_type in auth_types:
+            auth_type_key = f"{auth_type}AcctMethods"
+
+            method_key = list(command_output[auth_type_key].keys())[0]
+
+            if not command_output[auth_type_key][method_key].get("defaultAction"):
+                not_configured.append(auth_type)
+
+            if command_output[auth_type_key][method_key]["defaultMethods"] != methods_with_group:
+                not_matching.append(auth_type)
+
+        if not_configured:
+            self.result.is_failure(f"AAA default accounting is not configured for {not_configured}")
+            return
+
+        if not not_matching:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"AAA accounting default methods {methods} are not matching for {not_matching}")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(
+    methods: Optional[List[str]] = None,
+    auth_types: Optional[List[str]] = None,
+) -> None
+
+ +
+ +

Run VerifyAcctDefaultMethods validation.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
methods + Optional[List[str]] + +
+

List of AAA accounting default methods. Methods should be in the right order.

+
+
+ None +
auth_types + Optional[List[str]] + +
+

List of accounting types to verify. List elements must be: commands, exec, system, dot1x.

+
+
+ None +
+ +
+ Source code in anta/tests/aaa.py +
@AntaTest.anta_test
+def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
+    """
+    Run VerifyAcctDefaultMethods validation.
+
+    Args:
+        methods: List of AAA accounting default methods. Methods should be in the right order.
+        auth_types: List of accounting types to verify. List elements must be: commands, exec, system, dot1x.
+    """
+    if not methods or not auth_types:
+        self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
+        return
+
+    methods_with_group = _check_group_methods(methods)
+
+    _check_auth_type(auth_types, ["system", "exec", "commands", "dot1x"])
+
+    command_output = self.instance_commands[0].json_output
+
+    not_matching = []
+    not_configured = []
+
+    for auth_type in auth_types:
+        auth_type_key = f"{auth_type}AcctMethods"
+
+        method_key = list(command_output[auth_type_key].keys())[0]
+
+        if not command_output[auth_type_key][method_key].get("defaultAction"):
+            not_configured.append(auth_type)
+
+        if command_output[auth_type_key][method_key]["defaultMethods"] != methods_with_group:
+            not_matching.append(auth_type)
+
+    if not_configured:
+        self.result.is_failure(f"AAA default accounting is not configured for {not_configured}")
+        return
+
+    if not not_matching:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"AAA accounting default methods {methods} are not matching for {not_matching}")
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyAuthenMethods + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies the AAA authentication method lists for different authentication types (login, enable, dot1x).

+ +
Expected Results:
  * success: The test will pass if the provided AAA authentication method list matches the configured authentication types.
  * failure: The test will fail if the provided AAA authentication method list does not match the configured authentication types.
  * skipped: The test will be skipped if the AAA authentication method list or the authentication type list is not provided.
+
+
+ Source code in anta/tests/aaa.py +
class VerifyAuthenMethods(AntaTest):
+    """
+    Verifies the AAA authentication method lists for different authentication types (login, enable, dot1x).
+
+    Expected Results:
+        * success: The test will pass if the provided AAA authentication method list is matching in the configured authentication types.
+        * failure: The test will fail if the provided AAA authentication method list is NOT matching in the configured authentication types.
+        * skipped: The test will be skipped if the AAA authentication method list or authentication type list are not provided.
+    """
+
+    name = "VerifyAuthenMethods"
+    description = "Verifies the AAA authentication method lists for different authentication types (login, enable, dot1x)."
+    categories = ["aaa"]
+    commands = [AntaCommand(command="show aaa methods authentication")]
+
+    @AntaTest.anta_test
+    def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
+        """
+        Run VerifyAuthenMethods validation.
+
+        Args:
+            methods: List of AAA authentication methods. Methods should be in the right order.
+            auth_types: List of authentication types to verify. List elements must be: login, enable, dot1x.
+        """
+        if not methods or not auth_types:
+            self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
+            return
+
+        methods_with_group = _check_group_methods(methods)
+
+        _check_auth_type(auth_types, ["login", "enable", "dot1x"])
+
+        command_output = self.instance_commands[0].json_output
+
+        not_matching = []
+
+        for auth_type in auth_types:
+            auth_type_key = f"{auth_type}AuthenMethods"
+
+            if auth_type_key == "loginAuthenMethods":
+                if not command_output[auth_type_key].get("login"):
+                    self.result.is_failure("AAA authentication methods are not configured for login console")
+                    return
+
+                if command_output[auth_type_key]["login"]["methods"] != methods_with_group:
+                    self.result.is_failure(f"AAA authentication methods {methods} are not matching for login console")
+                    return
+
+            if command_output[auth_type_key]["default"]["methods"] != methods_with_group:
+                not_matching.append(auth_type)
+
+        if not not_matching:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"AAA authentication methods {methods} are not matching for {not_matching}")
+

test

test(methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None

Run VerifyAuthenMethods validation.

Parameters:

  • methods (Optional[List[str]], default None): List of AAA authentication methods. Methods should be in the right order.
  • auth_types (Optional[List[str]], default None): List of authentication types to verify. List elements must be: login, enable, dot1x.

Source code: see the class definition above (anta/tests/aaa.py).
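For instance, checking that console and default logins authenticate through TACACS+ with a local fallback could look like the following minimal sketch. It assumes the usage pattern from the "ANTA as a Python Library" documentation (instantiate the test with a connected AntaDevice, then await its test() coroutine); the device object and the method names are placeholders for your environment.

import asyncio

from anta.tests.aaa import VerifyAuthenMethods

async def check_authen(device) -> None:
    # "device" is assumed to be a connected AntaDevice instance.
    # _check_group_methods() is expected to normalize these names into
    # their "group ..." form where applicable.
    result = await VerifyAuthenMethods(device).test(
        methods=["tacacs+", "local"],
        auth_types=["login", "enable"],
    )
    print(result.result, result.messages)

# asyncio.run(check_authen(device))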

VerifyAuthzMethods

Bases: AntaTest

Verifies the AAA authorization method lists for different authorization types (commands, exec).

Expected Results

  • success: The test will pass if the provided AAA authorization method list matches the configured authorization types.
  • failure: The test will fail if the provided AAA authorization method list does NOT match the configured authorization types.
  • skipped: The test will be skipped if the AAA authorization method list or the authorization type list is not provided.

Source code in anta/tests/aaa.py

class VerifyAuthzMethods(AntaTest):
    """
    Verifies the AAA authorization method lists for different authorization types (commands, exec).

    Expected Results:
        * success: The test will pass if the provided AAA authorization method list matches the configured authorization types.
        * failure: The test will fail if the provided AAA authorization method list does NOT match the configured authorization types.
        * skipped: The test will be skipped if the AAA authorization method list or the authorization type list is not provided.
    """

    name = "VerifyAuthzMethods"
    description = "Verifies the AAA authorization method lists for different authorization types (commands, exec)."
    categories = ["aaa"]
    commands = [AntaCommand(command="show aaa methods authorization")]

    @AntaTest.anta_test
    def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
        """
        Run VerifyAuthzMethods validation.

        Args:
            methods: List of AAA authorization methods. Methods should be in the right order.
            auth_types: List of authorization types to verify. List elements must be: commands, exec.
        """
        if not methods or not auth_types:
            self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
            return

        _check_auth_type(auth_types, ["commands", "exec"])

        methods_with_group = _check_group_methods(methods)

        command_output = self.instance_commands[0].json_output

        not_matching = []

        for auth_type in auth_types:
            auth_type_key = f"{auth_type}AuthzMethods"

            method_key = list(command_output[auth_type_key].keys())[0]

            if command_output[auth_type_key][method_key]["methods"] != methods_with_group:
                not_matching.append(auth_type)

        if not not_matching:
            self.result.is_success()
        else:
            self.result.is_failure(f"AAA authorization methods {methods} are not matching for {not_matching}")

test

test(methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None

Run VerifyAuthzMethods validation.

Parameters:

  • methods (Optional[List[str]], default None): List of AAA authorization methods. Methods should be in the right order.
  • auth_types (Optional[List[str]], default None): List of authorization types to verify. List elements must be: commands, exec.

Source code: see the class definition above (anta/tests/aaa.py).
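To see what the loop above walks over, here is a hypothetical, heavily trimmed "show aaa methods authorization" reply. Only the keys the test actually reads are shown; the inner key names ("privilege0-15", "exec") are assumptions for illustration.

command_output = {
    "commandsAuthzMethods": {"privilege0-15": {"methods": ["group tacacs+", "local"]}},
    "execAuthzMethods": {"exec": {"methods": ["group tacacs+", "local"]}},
}

# The test takes the first (and only) entry under each "<type>AuthzMethods"
# key and compares its "methods" list against the expected, group-normalized list.
method_key = list(command_output["commandsAuthzMethods"].keys())[0]
assert command_output["commandsAuthzMethods"][method_key]["methods"] == ["group tacacs+", "local"]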

VerifyTacacsServerGroups

Bases: AntaTest

Verifies if the provided TACACS server group(s) are configured.

Expected Results

  • success: The test will pass if the provided TACACS server group(s) are configured.
  • failure: The test will fail if one or all of the provided TACACS server group(s) are NOT configured.
  • skipped: The test will be skipped if TACACS server group(s) are not provided.

Source code in anta/tests/aaa.py

class VerifyTacacsServerGroups(AntaTest):
    """
    Verifies if the provided TACACS server group(s) are configured.

    Expected Results:
        * success: The test will pass if the provided TACACS server group(s) are configured.
        * failure: The test will fail if one or all of the provided TACACS server group(s) are NOT configured.
        * skipped: The test will be skipped if TACACS server group(s) are not provided.
    """

    name = "VerifyTacacsServerGroups"
    description = "Verifies if the provided TACACS server group(s) are configured."
    categories = ["aaa"]
    commands = [AntaCommand(command="show tacacs")]

    @AntaTest.anta_test
    def test(self, groups: Optional[List[str]] = None) -> None:
        """
        Run VerifyTacacsServerGroups validation.

        Args:
            groups: List of TACACS server groups.
        """
        if not groups:
            self.result.is_skipped(f"{self.__class__.name} did not run because groups were not supplied")
            return

        command_output = self.instance_commands[0].json_output

        tacacs_groups = command_output["groups"]

        if not tacacs_groups:
            self.result.is_failure("No TACACS server group(s) are configured")
            return

        not_configured = [group for group in groups if group not in tacacs_groups]

        if not not_configured:
            self.result.is_success()
        else:
            self.result.is_failure(f"TACACS server group(s) {not_configured} are not configured")

test

test(groups: Optional[List[str]] = None) -> None

Run VerifyTacacsServerGroups validation.

Parameters:

  • groups (Optional[List[str]], default None): List of TACACS server groups.

Source code: see the class definition above (anta/tests/aaa.py).
+ VerifyTacacsServers + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies TACACS servers are configured for a specified VRF.

+ +
+ Expected Results +
    +
  • success: The test will pass if the provided TACACS servers are configured in the specified VRF.
  • +
  • failure: The test will fail if the provided TACACS servers are NOT configured in the specified VRF.
  • +
  • skipped: The test will be skipped if TACACS servers or VRF are not provided.
  • +
+
+
+ Source code in anta/tests/aaa.py +
 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
class VerifyTacacsServers(AntaTest):
+    """
+    Verifies TACACS servers are configured for a specified VRF.
+
+    Expected Results:
+        * success: The test will pass if the provided TACACS servers are configured in the specified VRF.
+        * failure: The test will fail if the provided TACACS servers are NOT configured in the specified VRF.
+        * skipped: The test will be skipped if TACACS servers or VRF are not provided.
+    """
+
+    name = "VerifyTacacsServers"
+    description = "Verifies TACACS servers are configured for a specified VRF."
+    categories = ["aaa"]
+    commands = [AntaCommand(command="show tacacs")]
+
+    @AntaTest.anta_test
+    def test(self, servers: Optional[List[str]] = None, vrf: str = "default") -> None:
+        """
+        Run VerifyTacacsServers validation.
+
+        Args:
+            servers: List of TACACS servers IP addresses.
+            vrf: The name of the VRF to transport TACACS messages. Defaults to 'default'.
+        """
+        if not servers or not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because servers or vrf were not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        tacacs_servers = command_output["tacacsServers"]
+
+        if not tacacs_servers:
+            self.result.is_failure("No TACACS servers are configured")
+            return
+
+        not_configured = [
+            server
+            for server in servers
+            if not any(server == tacacs_server["serverInfo"]["hostname"] and vrf == tacacs_server["serverInfo"]["vrf"] for tacacs_server in tacacs_servers)
+        ]
+
+        if not not_configured:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"TACACS servers {not_configured} are not configured in VRF {vrf}")
+
+

test

test(servers: Optional[List[str]] = None, vrf: str = "default") -> None

Run VerifyTacacsServers validation.

Parameters:

  • servers (Optional[List[str]], default None): List of TACACS server IP addresses.
  • vrf (str, default 'default'): The name of the VRF to transport TACACS messages.

Source code: see the class definition above (anta/tests/aaa.py).
+ VerifyTacacsSourceIntf + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies TACACS source-interface for a specified VRF.

+ +
+ Expected Results +
    +
  • success: The test will pass if the provided TACACS source-interface is configured in the specified VRF.
  • +
  • failure: The test will fail if the provided TACACS source-interface is NOT configured in the specified VRF.
  • +
  • skipped: The test will be skipped if source-interface or VRF is not provided.
  • +
+
+
+ Source code in anta/tests/aaa.py +
39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
class VerifyTacacsSourceIntf(AntaTest):
+    """
+    Verifies TACACS source-interface for a specified VRF.
+
+    Expected Results:
+        * success: The test will pass if the provided TACACS source-interface is configured in the specified VRF.
+        * failure: The test will fail if the provided TACACS source-interface is NOT configured in the specified VRF.
+        * skipped: The test will be skipped if source-interface or VRF is not provided.
+    """
+
+    name = "VerifyTacacsSourceIntf"
+    description = "Verifies TACACS source-interface for a specified VRF."
+    categories = ["aaa"]
+    commands = [AntaCommand(command="show tacacs")]
+
+    @AntaTest.anta_test
+    def test(self, intf: Optional[str] = None, vrf: str = "default") -> None:
+        """
+        Run VerifyTacacsSourceIntf validation.
+
+        Args:
+            intf: Source-interface to use as source IP of TACACS messages.
+            vrf: The name of the VRF to transport TACACS messages. Defaults to 'default'.
+        """
+        if not intf or not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because intf or vrf was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        try:
+            if command_output["srcIntf"][vrf] == intf:
+                self.result.is_success()
+            else:
+                self.result.is_failure(f"Wrong source-interface configured in VRF {vrf}")
+
+        except KeyError:
+            self.result.is_failure(f"Source-interface {intf} is not configured in VRF {vrf}")
+

test

test(intf: Optional[str] = None, vrf: str = "default") -> None

Run VerifyTacacsSourceIntf validation.

Parameters:

  • intf (Optional[str], default None): Source-interface to use as source IP of TACACS messages.
  • vrf (str, default 'default'): The name of the VRF to transport TACACS messages.

Source code: see the class definition above (anta/tests/aaa.py).
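A minimal invocation sketch, assuming the pattern from the "ANTA as a Python Library" documentation; the interface name and VRF are placeholders for your environment.

from anta.tests.aaa import VerifyTacacsSourceIntf

async def check_tacacs_source(device) -> None:
    # "device" is assumed to be a connected AntaDevice instance.
    result = await VerifyTacacsSourceIntf(device).test(intf="Management1", vrf="MGMT")
    print(result.result)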
diff --git a/0.6.0/api/tests.configuration/index.html b/0.6.0/api/tests.configuration/index.html

Configuration

ANTA catalog for configuration tests

Test functions related to the device configuration

VerifyRunningConfigDiffs

Bases: AntaTest

Verifies there is no difference between the running-config and the startup-config.

Source code in anta/tests/configuration.py

class VerifyRunningConfigDiffs(AntaTest):
    """
    Verifies there is no difference between the running-config and the startup-config.
    """

    name = "VerifyRunningConfigDiffs"
    description = "Verifies there is no difference between the running-config and the startup-config."
    categories = ["configuration"]
    commands = [AntaCommand(command="show running-config diffs", ofmt="text")]

    @AntaTest.anta_test
    def test(self) -> None:
        """Run VerifyRunningConfigDiffs validation"""
        command_output = self.instance_commands[0].output
        if command_output is None or command_output == "":
            self.result.is_success()
        else:
            # Flag the failure, then attach the diff output as a message
            self.result.is_failure()
            self.result.is_failure(str(command_output))

test

test() -> None

Run VerifyRunningConfigDiffs validation.

Source code: see the class definition above (anta/tests/configuration.py).
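An end-to-end sketch running this test against a single switch. The AsyncEOSDevice constructor arguments shown here are an assumption based on typical usage; check the signature in your ANTA version, and replace host and credentials with real values.

import asyncio

from anta.device import AsyncEOSDevice
from anta.tests.configuration import VerifyRunningConfigDiffs

async def main() -> None:
    device = AsyncEOSDevice(host="leaf1", username="admin", password="admin", name="leaf1")
    result = await VerifyRunningConfigDiffs(device).test()
    print(result.result)  # "success" when running-config matches startup-config

asyncio.run(main())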

VerifyZeroTouch

Bases: AntaTest

Verifies ZeroTouch is disabled.

Source code in anta/tests/configuration.py

class VerifyZeroTouch(AntaTest):
    """
    Verifies ZeroTouch is disabled.
    """

    name = "VerifyZeroTouch"
    description = "Verifies ZeroTouch is disabled."
    categories = ["configuration"]
    commands = [AntaCommand(command="show zerotouch")]

    @AntaTest.anta_test
    def test(self) -> None:
        """Run VerifyZeroTouch validation"""

        command_output = self.instance_commands[0].output

        assert isinstance(command_output, dict)
        if command_output["mode"] == "disabled":
            self.result.is_success()
        else:
            self.result.is_failure("ZTP is NOT disabled")

test

test() -> None

Run VerifyZeroTouch validation.

Source code: see the class definition above (anta/tests/configuration.py).
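ANTA's own unit tests exercise test classes offline by injecting pre-collected command output. A minimal sketch of that pattern, assuming the AntaTest constructor accepts an eos_data argument (an assumption to verify against your version):

from anta.tests.configuration import VerifyZeroTouch

async def check_ztp_offline(device) -> None:
    # Inject a canned "show zerotouch" reply instead of querying the device.
    test = VerifyZeroTouch(device, eos_data=[{"mode": "disabled"}])
    result = await test.test()
    print(result.result)  # "success"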
diff --git a/0.6.0/api/tests.connectivity/index.html b/0.6.0/api/tests.connectivity/index.html

Connectivity

ANTA catalog for connectivity tests

Test functions related to various connectivity checks

VerifyReachability

Bases: AntaTest

Test network reachability to one or many destination IP(s).

Expected Results

  • success: The test will pass if all destination IP(s) are reachable.
  • failure: The test will fail if one or many destination IP(s) are unreachable.
  • error: The test will give an error if the destination IP(s) or the source interface/IP(s) are not provided as template_params.

Source code in anta/tests/connectivity.py

class VerifyReachability(AntaTest):
    """
    Test network reachability to one or many destination IP(s).

    Expected Results:
        * success: The test will pass if all destination IP(s) are reachable.
        * failure: The test will fail if one or many destination IP(s) are unreachable.
        * error: The test will give an error if the destination IP(s) or the source interface/IP(s) are not provided as template_params.
    """

    name = "VerifyReachability"
    description = "Test the network reachability to one or many destination IP(s)."
    categories = ["connectivity"]
    template = AntaTemplate(template="ping {dst} source {src} repeat 2")

    @AntaTest.anta_test
    def test(self) -> None:
        """
        Run VerifyReachability validation.
        """

        failures = []

        for command in self.instance_commands:
            # Both template parameters must be present for the ping to be meaningful
            if command.params and "src" in command.params and "dst" in command.params:
                src, dst = command.params["src"], command.params["dst"]
            else:
                self.result.is_error("The destination IP(s) or the source interface/IP(s) are not provided as template_params")
                return

            if "2 received" not in command.json_output["messages"][0]:
                failures.append((src, dst))

        if not failures:
            self.result.is_success()

        else:
            self.result.is_failure(f"Connectivity test failed for the following source-destination pairs: {failures}")

test

test() -> None

Run VerifyReachability validation.

Source code: see the class definition above (anta/tests/connectivity.py).
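Since the command comes from the AntaTemplate "ping {dst} source {src} repeat 2", one ping is rendered per parameter set. A minimal sketch, assuming template parameters are passed at instantiation time as template_params (verify this against your ANTA version); the addresses are placeholders.

from anta.tests.connectivity import VerifyReachability

async def check_reachability(device) -> None:
    test = VerifyReachability(
        device,
        template_params=[
            {"src": "10.0.0.1", "dst": "10.0.0.2"},
            {"src": "10.0.0.1", "dst": "10.0.0.3"},
        ],
    )
    result = await test.test()
    print(result.result)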
diff --git a/0.6.0/api/tests.field_notices/index.html b/0.6.0/api/tests.field_notices/index.html

Field Notices

ANTA catalog for Field Notices tests

Test functions to flag field notices

VerifyFieldNotice44Resolution

Bases: AntaTest

Verifies the device is using an Aboot version that fixes the bug discussed in the field notice 44 (Aboot manages system settings prior to EOS initialization).

https://www.arista.com/en/support/advisories-notices/field-notice/8756-field-notice-44

Source code in anta/tests/field_notices.py

class VerifyFieldNotice44Resolution(AntaTest):
    """
    Verifies the device is using an Aboot version that fixes the bug discussed
    in the field notice 44 (Aboot manages system settings prior to EOS initialization).

    https://www.arista.com/en/support/advisories-notices/field-notice/8756-field-notice-44
    """

    name = "VerifyFieldNotice44Resolution"
    description = (
        "Verifies the device is using an Aboot version that fixes the bug discussed in the field notice 44 (Aboot manages system settings prior to EOS initialization)"
    )
    categories = ["field notices", "software"]
    commands = [AntaCommand(command="show version detail")]

    # TODO maybe implement ONLY ON PLATFORMS instead
    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
    @AntaTest.anta_test
    def test(self) -> None:  # type: ignore[override]
        """Run VerifyFieldNotice44Resolution validation"""

        command_output = self.instance_commands[0].json_output

        devices = [
            "DCS-7010T-48",
            "DCS-7010T-48-DC",
            "DCS-7050TX-48",
            "DCS-7050TX-64",
            "DCS-7050TX-72",
            "DCS-7050TX-72Q",
            "DCS-7050TX-96",
            "DCS-7050TX2-128",
            "DCS-7050SX-64",
            "DCS-7050SX-72",
            "DCS-7050SX-72Q",
            "DCS-7050SX2-72Q",
            "DCS-7050SX-96",
            "DCS-7050SX2-128",
            "DCS-7050QX-32S",
            "DCS-7050QX2-32S",
            "DCS-7050SX3-48YC12",
            "DCS-7050CX3-32S",
            "DCS-7060CX-32S",
            "DCS-7060CX2-32S",
            "DCS-7060SX2-48YC6",
            "DCS-7160-48YC6",
            "DCS-7160-48TC6",
            "DCS-7160-32CQ",
            "DCS-7280SE-64",
            "DCS-7280SE-68",
            "DCS-7280SE-72",
            "DCS-7150SC-24-CLD",
            "DCS-7150SC-64-CLD",
            "DCS-7020TR-48",
            "DCS-7020TRA-48",
            "DCS-7020SR-24C2",
            "DCS-7020SRG-24C2",
            "DCS-7280TR-48C6",
            "DCS-7280TRA-48C6",
            "DCS-7280SR-48C6",
            "DCS-7280SRA-48C6",
            "DCS-7280SRAM-48C6",
            "DCS-7280SR2K-48C6-M",
            "DCS-7280SR2-48YC6",
            "DCS-7280SR2A-48YC6",
            "DCS-7280SRM-40CX2",
            "DCS-7280QR-C36",
            "DCS-7280QRA-C36S",
        ]
        variants = ["-SSD-F", "-SSD-R", "-M-F", "-M-R", "-F", "-R"]

        model = command_output["modelName"]
        # TODO this list could be a regex
        for variant in variants:
            model = model.replace(variant, "")
        if model not in devices:
            self.result.is_skipped("device is not impacted by FN044")
            return

        aboot_version = None
        for component in command_output["details"]["components"]:
            if component["name"] == "Aboot":
                aboot_version = component["version"].split("-")[2]
        if aboot_version is None:
            # The reply may not contain an Aboot component; fail explicitly instead of crashing
            self.result.is_error("Aboot component not found in 'show version detail'")
            return
        self.result.is_success()
        if aboot_version.startswith("4.0.") and int(aboot_version.split(".")[2]) < 7:
            self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
        elif aboot_version.startswith("4.1.") and int(aboot_version.split(".")[2]) < 1:
            self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
        elif aboot_version.startswith("6.0.") and int(aboot_version.split(".")[2]) < 9:
            self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
        elif aboot_version.startswith("6.1.") and int(aboot_version.split(".")[2]) < 7:
            self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")

test

test() -> None

Run VerifyFieldNotice44Resolution validation

Source code: see the class definition above (anta/tests/field_notices.py).
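The version gate above is easier to follow in isolation. The sketch below restates it as a standalone function; the full version-string format ("Aboot-norcal6-4.0.5") is an assumption, only the dash split and the per-branch patch thresholds come from the test itself.

FIXED_PATCH = (("4.0.", 7), ("4.1.", 1), ("6.0.", 9), ("6.1.", 7))

def aboot_is_fixed(component_version: str) -> bool:
    # Third dash-separated field, e.g. "Aboot-norcal6-4.0.5" -> "4.0.5"
    aboot_version = component_version.split("-")[2]
    patch = int(aboot_version.split(".")[2])
    for prefix, fixed_patch in FIXED_PATCH:
        if aboot_version.startswith(prefix) and patch < fixed_patch:
            return False
    return True

print(aboot_is_fixed("Aboot-norcal6-4.0.5"))   # False -> FN044 failure
print(aboot_is_fixed("Aboot-norcal6-6.1.11"))  # True  -> fixed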

VerifyFieldNotice72Resolution

Bases: AntaTest

Checks if the device is potentially exposed to Field Notice 72, and if the issue has been mitigated.

https://www.arista.com/en/support/advisories-notices/field-notice/17410-field-notice-0072

Source code in anta/tests/field_notices.py

class VerifyFieldNotice72Resolution(AntaTest):
    """
    Checks if the device is potentially exposed to Field Notice 72, and if the issue has been mitigated.

    https://www.arista.com/en/support/advisories-notices/field-notice/17410-field-notice-0072
    """

    name = "VerifyFieldNotice72Resolution"
    description = "Verifies if the device is exposed to FN72, and if the issue has been mitigated"
    categories = ["field notices", "software"]
    commands = [AntaCommand(command="show version detail")]

    # TODO maybe implement ONLY ON PLATFORMS instead
    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
    @AntaTest.anta_test
    def test(self) -> None:  # type: ignore[override]
        """Run VerifyFieldNotice72Resolution validation"""

        command_output = self.instance_commands[0].json_output

        devices = ["DCS-7280SR3-48YC8", "DCS-7280SR3K-48YC8"]
        variants = ["-SSD-F", "-SSD-R", "-M-F", "-M-R", "-F", "-R"]
        model = command_output["modelName"]

        for variant in variants:
            model = model.replace(variant, "")
        if model not in devices:
            self.result.is_skipped("Platform is not impacted by FN072")
            return

        serial = command_output["serialNumber"]
        number = int(serial[3:7])

        if "JPE" not in serial and "JAS" not in serial:
            self.result.is_skipped("Device not exposed")
            return

        if model == "DCS-7280SR3-48YC8" and "JPE" in serial and number >= 2131:
            self.result.is_skipped("Device not exposed")
            return

        if model == "DCS-7280SR3-48YC8" and "JAS" in serial and number >= 2041:
            self.result.is_skipped("Device not exposed")
            return

        if model == "DCS-7280SR3K-48YC8" and "JPE" in serial and number >= 2134:
            self.result.is_skipped("Device not exposed")
            return

        if model == "DCS-7280SR3K-48YC8" and "JAS" in serial and number >= 2041:
            self.result.is_skipped("Device not exposed")
            return

        # Because each of the if checks above will return if taken, we only run the long
        # check if we get this far
        for entry in command_output["details"]["components"]:
            if entry["name"] == "FixedSystemvrm1":
                if int(entry["version"]) < 7:
                    self.result.is_failure("Device is exposed to FN72")
                else:
                    self.result.is_success("FN72 is mitigated")
                return
        # We should never hit this point
        self.result.is_error("Error in running test - FixedSystemvrm1 not found")
        return

test

test() -> None

Run VerifyFieldNotice72Resolution validation

Source code: see the class definition above (anta/tests/field_notices.py).
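The serial-number gate can be restated on its own. The slicing serial[3:7] and the per-model thresholds come from the test; the example serial value is made up for illustration.

THRESHOLDS = {
    ("DCS-7280SR3-48YC8", "JPE"): 2131,
    ("DCS-7280SR3-48YC8", "JAS"): 2041,
    ("DCS-7280SR3K-48YC8", "JPE"): 2134,
    ("DCS-7280SR3K-48YC8", "JAS"): 2041,
}

model, serial = "DCS-7280SR3K-48YC8", "JPE21340001"  # hypothetical serial
site = "JPE" if "JPE" in serial else "JAS" if "JAS" in serial else None
exposed = site is not None and int(serial[3:7]) < THRESHOLDS[(model, site)]
print(exposed)  # False: batch 2134 already ships with the fix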
diff --git a/0.6.0/api/tests.hardware/index.html b/0.6.0/api/tests.hardware/index.html

Hardware

ANTA catalog for hardware tests

Test functions related to the hardware or environment

VerifyAdverseDrops

Bases: AntaTest

Verifies there are no adverse drops on DCS7280E and DCS7500E.

Source code in anta/tests/hardware.py

class VerifyAdverseDrops(AntaTest):
    """
    Verifies there are no adverse drops on DCS7280E and DCS7500E.
    """

    name = "VerifyAdverseDrops"
    description = "Verifies there are no adverse drops on DCS7280E and DCS7500E"
    categories = ["hardware"]
    commands = [AntaCommand(command="show hardware counter drop", ofmt="json")]

    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
    @AntaTest.anta_test
    def test(self) -> None:
        """Run VerifyAdverseDrops validation"""
        command_output = self.instance_commands[0].json_output
        total_adverse_drop = command_output.get("totalAdverseDrops", "")
        if total_adverse_drop == 0:
            self.result.is_success()
        else:
            self.result.is_failure(f"Device TotalAdverseDrops counter is {total_adverse_drop}")

test

test() -> None

Run VerifyAdverseDrops validation

Source code: see the class definition above (anta/tests/hardware.py).
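Hardware checks like this one are natural candidates for fanning out across a whole inventory. A minimal sketch, assuming "devices" is an iterable of connected AntaDevice objects (for example, inventory values after connect_inventory()):

import asyncio

from anta.tests.hardware import VerifyAdverseDrops

async def check_drops(devices) -> None:
    # One concurrent VerifyAdverseDrops run per device
    results = await asyncio.gather(*(VerifyAdverseDrops(d).test() for d in devices))
    for result in results:
        print(result.result, result.messages)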

VerifyEnvironmentCooling

Bases: AntaTest

Verifies the fans status is in the accepted states list.

Default accepted states list is ['ok']

Source code in anta/tests/hardware.py

class VerifyEnvironmentCooling(AntaTest):
    """
    Verifies the fans status is in the accepted states list.

    Default accepted states list is ['ok']
    """

    name = "VerifyEnvironmentCooling"
    description = "Verifies the fans status is in the accepted states list"
    categories = ["hardware"]
    commands = [AntaCommand(command="show system environment cooling", ofmt="json")]

    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
    @AntaTest.anta_test
    def test(self, accepted_states: Optional[List[str]] = None) -> None:
        """
        Run VerifyEnvironmentCooling validation

        Args:
            accepted_states: Accepted states list for fan status
        """
        if accepted_states is None:
            accepted_states = ["ok"]

        command_output = self.instance_commands[0].json_output
        self.result.is_success()
        # First go through power supplies fans
        for power_supply in command_output.get("powerSupplySlots", []):
            for fan in power_supply.get("fans", []):
                if (state := fan["status"]) not in accepted_states:
                    if self.result.result == "success":
                        self.result.is_failure(f"Some fans state are not in the accepted list: {accepted_states}.")
                    self.result.is_failure(f"Fan {fan['label']} on PowerSupply {power_supply['label']} has state '{state}'.")
        # Then go through Fan Trays
        for fan_tray in command_output.get("fanTraySlots", []):
            for fan in fan_tray.get("fans", []):
                if (state := fan["status"]) not in accepted_states:
                    if self.result.result == "success":
                        self.result.is_failure(f"Some fans state are not in the accepted list: {accepted_states}.")
                    self.result.is_failure(f"Fan {fan['label']} on Fan Tray {fan_tray['label']} has state '{state}'.")

test

test(accepted_states: Optional[List[str]] = None) -> None

Run VerifyEnvironmentCooling validation

Parameters:

  • accepted_states (Optional[List[str]], default None): Accepted states list for fan status

Source code: see the class definition above (anta/tests/hardware.py).
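Passing accepted_states relaxes the check beyond the default ["ok"]. A minimal sketch; the extra state name "powerLoss" is purely illustrative, use whatever states your platform actually reports.

from anta.tests.hardware import VerifyEnvironmentCooling

async def check_cooling(device) -> None:
    # "device" is assumed to be a connected AntaDevice instance.
    result = await VerifyEnvironmentCooling(device).test(accepted_states=["ok", "powerLoss"])
    print(result.result, result.messages)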

VerifyEnvironmentPower

Bases: AntaTest

Verifies the power supplies status is in the accepted states list.

The default accepted states list is ['ok']

Source code in anta/tests/hardware.py

class VerifyEnvironmentPower(AntaTest):
    """
    Verifies the power supplies status is in the accepted states list.

    The default accepted states list is ['ok']
    """

    name = "VerifyEnvironmentPower"
    description = "Verifies the power supplies status is OK"
    categories = ["hardware"]
    commands = [AntaCommand(command="show system environment power", ofmt="json")]

    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
    @AntaTest.anta_test
    def test(self, accepted_states: Optional[List[str]] = None) -> None:
        """
        Run VerifyEnvironmentPower validation

        Args:
            accepted_states: Accepted states list for power supplies
        """
        if accepted_states is None:
            accepted_states = ["ok"]
        command_output = self.instance_commands[0].json_output
        power_supplies = command_output.get("powerSupplies", {})
        wrong_power_supplies = {
            powersupply: {"state": value["state"]} for powersupply, value in power_supplies.items() if value["state"] not in accepted_states
        }
        if not wrong_power_supplies:
            self.result.is_success()
        else:
            self.result.is_failure(f"The following power supplies states are not in the accepted_states list {accepted_states}")
            self.result.messages.append(str(wrong_power_supplies))

test

test(accepted_states: Optional[List[str]] = None) -> None

Run VerifyEnvironmentPower validation

Parameters:

  • accepted_states (Optional[List[str]], default None): Accepted states list for power supplies

Source code: see the class definition above (anta/tests/hardware.py).
+ VerifyEnvironmentSystemCooling + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies the System Cooling is ok.

+ +
+ Source code in anta/tests/hardware.py +
 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
class VerifyEnvironmentSystemCooling(AntaTest):
+    """
+    Verifies the System Cooling is ok.
+    """
+
+    name = "VerifyEnvironmentSystemCooling"
+    description = "Verifies the fans status is OK for fans"
+    categories = ["hardware"]
+    commands = [AntaCommand(command="show system environment cooling", ofmt="json")]
+
+    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyEnvironmentCooling validation"""
+
+        command_output = self.instance_commands[0].json_output
+        sys_status = command_output["systemStatus"] if "systemStatus" in command_output.keys() else ""
+
+        self.result.is_success()
+        if sys_status != "coolingOk":
+            self.result.is_failure(f"Device System cooling is not OK: {sys_status}")
+
+

test

test() -> None

Run VerifyEnvironmentSystemCooling validation

Source code: see the class definition above (anta/tests/hardware.py).
+ VerifyTemperature + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies device temparture is currently OK (temperatureOK).

+ +
+ Source code in anta/tests/hardware.py +
43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
class VerifyTemperature(AntaTest):
+    """
+    Verifies device temparture is currently OK (temperatureOK).
+    """
+
+    name = "VerifyTemperature"
+    description = "Verifies device temparture is currently OK (temperatureOK)"
+    categories = ["hardware"]
+    commands = [AntaCommand(command="show system environment temperature", ofmt="json")]
+
+    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyTemperature validation"""
+        command_output = self.instance_commands[0].json_output
+        temperature_status = command_output["systemStatus"] if "systemStatus" in command_output.keys() else ""
+        if temperature_status == "temperatureOk":
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Device temperature is not OK, systemStatus: {temperature_status }")
+
+

test

test() -> None

Run VerifyTemperature validation

Source code: see the class definition above (anta/tests/hardware.py).

VerifyTransceiversManufacturers

Bases: AntaTest

Verifies the manufacturers of all transceivers.

Source code in anta/tests/hardware.py (lines 12-40):
class VerifyTransceiversManufacturers(AntaTest):
+    """
+    Verifies the manufacturers of all transceivers.
+    """
+
+    name = "VerifyTransceiversManufacturers"
+    description = "Verifies the manufacturers of all transceivers."
+    categories = ["hardware"]
+    commands = [AntaCommand(command="show inventory", ofmt="json")]
+
+    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
+    @AntaTest.anta_test
+    def test(self, manufacturers: Optional[List[str]] = None) -> None:
+        """
+        Run VerifyTransceiversManufacturers validation
+
+        Args:
+            manufacturers: List of allowed transceivers manufacturers.
+        """
+        if not manufacturers:
+            self.result.is_skipped(f"{self.__class__.name} was not run as no manufacturers were given")
+        else:
+            command_output = self.instance_commands[0].json_output
+            wrong_manufacturers = {interface: value["mfgName"] for interface, value in command_output["xcvrSlots"].items() if value["mfgName"] not in manufacturers}
+            if not wrong_manufacturers:
+                self.result.is_success()
+            else:
+                self.result.is_failure("The following interfaces have transceivers from unauthorized manufacturers")
+                self.result.messages.append(str(wrong_manufacturers))
test

test(manufacturers: Optional[List[str]] = None) -> None

Run VerifyTransceiversManufacturers validation

Parameters:

    manufacturers (Optional[List[str]], default None): List of allowed transceivers manufacturers.
VerifyTransceiversTemperature

Bases: AntaTest

Verifies Transceivers temperature is currently OK.

Source code in anta/tests/hardware.py (lines 65-93):
class VerifyTransceiversTemperature(AntaTest):
+    """
+    Verifies Transceivers temperature is currently OK.
+    """
+
+    name = "VerifyTransceiversTemperature"
+    description = "Verifies Transceivers temperature is currently OK"
+    categories = ["hardware"]
+    commands = [AntaCommand(command="show system environment temperature transceiver", ofmt="json")]
+
+    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyTransceiversTemperature validation"""
+        command_output = self.instance_commands[0].json_output
+        sensors = command_output["tempSensors"] if "tempSensors" in command_output.keys() else ""
+        wrong_sensors = {
+            sensor["name"]: {
+                "hwStatus": sensor["hwStatus"],
+                "alertCount": sensor["alertCount"],
+            }
+            for sensor in sensors
+            if sensor["hwStatus"] != "ok" or sensor["alertCount"] != 0
+        }
+        if not wrong_sensors:
+            self.result.is_success()
+        else:
+            self.result.is_failure("The following sensors do not have the correct temperature or had alarms in the past:")
+            self.result.messages.append(str(wrong_sensors))
test

test() -> None

Run VerifyTransceiversTemperature validation

diff --git a/0.6.0/api/tests.interfaces/index.html b/0.6.0/api/tests.interfaces/index.html
new file mode 100644
index 000000000..80bc2f7c0
--- /dev/null
+++ b/0.6.0/api/tests.interfaces/index.html
Interfaces

ANTA catalog for interfaces tests

Test functions related to the device interfaces

VerifyIPProxyARP

Bases: AntaTest

Verifies if Proxy-ARP is enabled for the provided list of interface(s).

Expected Results

  • success: The test will pass if Proxy-ARP is enabled on the specified interface(s).
  • failure: The test will fail if Proxy-ARP is disabled on the specified interface(s).
  • error: The test will give an error if a list of interface(s) is not provided as template_params.

Source code in anta/tests/interfaces.py (lines 378-411):
class VerifyIPProxyARP(AntaTest):
+    """
+    Verifies if Proxy-ARP is enabled for the provided list of interface(s).
+
+    Expected Results:
+        * success: The test will pass if Proxy-ARP is enabled on the specified interface(s).
+        * failure: The test will fail if Proxy-ARP is disabled on the specified interface(s).
+        * error: The test will give an error if a list of interface(s) is not provided as template_params.
+
+    """
+
+    name = "VerifyIPProxyARP"
+    description = "Verifies if Proxy-ARP is enabled for the provided list of interface(s)."
+    categories = ["interfaces"]
+    template = AntaTemplate(template="show ip interface {intf}")
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyIPProxyARP validation.
+        """
+
+        disabled_intf = []
+        for command in self.instance_commands:
+            if command.params and "intf" in command.params:
+                intf = command.params["intf"]
+            if not command.json_output["interfaces"][intf]["proxyArp"]:
+                disabled_intf.append(intf)
+
+        if disabled_intf:
+            self.result.is_failure(f"The following interface(s) have Proxy-ARP disabled: {disabled_intf}")
+
+        else:
+            self.result.is_success()
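
The per-interface commands come from the AntaTemplate declared above: each entry of template_params is substituted into the template string. A rough standalone sketch of that substitution (illustrative only, not the actual AntaTemplate implementation):

template = "show ip interface {intf}"
template_params = [{"intf": "Ethernet1"}, {"intf": "Ethernet2"}]  # hypothetical parameters

commands = [template.format(**params) for params in template_params]
print(commands)  # ['show ip interface Ethernet1', 'show ip interface Ethernet2']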
test

test() -> None

Run VerifyIPProxyARP validation.

VerifyIllegalLACP

Bases: AntaTest

Verifies there are no illegal LACP packets received.

Source code in anta/tests/interfaces.py (lines 221-246):
class VerifyIllegalLACP(AntaTest):
+    """
+    Verifies there are no illegal LACP packets received.
+    """
+
+    name = "VerifyIllegalLACP"
+    description = "Verifies there are no illegal LACP packets received."
+    categories = ["interfaces"]
+    commands = [AntaCommand(command="show lacp counters all-ports")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyIllegalLACP validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        po_with_illegal_lacp: List[Dict[str, Dict[str, int]]] = []
+        for portchannel, portchannel_dict in command_output["portChannels"].items():
+            po_with_illegal_lacp.extend(
+                {portchannel: interface} for interface, interface_dict in portchannel_dict["interfaces"].items() if interface_dict["illegalRxCount"] != 0
+            )
+
+        if not po_with_illegal_lacp:
+            self.result.is_success()
+        else:
+            self.result.is_failure("The following port-channels have recieved illegal lacp packets on the " f"following ports: {po_with_illegal_lacp}")
test

test() -> None

Run VerifyIllegalLACP validation

VerifyInterfaceDiscards

Bases: AntaTest

Verifies interfaces packet discard counters are equal to zero.

Source code in anta/tests/interfaces.py (lines 70-93):
class VerifyInterfaceDiscards(AntaTest):
+    """
+    Verifies interfaces packet discard counters are equal to zero.
+    """
+
+    name = "VerifyInterfaceDiscards"
+    description = "Verifies interfaces packet discard counters are equal to zero."
+    categories = ["interfaces"]
+    commands = [AntaCommand(command="show interfaces counters discards")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyInterfaceDiscards validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        wrong_interfaces: List[Dict[str, Dict[str, int]]] = []
+
+        for interface, outer_v in command_output["interfaces"].items():
+            wrong_interfaces.extend({interface: outer_v} for counter, value in outer_v.items() if value > 0)
+        if not wrong_interfaces:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"The following interfaces have non 0 discard counter(s): {wrong_interfaces}")
test

test() -> None

Run VerifyInterfaceDiscards validation

VerifyInterfaceErrDisabled

Bases: AntaTest

Verifies there is no interface in error-disabled state.

Source code in anta/tests/interfaces.py (lines 96-117):
class VerifyInterfaceErrDisabled(AntaTest):
+    """
+    Verifies there is no interface in error-disabled state.
+    """
+
+    name = "VerifyInterfaceErrDisabled"
+    description = "Verifies there is no interface in error-disabled state."
+    categories = ["interfaces"]
+    commands = [AntaCommand(command="show interfaces status")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyInterfaceErrDisabled validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        errdisabled_interfaces = [interface for interface, value in command_output["interfaceStatuses"].items() if value["linkStatus"] == "errdisabled"]
+
+        if errdisabled_interfaces:
+            self.result.is_failure(f"The following interfaces are in error disabled state: {errdisabled_interfaces}")
+        else:
+            self.result.is_success()
+
test

test() -> None

Run VerifyInterfaceErrDisabled validation

VerifyInterfaceErrors

Bases: AntaTest

Verifies interfaces error counters are equal to zero.

Source code in anta/tests/interfaces.py (lines 45-67):
class VerifyInterfaceErrors(AntaTest):
+    """
+    Verifies interfaces error counters are equal to zero.
+    """
+
+    name = "VerifyInterfaceErrors"
+    description = "Verifies interfaces error counters are equal to zero."
+    categories = ["interfaces"]
+    commands = [AntaCommand(command="show interfaces counters errors")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyInterfaceUtilization validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        wrong_interfaces: List[Dict[str, Dict[str, int]]] = []
+        for interface, outer_v in command_output["interfaceErrorCounters"].items():
+            wrong_interfaces.extend({interface: outer_v} for counter, value in outer_v.items() if value > 0)
+        if not wrong_interfaces:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"The following interfaces have non 0 error counter(s): {wrong_interfaces}")
test

test() -> None

Run VerifyInterfaceErrors validation

VerifyInterfaceUtilization

Bases: AntaTest

Verifies interfaces utilization is below 75%.

Source code in anta/tests/interfaces.py (lines 12-42):
class VerifyInterfaceUtilization(AntaTest):
+    """
+    Verifies interfaces utilization is below 75%.
+    """
+
+    name = "VerifyInterfaceUtilization"
+    description = "Verifies interfaces utilization is below 75%."
+    categories = ["interfaces"]
+    # TODO - move from text to json if possible
+    commands = [AntaCommand(command="show interfaces counters rates", ofmt="text")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyInterfaceUtilization validation"""
+
+        command_output = self.instance_commands[0].text_output
+
+        wrong_interfaces = {}
+        for line in command_output.split("\n")[1:]:
+            if len(line) > 0:
+                if line.split()[-5] == "-" or line.split()[-2] == "-":
+                    pass
+                elif float(line.split()[-5].replace("%", "")) > 75.0:
+                    wrong_interfaces[line.split()[0]] = line.split()[-5]
+                elif float(line.split()[-2].replace("%", "")) > 75.0:
+                    wrong_interfaces[line.split()[0]] = line.split()[-2]
+
+        if not wrong_interfaces:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"The following interfaces have a usage > 75%: {wrong_interfaces}")
test

test() -> None

Run VerifyInterfaceUtilization validation

VerifyInterfacesStatus

Bases: AntaTest

Verifies the number of Ethernet interfaces up/up on the device is higher than or equal to a given value.

Source code in anta/tests/interfaces.py (lines 120-160):
class VerifyInterfacesStatus(AntaTest):
+    """
+    Verifies the number of Ethernet interfaces up/up on the device is higher than or equal to a given value.
+    """
+
+    name = "VerifyInterfacesStatus"
+    description = "Verifies the number of Ethernet interfaces up/up on the device is higher than or equal to a given value."
+    categories = ["interfaces"]
+    commands = [AntaCommand(command="show interfaces description")]
+
+    @AntaTest.anta_test
+    def test(self, minimum: Optional[int] = None) -> None:
+        """
+        Run VerifyInterfacesStatus validation
+
+        Args:
+            minimum: Expected minimum number of Ethernet interfaces up/up.
+        """
+
+        if minimum is None or minimum < 0:
+            self.result.is_skipped(f"VerifyInterfacesStatus was not run as an invalid minimum value was given {minimum}.")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        count_up_up = 0
+        other_ethernet_interfaces = []
+
+        for interface in command_output["interfaceDescriptions"]:
+            interface_dict = command_output["interfaceDescriptions"][interface]
+            if "Ethernet" in interface:
+                if re.match(r"connected|up", interface_dict["lineProtocolStatus"]) and re.match(r"connected|up", interface_dict["interfaceStatus"]):
+                    count_up_up += 1
+                else:
+                    other_ethernet_interfaces.append(interface)
+
+        if count_up_up >= minimum:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Only {count_up_up}, less than {minimum} Ethernet interfaces are UP/UP")
+            self.result.messages.append(f"The following Ethernet interfaces are not UP/UP: {other_ethernet_interfaces}")
test

test(minimum: Optional[int] = None) -> None

Run VerifyInterfacesStatus validation

Parameters:

    minimum (Optional[int], default None): Expected minimum number of Ethernet interfaces up/up.
VerifyL3MTU

Bases: AntaTest

Verifies the global layer 3 Maximum Transmission Unit (MTU) for all layer 3 interfaces.

Expected Results

  • success: The test will pass if all layer 3 interfaces have the proper MTU configured.
  • failure: The test will fail if one or many layer 3 interfaces have the wrong MTU configured.
  • skipped: The test will be skipped if the MTU value is not provided.

Limitations

  • Only Ethernet, Port-Channel, Vlan interfaces are supported.
  • Other interface types, like Management, Loopback, Vxlan, Tunnel are currently not supported.

https://www.arista.com/en/support/toi/eos-4-23-1f/14388-global-knob-to-set-mtu-for-all-layer-3-interfaces

Source code in anta/tests/interfaces.py (lines 324-375):
class VerifyL3MTU(AntaTest):
+    """
+    Verifies the global layer 3 Maximum Transmission Unit (MTU) for all layer 3 interfaces.
+
+    Expected Results:
+        * success: The test will pass if all layer 3 interfaces have the proper MTU configured.
+        * failure: The test will fail if one or many layer 3 interfaces have the wrong MTU configured.
+        * skipped: The test will be skipped if the MTU value is not provided.
+
+    Limitations:
+        * Only Ethernet, Port-Channel, Vlan interfaces are supported.
+        * Other interface types, like Management, Loopback, Vxlan, Tunnel are currently not supported.
+
+    https://www.arista.com/en/support/toi/eos-4-23-1f/14388-global-knob-to-set-mtu-for-all-layer-3-interfaces
+
+    """
+
+    name = "VerifyL3MTU"
+    description = "Verifies the global layer 3 Maximum Transfer Unit (MTU) for all layer 3 interfaces."
+    categories = ["interfaces"]
+    commands = [AntaCommand(command="show interfaces")]
+
+    NOT_SUPPORTED_INTERFACES: List[str] = ["Management", "Loopback", "Vxlan", "Tunnel"]
+
+    @AntaTest.anta_test
+    def test(self, mtu: int = 1500) -> None:
+        """
+        Run VerifyL3MTU validation
+
+        Args:
+          mtu: Layer 3 MTU to verify. Defaults to 1500.
+
+        """
+
+        if not mtu:
+            self.result.is_skipped(f"{self.__class__.name} did not run because mtu was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        wrong_l3mtu_intf = []
+
+        for interface, values in command_output["interfaces"].items():
+            if re.sub(r"\d+$", "", interface) not in self.NOT_SUPPORTED_INTERFACES:
+                if values["forwardingModel"] == "routed" and values["mtu"] != mtu:
+                    wrong_l3mtu_intf.append(interface)
+
+        if not wrong_l3mtu_intf:
+            self.result.is_success()
+
+        else:
+            self.result.is_failure(f"The following interface(s) have the wrong MTU configured: {wrong_l3mtu_intf}")
test

test(mtu: int = 1500) -> None

Run VerifyL3MTU validation

Parameters:

    mtu (int, default 1500): Layer 3 MTU to verify.
VerifyLoopbackCount

Bases: AntaTest

Verifies that the number of loopback interfaces on the device matches the expected number and that none of them are down.

Source code in anta/tests/interfaces.py (lines 249-291):
class VerifyLoopbackCount(AntaTest):
+    """
+    Verifies that the number of loopback interfaces on the device matches the expected number and that none of them are down.
+    """
+
+    name = "VerifyLoopbackCount"
+    description = "Verifies that the number of loopback interfaces on the device matches the expected number and that none of them are down."
+    categories = ["interfaces"]
+    commands = [AntaCommand(command="show ip interface brief")]
+
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None) -> None:
+        """
+        Run VerifyLoopbackCount validation
+
+        Args:
+            number: Number of loopback interfaces expected to be present.
+        """
+
+        if number is None:
+            self.result.is_skipped("VerifyLoopbackCount was not run as no number value was given.")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        loopback_count = 0
+        down_loopback_interfaces = []
+
+        for interface in command_output["interfaces"]:
+            interface_dict = command_output["interfaces"][interface]
+            if "Loopback" in interface:
+                loopback_count += 1
+                if not (interface_dict["lineProtocolStatus"] == "up" and interface_dict["interfaceStatus"] == "connected"):
+                    down_loopback_interfaces.append(interface)
+
+        if loopback_count == number and len(down_loopback_interfaces) == 0:
+            self.result.is_success()
+        else:
+            self.result.is_failure()
+            if loopback_count != number:
+                self.result.is_failure(f"Found {loopback_count} Loopbacks when expecting {number}")
+            elif len(down_loopback_interfaces) != 0:
+                self.result.is_failure(f"The following Loopbacks are not up: {down_loopback_interfaces}")
test

test(number: Optional[int] = None) -> None

Run VerifyLoopbackCount validation

Parameters:

    number (Optional[int], default None): Number of loopback interfaces expected to be present.
VerifyPortChannels

Bases: AntaTest

Verifies there is no inactive port in port channels.

Source code in anta/tests/interfaces.py (lines 193-218):
class VerifyPortChannels(AntaTest):
+    """
+    Verifies there is no inactive port in port channels.
+    """
+
+    name = "VerifyPortChannels"
+    description = "Verifies there is no inactive port in port channels."
+    categories = ["interfaces"]
+    commands = [AntaCommand(command="show port-channel")]
+
+    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyPortChannels validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        po_with_inactive_ports: List[Dict[str, str]] = []
+        for portchannel, portchannel_dict in command_output["portChannels"].items():
+            if len(portchannel_dict["inactivePorts"]) != 0:
+                po_with_inactive_ports.append({portchannel: portchannel_dict["inactivePorts"]})
+
+        if not po_with_inactive_ports:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"The following port-channels have inactive port(s): {po_with_inactive_ports}")
test

test() -> None

Run VerifyPortChannels validation

VerifySVI

Bases: AntaTest

Verifies there is no interface vlan down.

Source code in anta/tests/interfaces.py (lines 294-321):
class VerifySVI(AntaTest):
+    """
+    Verifies there is no interface vlan down.
+    """
+
+    name = "VerifySVI"
+    description = "Verifies there is no interface vlan down."
+    categories = ["interfaces"]
+    commands = [AntaCommand(command="show ip interface brief")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifySVI validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        down_svis = []
+
+        for interface in command_output["interfaces"]:
+            interface_dict = command_output["interfaces"][interface]
+            if "Vlan" in interface:
+                if not (interface_dict["lineProtocolStatus"] == "up" and interface_dict["interfaceStatus"] == "connected"):
+                    down_svis.append(interface)
+
+        if len(down_svis) == 0:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"The following SVIs are not up: {down_svis}")
+
test

test() -> None

Run VerifySVI validation

VerifyStormControlDrops

Bases: AntaTest

Verifies the device did not drop packets due to its storm-control configuration.

Source code in anta/tests/interfaces.py (lines 163-190):
class VerifyStormControlDrops(AntaTest):
+    """
+    Verifies the device did not drop packets due to its storm-control configuration.
+    """
+
+    name = "VerifyStormControlDrops"
+    description = "Verifies the device did not drop packets due to its storm-control configuration."
+    categories = ["interfaces"]
+    commands = [AntaCommand(command="show storm-control")]
+
+    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyStormControlDrops validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        storm_controlled_interfaces: Dict[str, Dict[str, Any]] = {}
+        for interface, interface_dict in command_output["interfaces"].items():
+            for traffic_type, traffic_type_dict in interface_dict["trafficTypes"].items():
+                if "drop" in traffic_type_dict and traffic_type_dict["drop"] != 0:
+                    storm_controlled_interface_dict = storm_controlled_interfaces.setdefault(interface, {})
+                    storm_controlled_interface_dict.update({traffic_type: traffic_type_dict["drop"]})
+
+        if not storm_controlled_interfaces:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"The following interfaces have none 0 storm-control drop counters {storm_controlled_interfaces}")
test

test() -> None

Run VerifyStormControlDrops validation

diff --git a/0.6.0/api/tests.logging/index.html b/0.6.0/api/tests.logging/index.html
new file mode 100644
index 000000000..88571d42a
--- /dev/null
+++ b/0.6.0/api/tests.logging/index.html
Logging

ANTA catalog for logging tests

Test functions related to the various EOS logging settings

NOTE: ‘show logging’ does not support json output yet

VerifyLoggingAccounting

Bases: AntaTest

Verifies if AAA accounting logs are generated.

Expected Results

  • success: The test will pass if AAA accounting logs are generated.
  • failure: The test will fail if AAA accounting logs are NOT generated.

Source code in anta/tests/logging.py (lines 265-290):
class VerifyLoggingAccounting(AntaTest):
+    """
+    Verifies if AAA accounting logs are generated.
+
+    Expected Results:
+        * success: The test will pass if AAA accounting logs are generated.
+        * failure: The test will fail if AAA accounting logs are NOT generated.
+    """
+
+    name = "VerifyLoggingAccounting"
+    description = "Verifies if AAA accounting logs are generated."
+    categories = ["logging"]
+    commands = [AntaCommand(command="show aaa accounting logs | tail", ofmt="text")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyLoggingAccounting validation.
+        """
+        pattern = r"cmd=show aaa accounting logs"
+        output = self.instance_commands[0].text_output
+
+        if re.search(pattern, output):
+            self.result.is_success()
+        else:
+            self.result.is_failure("AAA accounting logs are not generated")
test

test() -> None

Run VerifyLoggingAccounting validation.

VerifyLoggingHostname

Bases: AntaTest

Verifies if logs are generated with the device FQDN.

Expected Results

  • success: The test will pass if logs are generated with the device FQDN.
  • failure: The test will fail if logs are NOT generated with the device FQDN.

Source code in anta/tests/logging.py (lines 182-221):
class VerifyLoggingHostname(AntaTest):
+    """
+    Verifies if logs are generated with the device FQDN.
+
+    Expected Results:
+        * success: The test will pass if logs are generated with the device FQDN.
+        * failure: The test will fail if logs are NOT generated with the device FQDN.
+    """
+
+    name = "VerifyLoggingHostname"
+    description = "Verifies if logs are generated with the device FQDN."
+    categories = ["logging"]
+    commands = [
+        AntaCommand(command="show hostname"),
+        AntaCommand(command="send log level informational message ANTA VerifyLoggingHostname validation"),
+        AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text"),
+    ]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyLoggingHostname validation.
+        """
+        output_hostname = self.instance_commands[0].json_output
+        output_logging = self.instance_commands[2].text_output
+        fqdn = output_hostname["fqdn"]
+        lines = output_logging.strip().split("\n")[::-1]
+
+        log_pattern = r"ANTA VerifyLoggingHostname validation"
+
+        last_line_with_pattern = ""
+        for line in lines:
+            if re.search(log_pattern, line):
+                last_line_with_pattern = line
+                break
+
+        if fqdn in last_line_with_pattern:
+            self.result.is_success()
+        else:
+            self.result.is_failure("Logs are not generated with the device FQDN")
+
test

test() -> None

Run VerifyLoggingHostname validation.

VerifyLoggingHosts

Bases: AntaTest

Verifies logging hosts (syslog servers) for a specified VRF.

Expected Results

  • success: The test will pass if the provided syslog servers are configured in the specified VRF.
  • failure: The test will fail if the provided syslog servers are NOT configured in the specified VRF.
  • skipped: The test will be skipped if syslog servers or VRF are not provided.

Source code in anta/tests/logging.py (lines 104-144):
class VerifyLoggingHosts(AntaTest):
+    """
+    Verifies logging hosts (syslog servers) for a specified VRF.
+
+    Expected Results:
+        * success: The test will pass if the provided syslog servers are configured in the specified VRF.
+        * failure: The test will fail if the provided syslog servers are NOT configured in the specified VRF.
+        * skipped: The test will be skipped if syslog servers or VRF are not provided.
+    """
+
+    name = "VerifyLoggingHosts"
+    description = "Verifies logging hosts (syslog servers) for a specified VRF."
+    categories = ["logging"]
+    commands = [AntaCommand(command="show logging", ofmt="text")]
+
+    @AntaTest.anta_test
+    def test(self, hosts: Optional[List[str]] = None, vrf: str = "default") -> None:
+        """
+        Run VerifyLoggingHosts validation.
+
+        Args:
+            hosts: List of hosts (syslog servers) IP addresses.
+            vrf: The name of the VRF to transport log messages. Defaults to 'default'.
+        """
+        if not hosts or not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because hosts or vrf were not supplied")
+            return
+
+        output = self.instance_commands[0].text_output
+
+        not_configured = []
+
+        for host in hosts:
+            pattern = rf"Logging to '{host}'.*VRF {vrf}"
+            if not re.search(pattern, _get_logging_states(self.logger, output)):
+                not_configured.append(host)
+
+        if not not_configured:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Syslog servers {not_configured} are not configured in VRF {vrf}")
test

test(hosts: Optional[List[str]] = None, vrf: str = "default") -> None

Run VerifyLoggingHosts validation.

Parameters:

    hosts (Optional[List[str]], default None): List of hosts (syslog servers) IP addresses.
    vrf (str, default 'default'): The name of the VRF to transport log messages.
VerifyLoggingLogsGeneration

Bases: AntaTest

Verifies if logs are generated.

Expected Results

  • success: The test will pass if logs are generated.
  • failure: The test will fail if logs are NOT generated.

Source code in anta/tests/logging.py (lines 147-179):
class VerifyLoggingLogsGeneration(AntaTest):
+    """
+    Verifies if logs are generated.
+
+    Expected Results:
+        * success: The test will pass if logs are generated.
+        * failure: The test will fail if logs are NOT generated.
+    """
+
+    name = "VerifyLoggingLogsGeneration"
+    description = "Verifies if logs are generated."
+    categories = ["logging"]
+    commands = [
+        AntaCommand(command="send log level informational message ANTA VerifyLoggingLogsGeneration validation"),
+        AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text"),
+    ]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyLoggingLogsGeneration validation.
+        """
+        log_pattern = r"ANTA VerifyLoggingLogsGeneration validation"
+
+        output = self.instance_commands[1].text_output
+        lines = output.strip().split("\n")[::-1]
+
+        for line in lines:
+            if re.search(log_pattern, line):
+                self.result.is_success()
+                return
+
+        self.result.is_failure("Logs are not generated")
test

test() -> None

Run VerifyLoggingLogsGeneration validation.

VerifyLoggingPersistent

Bases: AntaTest

Verifies if logging persistent is enabled and logs are saved in flash.

Expected Results

  • success: The test will pass if logging persistent is enabled and logs are in flash.
  • failure: The test will fail if logging persistent is disabled or no logs are saved in flash.

Source code in anta/tests/logging.py (lines 28-63):
class VerifyLoggingPersistent(AntaTest):
+    """
+    Verifies if logging persistent is enabled and logs are saved in flash.
+
+    Expected Results:
+        * success: The test will pass if logging persistent is enabled and logs are in flash.
+        * failure: The test will fail if logging persistent is disabled or no logs are saved in flash.
+    """
+
+    name = "VerifyLoggingPersistent"
+    description = "Verifies if logging persistent is enabled and logs are saved in flash."
+    categories = ["logging"]
+    commands = [
+        AntaCommand(command="show logging", ofmt="text"),
+        AntaCommand(command="dir flash:/persist/messages", ofmt="text"),
+    ]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyLoggingPersistent validation.
+        """
+        self.result.is_success()
+
+        log_output = self.instance_commands[0].text_output
+        dir_flash_output = self.instance_commands[1].text_output
+
+        if "Persistent logging: disabled" in _get_logging_states(self.logger, log_output):
+            self.result.is_failure("Persistent logging is disabled")
+            return
+
+        pattern = r"-rw-\s+(\d+)"
+        persist_logs = re.search(pattern, dir_flash_output)
+
+        if not persist_logs or int(persist_logs.group(1)) == 0:
+            self.result.is_failure("No persistent logs are saved in flash")
test

test() -> None

Run VerifyLoggingPersistent validation.

+ VerifyLoggingSourceIntf + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies logging source-interface for a specified VRF.

Expected Results

  • success: The test will pass if the provided logging source-interface is configured in the specified VRF.
  • failure: The test will fail if the provided logging source-interface is NOT configured in the specified VRF.
  • skipped: The test will be skipped if source-interface or VRF is not provided.

Source code in anta/tests/logging.py
class VerifyLoggingSourceIntf(AntaTest):
+    """
+    Verifies logging source-interface for a specified VRF.
+
+    Expected Results:
+        * success: The test will pass if the provided logging source-interface is configured in the specified VRF.
+        * failure: The test will fail if the provided logging source-interface is NOT configured in the specified VRF.
+        * skipped: The test will be skipped if source-interface or VRF is not provided.
+    """
+
+    name = "VerifyLoggingSourceIntf"
+    description = "Verifies logging source-interface for a specified VRF."
+    categories = ["logging"]
+    commands = [AntaCommand(command="show logging", ofmt="text")]
+
+    @AntaTest.anta_test
+    def test(self, intf: Optional[str] = None, vrf: str = "default") -> None:
+        """
+        Run VerifyLoggingSourceIntf validation.
+
+        Args:
+            intf: Source-interface to use as source IP of log messages.
+            vrf: The name of the VRF to transport log messages. Defaults to 'default'.
+        """
+        if not intf or not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because intf or vrf was not supplied")
+            return
+
+        output = self.instance_commands[0].text_output
+
+        pattern = rf"Logging source-interface '{intf}'.*VRF {vrf}"
+
+        if re.search(pattern, _get_logging_states(self.logger, output)):
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Source-interface '{intf}' is not configured in VRF {vrf}")

test

test(intf: Optional[str] = None, vrf: str = "default") -> None

Run VerifyLoggingSourceIntf validation.

Parameters:

  • intf (Optional[str]): Source-interface to use as source IP of log messages. Defaults to None.
  • vrf (str): The name of the VRF to transport log messages. Defaults to 'default'.

Source code in anta/tests/logging.py
@AntaTest.anta_test
+def test(self, intf: Optional[str] = None, vrf: str = "default") -> None:
+    """
+    Run VerifyLoggingSourceIntf validation.
+
+    Args:
+        intf: Source-interface to use as source IP of log messages.
+        vrf: The name of the VRF to transport log messages. Defaults to 'default'.
+    """
+    if not intf or not vrf:
+        self.result.is_skipped(f"{self.__class__.name} did not run because intf or vrf was not supplied")
+        return
+
+    output = self.instance_commands[0].text_output
+
+    pattern = rf"Logging source-interface '{intf}'.*VRF {vrf}"
+
+    if re.search(pattern, _get_logging_states(self.logger, output)):
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"Source-interface '{intf}' is not configured in VRF {vrf}")
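
To illustrate the pattern built in the test, here it is evaluated against a hand-written `show logging` state line (the exact EOS wording is assumed here, not captured from a device):

import re

intf, vrf = "Management1", "MGMT"

# Assumed excerpt of the logging state section of "show logging"
logging_states = "Logging source-interface 'Management1', IP Address 10.0.0.1 in VRF MGMT"

pattern = rf"Logging source-interface '{intf}'.*VRF {vrf}"

# A match means the test would report success for this interface/VRF pair
assert re.search(pattern, logging_states) is not None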

VerifyLoggingTimestamp

Bases: AntaTest

Verifies if logs are generated with the appropriate timestamp.

Expected Results

  • success: The test will pass if logs are generated with the appropriate timestamp.
  • failure: The test will fail if logs are NOT generated with the appropriate timestamp.

Source code in anta/tests/logging.py
class VerifyLoggingTimestamp(AntaTest):
+    """
+    Verifies if logs are generated with the appropriate timestamp.
+
+    Expected Results:
+        * success: The test will pass if logs are generated with the appropriate timestamp.
+        * failure: The test will fail if logs are NOT generated with the appropriate timestamp.
+    """
+
+    name = "VerifyLoggingTimestamp"
+    description = "Verifies if logs are generated with the appropriate timestamp."
+    categories = ["logging"]
+    commands = [
+        AntaCommand(command="send log level informational message ANTA VerifyLoggingTimestamp validation"),
+        AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text"),
+    ]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyLoggingTimestamp validation.
+        """
+        log_pattern = r"ANTA VerifyLoggingTimestamp validation"
+        timestamp_pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}-\d{2}:\d{2}"
+
+        output = self.instance_commands[1].text_output
+
+        lines = output.strip().split("\n")[::-1]
+
+        last_line_with_pattern = ""
+        for line in lines:
+            if re.search(log_pattern, line):
+                last_line_with_pattern = line
+                break
+
+        if re.search(timestamp_pattern, last_line_with_pattern):
+            self.result.is_success()
+        else:
+            self.result.is_failure("Logs are not generated with the appropriate timestamp format")

test

test() -> None

Run VerifyLoggingTimestamp validation.

Source code in anta/tests/logging.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyLoggingTimestamp validation.
+    """
+    log_pattern = r"ANTA VerifyLoggingTimestamp validation"
+    timestamp_pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}-\d{2}:\d{2}"
+
+    output = self.instance_commands[1].text_output
+
+    lines = output.strip().split("\n")[::-1]
+
+    last_line_with_pattern = ""
+    for line in lines:
+        if re.search(log_pattern, line):
+            last_line_with_pattern = line
+            break
+
+    if re.search(timestamp_pattern, last_line_with_pattern):
+        self.result.is_success()
+    else:
+        self.result.is_failure("Logs are not generated with the appropriate timestamp format")
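
The timestamp regex expects an RFC 3339 style stamp with microsecond precision and a numeric UTC offset. A minimal sketch against a hypothetical log line (the line itself is invented):

import re

timestamp_pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}-\d{2}:\d{2}"

# Invented syslog line in the format the test expects
line = "2023-07-19T09:12:03.123456-07:00 leaf1 ANTA VerifyLoggingTimestamp validation"

# The stamp matches, so the test would report success for this line
assert re.search(timestamp_pattern, line) is not None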

Last update: July 19, 2023

diff --git a/0.6.0/api/tests.mlag/index.html b/0.6.0/api/tests.mlag/index.html
new file mode 100644
index 000000000..869a66cdf
--- /dev/null
+++ b/0.6.0/api/tests.mlag/index.html
@@ -0,0 +1,2916 @@
+ MLAG - Arista Network Test Automation - ANTA

MLAG

ANTA catalog for mlag tests

Test functions related to Multi-chassis Link Aggregation (MLAG)

VerifyMlagConfigSanity

Bases: AntaTest

This test verifies there are no MLAG config-sanity inconsistencies.

Expected Results

  • success: The test will pass if there are NO MLAG config-sanity inconsistencies.
  • failure: The test will fail if there are MLAG config-sanity inconsistencies.
  • skipped: The test will be skipped if MLAG is 'disabled'.
  • error: The test will give an error if 'mlagActive' is not found in the JSON response.

Source code in anta/tests/mlag.py
class VerifyMlagConfigSanity(AntaTest):
+    """
+    This test verifies there are no MLAG config-sanity inconsistencies.
+
+    Expected Results:
+        * success: The test will pass if there are NO MLAG config-sanity inconsistencies.
+        * failure: The test will fail if there are MLAG config-sanity inconsistencies.
+        * skipped: The test will be skipped if MLAG is 'disabled'.
+        * error: The test will give an error if 'mlagActive' is not found in the JSON response.
+    """
+
+    name = "VerifyMlagConfigSanity"
+    description = "This test verifies there are no MLAG config-sanity inconsistencies."
+    categories = ["mlag"]
+    commands = [AntaCommand(command="show mlag config-sanity", ofmt="json")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyMlagConfigSanity validation
+        """
+
+        command_output = self.instance_commands[0].json_output
+
+        if (mlag_status := get_value(command_output, "mlagActive")) is None:
+            self.result.is_error("Incorrect JSON response - 'mlagActive' state was not found")
+            return
+
+        if mlag_status is False:
+            self.result.is_skipped("MLAG is disabled")
+            return
+
+        keys_to_verify = ["globalConfiguration", "interfaceConfiguration"]
+        verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
+
+        if not any(verified_output.values()):
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"MLAG config-sanity returned inconsistencies: {verified_output}")

test

test() -> None

Run VerifyMlagConfigSanity validation

Source code in anta/tests/mlag.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyMlagConfigSanity validation
+    """
+
+    command_output = self.instance_commands[0].json_output
+
+    if (mlag_status := get_value(command_output, "mlagActive")) is None:
+        self.result.is_error("Incorrect JSON response - 'mlagActive' state was not found")
+        return
+
+    if mlag_status is False:
+        self.result.is_skipped("MLAG is disabled")
+        return
+
+    keys_to_verify = ["globalConfiguration", "interfaceConfiguration"]
+    verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
+
+    if not any(verified_output.values()):
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"MLAG config-sanity returned inconsistencies: {verified_output}")
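
The test leans on `get_value` to read keys out of the JSON reply. As a rough stand-in, this sketch assumes the real helper in anta.tools simply walks nested dictionaries along a dot-separated key and returns None when a level is missing, which is the only behaviour the test depends on:

from typing import Any, Optional

def get_value(dictionary: dict, key: str) -> Optional[Any]:
    """Walk a nested dict along a dot-separated key; return None on a missing level."""
    value: Any = dictionary
    for part in key.split("."):
        if not isinstance(value, dict) or part not in value:
            return None
        value = value[part]
    return value

# Config-sanity passes when both inconsistency sections are empty (invented reply)
output = {"mlagActive": True, "globalConfiguration": {}, "interfaceConfiguration": {}}
assert not any(get_value(output, k) for k in ("globalConfiguration", "interfaceConfiguration"))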

VerifyMlagDualPrimary

Bases: AntaTest

This test verifies the dual-primary detection and its parameters of the MLAG configuration.

Expected Results

  • success: The test will pass if the dual-primary detection is enabled and its parameters are configured properly.
  • failure: The test will fail if the dual-primary detection is NOT enabled or its parameters are NOT configured properly.
  • skipped: The test will be skipped if the dual-primary parameters are NOT provided or if MLAG is 'disabled'.

Source code in anta/tests/mlag.py
class VerifyMlagDualPrimary(AntaTest):
+    """
+    This test verifies the dual-primary detection and its parameters of the MLAG configuration.
+
+    Expected Results:
+        * success: The test will pass if the dual-primary detection is enabled and its parameters are configured properly.
+        * failure: The test will fail if the dual-primary detection is NOT enabled or its parameters are NOT configured properly.
+        * skipped: The test will be skipped if the dual-primary parameters are NOT provided or if MLAG is 'disabled'.
+    """
+
+    name = "VerifyMlagDualPrimary"
+    description = "This test verifies the dual-primary detection and its parameters of the MLAG configuration."
+    categories = ["mlag"]
+    commands = [AntaCommand(command="show mlag detail", ofmt="json")]
+
+    @AntaTest.anta_test
+    def test(
+        self, detection_delay: Optional[int] = None, errdisabled: bool = False, recovery_delay: Optional[int] = None, recovery_delay_non_mlag: Optional[int] = None
+    ) -> None:
+        """
+        Run VerifyMlagDualPrimary validation
+
+        Args:
+            detection_delay: Delay detection for <N> seconds.
+            errdisabled: Errdisabled all interfaces when dual-primary is detected. Defaults to False.
+            recovery_delay: Delay (seconds) after dual-primary detection resolves until non peer-link ports that are part of an MLAG are enabled.
+            recovery_delay_non_mlag: Delay (seconds) after dual-primary detection resolves until ports that are not part of an MLAG are enabled.
+        """
+
+        if detection_delay is None or errdisabled is None or recovery_delay is None or recovery_delay_non_mlag is None:
+            self.result.is_skipped(
+                f"{self.__class__.name} did not run because detection_delay, errdisabled, recovery_delay or recovery_delay_non_mlag were not supplied"
+            )
+            return
+
+        errdisabled_action = "errdisableAllInterfaces" if errdisabled else "none"
+
+        command_output = self.instance_commands[0].json_output
+
+        if command_output["state"] == "disabled":
+            self.result.is_skipped("MLAG is disabled")
+            return
+
+        if command_output["dualPrimaryDetectionState"] == "disabled":
+            self.result.is_failure("Dual-primary detection is disabled")
+            return
+
+        keys_to_verify = ["detail.dualPrimaryDetectionDelay", "detail.dualPrimaryAction", "dualPrimaryMlagRecoveryDelay", "dualPrimaryNonMlagRecoveryDelay"]
+        verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
+
+        if (
+            verified_output["detail.dualPrimaryDetectionDelay"] == detection_delay
+            and verified_output["detail.dualPrimaryAction"] == errdisabled_action
+            and verified_output["dualPrimaryMlagRecoveryDelay"] == recovery_delay
+            and verified_output["dualPrimaryNonMlagRecoveryDelay"] == recovery_delay_non_mlag
+        ):
+            self.result.is_success()
+
+        else:
+            self.result.is_failure(f"The dual-primary parameters are not configured properly: {verified_output}")

test

test(
    detection_delay: Optional[int] = None,
    errdisabled: bool = False,
    recovery_delay: Optional[int] = None,
    recovery_delay_non_mlag: Optional[int] = None,
) -> None

Run VerifyMlagDualPrimary validation

Parameters:

  • detection_delay (Optional[int]): Delay detection for <N> seconds. Defaults to None.
  • errdisabled (bool): Errdisabled all interfaces when dual-primary is detected. Defaults to False.
  • recovery_delay (Optional[int]): Delay (seconds) after dual-primary detection resolves until non peer-link ports that are part of an MLAG are enabled. Defaults to None.
  • recovery_delay_non_mlag (Optional[int]): Delay (seconds) after dual-primary detection resolves until ports that are not part of an MLAG are enabled. Defaults to None.

Source code in anta/tests/mlag.py
@AntaTest.anta_test
+def test(
+    self, detection_delay: Optional[int] = None, errdisabled: bool = False, recovery_delay: Optional[int] = None, recovery_delay_non_mlag: Optional[int] = None
+) -> None:
+    """
+    Run VerifyMlagDualPrimary validation
+
+    Args:
+        detection_delay: Delay detection for <N> seconds.
+        errdisabled: Errdisabled all interfaces when dual-primary is detected. Defaults to False.
+        recovery_delay: Delay (seconds) after dual-primary detection resolves until non peer-link ports that are part of an MLAG are enabled.
+        recovery_delay_non_mlag: Delay (seconds) after dual-primary detection resolves until ports that are not part of an MLAG are enabled.
+    """
+
+    if detection_delay is None or errdisabled is None or recovery_delay is None or recovery_delay_non_mlag is None:
+        self.result.is_skipped(
+            f"{self.__class__.name} did not run because detection_delay, errdisabled, recovery_delay or recovery_delay_non_mlag were not supplied"
+        )
+        return
+
+    errdisabled_action = "errdisableAllInterfaces" if errdisabled else "none"
+
+    command_output = self.instance_commands[0].json_output
+
+    if command_output["state"] == "disabled":
+        self.result.is_skipped("MLAG is disabled")
+        return
+
+    if command_output["dualPrimaryDetectionState"] == "disabled":
+        self.result.is_failure("Dual-primary detection is disabled")
+        return
+
+    keys_to_verify = ["detail.dualPrimaryDetectionDelay", "detail.dualPrimaryAction", "dualPrimaryMlagRecoveryDelay", "dualPrimaryNonMlagRecoveryDelay"]
+    verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
+
+    if (
+        verified_output["detail.dualPrimaryDetectionDelay"] == detection_delay
+        and verified_output["detail.dualPrimaryAction"] == errdisabled_action
+        and verified_output["dualPrimaryMlagRecoveryDelay"] == recovery_delay
+        and verified_output["dualPrimaryNonMlagRecoveryDelay"] == recovery_delay_non_mlag
+    ):
+        self.result.is_success()
+
+    else:
+        self.result.is_failure(f"The dual-primary parameters are not configured properly: {verified_output}")
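
Note how the `errdisabled` boolean is translated into the action string the device reports before any comparison happens. A sketch with an invented `show mlag detail` fragment (key names follow the test; the values are assumptions):

# Invented "show mlag detail" fragment -- illustration only
command_output = {
    "detail": {"dualPrimaryDetectionDelay": 200, "dualPrimaryAction": "errdisableAllInterfaces"},
    "dualPrimaryMlagRecoveryDelay": 60,
    "dualPrimaryNonMlagRecoveryDelay": 0,
}

errdisabled = True
# True maps to "errdisableAllInterfaces", False maps to "none"
errdisabled_action = "errdisableAllInterfaces" if errdisabled else "none"

assert command_output["detail"]["dualPrimaryAction"] == errdisabled_action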

VerifyMlagInterfaces

Bases: AntaTest

This test verifies there are no inactive or active-partial MLAG ports.

Expected Results

  • success: The test will pass if there are NO inactive or active-partial MLAG ports.
  • failure: The test will fail if there are inactive or active-partial MLAG ports.
  • skipped: The test will be skipped if MLAG is 'disabled'.

Source code in anta/tests/mlag.py
class VerifyMlagInterfaces(AntaTest):
+    """
+    This test verifies there are no inactive or active-partial MLAG ports.
+
+    Expected Results:
+        * success: The test will pass if there are NO inactive or active-partial MLAG ports.
+        * failure: The test will fail if there are inactive or active-partial MLAG ports.
+        * skipped: The test will be skipped if MLAG is 'disabled'.
+    """
+
+    name = "VerifyMlagInterfaces"
+    description = "This test verifies there are no inactive or active-partial MLAG ports."
+    categories = ["mlag"]
+    commands = [AntaCommand(command="show mlag", ofmt="json")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyMlagInterfaces validation
+        """
+
+        command_output = self.instance_commands[0].json_output
+
+        if command_output["state"] == "disabled":
+            self.result.is_skipped("MLAG is disabled")
+            return
+
+        if command_output["mlagPorts"]["Inactive"] == 0 and command_output["mlagPorts"]["Active-partial"] == 0:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"MLAG status is not OK: {command_output['mlagPorts']}")

test

test() -> None

Run VerifyMlagInterfaces validation

Source code in anta/tests/mlag.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyMlagInterfaces validation
+    """
+
+    command_output = self.instance_commands[0].json_output
+
+    if command_output["state"] == "disabled":
+        self.result.is_skipped("MLAG is disabled")
+        return
+
+    if command_output["mlagPorts"]["Inactive"] == 0 and command_output["mlagPorts"]["Active-partial"] == 0:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"MLAG status is not OK: {command_output['mlagPorts']}")
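
Only two counters in `mlagPorts` matter to this test; every other state is ignored. A sketch with assumed counter values:

# Assumed "show mlag" port counters -- illustration only
mlag_ports = {"Disabled": 0, "Configured": 2, "Inactive": 0, "Active-partial": 0, "Active-full": 42}

# Healthy as long as nothing is inactive or only partially active
assert mlag_ports["Inactive"] == 0 and mlag_ports["Active-partial"] == 0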

VerifyMlagReloadDelay

Bases: AntaTest

This test verifies the reload-delay parameters of the MLAG configuration.

Expected Results

  • success: The test will pass if the reload-delay parameters are configured properly.
  • failure: The test will fail if the reload-delay parameters are NOT configured properly.
  • skipped: The test will be skipped if the reload-delay parameters are NOT provided or if MLAG is 'disabled'.

Source code in anta/tests/mlag.py
class VerifyMlagReloadDelay(AntaTest):
+    """
+    This test verifies the reload-delay parameters of the MLAG configuration.
+
+    Expected Results:
+        * success: The test will pass if the reload-delay parameters are configured properly.
+        * failure: The test will fail if the reload-delay parameters are NOT configured properly.
+        * skipped: The test will be skipped if the reload-delay parameters are NOT provided or if MLAG is 'disabled'.
+    """
+
+    name = "VerifyMlagReloadDelay"
+    description = "This test verifies the reload-delay parameters of the MLAG configuration."
+    categories = ["mlag"]
+    commands = [AntaCommand(command="show mlag", ofmt="json")]
+
+    @AntaTest.anta_test
+    def test(self, reload_delay: Optional[int] = None, reload_delay_non_mlag: Optional[int] = None) -> None:
+        """
+        Run VerifyMlagReloadDelay validation
+
+        Args:
+            reload_delay: Delay (seconds) after reboot until non peer-link ports that are part of an MLAG are enabled.
+            reload_delay_non_mlag: Delay (seconds) after reboot until ports that are not part of an MLAG are enabled.
+        """
+
+        if not reload_delay or not reload_delay_non_mlag:
+            self.result.is_skipped(f"{self.__class__.name} did not run because reload_delay or reload_delay_non_mlag were not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        if command_output["state"] == "disabled":
+            self.result.is_skipped("MLAG is disabled")
+            return
+
+        keys_to_verify = ["reloadDelay", "reloadDelayNonMlag"]
+        verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
+
+        if verified_output["reloadDelay"] == reload_delay and verified_output["reloadDelayNonMlag"] == reload_delay_non_mlag:
+            self.result.is_success()
+
+        else:
+            self.result.is_failure(f"The reload-delay parameters are not configured properly: {verified_output}")

test

test(
    reload_delay: Optional[int] = None,
    reload_delay_non_mlag: Optional[int] = None,
) -> None

Run VerifyMlagReloadDelay validation

Parameters:

  • reload_delay (Optional[int]): Delay (seconds) after reboot until non peer-link ports that are part of an MLAG are enabled. Defaults to None.
  • reload_delay_non_mlag (Optional[int]): Delay (seconds) after reboot until ports that are not part of an MLAG are enabled. Defaults to None.

Source code in anta/tests/mlag.py
@AntaTest.anta_test
+def test(self, reload_delay: Optional[int] = None, reload_delay_non_mlag: Optional[int] = None) -> None:
+    """
+    Run VerifyMlagReloadDelay validation
+
+    Args:
+        reload_delay: Delay (seconds) after reboot until non peer-link ports that are part of an MLAG are enabled.
+        reload_delay_non_mlag: Delay (seconds) after reboot until ports that are not part of an MLAG are enabled.
+    """
+
+    if not reload_delay or not reload_delay_non_mlag:
+        self.result.is_skipped(f"{self.__class__.name} did not run because reload_delay or reload_delay_non_mlag were not supplied")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    if command_output["state"] == "disabled":
+        self.result.is_skipped("MLAG is disabled")
+        return
+
+    keys_to_verify = ["reloadDelay", "reloadDelayNonMlag"]
+    verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
+
+    if verified_output["reloadDelay"] == reload_delay and verified_output["reloadDelayNonMlag"] == reload_delay_non_mlag:
+        self.result.is_success()
+
+    else:
+        self.result.is_failure(f"The reload-delay parameters are not configured properly: {verified_output}")
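
One behaviour worth noting in the guard above: it tests truthiness rather than `is None`, so a legitimately configured reload-delay of 0 seconds is treated like a missing parameter and the test is skipped. A two-line demonstration:

reload_delay = 0  # a deliberately configured zero-second delay

# 0 is falsy, so this guard skips the test exactly as it would for None
assert not reload_delay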

VerifyMlagStatus

Bases: AntaTest

This test verifies the health status of the MLAG configuration.

Expected Results

  • success: The test will pass if the MLAG state is 'active', negotiation status is 'connected', peer-link status and local interface status are 'up'.
  • failure: The test will fail if the MLAG state is not 'active', negotiation status is not 'connected', peer-link status or local interface status are not 'up'.
  • skipped: The test will be skipped if MLAG is 'disabled'.

Source code in anta/tests/mlag.py
class VerifyMlagStatus(AntaTest):
+    """
+    This test verifies the health status of the MLAG configuration.
+
+    Expected Results:
+        * success: The test will pass if the MLAG state is 'active', negotiation status is 'connected',
+                   peer-link status and local interface status are 'up'.
+        * failure: The test will fail if the MLAG state is not 'active', negotiation status is not 'connected',
+                   peer-link status or local interface status are not 'up'.
+        * skipped: The test will be skipped if MLAG is 'disabled'.
+    """
+
+    name = "VerifyMlagStatus"
+    description = "This test verifies the health status of the MLAG configuration."
+    categories = ["mlag"]
+    commands = [AntaCommand(command="show mlag", ofmt="json")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyMlagStatus validation
+        """
+
+        command_output = self.instance_commands[0].json_output
+
+        if command_output["state"] == "disabled":
+            self.result.is_skipped("MLAG is disabled")
+            return
+
+        keys_to_verify = ["state", "negStatus", "localIntfStatus", "peerLinkStatus"]
+        verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
+
+        if (
+            verified_output["state"] == "active"
+            and verified_output["negStatus"] == "connected"
+            and verified_output["localIntfStatus"] == "up"
+            and verified_output["peerLinkStatus"] == "up"
+        ):
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"MLAG status is not OK: {verified_output}")

test

test() -> None

Run VerifyMlagStatus validation

Source code in anta/tests/mlag.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyMlagStatus validation
+    """
+
+    command_output = self.instance_commands[0].json_output
+
+    if command_output["state"] == "disabled":
+        self.result.is_skipped("MLAG is disabled")
+        return
+
+    keys_to_verify = ["state", "negStatus", "localIntfStatus", "peerLinkStatus"]
+    verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
+
+    if (
+        verified_output["state"] == "active"
+        and verified_output["negStatus"] == "connected"
+        and verified_output["localIntfStatus"] == "up"
+        and verified_output["peerLinkStatus"] == "up"
+    ):
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"MLAG status is not OK: {verified_output}")

Last update: July 19, 2023

diff --git a/0.6.0/api/tests.multicast/index.html b/0.6.0/api/tests.multicast/index.html
new file mode 100644
index 000000000..3a05f8edc
--- /dev/null
+++ b/0.6.0/api/tests.multicast/index.html
@@ -0,0 +1,2136 @@
+ Multicast - Arista Network Test Automation - ANTA

Multicast

ANTA catalog for multicast tests

Test functions related to multicast

VerifyIGMPSnoopingGlobal

Bases: AntaTest

Verifies the IGMP snooping global configuration.

Parameters:

  • configuration (str): Expected global IGMP snooping configuration (enabled or disabled). Required.

Source code in anta/tests/multicast.py
class VerifyIGMPSnoopingGlobal(AntaTest):
+    """
+    Verifies the IGMP snooping global configuration.
+
+    Args:
+        configuration (str): Expected global IGMP snooping configuration (enabled or disabled).
+    """
+
+    name = "VerifyIGMPSnoopingGlobal"
+    description = "Verifies the IGMP snooping global configuration."
+    categories = ["multicast", "igmp"]
+    commands = [AntaCommand(command="show ip igmp snooping")]
+
+    @AntaTest.anta_test
+    def test(self, configuration: Optional[str] = None) -> None:
+        """
+        Run VerifyIGMPSnoopingGlobal validation
+
+        Args:
+            configuration: Expected global IGMP configuration (enabled or disabled).
+        """
+
+        if not configuration:
+            self.result.is_skipped("VerifyIGMPSnoopingGlobal was not run as no configuration was given")
+            return
+
+        if configuration not in ["enabled", "disabled"]:
+            self.result.is_error(f"VerifyIGMPSnoopingGlobal was not run as 'configuration': {configuration} is not in the allowed values: ['enabled', 'disabled'])")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        self.result.is_success()
+        if (igmp_state := command_output["igmpSnoopingState"]) != configuration:
+            self.result.is_failure(f"IGMP state is not valid: {igmp_state}")

test

test(configuration: Optional[str] = None) -> None

Run VerifyIGMPSnoopingGlobal validation

Parameters:

  • configuration (Optional[str]): Expected global IGMP configuration (enabled or disabled). Defaults to None.

Source code in anta/tests/multicast.py
@AntaTest.anta_test
+def test(self, configuration: Optional[str] = None) -> None:
+    """
+    Run VerifyIGMPSnoopingGlobal validation
+
+    Args:
+        configuration: Expected global IGMP configuration (enabled or disabled).
+    """
+
+    if not configuration:
+        self.result.is_skipped("VerifyIGMPSnoopingGlobal was not run as no configuration was given")
+        return
+
+    if configuration not in ["enabled", "disabled"]:
+        self.result.is_error(f"VerifyIGMPSnoopingGlobal was not run as 'configuration': {configuration} is not in the allowed values: ['enabled', 'disabled'])")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    self.result.is_success()
+    if (igmp_state := command_output["igmpSnoopingState"]) != configuration:
+        self.result.is_failure(f"IGMP state is not valid: {igmp_state}")
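
The final comparison uses an assignment expression so the reported state stays available for the failure message. A sketch against an invented reply:

command_output = {"igmpSnoopingState": "enabled"}  # invented reply
configuration = "disabled"

# The walrus operator binds igmp_state while testing it in one expression
if (igmp_state := command_output["igmpSnoopingState"]) != configuration:
    print(f"IGMP state is not valid: {igmp_state}")  # prints: IGMP state is not valid: enabled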

VerifyIGMPSnoopingVlans

Bases: AntaTest

Verifies the IGMP snooping configuration for some VLANs.

Parameters:

  • vlans (List[str]): A list of VLANs. Required.
  • configuration (str): Expected IGMP snooping configuration (enabled or disabled) for these VLANs. Required.

Source code in anta/tests/multicast.py
class VerifyIGMPSnoopingVlans(AntaTest):
+    """
+    Verifies the IGMP snooping configuration for some VLANs.
+
+    Args:
+        vlans (List[str]): A list of VLANs
+        configuration (str): Expected IGMP snooping configuration (enabled or disabled) for these VLANs.
+    """
+
+    name = "VerifyIGMPSnoopingVlans"
+    description = "Verifies the IGMP snooping configuration for some VLANs."
+    categories = ["multicast", "igmp"]
+    commands = [AntaCommand(command="show ip igmp snooping")]
+
+    @AntaTest.anta_test
+    def test(self, vlans: Optional[List[str]] = None, configuration: Optional[str] = None) -> None:
+        """
+        Run VerifyIGMPSnoopingVlans validation
+
+        Args:
+            vlans: List of VLANs.
+            configuration: Expected IGMP configuration (enabled or disabled) for these VLANs.
+        """
+
+        if not vlans or not configuration:
+            self.result.is_skipped("VerifyIGMPSnoopingVlans was not run as no vlans or configuration was given")
+            return
+        if configuration not in ["enabled", "disabled"]:
+            self.result.is_error(f"VerifyIGMPSnoopingVlans was not run as 'configuration': {configuration} is not in the allowed values: ['enabled', 'disabled'])")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        self.result.is_success()
+        for vlan in vlans:
+            if vlan not in command_output["vlans"]:
+                self.result.is_failure(f"Supplied vlan {vlan} is not present on the device.")
+                continue
+
+            igmp_state = command_output["vlans"][str(vlan)]["igmpSnoopingState"]
+            if igmp_state != configuration:
+                self.result.is_failure(f"IGMP state for vlan {vlan} is {igmp_state}")

test

test(
    vlans: Optional[List[str]] = None,
    configuration: Optional[str] = None,
) -> None

Run VerifyIGMPSnoopingVlans validation

Parameters:

  • vlans (Optional[List[str]]): List of VLANs. Defaults to None.
  • configuration (Optional[str]): Expected IGMP configuration (enabled or disabled) for these VLANs. Defaults to None.

Source code in anta/tests/multicast.py
@AntaTest.anta_test
+def test(self, vlans: Optional[List[str]] = None, configuration: Optional[str] = None) -> None:
+    """
+    Run VerifyIGMPSnoopingVlans validation
+
+    Args:
+        vlans: List of VLANs.
+        configuration: Expected IGMP configuration (enabled or disabled) for these VLANs.
+    """
+
+    if not vlans or not configuration:
+        self.result.is_skipped("VerifyIGMPSnoopingVlans was not run as no vlans or configuration was given")
+        return
+    if configuration not in ["enabled", "disabled"]:
+        self.result.is_error(f"VerifyIGMPSnoopingVlans was not run as 'configuration': {configuration} is not in the allowed values: ['enabled', 'disabled'])")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    self.result.is_success()
+    for vlan in vlans:
+        if vlan not in command_output["vlans"]:
+            self.result.is_failure(f"Supplied vlan {vlan} is not present on the device.")
+            continue
+
+        igmp_state = command_output["vlans"][str(vlan)]["igmpSnoopingState"]
+        if igmp_state != configuration:
+            self.result.is_failure(f"IGMP state for vlan {vlan} is {igmp_state}")

Last update: July 19, 2023

diff --git a/0.6.0/api/tests.profiles/index.html b/0.6.0/api/tests.profiles/index.html
new file mode 100644
index 000000000..2d22f17da
--- /dev/null
+++ b/0.6.0/api/tests.profiles/index.html
@@ -0,0 +1,1975 @@
+ Profiles - Arista Network Test Automation - ANTA

Profiles

ANTA catalog for profiles tests

Test functions related to ASIC profiles

VerifyTcamProfile

Bases: AntaTest

Verifies the device is using the configured TCAM profile.

Source code in anta/tests/profiles.py
class VerifyTcamProfile(AntaTest):
+    """
+    Verifies the device is using the configured TCAM profile.
+    """
+
+    name = "VerifyTcamProfile"
+    description = "Verify that the assigned TCAM profile is actually running on the device"
+    categories = ["profiles"]
+    commands = [AntaCommand(command="show hardware tcam profile", ofmt="json")]
+
+    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
+    @AntaTest.anta_test
+    def test(self, profile: Optional[str] = None) -> None:
+        """
+        Run VerifyTcamProfile validation
+
+        Args:
+            profile: Expected TCAM profile.
+        """
+        if not profile:
+            self.result.is_skipped("VerifyTcamProfile was not run as no profile was given")
+            return
+
+        command_output = self.instance_commands[0].json_output
+        if command_output["pmfProfiles"]["FixedSystem"]["status"] == command_output["pmfProfiles"]["FixedSystem"]["config"] == profile:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Incorrect profile running on device: {command_output['pmfProfiles']['FixedSystem']['status']}")

test

test(profile: Optional[str] = None) -> None

Run VerifyTcamProfile validation

Parameters:

  • profile (Optional[str]): Expected TCAM profile. Defaults to None.

Source code in anta/tests/profiles.py
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
+@AntaTest.anta_test
+def test(self, profile: Optional[str] = None) -> None:
+    """
+    Run VerifyTcamProfile validation
+
+    Args:
+        profile: Expected TCAM profile.
+    """
+    if not profile:
+        self.result.is_skipped("VerifyTcamProfile was not run as no profile was given")
+        return
+
+    command_output = self.instance_commands[0].json_output
+    if command_output["pmfProfiles"]["FixedSystem"]["status"] == command_output["pmfProfiles"]["FixedSystem"]["config"] == profile:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"Incorrect profile running on device: {command_output['pmfProfiles']['FixedSystem']['status']}")
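
The success condition is a chained comparison: the running status and the stored config must both equal the expected profile. A sketch with invented values:

# Invented TCAM profile values -- illustration only
status, config, profile = "vxlan-routing", "vxlan-routing", "vxlan-routing"

# Chained comparison: equivalent to (status == config) and (config == profile)
assert status == config == profile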

VerifyUnifiedForwardingTableMode

Bases: AntaTest

Verifies the device is using the expected Unified Forwarding Table mode.

Source code in anta/tests/profiles.py
class VerifyUnifiedForwardingTableMode(AntaTest):
+    """
+    Verifies the device is using the expected Unified Forwarding Table mode.
+    """
+
+    name = "VerifyUnifiedForwardingTableMode"
+    description = ""
+    categories = ["profiles"]
+    commands = [AntaCommand(command="show platform trident forwarding-table partition", ofmt="json")]
+
+    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
+    @AntaTest.anta_test
+    def test(self, mode: Optional[str] = None) -> None:
+        """
+        Run VerifyUnifiedForwardingTableMode validation
+
+        Args:
+            mode: Expected UFT mode.
+        """
+        if not mode:
+            self.result.is_skipped("VerifyUnifiedForwardingTableMode was not run as no mode was given")
+            return
+
+        command_output = self.instance_commands[0].json_output
+        if command_output["uftMode"] == mode:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Device is not running correct UFT mode (expected: {mode} / running: {command_output['uftMode']})")

test

test(mode: Optional[str] = None) -> None

Run VerifyUnifiedForwardingTableMode validation

Parameters:

  • mode (Optional[str]): Expected UFT mode. Defaults to None.

Source code in anta/tests/profiles.py
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
+@AntaTest.anta_test
+def test(self, mode: Optional[str] = None) -> None:
+    """
+    Run VerifyUnifiedForwardingTableMode validation
+
+    Args:
+        mode: Expected UFT mode.
+    """
+    if not mode:
+        self.result.is_skipped("VerifyUnifiedForwardingTableMode was not run as no mode was given")
+        return
+
+    command_output = self.instance_commands[0].json_output
+    if command_output["uftMode"] == mode:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"Device is not running correct UFT mode (expected: {mode} / running: {command_output['uftMode']})")

Last update: July 19, 2023

diff --git a/0.6.0/api/tests.routing.bgp/index.html b/0.6.0/api/tests.routing.bgp/index.html
new file mode 100644
index 000000000..669b4a67c
--- /dev/null
+++ b/0.6.0/api/tests.routing.bgp/index.html
@@ -0,0 +1,3187 @@
+ BGP - Arista Network Test Automation - ANTA

BGP

ANTA catalog for routing-bgp tests

BGP test functions

VerifyBGPEVPNCount

Bases: AntaTest

Verifies all EVPN BGP sessions are established (default VRF) and the actual number of BGP EVPN neighbors is the one we expect (default VRF).

  • self.result = "skipped" if the number parameter is missing
  • self.result = "success" if all EVPN BGP sessions are Established and if the actual number of BGP EVPN neighbors is the one we expect.
  • self.result = "failure" otherwise.

Source code in anta/tests/routing/bgp.py
class VerifyBGPEVPNCount(AntaTest):
+    """
+    Verifies all EVPN BGP sessions are established (default VRF)
+    and the actual number of BGP EVPN neighbors is the one we expect (default VRF).
+
+    * self.result = "skipped" if the `number` parameter is missing
+    * self.result = "success" if all EVPN BGP sessions are Established and if the actual
+                         number of BGP EVPN neighbors is the one we expect.
+    * self.result = "failure" otherwise.
+    """
+
+    name = "VerifyBGPEVPNCount"
+    description = "Verifies all EVPN BGP sessions are established (default VRF) and the actual number of BGP EVPN neighbors is the one we expect (default VRF)."
+    categories = ["routing", "bgp"]
+    commands = [AntaCommand(command="show bgp evpn summary")]
+
+    @check_bgp_family_enable("evpn")
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None) -> None:
+        """
+        Run VerifyBGPEVPNCount validation
+
+        Args:
+            number: The expected number of BGP EVPN neighbors in the default VRF.
+        """
+        if not number:
+            self.result.is_skipped("VerifyBGPEVPNCount could not run because number was not supplied.")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        peers = command_output["vrfs"]["default"]["peers"]
+        non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict["peerState"] != "Established"]
+
+        if not non_established_peers and len(peers) == number:
+            self.result.is_success()
+        else:
+            self.result.is_failure()
+            if len(peers) != number:
+                self.result.is_failure(f"Expecting {number} BGP EVPN peers and got {len(peers)}")
+            if non_established_peers:
+                self.result.is_failure(f"The following EVPN peers are not established: {non_established_peers}")

test

test(number: Optional[int] = None) -> None

Run VerifyBGPEVPNCount validation

Parameters:

  • number (Optional[int]): The expected number of BGP EVPN neighbors in the default VRF. Defaults to None.

Source code in anta/tests/routing/bgp.py
@check_bgp_family_enable("evpn")
+@AntaTest.anta_test
+def test(self, number: Optional[int] = None) -> None:
+    """
+    Run VerifyBGPEVPNCount validation
+
+    Args:
+        number: The expected number of BGP EVPN neighbors in the default VRF.
+    """
+    if not number:
+        self.result.is_skipped("VerifyBGPEVPNCount could not run because number was not supplied.")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    peers = command_output["vrfs"]["default"]["peers"]
+    non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict["peerState"] != "Established"]
+
+    if not non_established_peers and len(peers) == number:
+        self.result.is_success()
+    else:
+        self.result.is_failure()
+        if len(peers) != number:
+            self.result.is_failure(f"Expecting {number} BGP EVPN peers and got {len(peers)}")
+        if non_established_peers:
+            self.result.is_failure(f"The following EVPN peers are not established: {non_established_peers}")
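
The peer filtering above is a plain list comprehension over the summary table. A sketch with an invented peer table:

# Invented "show bgp evpn summary" peer table -- illustration only
peers = {
    "10.0.0.1": {"peerState": "Established"},
    "10.0.0.2": {"peerState": "Idle"},
}

non_established_peers = [peer for peer, d in peers.items() if d["peerState"] != "Established"]
assert non_established_peers == ["10.0.0.2"]  # this peer would be reported in the failure message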

VerifyBGPEVPNState

Bases: AntaTest

Verifies all EVPN BGP sessions are established (default VRF).

  • self.result = "skipped" if no BGP EVPN peers are returned by the device
  • self.result = "success" if all EVPN BGP sessions are established.
  • self.result = "failure" otherwise.

Source code in anta/tests/routing/bgp.py
class VerifyBGPEVPNState(AntaTest):
+    """
+    Verifies all EVPN BGP sessions are established (default VRF).
+
+    * self.result = "skipped" if no BGP EVPN peers are returned by the device
+    * self.result = "success" if all EVPN BGP sessions are established.
+    * self.result = "failure" otherwise.
+    """
+
+    name = "VerifyBGPEVPNState"
+    description = "Verifies all EVPN BGP sessions are established (default VRF)."
+    categories = ["routing", "bgp"]
+    commands = [AntaCommand(command="show bgp evpn summary")]
+
+    @check_bgp_family_enable("evpn")
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyBGPEVPNState validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        bgp_vrfs = command_output["vrfs"]
+
+        peers = bgp_vrfs["default"]["peers"]
+        non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict["peerState"] != "Established"]
+
+        if not non_established_peers:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"The following EVPN peers are not established: {non_established_peers}")

test

test() -> None

Run VerifyBGPEVPNState validation

Source code in anta/tests/routing/bgp.py
@check_bgp_family_enable("evpn")
+@AntaTest.anta_test
+def test(self) -> None:
+    """Run VerifyBGPEVPNState validation"""
+
+    command_output = self.instance_commands[0].json_output
+
+    bgp_vrfs = command_output["vrfs"]
+
+    peers = bgp_vrfs["default"]["peers"]
+    non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict["peerState"] != "Established"]
+
+    if not non_established_peers:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"The following EVPN peers are not established: {non_established_peers}")

VerifyBGPIPv4UnicastCount

Bases: AntaTest

Verifies all IPv4 unicast BGP sessions are established, all BGP message queues for these sessions are empty, and the actual number of BGP IPv4 unicast neighbors is the one we expect.

  • self.result = "skipped" if the number or vrf parameter is missing
  • self.result = "success" if all IPv4 unicast BGP sessions are established, all BGP message queues for these sessions are empty, and the actual number of BGP IPv4 unicast neighbors is equal to number.
  • self.result = "failure" otherwise.

Source code in anta/tests/routing/bgp.py
class VerifyBGPIPv4UnicastCount(AntaTest):
+    """
+    Verifies all IPv4 unicast BGP sessions are established
+    and all BGP messages queues for these sessions are empty
+    and the actual number of BGP IPv4 unicast neighbors is the one we expect.
+
+    * self.result = "skipped" if the `number` or `vrf` parameter is missing
+    * self.result = "success" if all IPv4 unicast BGP sessions are established
+                         and if all BGP messages queues for these sessions are empty
+                         and if the actual number of BGP IPv4 unicast neighbors is equal to `number`.
+    * self.result = "failure" otherwise.
+    """
+
+    name = "VerifyBGPIPv4UnicastCount"
+    description = (
+        "Verifies all IPv4 unicast BGP sessions are established and all their BGP messages queues are empty and "
+        " the actual number of BGP IPv4 unicast neighbors is the one we expect."
+    )
+    categories = ["routing", "bgp"]
+    template = AntaTemplate(template="show bgp ipv4 unicast summary vrf {vrf}")
+
+    @check_bgp_family_enable("ipv4")
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None) -> None:
+        """
+        Run VerifyBGPIPv4UnicastCount validation
+
+        Args:
+            number: The expected number of BGP IPv4 unicast neighbors.
+            vrf: VRF to verify (template parameter)
+        """
+
+        if not number:
+            self.result.is_skipped("VerifyBGPIPv4UnicastCount could not run because number was not supplied")
+            return
+
+        self.result.is_success()
+
+        for command in self.instance_commands:
+            if command.params and "vrf" in command.params:
+                vrf = command.params["vrf"]
+
+            peers = command.json_output["vrfs"][vrf]["peers"]
+            state_issue = _check_bgp_vrfs(command.json_output["vrfs"])
+
+            if len(peers) != number:
+                self.result.is_failure(f"Expecting {number} BGP peer in vrf {vrf} and got {len(peers)}")
+            if state_issue:
+                self.result.is_failure(f"The following IPv4 peers are not established: {state_issue}")

test

test(number: Optional[int] = None) -> None

Run VerifyBGPIPv4UnicastCount validation

Parameters:

  • number (Optional[int]): The expected number of BGP IPv4 unicast neighbors. Defaults to None.
  • vrf: VRF to verify (template parameter). Required.

Source code in anta/tests/routing/bgp.py
@check_bgp_family_enable("ipv4")
+@AntaTest.anta_test
+def test(self, number: Optional[int] = None) -> None:
+    """
+    Run VerifyBGPIPv4UnicastCount validation
+
+    Args:
+        number: The expected number of BGP IPv4 unicast neighbors.
+        vrf: VRF to verify (template parameter)
+    """
+
+    if not number:
+        self.result.is_skipped("VerifyBGPIPv4UnicastCount could not run because number was not supplied")
+        return
+
+    self.result.is_success()
+
+    for command in self.instance_commands:
+        if command.params and "vrf" in command.params:
+            vrf = command.params["vrf"]
+
+        peers = command.json_output["vrfs"][vrf]["peers"]
+        state_issue = _check_bgp_vrfs(command.json_output["vrfs"])
+
+        if len(peers) != number:
+            self.result.is_failure(f"Expecting {number} BGP peer in vrf {vrf} and got {len(peers)}")
+        if state_issue:
+            self.result.is_failure(f"The following IPv4 peers are not established: {state_issue}")
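
Because this test is built from an AntaTemplate with a {vrf} placeholder, there is one rendered command, and therefore one loop iteration, per VRF requested when the test is instantiated. A sketch of that per-VRF loop over hypothetical data (plain dicts stand in for the AntaCommand objects):

number = 2

# Hypothetical rendered commands, one per requested VRF
instance_commands = [
    {"params": {"vrf": "default"}, "peers": {"10.0.0.1": {}, "10.0.0.2": {}}},
    {"params": {"vrf": "PROD"}, "peers": {"10.1.0.1": {}}},
]

for command in instance_commands:
    vrf = command["params"]["vrf"]
    peers = command["peers"]
    if len(peers) != number:
        print(f"Expecting {number} BGP peer in vrf {vrf} and got {len(peers)}")  # flags PROD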

VerifyBGPIPv4UnicastState

Bases: AntaTest

Verifies all IPv4 unicast BGP sessions are established (for all VRF) and all BGP message queues for these sessions are empty (for all VRF).

  • self.result = "skipped" if no BGP vrf are returned by the device
  • self.result = "success" if all IPv4 unicast BGP sessions are established (for all VRF) and all BGP message queues for these sessions are empty (for all VRF).
  • self.result = "failure" otherwise.

Source code in anta/tests/routing/bgp.py
class VerifyBGPIPv4UnicastState(AntaTest):
+    """
+    Verifies all IPv4 unicast BGP sessions are established (for all VRF)
+    and all BGP message queues for these sessions are empty (for all VRF).
+
+    * self.result = "skipped" if no BGP vrf are returned by the device
+    * self.result = "success" if all IPv4 unicast BGP sessions are established (for all VRF)
+                         and all BGP messages queues for these sessions are empty (for all VRF).
+    * self.result = "failure" otherwise.
+    """
+
+    name = "VerifyBGPIPv4UnicastState"
+    description = "Verifies all IPv4 unicast BGP sessions are established (for all VRF) and all BGP message queues for these sessions are empty (for all VRF)."
+    categories = ["routing", "bgp"]
+    commands = [AntaCommand(command="show bgp ipv4 unicast summary vrf all")]
+
+    @check_bgp_family_enable("ipv4")
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyBGPIPv4UnicastState validation"""
+
+        command_output = self.instance_commands[0].json_output
+        state_issue = _check_bgp_vrfs(command_output["vrfs"])
+
+        if not state_issue:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Some IPv4 Unicast BGP Peer are not up: {state_issue}")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test() -> None
+
+ +
+ +

Run VerifyBGPIPv4UnicastState validation

+ +
+ Source code in anta/tests/routing/bgp.py +
53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
@check_bgp_family_enable("ipv4")
+@AntaTest.anta_test
+def test(self) -> None:
+    """Run VerifyBGPIPv4UnicastState validation"""
+
+    command_output = self.instance_commands[0].json_output
+    state_issue = _check_bgp_vrfs(command_output["vrfs"])
+
+    if not state_issue:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"Some IPv4 Unicast BGP Peer are not up: {state_issue}")
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyBGPIPv6UnicastState + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies all IPv6 unicast BGP sessions are established (for all VRFs) and all BGP message queues for these sessions are empty (for all VRFs).

+
  • self.result = “skipped” if no BGP VRFs are returned by the device
  • self.result = “success” if all IPv6 unicast BGP sessions are established and all BGP message queues for these sessions are empty (for all VRFs).
  • self.result = “failure” otherwise.
+ +
+ Source code in anta/tests/routing/bgp.py +
118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
class VerifyBGPIPv6UnicastState(AntaTest):
+    """
+    Verifies all IPv6 unicast BGP sessions are established (for all VRF)
+    and all BGP message queues for these sessions are empty (for all VRF).
+
+    * self.result = "skipped" if no BGP vrf are returned by the device
+    * self.result = "success" if all IPv6 unicast BGP sessions are established (for all VRF)
+                         and all BGP messages queues for these sessions are empty (for all VRF).
+    * self.result = "failure" otherwise.
+    """
+
+    name = "VerifyBGPIPv6UnicastState"
+    description = "Verifies all IPv6 unicast BGP sessions are established (for all VRF) and all BGP message queues for these sessions are empty (for all VRF)."
+    categories = ["routing", "bgp"]
+    commands = [AntaCommand(command="show bgp ipv6 unicast summary vrf all")]
+
+    @check_bgp_family_enable("ipv6")
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyBGPIPv6UnicastState validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        state_issue = _check_bgp_vrfs(command_output["vrfs"])
+
+        if not state_issue:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Some IPv4 Unicast BGP Peer are not up: {state_issue}")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test() -> None
+
+ +
+ +

Run VerifyBGPIPv6UnicastState validation

+ +
+ Source code in anta/tests/routing/bgp.py +
134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
@check_bgp_family_enable("ipv6")
+@AntaTest.anta_test
+def test(self) -> None:
+    """Run VerifyBGPIPv6UnicastState validation"""
+
+    command_output = self.instance_commands[0].json_output
+
+    state_issue = _check_bgp_vrfs(command_output["vrfs"])
+
+    if not state_issue:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"Some IPv4 Unicast BGP Peer are not up: {state_issue}")
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyBGPRTCCount + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies all RTC BGP sessions are established (default VRF) and the actual number of BGP RTC neighbors is the one we expect (default VRF).

+
  • self.result = “skipped” if the number parameter is missing
  • self.result = “success” if all RTC BGP sessions are established and the actual number of BGP RTC neighbors is the one we expect.
  • self.result = “failure” otherwise.
+ +
+ Source code in anta/tests/routing/bgp.py +
257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
class VerifyBGPRTCCount(AntaTest):
+    """
+    Verifies all RTC BGP sessions are established (default VRF)
+    and the actual number of BGP RTC neighbors is the one we expect (default VRF).
+
+    * self.result = "skipped" if the `number` parameter is missing
+    * self.result = "success" if all RTC BGP sessions are Established and if the actual
+                         number of BGP RTC neighbors is the one we expect.
+    * self.result = "failure" otherwise.
+    """
+
+    name = "VerifyBGPRTCCount"
+    description = "Verifies all RTC BGP sessions are established (default VRF) and the actual number of BGP RTC neighbors is the one we expect (default VRF)."
+    categories = ["routing", "bgp"]
+    commands = [AntaCommand(command="show bgp rt-membership summary")]
+
+    @check_bgp_family_enable("rtc")
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None) -> None:
+        """
+        Run VerifyBGPRTCCount validation
+
+        Args:
+            number: The expected number of BGP RTC neighbors (default VRF).
+        """
+        if not number:
+            self.result.is_skipped("VerifyBGPRTCCount could not run because number was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        peers = command_output["vrfs"]["default"]["peers"]
+        non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict["peerState"] != "Established"]
+
+        if not non_established_peers and len(peers) == number:
+            self.result.is_success()
+        else:
+            self.result.is_failure()
+            if len(peers) != number:
+                self.result.is_failure(f"Expecting {number} BGP RTC peers and got {len(peers)}")
+            if non_established_peers:
+                self.result.is_failure(f"The following RTC peers are not established: {non_established_peers}")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(number: Optional[int] = None) -> None
+
+ +
+ +

Run VerifyBGPRTCCount validation

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
number + Optional[int] + +
+

The expected number of BGP RTC neighbors (default VRF).

+
+
+ None +
+ +
+ Source code in anta/tests/routing/bgp.py +
273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
@check_bgp_family_enable("rtc")
+@AntaTest.anta_test
+def test(self, number: Optional[int] = None) -> None:
+    """
+    Run VerifyBGPRTCCount validation
+
+    Args:
+        number: The expected number of BGP RTC neighbors (default VRF).
+    """
+    if not number:
+        self.result.is_skipped("VerifyBGPRTCCount could not run because number was not supplied")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    peers = command_output["vrfs"]["default"]["peers"]
+    non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict["peerState"] != "Established"]
+
+    if not non_established_peers and len(peers) == number:
+        self.result.is_success()
+    else:
+        self.result.is_failure()
+        if len(peers) != number:
+            self.result.is_failure(f"Expecting {number} BGP RTC peers and got {len(peers)}")
+        if non_established_peers:
+            self.result.is_failure(f"The following RTC peers are not established: {non_established_peers}")
+
+
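VerifyBGPRTCCount layers a cardinality check on top of the same Established-state check, and reports both failures independently. A minimal standalone sketch of that count-plus-state pattern; the addresses and counts are invented, while peerState is the field used in the source above.

# Standalone sketch of the count-plus-state pattern used by VerifyBGPRTCCount.
peers = {
    "10.1.0.1": {"peerState": "Established"},
    "10.1.0.2": {"peerState": "Active"},
}
expected = 2

failures = []
non_established = [p for p, d in peers.items() if d["peerState"] != "Established"]
if len(peers) != expected:
    failures.append(f"Expecting {expected} BGP RTC peers and got {len(peers)}")
if non_established:
    failures.append(f"The following RTC peers are not established: {non_established}")

print(failures or "success")  # -> one failure: 10.1.0.2 is not Established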
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyBGPRTCState + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies all RTC BGP sessions are established (default VRF).

+
  • self.result = “skipped” if no BGP RTC peers are returned by the device
  • self.result = “success” if all RTC BGP sessions are established.
  • self.result = “failure” otherwise.
+ +
+ Source code in anta/tests/routing/bgp.py +
225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
class VerifyBGPRTCState(AntaTest):
+    """
+    Verifies all RTC BGP sessions are established (default VRF).
+
+    * self.result = "skipped" if no BGP RTC peers are returned by the device
+    * self.result = "success" if all RTC BGP sessions are established.
+    * self.result = "failure" otherwise.
+    """
+
+    name = "VerifyBGPRTCState"
+    description = "Verifies all RTC BGP sessions are established (default VRF)."
+    categories = ["routing", "bgp"]
+    commands = [AntaCommand(command="show bgp rt-membership summary")]
+
+    @check_bgp_family_enable("rtc")
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyBGPRTCState validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        bgp_vrfs = command_output["vrfs"]
+
+        peers = bgp_vrfs["default"]["peers"]
+        non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict["peerState"] != "Established"]
+
+        if not non_established_peers:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"The following RTC peers are not established: {non_established_peers}")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test() -> None
+
+ +
+ +

Run VerifyBGPRTCState validation

+ +
+ Source code in anta/tests/routing/bgp.py +
239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
@check_bgp_family_enable("rtc")
+@AntaTest.anta_test
+def test(self) -> None:
+    """Run VerifyBGPRTCState validation"""
+
+    command_output = self.instance_commands[0].json_output
+
+    bgp_vrfs = command_output["vrfs"]
+
+    peers = bgp_vrfs["default"]["peers"]
+    non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict["peerState"] != "Established"]
+
+    if not non_established_peers:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"The following RTC peers are not established: {non_established_peers}")
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+
+ + + Last update: + July 19, 2023 + + + +
+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.6.0/api/tests.routing.generic/index.html b/0.6.0/api/tests.routing.generic/index.html new file mode 100644 index 000000000..04322e994 --- /dev/null +++ b/0.6.0/api/tests.routing.generic/index.html @@ -0,0 +1,2178 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Generic - Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Generic

+ +

ANTA catalog for routing-generic tests

+ + +
+ + + +
+ +

Generic routing test functions

+ + + +
+ + + + + + + + +
+ + + +

+ VerifyBFD + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies there is no BFD peer in down state (all VRF, IPv4 neighbors).

+ +
+ Source code in anta/tests/routing/generic.py +
 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
class VerifyBFD(AntaTest):
+    """
+    Verifies there is no BFD peer in down state (all VRF, IPv4 neighbors).
+    """
+
+    name = "VerifyBFD"
+    description = "Verifies there is no BFD peer in down state (all VRF, IPv4 neighbors)."
+    categories = ["routing", "generic"]
+    # revision 1 as later revision introduce additional nesting for type
+    commands = [AntaCommand(command="show bfd peers", revision=1)]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyBFD validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        self.result.is_success()
+
+        for _, vrf_data in command_output["vrfs"].items():
+            for _, neighbor_data in vrf_data["ipv4Neighbors"].items():
+                for peer, peer_data in neighbor_data["peerStats"].items():
+                    if (peer_status := peer_data["status"]) != "up":
+                        failure_message = f"bfd state for peer '{peer}' is {peer_status} (expected up)."
+                        if (peer_l3intf := peer_data.get("l3intf")) is not None and peer_l3intf != "":
+                            failure_message += f" Interface: {peer_l3intf}."
+                        self.result.is_failure(failure_message)
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test() -> None
+
+ +
+ +

Run VerifyBFD validation

+ +
+ Source code in anta/tests/routing/generic.py +
 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
@AntaTest.anta_test
+def test(self) -> None:
+    """Run VerifyBFD validation"""
+
+    command_output = self.instance_commands[0].json_output
+
+    self.result.is_success()
+
+    for _, vrf_data in command_output["vrfs"].items():
+        for _, neighbor_data in vrf_data["ipv4Neighbors"].items():
+            for peer, peer_data in neighbor_data["peerStats"].items():
+                if (peer_status := peer_data["status"]) != "up":
+                    failure_message = f"bfd state for peer '{peer}' is {peer_status} (expected up)."
+                    if (peer_l3intf := peer_data.get("l3intf")) is not None and peer_l3intf != "":
+                        failure_message += f" Interface: {peer_l3intf}."
+                    self.result.is_failure(failure_message)
+
+
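VerifyBFD walks three levels of nesting (VRF → IPv4 neighbor → per-peer stats) and uses assignment expressions (the := walrus operator, Python 3.8+) to capture the status and optional interface in a single pass. Below is a self-contained sketch against a hand-written payload; the exact nesting of real "show bfd peers" output is assumed from the source above.

# Hand-written sample mirroring the vrfs -> ipv4Neighbors -> peerStats nesting.
sample = {
    "vrfs": {
        "default": {
            "ipv4Neighbors": {
                "10.2.0.1": {
                    "peerStats": {
                        "": {"status": "down", "l3intf": "Ethernet1"},
                    }
                }
            }
        }
    }
}

for vrf_data in sample["vrfs"].values():
    for neighbor_data in vrf_data["ipv4Neighbors"].values():
        for peer, peer_data in neighbor_data["peerStats"].items():
            # Capture the status while testing it, as the real test does.
            if (peer_status := peer_data["status"]) != "up":
                message = f"bfd state for peer '{peer}' is {peer_status} (expected up)."
                if (peer_l3intf := peer_data.get("l3intf")) not in (None, ""):
                    message += f" Interface: {peer_l3intf}."
                print(message)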
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyRoutingProtocolModel + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies the configured routing protocol model is the one we expect, and that there is no mismatch between the configured and operating routing protocol model.

+
model(str): Expected routing protocol model (multi-agent or ribd). Default is multi-agent
+
+ +
+ Source code in anta/tests/routing/generic.py +
10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
class VerifyRoutingProtocolModel(AntaTest):
+    """
+    Verifies the configured routing protocol model is the one we expect,
+    and that there is no mismatch between the configured and operating routing protocol model.
+
+    Args:
+        model(str): Expected routing protocol model (multi-agent or ribd). Default is multi-agent.
+    """
+
+    name = "VerifyRoutingProtocolModel"
+    description = (
+        "Verifies the configured routing protocol model is the expected one and if there is no mismatch between the configured and operating routing protocol model."
+    )
+    categories = ["routing", "generic"]
+    # "revision": 3
+    commands = [AntaCommand(command="show ip route summary")]
+
+    @AntaTest.anta_test
+    def test(self, model: Optional[str] = "multi-agent") -> None:
+        """Run VerifyRoutingProtocolModel validation"""
+
+        if not model:
+            self.result.is_skipped("VerifyRoutingProtocolModel was not run as no model was given")
+            return
+        command_output = self.instance_commands[0].json_output
+
+        configured_model = command_output["protoModelStatus"]["configuredProtoModel"]
+        operating_model = command_output["protoModelStatus"]["operatingProtoModel"]
+        if configured_model == operating_model == model:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"routing model is misconfigured: configured: {configured_model} - operating: {operating_model} - expected: {model}")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(model: Optional[str] = 'multi-agent') -> None
+
+ +
+ +

Run VerifyRoutingProtocolModel validation

+ +
+ Source code in anta/tests/routing/generic.py +
26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
@AntaTest.anta_test
+def test(self, model: Optional[str] = "multi-agent") -> None:
+    """Run VerifyRoutingProtocolModel validation"""
+
+    if not model:
+        self.result.is_skipped("VerifyRoutingProtocolModel was not run as no model was given")
+        return
+    command_output = self.instance_commands[0].json_output
+
+    configured_model = command_output["protoModelStatus"]["configuredProtoModel"]
+    operating_model = command_output["protoModelStatus"]["operatingProtoModel"]
+    if configured_model == operating_model == model:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"routing model is misconfigured: configured: {configured_model} - operating: {operating_model} - expected: {model}")
+
+
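The success condition relies on Python's chained comparison: configured_model == operating_model == model is true only when all three values are equal, which covers both the expected-model check and the configured-versus-operating mismatch check in one expression. A minimal sketch:

# Sketch of the chained comparison used by VerifyRoutingProtocolModel.
proto_model_status = {"configuredProtoModel": "multi-agent", "operatingProtoModel": "multi-agent"}
expected = "multi-agent"

configured = proto_model_status["configuredProtoModel"]
operating = proto_model_status["operatingProtoModel"]

# True only if all three values match.
if configured == operating == expected:
    print("success")
else:
    print(f"misconfigured: configured: {configured} - operating: {operating} - expected: {expected}")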
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyRoutingTableSize + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies that the size of the IP routing table (default VRF) is between the two provided thresholds.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
minimum(int) + +
+

Expected minimum routing table (default VRF) size.

+
+
+ required +
maximum(int) + +
+

Expected maximum routing table (default VRF) size.

+
+
+ required +
+ +
+ Source code in anta/tests/routing/generic.py +
43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
class VerifyRoutingTableSize(AntaTest):
+    """
+    Verifies that the size of the IP routing table (default VRF)
+    is between the two provided thresholds.
+
+    Args:
+        minimum(int): Expected minimum routing table (default VRF) size.
+        maximum(int): Expected maximum routing table (default VRF) size.
+    """
+
+    name = "VerifyRoutingTableSize"
+    description = "Verifies the size of the IP routing table (default VRF). Should be between the two provided thresholds."
+    categories = ["routing", "generic"]
+    # "revision": 3
+    commands = [AntaCommand(command="show ip route summary")]
+
+    @AntaTest.anta_test
+    def test(self, minimum: Optional[int] = None, maximum: Optional[int] = None) -> None:
+        """Run VerifyRoutingTableSize validation"""
+
+        if not minimum or not maximum:
+            self.result.is_skipped(f"VerifyRoutingTableSize was not run as either minimum {minimum} or maximum {maximum} was not provided")
+            return
+        if not isinstance(minimum, int) or not isinstance(maximum, int):
+            self.result.is_error(f"VerifyRoutingTableSize was not run as either minimum {minimum} or maximum {maximum} is not a valid value (integer)")
+            return
+        if maximum < minimum:
+            self.result.is_error(f"VerifyRoutingTableSize was not run as minimum {minimum} is greate than maximum {maximum}.")
+            return
+
+        command_output = self.instance_commands[0].json_output
+        total_routes = int(command_output["vrfs"]["default"]["totalRoutes"])
+        if minimum <= total_routes <= maximum:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"routing-table has {total_routes} routes and not between min ({minimum}) and maximum ({maximum})")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(
+    minimum: Optional[int] = None,
+    maximum: Optional[int] = None,
+) -> None
+
+ +
+ +

Run VerifyRoutingTableSize validation

+ +
+ Source code in anta/tests/routing/generic.py +
59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
@AntaTest.anta_test
+def test(self, minimum: Optional[int] = None, maximum: Optional[int] = None) -> None:
+    """Run VerifyRoutingTableSize validation"""
+
+    if not minimum or not maximum:
+        self.result.is_skipped(f"VerifyRoutingTableSize was not run as either minimum {minimum} or maximum {maximum} was not provided")
+        return
+    if not isinstance(minimum, int) or not isinstance(maximum, int):
+        self.result.is_error(f"VerifyRoutingTableSize was not run as either minimum {minimum} or maximum {maximum} is not a valid value (integer)")
+        return
+    if maximum < minimum:
+        self.result.is_error(f"VerifyRoutingTableSize was not run as minimum {minimum} is greate than maximum {maximum}.")
+        return
+
+    command_output = self.instance_commands[0].json_output
+    total_routes = int(command_output["vrfs"]["default"]["totalRoutes"])
+    if minimum <= total_routes <= maximum:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"routing-table has {total_routes} routes and not between min ({minimum}) and maximum ({maximum})")
+
+
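Condensed into a plain function, the logic above is input validation (both bounds present, integers, correctly ordered) followed by an inclusive range check; a small sketch:

# Sketch of the validation-plus-inclusive-range logic in VerifyRoutingTableSize.
def check_table_size(total_routes: int, minimum: int, maximum: int) -> str:
    if maximum < minimum:
        return f"error: minimum {minimum} is greater than maximum {maximum}"
    if minimum <= total_routes <= maximum:
        return "success"
    return f"failure: {total_routes} routes, expected between {minimum} and {maximum}"


print(check_table_size(42, 10, 100))  # -> success
print(check_table_size(7, 10, 100))   # -> failure: 7 routes, expected between 10 and 100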
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+
+ + + Last update: + July 19, 2023 + + + +
+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.6.0/api/tests.routing.ospf/index.html b/0.6.0/api/tests.routing.ospf/index.html new file mode 100644 index 000000000..862c6fff9 --- /dev/null +++ b/0.6.0/api/tests.routing.ospf/index.html @@ -0,0 +1,1957 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + OSPF - Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

OSPF

+ +

ANTA catalog for routing-ospf tests

+ + +
+ + + +
+ +

OSPF test functions

+ + + +
+ + + + + + + + +
+ + + +

+ VerifyOSPFNeighborCount + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies the number of OSPF neighbors in FULL state is the one we expect.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
number + int + +
+

The expected number of OSPF neighbors in FULL state.

+
+
+ required +
+ +
+ Source code in anta/tests/routing/ospf.py +
 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
class VerifyOSPFNeighborCount(AntaTest):
+    """
+    Verifies the number of OSPF neighbors in FULL state is the one we expect.
+
+    Args:
+        number (int): The expected number of OSPF neighbors in FULL state.
+    """
+
+    name = "VerifyOSPFNeighborCount"
+    description = "Verifies the number of OSPF neighbors in FULL state is the one we expect."
+    categories = ["routing", "ospf"]
+    commands = [AntaCommand(command="show ip ospf neighbor")]
+
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None) -> None:
+        """Run VerifyOSPFNeighborCount validation"""
+        if not (isinstance(number, int) and number >= 0):
+            self.result.is_skipped(f"VerifyOSPFNeighborCount was not run as the number given '{number}' is not a valid value.")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        if (neighbor_count := _count_ospf_neighbor(command_output)) == 0:
+            self.result.is_skipped("no OSPF neighbor found")
+            return
+
+        self.result.is_success()
+
+        if neighbor_count != number:
+            self.result.is_failure(f"device has {neighbor_count} neighbors (expected {number})")
+
+        not_full_neighbors = _get_not_full_ospf_neighbors(command_output)
+        if not_full_neighbors:
+            self.result.is_failure(f"Some neighbors are not correctly configured: {not_full_neighbors}.")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(number: Optional[int] = None) -> None
+
+ +
+ +

Run VerifyOSPFNeighborCount validation

+ +
+ Source code in anta/tests/routing/ospf.py +
 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
@AntaTest.anta_test
+def test(self, number: Optional[int] = None) -> None:
+    """Run VerifyOSPFNeighborCount validation"""
+    if not (isinstance(number, int) and number >= 0):
+        self.result.is_skipped(f"VerifyOSPFNeighborCount was not run as the number given '{number}' is not a valid value.")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    if (neighbor_count := _count_ospf_neighbor(command_output)) == 0:
+        self.result.is_skipped("no OSPF neighbor found")
+        return
+
+    self.result.is_success()
+
+    if neighbor_count != number:
+        self.result.is_failure(f"device has {neighbor_count} neighbors (expected {number})")
+
+    not_full_neighbors = _get_not_full_ospf_neighbors(command_output)
+    if not_full_neighbors:
+        self.result.is_failure(f"Some neighbors are not correctly configured: {not_full_neighbors}.")
+
+
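_count_ospf_neighbor and _get_not_full_ospf_neighbors are private helpers not rendered on this page. The sketch below shows plausible implementations over a hypothetical payload; the instList/ospfNeighborEntries/adjacencyState structure is an assumption about "show ip ospf neighbor" output, not confirmed by this page.

# Hypothetical payload shape; real "show ip ospf neighbor" output may differ.
sample = {
    "vrfs": {
        "default": {
            "instList": {
                "1": {
                    "ospfNeighborEntries": [
                        {"routerId": "1.1.1.1", "adjacencyState": "full"},
                        {"routerId": "2.2.2.2", "adjacencyState": "2Ways"},
                    ]
                }
            }
        }
    }
}


def iter_neighbors(output: dict):
    """Yield every OSPF neighbor entry across all VRFs and instances."""
    for vrf_data in output["vrfs"].values():
        for instance in vrf_data["instList"].values():
            yield from instance["ospfNeighborEntries"]


neighbor_count = sum(1 for _ in iter_neighbors(sample))
not_full = [n["routerId"] for n in iter_neighbors(sample) if n["adjacencyState"] != "full"]
print(neighbor_count, not_full)  # -> 2 ['2.2.2.2']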
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyOSPFNeighborState + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies all OSPF neighbors are in FULL state.

+ +
+ Source code in anta/tests/routing/ospf.py +
43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
class VerifyOSPFNeighborState(AntaTest):
+    """
+    Verifies all OSPF neighbors are in FULL state.
+    """
+
+    name = "VerifyOSPFNeighborState"
+    description = "Verifies all OSPF neighbors are in FULL state."
+    categories = ["routing", "ospf"]
+    commands = [AntaCommand(command="show ip ospf neighbor")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyOSPFNeighborState validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        if _count_ospf_neighbor(command_output) == 0:
+            self.result.is_skipped("no OSPF neighbor found")
+            return
+
+        self.result.is_success()
+
+        not_full_neighbors = _get_not_full_ospf_neighbors(command_output)
+        if not_full_neighbors:
+            self.result.is_failure(f"Some neighbors are not correctly configured: {not_full_neighbors}.")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test() -> None
+
+ +
+ +

Run VerifyOSPFNeighborState validation

+ +
+ Source code in anta/tests/routing/ospf.py +
53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
@AntaTest.anta_test
+def test(self) -> None:
+    """Run VerifyOSPFNeighborState validation"""
+
+    command_output = self.instance_commands[0].json_output
+
+    if _count_ospf_neighbor(command_output) == 0:
+        self.result.is_skipped("no OSPF neighbor found")
+        return
+
+    self.result.is_success()
+
+    not_full_neighbors = _get_not_full_ospf_neighbors(command_output)
+    if not_full_neighbors:
+        self.result.is_failure(f"Some neighbors are not correctly configured: {not_full_neighbors}.")
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+
+ + + Last update: + July 19, 2023 + + + +
+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.6.0/api/tests.security/index.html b/0.6.0/api/tests.security/index.html new file mode 100644 index 000000000..f9b6c700c --- /dev/null +++ b/0.6.0/api/tests.security/index.html @@ -0,0 +1,3616 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Security - Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Security

+ +

ANTA catalog for security tests

+ + +
+ + + +
+ +

Test functions related to the EOS various security settings

+ + + +
+ + + + + + + + +
+ + + +

+ VerifyAPIHttpStatus + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies if eAPI HTTP server is disabled globally.

+ +
Expected Results
  • success: The test will pass if eAPI HTTP server is disabled globally.
  • failure: The test will fail if eAPI HTTP server is NOT disabled globally.
+
+
+ Source code in anta/tests/security.py +
166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
class VerifyAPIHttpStatus(AntaTest):
+    """
+    Verifies if eAPI HTTP server is disabled globally.
+
+    Expected Results:
+        * success: The test will pass if eAPI HTTP server is disabled globally.
+        * failure: The test will fail if eAPI HTTP server is NOT disabled globally.
+    """
+
+    name = "VerifyAPIHttpStatus"
+    description = "Verifies if eAPI HTTP server is disabled globally."
+    categories = ["security"]
+    commands = [AntaCommand(command="show management api http-commands")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyAPIHTTPStatus validation.
+        """
+
+        command_output = self.instance_commands[0].json_output
+
+        if command_output["enabled"] and not command_output["httpServer"]["running"]:
+            self.result.is_success()
+        else:
+            self.result.is_failure("eAPI HTTP server is enabled globally")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test() -> None
+
+ +
+ +

Run VerifyAPIHTTPStatus validation.

+ +
+ Source code in anta/tests/security.py +
180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyAPIHTTPStatus validation.
+    """
+
+    command_output = self.instance_commands[0].json_output
+
+    if command_output["enabled"] and not command_output["httpServer"]["running"]:
+        self.result.is_success()
+    else:
+        self.result.is_failure("eAPI HTTP server is enabled globally")
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyAPIHttpsSSL + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies if eAPI HTTPS server SSL profile is configured and valid.

+ +
Expected results
  • success: The test will pass if the eAPI HTTPS server SSL profile is configured and valid.
  • failure: The test will fail if the eAPI HTTPS server SSL profile is NOT configured, misconfigured or invalid.
  • skipped: The test will be skipped if the SSL profile is not provided.
+
+
+ Source code in anta/tests/security.py +
194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
class VerifyAPIHttpsSSL(AntaTest):
+    """
+    Verifies if eAPI HTTPS server SSL profile is configured and valid.
+
+    Expected results:
+        * success: The test will pass if the eAPI HTTPS server SSL profile is configured and valid.
+        * failure: The test will fail if the eAPI HTTPS server SSL profile is NOT configured, misconfigured or invalid.
+        * skipped: The test will be skipped if the SSL profile is not provided.
+    """
+
+    name = "VerifyAPIHttpsSSL"
+    description = "Verifies if eAPI HTTPS server SSL profile is configured and valid."
+    categories = ["security"]
+    commands = [AntaCommand(command="show management api http-commands")]
+
+    @AntaTest.anta_test
+    def test(self, profile: Optional[str] = None) -> None:
+        """
+        Run VerifyAPIHttpsSSL validation.
+
+        Args:
+            profile: SSL profile to verify.
+        """
+        if not profile:
+            self.result.is_skipped(f"{self.__class__.name} did not run because profile was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        try:
+            if command_output["sslProfile"]["name"] == profile and command_output["sslProfile"]["state"] == "valid":
+                self.result.is_success()
+            else:
+                self.result.is_failure(f"eAPI HTTPS server SSL profile ({profile}) is misconfigured or invalid")
+
+        except KeyError:
+            self.result.is_failure(f"eAPI HTTPS server SSL profile ({profile}) is not configured")
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(profile: Optional[str] = None) -> None
+
+ +
+ +

Run VerifyAPIHttpsSSL validation.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
profile + Optional[str] + +
+

SSL profile to verify.

+
+
+ None +
+ +
+ Source code in anta/tests/security.py +
209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
@AntaTest.anta_test
+def test(self, profile: Optional[str] = None) -> None:
+    """
+    Run VerifyAPIHttpsSSL validation.
+
+    Args:
+        profile: SSL profile to verify.
+    """
+    if not profile:
+        self.result.is_skipped(f"{self.__class__.name} did not run because profile was not supplied")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    try:
+        if command_output["sslProfile"]["name"] == profile and command_output["sslProfile"]["state"] == "valid":
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"eAPI HTTPS server SSL profile ({profile}) is misconfigured or invalid")
+
+    except KeyError:
+        self.result.is_failure(f"eAPI HTTPS server SSL profile ({profile}) is not configured")
+
+
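Note the EAFP style (try/except KeyError) used above: a missing sslProfile key means the profile is not configured at all, which is reported differently from a profile that is present but misconfigured or invalid. A standalone sketch of that pattern:

# Sketch of the try/except KeyError pattern used by VerifyAPIHttpsSSL.
def check_ssl_profile(output: dict, profile: str) -> str:
    try:
        if output["sslProfile"]["name"] == profile and output["sslProfile"]["state"] == "valid":
            return "success"
        return f"SSL profile ({profile}) is misconfigured or invalid"
    except KeyError:
        # Missing keys mean the profile is not configured at all.
        return f"SSL profile ({profile}) is not configured"


print(check_ssl_profile({"sslProfile": {"name": "API_SSL", "state": "valid"}}, "API_SSL"))  # success
print(check_ssl_profile({}, "API_SSL"))  # not configured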
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyAPIIPv4Acl + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies if eAPI has the right number of IPv4 ACL(s) configured for a specified VRF.

+ +
Expected results
  • success: The test will pass if eAPI has the provided number of IPv4 ACL(s) in the specified VRF.
  • failure: The test will fail if eAPI does not have the right number of IPv4 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.
+
+
+ Source code in anta/tests/security.py +
233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
class VerifyAPIIPv4Acl(AntaTest):
+    """
+    Verifies if eAPI has the right number of IPv4 ACL(s) configured for a specified VRF.
+
+    Expected results:
+        * success: The test will pass if eAPI has the provided number of IPv4 ACL(s) in the specified VRF.
+        * failure: The test will fail if eAPI does not have the right number of IPv4 ACL(s) in the specified VRF.
+        * skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.
+    """
+
+    name = "VerifyAPIIPv4Acl"
+    description = "Verifies if eAPI has the right number IPv4 ACL(s) configured for a specified VRF."
+    categories = ["security"]
+    commands = [AntaCommand(command="show management api http-commands ip access-list summary")]
+
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+        """
+        Run VerifyAPIIPv4Acl validation.
+
+        Args:
+            number: The number of expected IPv4 ACL(s).
+            vrf: The name of the VRF in which to check for eAPI. Defaults to 'default'.
+        """
+        if not number or not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        ipv4_acl_list = command_output["ipAclList"]["aclList"]
+        ipv4_acl_number = len(ipv4_acl_list)
+        not_configured_acl_list = []
+
+        if ipv4_acl_number != number:
+            self.result.is_failure(f"Expected {number} eAPI IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}")
+            return
+
+        for ipv4_acl in ipv4_acl_list:
+            if vrf not in ipv4_acl["configuredVrfs"] or vrf not in ipv4_acl["activeVrfs"]:
+                not_configured_acl_list.append(ipv4_acl["name"])
+
+        if not_configured_acl_list:
+            self.result.is_failure(f"eAPI IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+        else:
+            self.result.is_success()
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(
+    number: Optional[int] = None, vrf: str = "default"
+) -> None
+
+ +
+ +

Run VerifyAPIIPv4Acl validation.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
number + Optional[int] + +
+

The number of expected IPv4 ACL(s).

+
+
+ None +
vrf + str + +
+

The name of the VRF in which to check for eAPI. Defaults to ‘default’.

+
+
+ 'default' +
+ +
+ Source code in anta/tests/security.py +
248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
@AntaTest.anta_test
+def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+    """
+    Run VerifyAPIIPv4Acl validation.
+
+    Args:
+        number: The number of expected IPv4 ACL(s).
+        vrf: The name of the VRF in which to check for eAPI. Defaults to 'default'.
+    """
+    if not number or not vrf:
+        self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    ipv4_acl_list = command_output["ipAclList"]["aclList"]
+    ipv4_acl_number = len(ipv4_acl_list)
+    not_configured_acl_list = []
+
+    if ipv4_acl_number != number:
+        self.result.is_failure(f"Expected {number} eAPI IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}")
+        return
+
+    for ipv4_acl in ipv4_acl_list:
+        if vrf not in ipv4_acl["configuredVrfs"] or vrf not in ipv4_acl["activeVrfs"]:
+            not_configured_acl_list.append(ipv4_acl["name"])
+
+    if not_configured_acl_list:
+        self.result.is_failure(f"eAPI IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+    else:
+        self.result.is_success()
+
+
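VerifyAPIIPv4Acl, VerifyAPIIPv6Acl and the SSH/SNMP ACL tests below all share the same two-stage pattern: first compare the ACL count, then verify each ACL is both configured and active in the requested VRF. A condensed standalone sketch; the sample ACL entry is invented, while the field names follow the source above.

# Sketch of the shared ACL-count-plus-VRF-membership pattern.
def check_acls(acl_list: list, number: int, vrf: str) -> str:
    if len(acl_list) != number:
        return f"Expected {number} ACL(s) in vrf {vrf} but got {len(acl_list)}"
    missing = [
        acl["name"]
        for acl in acl_list
        if vrf not in acl["configuredVrfs"] or vrf not in acl["activeVrfs"]
    ]
    if missing:
        return f"ACL(s) not configured or active in vrf {vrf}: {missing}"
    return "success"


sample_acls = [{"name": "ACL_API", "configuredVrfs": ["default"], "activeVrfs": ["default"]}]
print(check_acls(sample_acls, 1, "default"))  # -> success
print(check_acls(sample_acls, 1, "MGMT"))     # -> not configured or active in vrf MGMT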
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyAPIIPv6Acl + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies if eAPI has the right number of IPv6 ACL(s) configured for a specified VRF.

+ +
Expected results
  • success: The test will pass if eAPI has the provided number of IPv6 ACL(s) in the specified VRF.
  • failure: The test will fail if eAPI does not have the right number of IPv6 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.
+
+
+ Source code in anta/tests/security.py +
281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
class VerifyAPIIPv6Acl(AntaTest):
+    """
+    Verifies if eAPI has the right number of IPv6 ACL(s) configured for a specified VRF.
+
+    Expected results:
+        * success: The test will pass if eAPI has the provided number of IPv6 ACL(s) in the specified VRF.
+        * failure: The test will fail if eAPI does not have the right number of IPv6 ACL(s) in the specified VRF.
+        * skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.
+    """
+
+    name = "VerifyAPIIPv6Acl"
+    description = "Verifies if eAPI has the right number IPv6 ACL(s) configured for a specified VRF."
+    categories = ["security"]
+    commands = [AntaCommand(command="show management api http-commands ipv6 access-list summary")]
+
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+        """
+        Run VerifyAPIIPv6Acl validation.
+
+        Args:
+            number: The number of expected IPv6 ACL(s).
+            vrf: The name of the VRF in which to check for eAPI. Defaults to 'default'.
+        """
+        if not number or not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        ipv6_acl_list = command_output["ipv6AclList"]["aclList"]
+        ipv6_acl_number = len(ipv6_acl_list)
+        not_configured_acl_list = []
+
+        if ipv6_acl_number != number:
+            self.result.is_failure(f"Expected {number} eAPI IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}")
+            return
+
+        for ipv6_acl in ipv6_acl_list:
+            if vrf not in ipv6_acl["configuredVrfs"] or vrf not in ipv6_acl["activeVrfs"]:
+                not_configured_acl_list.append(ipv6_acl["name"])
+
+        if not_configured_acl_list:
+            self.result.is_failure(f"eAPI IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+        else:
+            self.result.is_success()
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(
+    number: Optional[int] = None, vrf: str = "default"
+) -> None
+
+ +
+ +

Run VerifyAPIIPv6Acl validation.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
number + Optional[int] + +
+

The number of expected IPv6 ACL(s).

+
+
+ None +
vrf + str + +
+

The name of the VRF in which to check for eAPI. Defaults to ‘default’.

+
+
+ 'default' +
+ +
+ Source code in anta/tests/security.py +
296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
@AntaTest.anta_test
+def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+    """
+    Run VerifyAPIIPv6Acl validation.
+
+    Args:
+        number: The number of expected IPv6 ACL(s).
+        vrf: The name of the VRF in which to check for eAPI. Defaults to 'default'.
+    """
+    if not number or not vrf:
+        self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    ipv6_acl_list = command_output["ipv6AclList"]["aclList"]
+    ipv6_acl_number = len(ipv6_acl_list)
+    not_configured_acl_list = []
+
+    if ipv6_acl_number != number:
+        self.result.is_failure(f"Expected {number} eAPI IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}")
+        return
+
+    for ipv6_acl in ipv6_acl_list:
+        if vrf not in ipv6_acl["configuredVrfs"] or vrf not in ipv6_acl["activeVrfs"]:
+            not_configured_acl_list.append(ipv6_acl["name"])
+
+    if not_configured_acl_list:
+        self.result.is_failure(f"eAPI IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+    else:
+        self.result.is_success()
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifySSHIPv4Acl + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies if the SSHD agent has the right number of IPv4 ACL(s) configured for a specified VRF.

+ +
Expected results
  • success: The test will pass if the SSHD agent has the provided number of IPv4 ACL(s) in the specified VRF.
  • failure: The test will fail if the SSHD agent does not have the right number of IPv4 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.
+
+
+ Source code in anta/tests/security.py +
42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
class VerifySSHIPv4Acl(AntaTest):
+    """
+    Verifies if the SSHD agent has the right number of IPv4 ACL(s) configured for a specified VRF.
+
+    Expected results:
+        * success: The test will pass if the SSHD agent has the provided number of IPv4 ACL(s) in the specified VRF.
+        * failure: The test will fail if the SSHD agent does not have the right number of IPv4 ACL(s) in the specified VRF.
+        * skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.
+    """
+
+    name = "VerifySSHIPv4Acl"
+    description = "Verifies if the SSHD agent has IPv4 ACL(s) configured."
+    categories = ["security"]
+    commands = [AntaCommand(command="show management ssh ip access-list summary")]
+
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+        """
+        Run VerifySSHIPv4Acl validation.
+
+        Args:
+            number: The number of expected IPv4 ACL(s).
+            vrf: The name of the VRF in which to check for the SSHD agent. Defaults to 'default'.
+        """
+        if not number or not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        ipv4_acl_list = command_output["ipAclList"]["aclList"]
+        ipv4_acl_number = len(ipv4_acl_list)
+        not_configured_acl_list = []
+
+        if ipv4_acl_number != number:
+            self.result.is_failure(f"Expected {number} SSH IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}")
+            return
+
+        for ipv4_acl in ipv4_acl_list:
+            if vrf not in ipv4_acl["configuredVrfs"] or vrf not in ipv4_acl["activeVrfs"]:
+                not_configured_acl_list.append(ipv4_acl["name"])
+
+        if not_configured_acl_list:
+            self.result.is_failure(f"SSH IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+        else:
+            self.result.is_success()
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(
+    number: Optional[int] = None, vrf: str = "default"
+) -> None
+
+ +
+ +

Run VerifySSHIPv4Acl validation.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
number + Optional[int] + +
+

The number of expected IPv4 ACL(s).

+
+
+ None +
vrf + str + +
+

The name of the VRF in which to check for the SSHD agent. Defaults to ‘default’.

+
+
+ 'default' +
+ +
+ Source code in anta/tests/security.py +
57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
@AntaTest.anta_test
+def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+    """
+    Run VerifySSHIPv4Acl validation.
+
+    Args:
+        number: The number of expected IPv4 ACL(s).
+        vrf: The name of the VRF in which to check for the SSHD agent. Defaults to 'default'.
+    """
+    if not number or not vrf:
+        self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    ipv4_acl_list = command_output["ipAclList"]["aclList"]
+    ipv4_acl_number = len(ipv4_acl_list)
+    not_configured_acl_list = []
+
+    if ipv4_acl_number != number:
+        self.result.is_failure(f"Expected {number} SSH IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}")
+        return
+
+    for ipv4_acl in ipv4_acl_list:
+        if vrf not in ipv4_acl["configuredVrfs"] or vrf not in ipv4_acl["activeVrfs"]:
+            not_configured_acl_list.append(ipv4_acl["name"])
+
+    if not_configured_acl_list:
+        self.result.is_failure(f"SSH IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+    else:
+        self.result.is_success()
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifySSHIPv6Acl + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies if the SSHD agent has the right number of IPv6 ACL(s) configured for a specified VRF.

+ +
Expected results
  • success: The test will pass if the SSHD agent has the provided number of IPv6 ACL(s) in the specified VRF.
  • failure: The test will fail if the SSHD agent does not have the right number of IPv6 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.
+
+
+ Source code in anta/tests/security.py +
 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
class VerifySSHIPv6Acl(AntaTest):
+    """
+    Verifies if the SSHD agent has the right number of IPv6 ACL(s) configured for a specified VRF.
+
+    Expected results:
+        * success: The test will pass if the SSHD agent has the provided number of IPv6 ACL(s) in the specified VRF.
+        * failure: The test will fail if the SSHD agent does not have the right number of IPv6 ACL(s) in the specified VRF.
+        * skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.
+    """
+
+    name = "VerifySSHIPv6Acl"
+    description = "Verifies if the SSHD agent has IPv6 ACL(s) configured."
+    categories = ["security"]
+    commands = [AntaCommand(command="show management ssh ipv6 access-list summary")]
+
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+        """
+        Run VerifySSHIPv6Acl validation.
+
+        Args:
+            number: The number of expected IPv6 ACL(s).
+            vrf: The name of the VRF in which to check for the SSHD agent. Defaults to 'default'.
+        """
+        if not number or not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        ipv6_acl_list = command_output["ipv6AclList"]["aclList"]
+        ipv6_acl_number = len(ipv6_acl_list)
+        not_configured_acl_list = []
+
+        if ipv6_acl_number != number:
+            self.result.is_failure(f"Expected {number} SSH IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}")
+            return
+
+        for ipv6_acl in ipv6_acl_list:
+            if vrf not in ipv6_acl["configuredVrfs"] or vrf not in ipv6_acl["activeVrfs"]:
+                not_configured_acl_list.append(ipv6_acl["name"])
+
+        if not_configured_acl_list:
+            self.result.is_failure(f"SSH IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+        else:
+            self.result.is_success()
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test(
+    number: Optional[int] = None, vrf: str = "default"
+) -> None
+
+ +
+ +

Run VerifySSHIPv6Acl validation.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
number + Optional[int] + +
+

The number of expected IPv6 ACL(s).

+
+
+ None +
vrf + str + +
+

The name of the VRF in which to check for the SSHD agent. Defaults to ‘default’.

+
+
+ 'default' +
+ +
+ Source code in anta/tests/security.py +
105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
@AntaTest.anta_test
+def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+    """
+    Run VerifySSHIPv6Acl validation.
+
+    Args:
+        number: The number of expected IPv6 ACL(s).
+        vrf: The name of the VRF in which to check for the SSHD agent. Defaults to 'default'.
+    """
+    if not number or not vrf:
+        self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    ipv6_acl_list = command_output["ipv6AclList"]["aclList"]
+    ipv6_acl_number = len(ipv6_acl_list)
+    not_configured_acl_list = []
+
+    if ipv6_acl_number != number:
+        self.result.is_failure(f"Expected {number} SSH IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}")
+        return
+
+    for ipv6_acl in ipv6_acl_list:
+        if vrf not in ipv6_acl["configuredVrfs"] or vrf not in ipv6_acl["activeVrfs"]:
+            not_configured_acl_list.append(ipv6_acl["name"])
+
+    if not_configured_acl_list:
+        self.result.is_failure(f"SSH IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+    else:
+        self.result.is_success()
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifySSHStatus + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies if the SSHD agent is disabled in the default VRF.

+ +
Expected Results
  • success: The test will pass if the SSHD agent is disabled in the default VRF.
  • failure: The test will fail if the SSHD agent is NOT disabled in the default VRF.
+
+
+ Source code in anta/tests/security.py +
11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
class VerifySSHStatus(AntaTest):
+    """
+    Verifies if the SSHD agent is disabled in the default VRF.
+
+    Expected Results:
+        * success: The test will pass if the SSHD agent is disabled in the default VRF.
+        * failure: The test will fail if the SSHD agent is NOT disabled in the default VRF.
+    """
+
+    name = "VerifySSHStatus"
+    description = "Verifies if the SSHD agent is disabled in the default VRF."
+    categories = ["security"]
+    commands = [AntaCommand(command="show management ssh", ofmt="text")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifySSHStatus validation.
+        """
+
+        command_output = self.instance_commands[0].text_output
+
+        line = [line for line in command_output.split("\n") if line.startswith("SSHD status")][0]
+        status = line.split("is ")[1]
+
+        if status == "disabled":
+            self.result.is_success()
+        else:
+            self.result.is_failure(line)
+
+
+ + + +
+ + + + + + + + + + +
+ + + +

+ test + + +

+
test() -> None
+
+ +
+ +

Run VerifySSHStatus validation.

+ +
+ Source code in anta/tests/security.py +
25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifySSHStatus validation.
+    """
+
+    command_output = self.instance_commands[0].text_output
+
+    line = [line for line in command_output.split("\n") if line.startswith("SSHD status")][0]
+    status = line.split("is ")[1]
+
+    if status == "disabled":
+        self.result.is_success()
+    else:
+        self.result.is_failure(line)
+
+
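Unlike the other tests on this page, VerifySSHStatus requests text output (ofmt="text") and parses it by line prefix. A standalone sketch of that parsing; the sample output is hand-written, not captured from a device.

# Sketch of the text parsing in VerifySSHStatus.
text_output = "SSHD status for Default VRF is disabled\nSSH connection limit is 100\n"

# Find the status line by prefix, then take everything after "is ".
line = next(l for l in text_output.split("\n") if l.startswith("SSHD status"))
status = line.split("is ")[1]
print("success" if status == "disabled" else line)  # -> success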
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VerifyTelnetStatus + + +

+ + +
+

+ Bases: AntaTest

+ + +

Verifies if Telnet is disabled in the default VRF.

+ +
+ Expected Results +
    +
  • success: The test will pass if Telnet is disabled in the default VRF.
  • +
  • failure: The test will fail if Telnet is NOT disabled in the default VRF.
  • +
+
+
+ Source code in anta/tests/security.py +
138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
class VerifyTelnetStatus(AntaTest):
+    """
+    Verifies if Telnet is disabled in the default VRF.
+
+    Expected Results:
+        * success: The test will pass if Telnet is disabled in the default VRF.
+        * failure: The test will fail if Telnet is NOT disabled in the default VRF.
+    """
+
+    name = "VerifyTelnetStatus"
+    description = "Verifies if Telnet is disabled in the default VRF."
+    categories = ["security"]
+    commands = [AntaCommand(command="show management telnet")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyTelnetStatus validation.
+        """
+
+        command_output = self.instance_commands[0].json_output
+
+        if command_output["serverState"] == "disabled":
+            self.result.is_success()
+        else:
+            self.result.is_failure("Telnet status for Default VRF is enabled")

test

test() -> None

Run VerifyTelnetStatus validation.

Source code in anta/tests/security.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyTelnetStatus validation.
+    """
+
+    command_output = self.instance_commands[0].json_output
+
+    if command_output["serverState"] == "disabled":
+        self.result.is_success()
+    else:
+        self.result.is_failure("Telnet status for Default VRF is enabled")
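
Unlike the text-based SSH check above, this test reads structured eAPI JSON. A minimal sketch of the same comparison against a hypothetical payload (the dictionary shape mirrors the code above, but the values are assumed):

# Hypothetical eAPI JSON for "show management telnet" (assumed values).
sample_output = {"serverState": "disabled"}

# Same check as VerifyTelnetStatus.test().
if sample_output["serverState"] == "disabled":
    print("PASS")
else:
    print("FAIL: Telnet status for Default VRF is enabled")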
\ No newline at end of file
diff --git a/0.6.0/api/tests.snmp/index.html b/0.6.0/api/tests.snmp/index.html
new file mode 100644
index 000000000..6a6ecb5d4
--- /dev/null
+++ b/0.6.0/api/tests.snmp/index.html
@@ -0,0 +1,2389 @@

SNMP - Arista Network Test Automation - ANTA

SNMP

ANTA catalog for SNMP tests

Test functions related to the various EOS SNMP settings

VerifySnmpIPv4Acl

Bases: AntaTest

Verifies if the SNMP agent has the right number of IPv4 ACL(s) configured for a specified VRF.

Expected results:
  • success: The test will pass if the SNMP agent has the provided number of IPv4 ACL(s) in the specified VRF.
  • failure: The test will fail if the SNMP agent does not have the right number of IPv4 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv4 ACL(s) or the VRF parameter is not provided.

Source code in anta/tests/snmp.py
class VerifySnmpIPv4Acl(AntaTest):
+    """
+    Verifies if the SNMP agent has the right number of IPv4 ACL(s) configured for a specified VRF.
+
+    Expected results:
+        * success: The test will pass if the SNMP agent has the provided number of IPv4 ACL(s) in the specified VRF.
+        * failure: The test will fail if the SNMP agent does not have the right number of IPv4 ACL(s) in the specified VRF.
+        * skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.
+    """
+
+    name = "VerifySnmpIPv4Acl"
+    description = "Verifies if the SNMP agent has IPv4 ACL(s) configured."
+    categories = ["snmp"]
+    commands = [AntaCommand(command="show snmp ipv4 access-list summary")]
+
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+        """
+        Run VerifySnmpIPv4Acl validation.
+
+        Args:
+            number: The number of expected IPv4 ACL(s).
+            vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.
+        """
+        if not number or not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        ipv4_acl_list = command_output["ipAclList"]["aclList"]
+        ipv4_acl_number = len(ipv4_acl_list)
+        not_configured_acl_list = []
+
+        if ipv4_acl_number != number:
+            self.result.is_failure(f"Expected {number} SNMP IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}")
+            return
+
+        for ipv4_acl in ipv4_acl_list:
+            if vrf not in ipv4_acl["configuredVrfs"] or vrf not in ipv4_acl["activeVrfs"]:
+                not_configured_acl_list.append(ipv4_acl["name"])
+
+        if not_configured_acl_list:
+            self.result.is_failure(f"SNMP IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+        else:
+            self.result.is_success()

test

test(
    number: Optional[int] = None, vrf: str = "default"
) -> None

Run VerifySnmpIPv4Acl validation.

Parameters:

Name    Type           Description                                                                         Default
number  Optional[int]  The number of expected IPv4 ACL(s).                                                 None
vrf     str            The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.    'default'

Source code in anta/tests/snmp.py
@AntaTest.anta_test
+def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+    """
+    Run VerifySnmpIPv4Acl validation.
+
+    Args:
+        number: The number of expected IPv4 ACL(s).
+        vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.
+    """
+    if not number or not vrf:
+        self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    ipv4_acl_list = command_output["ipAclList"]["aclList"]
+    ipv4_acl_number = len(ipv4_acl_list)
+    not_configured_acl_list = []
+
+    if ipv4_acl_number != number:
+        self.result.is_failure(f"Expected {number} SNMP IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}")
+        return
+
+    for ipv4_acl in ipv4_acl_list:
+        if vrf not in ipv4_acl["configuredVrfs"] or vrf not in ipv4_acl["activeVrfs"]:
+            not_configured_acl_list.append(ipv4_acl["name"])
+
+    if not_configured_acl_list:
+        self.result.is_failure(f"SNMP IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+    else:
+        self.result.is_success()
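
A minimal standalone sketch of the two-step check above (count first, then per-ACL VRF membership), run against a hypothetical "show snmp ipv4 access-list summary" payload (the sample values are assumptions):

# Hypothetical eAPI JSON (assumed values).
sample_output = {
    "ipAclList": {
        "aclList": [
            {"name": "ACL_SNMP", "configuredVrfs": ["MGMT"], "activeVrfs": ["MGMT"]},
        ]
    }
}
number, vrf = 1, "MGMT"

acl_list = sample_output["ipAclList"]["aclList"]
if len(acl_list) != number:
    print(f"FAIL: expected {number} SNMP IPv4 ACL(s) in vrf {vrf} but got {len(acl_list)}")
else:
    bad = [acl["name"] for acl in acl_list if vrf not in acl["configuredVrfs"] or vrf not in acl["activeVrfs"]]
    print("PASS" if not bad else f"FAIL: not configured or active in vrf {vrf}: {bad}")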

VerifySnmpIPv6Acl

Bases: AntaTest

Verifies if the SNMP agent has the right number of IPv6 ACL(s) configured for a specified VRF.

Expected results:
  • success: The test will pass if the SNMP agent has the provided number of IPv6 ACL(s) in the specified VRF.
  • failure: The test will fail if the SNMP agent does not have the right number of IPv6 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv6 ACL(s) or the VRF parameter is not provided.

Source code in anta/tests/snmp.py
class VerifySnmpIPv6Acl(AntaTest):
+    """
+    Verifies if the SNMP agent has the right number of IPv6 ACL(s) configured for a specified VRF.
+
+    Expected results:
+        * success: The test will pass if the SNMP agent has the provided number of IPv6 ACL(s) in the specified VRF.
+        * failure: The test will fail if the SNMP agent does not have the right number of IPv6 ACL(s) in the specified VRF.
+        * skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.
+    """
+
+    name = "VerifySnmpIPv6Acl"
+    description = "Verifies if the SNMP agent has IPv6 ACL(s) configured."
+    categories = ["snmp"]
+    commands = [AntaCommand(command="show snmp ipv6 access-list summary")]
+
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+        """
+        Run VerifySnmpIPv6Acl validation.
+
+        Args:
+            number: The number of expected IPv6 ACL(s).
+            vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.
+        """
+        if not number or not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        ipv6_acl_list = command_output["ipv6AclList"]["aclList"]
+        ipv6_acl_number = len(ipv6_acl_list)
+        not_configured_acl_list = []
+
+        if ipv6_acl_number != number:
+            self.result.is_failure(f"Expected {number} SNMP IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}")
+            return
+
+        for ipv6_acl in ipv6_acl_list:
+            if vrf not in ipv6_acl["configuredVrfs"] or vrf not in ipv6_acl["activeVrfs"]:
+                not_configured_acl_list.append(ipv6_acl["name"])
+
+        if not_configured_acl_list:
+            self.result.is_failure(f"SNMP IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+        else:
+            self.result.is_success()

test

test(
    number: Optional[int] = None, vrf: str = "default"
) -> None

Run VerifySnmpIPv6Acl validation.

Parameters:

Name    Type           Description                                                                         Default
number  Optional[int]  The number of expected IPv6 ACL(s).                                                 None
vrf     str            The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.    'default'

Source code in anta/tests/snmp.py
+def test(self, number: Optional[int] = None, vrf: str = "default") -> None:
+    """
+    Run VerifySnmpIPv6Acl validation.
+
+    Args:
+        number: The number of expected IPv6 ACL(s).
+        vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.
+    """
+    if not number or not vrf:
+        self.result.is_skipped(f"{self.__class__.name} did not run because number or vrf was not supplied")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    ipv6_acl_list = command_output["ipv6AclList"]["aclList"]
+    ipv6_acl_number = len(ipv6_acl_list)
+    not_configured_acl_list = []
+
+    if ipv6_acl_number != number:
+        self.result.is_failure(f"Expected {number} SNMP IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}")
+        return
+
+    for ipv6_acl in ipv6_acl_list:
+        if vrf not in ipv6_acl["configuredVrfs"] or vrf not in ipv6_acl["activeVrfs"]:
+            not_configured_acl_list.append(ipv6_acl["name"])
+
+    if not_configured_acl_list:
+        self.result.is_failure(f"SNMP IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}")
+    else:
+        self.result.is_success()

VerifySnmpStatus

Bases: AntaTest

Verifies whether the SNMP agent is enabled in a specified VRF.

Expected Results:
  • success: The test will pass if the SNMP agent is enabled in the specified VRF.
  • failure: The test will fail if the SNMP agent is disabled in the specified VRF.
  • skipped: The test will be skipped if the VRF parameter is not provided.

Source code in anta/tests/snmp.py
class VerifySnmpStatus(AntaTest):
+    """
+    Verifies whether the SNMP agent is enabled in a specified VRF.
+
+    Expected Results:
+        * success: The test will pass if the SNMP agent is enabled in the specified VRF.
+        * failure: The test will fail if the SNMP agent is disabled in the specified VRF.
+        * skipped: The test will be skipped if the VRF parameter is not provided.
+    """
+
+    name = "VerifySnmpStatus"
+    description = "Verifies if the SNMP agent is enabled."
+    categories = ["snmp"]
+    commands = [AntaCommand(command="show snmp")]
+
+    @AntaTest.anta_test
+    def test(self, vrf: str = "default") -> None:
+        """
+        Run VerifySnmpStatus validation.
+
+        Args:
+            vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.
+        """
+        if not vrf:
+            self.result.is_skipped(f"{self.__class__.name} did not run because vrf was not supplied")
+        else:
+            command_output = self.instance_commands[0].json_output
+
+            if command_output["enabled"] and vrf in command_output["vrfs"]["snmpVrfs"]:
+                self.result.is_success()
+            else:
+                self.result.is_failure(f"SNMP agent disabled in vrf {vrf}")

test

test(vrf: str = 'default') -> None

Run VerifySnmpStatus validation.

Parameters:

Name  Type  Description                                                                         Default
vrf   str   The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.    'default'

Source code in anta/tests/snmp.py
@AntaTest.anta_test
+def test(self, vrf: str = "default") -> None:
+    """
+    Run VerifySnmpStatus validation.
+
+    Args:
+        vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.
+    """
+    if not vrf:
+        self.result.is_skipped(f"{self.__class__.name} did not run because vrf was not supplied")
+    else:
+        command_output = self.instance_commands[0].json_output
+
+        if command_output["enabled"] and vrf in command_output["vrfs"]["snmpVrfs"]:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"SNMP agent disabled in vrf {vrf}")
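
A minimal sketch of the same two-condition check (agent enabled and VRF present), against a hypothetical "show snmp" payload (the sample values are assumptions):

# Hypothetical eAPI JSON (assumed values).
sample_output = {"enabled": True, "vrfs": {"snmpVrfs": ["default", "MGMT"]}}
vrf = "MGMT"

if sample_output["enabled"] and vrf in sample_output["vrfs"]["snmpVrfs"]:
    print("PASS")
else:
    print(f"FAIL: SNMP agent disabled in vrf {vrf}")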
\ No newline at end of file
diff --git a/0.6.0/api/tests.software/index.html b/0.6.0/api/tests.software/index.html
new file mode 100644
index 000000000..64174f78e
--- /dev/null
+++ b/0.6.0/api/tests.software/index.html
@@ -0,0 +1,2205 @@

Software - Arista Network Test Automation - ANTA

Software

ANTA catalog for software tests

Test functions related to the EOS software

VerifyEOSExtensions

Bases: AntaTest

Verifies all EOS extensions installed on the device are enabled for boot persistence.

Source code in anta/tests/software.py
class VerifyEOSExtensions(AntaTest):
+    """
+    Verifies all EOS extensions installed on the device are enabled for boot persistence.
+    """
+
+    name = "VerifyEOSExtensions"
+    description = "Verifies all EOS extensions installed on the device are enabled for boot persistence."
+    categories = ["software"]
+    commands = [AntaCommand(command="show extensions"), AntaCommand(command="show boot-extensions")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyEOSExtensions validation"""
+
+        boot_extensions = []
+
+        show_extensions_command_output = self.instance_commands[0].json_output
+        show_boot_extensions_command_output = self.instance_commands[1].json_output
+
+        installed_extensions = [
+            extension for extension, extension_data in show_extensions_command_output["extensions"].items() if extension_data["status"] == "installed"
+        ]
+
+        for extension in show_boot_extensions_command_output["extensions"]:
+            extension = extension.strip("\n")
+            if extension != "":
+                boot_extensions.append(extension)
+
+        installed_extensions.sort()
+        boot_extensions.sort()
+        if installed_extensions == boot_extensions:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Missing EOS extensions: installed {installed_extensions} / configured: {boot_extensions}")

test

test() -> None

Run VerifyEOSExtensions validation

Source code in anta/tests/software.py
@AntaTest.anta_test
+def test(self) -> None:
+    """Run VerifyEOSExtensions validation"""
+
+    boot_extensions = []
+
+    show_extensions_command_output = self.instance_commands[0].json_output
+    show_boot_extensions_command_output = self.instance_commands[1].json_output
+
+    installed_extensions = [
+        extension for extension, extension_data in show_extensions_command_output["extensions"].items() if extension_data["status"] == "installed"
+    ]
+
+    for extension in show_boot_extensions_command_output["extensions"]:
+        extension = extension.strip("\n")
+        if extension != "":
+            boot_extensions.append(extension)
+
+    installed_extensions.sort()
+    boot_extensions.sort()
+    if installed_extensions == boot_extensions:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"Missing EOS extensions: installed {installed_extensions} / configured: {boot_extensions}")
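
The test boils down to comparing two sorted lists: extensions reported as installed versus extensions configured for boot. A minimal sketch with hypothetical command outputs (the extension names and payload shapes are assumptions):

# Hypothetical outputs of "show extensions" and "show boot-extensions" (assumed values).
show_extensions = {"extensions": {"foo.rpm": {"status": "installed"}, "bar.rpm": {"status": "notInstalled"}}}
show_boot_extensions = {"extensions": ["foo.rpm\n"]}

installed = sorted(ext for ext, data in show_extensions["extensions"].items() if data["status"] == "installed")
boot = sorted(ext.strip("\n") for ext in show_boot_extensions["extensions"] if ext.strip("\n"))

print("PASS" if installed == boot else f"FAIL: installed {installed} / configured: {boot}")  # -> PASS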

VerifyEOSVersion

Bases: AntaTest

Verifies the device is running one of the allowed EOS versions.

Source code in anta/tests/software.py
class VerifyEOSVersion(AntaTest):
+    """
+    Verifies the device is running one of the allowed EOS versions.
+    """
+
+    name = "VerifyEOSVersion"
+    description = "Verifies the device is running one of the allowed EOS versions."
+    categories = ["software"]
+    commands = [AntaCommand(command="show version")]
+
+    @AntaTest.anta_test
+    def test(self, versions: Optional[List[str]] = None) -> None:
+        """
+        Run VerifyEOSVersion validation
+
+        Args:
+            versions: List of allowed EOS versions.
+        """
+        if not versions:
+            self.result.is_skipped("VerifyEOSVersion was not run as no versions were given")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        if command_output["version"] in versions:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f'device is running version {command_output["version"]} not in expected versions: {versions}')

test

test(versions: Optional[List[str]] = None) -> None

Run VerifyEOSVersion validation

Parameters:

Name      Type                 Description                     Default
versions  Optional[List[str]]  List of allowed EOS versions.   None

Source code in anta/tests/software.py
@AntaTest.anta_test
+def test(self, versions: Optional[List[str]] = None) -> None:
+    """
+    Run VerifyEOSVersion validation
+
+    Args:
+        versions: List of allowed EOS versions.
+    """
+    if not versions:
+        self.result.is_skipped("VerifyEOSVersion was not run as no versions were given")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    if command_output["version"] in versions:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f'device is running version {command_output["version"]} not in expected versions: {versions}')
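
The check is a simple membership test of the running version against an allow-list. A minimal sketch with hypothetical values (the version strings are assumptions):

# Hypothetical "show version" JSON and allow-list (assumed values).
sample_output = {"version": "4.29.2F"}
allowed_versions = ["4.29.2F", "4.30.1F"]

if sample_output["version"] in allowed_versions:
    print("PASS")
else:
    print(f"FAIL: device is running version {sample_output['version']} not in expected versions: {allowed_versions}")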

VerifyTerminAttrVersion

Bases: AntaTest

Verifies the device is running one of the allowed TerminAttr versions.

Source code in anta/tests/software.py
class VerifyTerminAttrVersion(AntaTest):
+    """
+    Verifies the device is running one of the allowed TerminAttr versions.
+    """
+
+    name = "VerifyTerminAttrVersion"
+    description = "Verifies the device is running one of the allowed TerminAttr versions."
+    categories = ["software"]
+    commands = [AntaCommand(command="show version detail")]
+
+    @AntaTest.anta_test
+    def test(self, versions: Optional[List[str]] = None) -> None:
+        """
+        Run VerifyTerminAttrVersion validation
+
+        Args:
+            versions: List of allowed TerminAttr versions.
+        """
+
+        if not versions:
+            self.result.is_skipped("VerifyTerminAttrVersion was not run as no versions were given")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        command_output_data = command_output["details"]["packages"]["TerminAttr-core"]["version"]
+        if command_output_data in versions:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"device is running TerminAttr version {command_output_data} and is not in the allowed list: {versions}")

test

test(versions: Optional[List[str]] = None) -> None

Run VerifyTerminAttrVersion validation

Parameters:

Name      Type                 Description                           Default
versions  Optional[List[str]]  List of allowed TerminAttr versions.  None

Source code in anta/tests/software.py
@AntaTest.anta_test
+def test(self, versions: Optional[List[str]] = None) -> None:
+    """
+    Run VerifyTerminAttrVersion validation
+
+    Args:
+        versions: List of allowed TerminAttr versions.
+    """
+
+    if not versions:
+        self.result.is_skipped("VerifyTerminAttrVersion was not run as no versions were given")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    command_output_data = command_output["details"]["packages"]["TerminAttr-core"]["version"]
+    if command_output_data in versions:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"device is running TerminAttr version {command_output_data} and is not in the allowed list: {versions}")
\ No newline at end of file
diff --git a/0.6.0/api/tests.stp/index.html b/0.6.0/api/tests.stp/index.html
new file mode 100644
index 000000000..c87cb7f94
--- /dev/null
+++ b/0.6.0/api/tests.stp/index.html
@@ -0,0 +1,2783 @@

STP - Arista Network Test Automation - ANTA

STP

ANTA catalog for STP tests

Test functions related to various Spanning Tree Protocol (STP) settings

VerifySTPBlockedPorts

Bases: AntaTest

Verifies there are no STP blocked ports.

Expected Results:
  • success: The test will pass if there are NO ports blocked by STP.
  • failure: The test will fail if there are ports blocked by STP.

Source code in anta/tests/stp.py
class VerifySTPBlockedPorts(AntaTest):
+    """
+    Verifies there are no STP blocked ports.
+
+    Expected Results:
+        * success: The test will pass if there are NO ports blocked by STP.
+        * failure: The test will fail if there are ports blocked by STP.
+    """
+
+    name = "VerifySTPBlockedPorts"
+    description = "Verifies there are no STP blocked ports."
+    categories = ["stp"]
+    commands = [AntaCommand(command="show spanning-tree blockedports")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifySTPBlockedPorts validation
+        """
+
+        command_output = self.instance_commands[0].json_output
+
+        if not (stp_instances := command_output["spanningTreeInstances"]):
+            self.result.is_success()
+        else:
+            for key, value in stp_instances.items():
+                stp_instances[key] = value.pop("spanningTreeBlockedPorts")
+            self.result.is_failure(f"The following ports are blocked by STP: {stp_instances}")

test

test() -> None

Run VerifySTPBlockedPorts validation

Source code in anta/tests/stp.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifySTPBlockedPorts validation
+    """
+
+    command_output = self.instance_commands[0].json_output
+
+    if not (stp_instances := command_output["spanningTreeInstances"]):
+        self.result.is_success()
+    else:
+        for key, value in stp_instances.items():
+            stp_instances[key] = value.pop("spanningTreeBlockedPorts")
+        self.result.is_failure(f"The following ports are blocked by STP: {stp_instances}")
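
A minimal sketch of the same logic: an empty "spanningTreeInstances" mapping means no blocked ports; otherwise the blocked port lists are collected into the failure message. The sample payload below is an assumption:

# Hypothetical "show spanning-tree blockedports" JSON (assumed values).
sample_output = {"spanningTreeInstances": {"MST0": {"spanningTreeBlockedPorts": ["Ethernet10"]}}}

stp_instances = sample_output["spanningTreeInstances"]
if not stp_instances:
    print("PASS")
else:
    blocked = {inst: data["spanningTreeBlockedPorts"] for inst, data in stp_instances.items()}
    print(f"FAIL: The following ports are blocked by STP: {blocked}")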

VerifySTPCounters

Bases: AntaTest

Verifies there are no errors in STP BPDU packets.

Expected Results:
  • success: The test will pass if there are NO STP BPDU packet errors under all interfaces participating in STP.
  • failure: The test will fail if there are STP BPDU packet errors on one or many interface(s).

Source code in anta/tests/stp.py
class VerifySTPCounters(AntaTest):
+    """
+    Verifies there are no errors in STP BPDU packets.
+
+    Expected Results:
+        * success: The test will pass if there are NO STP BPDU packet errors under all interfaces participating in STP.
+        * failure: The test will fail if there are STP BPDU packet errors on one or many interface(s).
+    """
+
+    name = "VerifySTPCounters"
+    description = "Verifies there are no errors in STP BPDU packets."
+    categories = ["stp"]
+    commands = [AntaCommand(command="show spanning-tree counters")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifySTPCounters validation
+        """
+
+        command_output = self.instance_commands[0].json_output
+
+        interfaces_with_errors = [
+            interface for interface, counters in command_output["interfaces"].items() if counters["bpduTaggedError"] or counters["bpduOtherError"] != 0
+        ]
+
+        if interfaces_with_errors:
+            self.result.is_failure(f"The following interfaces have STP BPDU packet errors: {interfaces_with_errors}")
+        else:
+            self.result.is_success()

test

test() -> None

Run VerifySTPCounters validation

Source code in anta/tests/stp.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifySTPCounters validation
+    """
+
+    command_output = self.instance_commands[0].json_output
+
+    interfaces_with_errors = [
+        interface for interface, counters in command_output["interfaces"].items() if counters["bpduTaggedError"] or counters["bpduOtherError"] != 0
+    ]
+
+    if interfaces_with_errors:
+        self.result.is_failure(f"The following interfaces have STP BPDU packet errors: {interfaces_with_errors}")
+    else:
+        self.result.is_success()

VerifySTPForwardingPorts

Bases: AntaTest

Verifies that all interfaces are in a forwarding state for a provided list of VLAN(s).

Expected Results:
  • success: The test will pass if all interfaces are in a forwarding state for the specified VLAN(s).
  • failure: The test will fail if one or many interfaces are NOT in a forwarding state in the specified VLAN(s).
  • error: The test will give an error if a list of VLAN(s) is not provided as template_params.

Source code in anta/tests/stp.py
class VerifySTPForwardingPorts(AntaTest):
+    """
+    Verifies that all interfaces are in a forwarding state for a provided list of VLAN(s).
+
+    Expected Results:
+        * success: The test will pass if all interfaces are in a forwarding state for the specified VLAN(s).
+        * failure: The test will fail if one or many interfaces are NOT in a forwarding state in the specified VLAN(s).
+        * error: The test will give an error if a list of VLAN(s) is not provided as template_params.
+    """
+
+    name = "VerifySTPForwardingPorts"
+    description = "Verifies that all interfaces are forwarding for a provided list of VLAN(s)."
+    categories = ["stp"]
+    template = AntaTemplate(template="show spanning-tree topology vlan {vlan} status")
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifySTPForwardingPorts validation.
+        """
+
+        self.result.is_success()
+
+        for command in self.instance_commands:
+            if command.params and "vlan" in command.params:
+                vlan_id = command.params["vlan"]
+
+            if not (topologies := get_value(command.json_output, "topologies")):
+                self.result.is_failure(f"STP instance for VLAN {vlan_id} is not configured")
+
+            else:
+                for value in topologies.values():
+                    if int(vlan_id) in value["vlans"]:
+                        interfaces_not_forwarding = [interface for interface, state in value["interfaces"].items() if state["state"] != "forwarding"]
+
+                if interfaces_not_forwarding:
+                    self.result.is_failure(f"The following interface(s) are not in a forwarding state for VLAN {vlan_id}: {interfaces_not_forwarding}")

test

test() -> None

Run VerifySTPForwardingPorts validation.

Source code in anta/tests/stp.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifySTPForwardingPorts validation.
+    """
+
+    self.result.is_success()
+
+    for command in self.instance_commands:
+        if command.params and "vlan" in command.params:
+            vlan_id = command.params["vlan"]
+
+        if not (topologies := get_value(command.json_output, "topologies")):
+            self.result.is_failure(f"STP instance for VLAN {vlan_id} is not configured")
+
+        else:
+            for value in topologies.values():
+                if int(vlan_id) in value["vlans"]:
+                    interfaces_not_forwarding = [interface for interface, state in value["interfaces"].items() if state["state"] != "forwarding"]
+
+            if interfaces_not_forwarding:
+                self.result.is_failure(f"The following interface(s) are not in a forwarding state for VLAN {vlan_id}: {interfaces_not_forwarding}")

VerifySTPMode

Bases: AntaTest

Verifies the configured STP mode for a provided list of VLAN(s).

Expected Results:
  • success: The test will pass if the STP mode is configured properly in the specified VLAN(s).
  • failure: The test will fail if the STP mode is NOT configured properly for one or more specified VLAN(s).
  • skipped: The test will be skipped if the STP mode is not provided.
  • error: The test will give an error if a list of VLAN(s) is not provided as template_params.

Source code in anta/tests/stp.py
class VerifySTPMode(AntaTest):
+    """
+    Verifies the configured STP mode for a provided list of VLAN(s).
+
+    Expected Results:
+        * success: The test will pass if the STP mode is configured properly in the specified VLAN(s).
+        * failure: The test will fail if the STP mode is NOT configured properly for one or more specified VLAN(s).
+        * skipped: The test will be skipped if the STP mode is not provided.
+        * error: The test will give an error if a list of VLAN(s) is not provided as template_params.
+    """
+
+    name = "VerifySTPMode"
+    description = "Verifies the configured STP mode for a provided list of VLAN(s)."
+    categories = ["stp"]
+    template = AntaTemplate(template="show spanning-tree vlan {vlan}")
+
+    @staticmethod
+    def _check_stp_mode(mode: str) -> None:
+        """
+        Verifies if the provided STP mode is compatible with Arista EOS devices.
+
+        Args:
+            mode: The STP mode to verify.
+        """
+        stp_modes = ["mstp", "rstp", "rapidPvst"]
+
+        if mode not in stp_modes:
+            raise ValueError(f"Wrong STP mode provided. Valid modes are: {stp_modes}")
+
+    @AntaTest.anta_test
+    def test(self, mode: str = "mstp") -> None:
+        """
+        Run VerifySTPMode validation.
+
+        Args:
+            mode: STP mode to verify. Defaults to 'mstp'.
+        """
+        if not mode:
+            self.result.is_skipped(f"{self.__class__.name} did not run because mode was not supplied")
+            return
+
+        self._check_stp_mode(mode)
+
+        self.result.is_success()
+
+        for command in self.instance_commands:
+            if command.params and "vlan" in command.params:
+                vlan_id = command.params["vlan"]
+            if not (stp_mode := get_value(command.json_output, f"spanningTreeVlanInstances.{vlan_id}.spanningTreeVlanInstance.protocol")):
+                self.result.is_failure(f"STP mode '{mode}' not configured for VLAN {vlan_id}")
+
+            elif stp_mode != mode:
+                self.result.is_failure(f"Wrong STP mode configured for VLAN {vlan_id}")

test

test(mode: str = 'mstp') -> None

Run VerifySTPMode validation.

Parameters:

Name  Type  Description                               Default
mode  str   STP mode to verify. Defaults to 'mstp'.   'mstp'

Source code in anta/tests/stp.py
@AntaTest.anta_test
+def test(self, mode: str = "mstp") -> None:
+    """
+    Run VerifySTPMode validation.
+
+    Args:
+        mode: STP mode to verify. Defaults to 'mstp'.
+    """
+    if not mode:
+        self.result.is_skipped(f"{self.__class__.name} did not run because mode was not supplied")
+        return
+
+    self._check_stp_mode(mode)
+
+    self.result.is_success()
+
+    for command in self.instance_commands:
+        if command.params and "vlan" in command.params:
+            vlan_id = command.params["vlan"]
+        if not (stp_mode := get_value(command.json_output, f"spanningTreeVlanInstances.{vlan_id}.spanningTreeVlanInstance.protocol")):
+            self.result.is_failure(f"STP mode '{mode}' not configured for VLAN {vlan_id}")
+
+        elif stp_mode != mode:
+            self.result.is_failure(f"Wrong STP mode configured for VLAN {vlan_id}")
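
A minimal sketch of the per-VLAN lookup, using plain dictionary access in place of ANTA's get_value() helper; the sample payload is an assumption:

# Hypothetical "show spanning-tree vlan 10" JSON (assumed values).
sample_output = {"spanningTreeVlanInstances": {"10": {"spanningTreeVlanInstance": {"protocol": "mstp"}}}}
vlan_id, mode = "10", "mstp"

instance = sample_output["spanningTreeVlanInstances"].get(vlan_id, {})
stp_mode = instance.get("spanningTreeVlanInstance", {}).get("protocol")

if not stp_mode:
    print(f"FAIL: STP mode '{mode}' not configured for VLAN {vlan_id}")
elif stp_mode != mode:
    print(f"FAIL: Wrong STP mode configured for VLAN {vlan_id}")
else:
    print("PASS")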

VerifySTPRootPriority

Bases: AntaTest

Verifies the STP root priority for a provided list of VLAN or MST instance ID(s).

Expected Results:
  • success: The test will pass if the STP root priority is configured properly for the specified VLAN or MST instance ID(s).
  • failure: The test will fail if the STP root priority is NOT configured properly for the specified VLAN or MST instance ID(s).
  • skipped: The test will be skipped if the STP root priority is not provided.

Source code in anta/tests/stp.py
class VerifySTPRootPriority(AntaTest):
+    """
+    Verifies the STP root priority for a provided list of VLAN or MST instance ID(s).
+
+    Expected Results:
+        * success: The test will pass if the STP root priority is configured properly for the specified VLAN or MST instance ID(s).
+        * failure: The test will fail if the STP root priority is NOT configured properly for the specified VLAN or MST instance ID(s).
+        * skipped: The test will be skipped if the STP root priority is not provided.
+    """
+
+    name = "VerifySTPRootPriority"
+    description = "Verifies the STP root priority for a provided list of VLAN or MST instance ID(s)."
+    categories = ["stp"]
+    commands = [AntaCommand(command="show spanning-tree root detail")]
+
+    @AntaTest.anta_test
+    def test(self, priority: Optional[int] = None, instances: Optional[List[int]] = None) -> None:
+        """
+        Run VerifySTPRootPriority validation.
+
+        Args:
+            priority: STP root priority to verify.
+            instances: List of VLAN or MST instance ID(s). By default, ALL VLAN or MST instance ID(s) will be verified.
+        """
+        if not priority:
+            self.result.is_skipped(f"{self.__class__.name} did not run because priority was not supplied")
+            return
+
+        command_output = self.instance_commands[0].json_output
+
+        if not (stp_instances := command_output["instances"]):
+            self.result.is_failure("No STP instances configured")
+            return
+
+        for instance in stp_instances:
+            if instance.startswith("MST"):
+                prefix = "MST"
+                break
+            if instance.startswith("VL"):
+                prefix = "VL"
+                break
+
+        check_instances = [f"{prefix}{instance_id}" for instance_id in instances] if instances else command_output["instances"].keys()
+
+        wrong_priority_instances = [instance for instance in check_instances if get_value(command_output, f"instances.{instance}.rootBridge.priority") != priority]
+
+        if wrong_priority_instances:
+            self.result.is_failure(f"The following instance(s) have the wrong STP root priority configured: {wrong_priority_instances}")
+        else:
+            self.result.is_success()

test

test(
    priority: Optional[int] = None,
    instances: Optional[List[int]] = None,
) -> None

Run VerifySTPRootPriority validation.

Parameters:

Name       Type                 Description                                                                                        Default
priority   Optional[int]        STP root priority to verify.                                                                       None
instances  Optional[List[int]]  List of VLAN or MST instance ID(s). By default, ALL VLAN or MST instance ID(s) will be verified.   None

Source code in anta/tests/stp.py
@AntaTest.anta_test
+def test(self, priority: Optional[int] = None, instances: Optional[List[int]] = None) -> None:
+    """
+    Run VerifySTPRootPriority validation.
+
+    Args:
+        priority: STP root priority to verify.
+        instances: List of VLAN or MST instance ID(s). By default, ALL VLAN or MST instance ID(s) will be verified.
+    """
+    if not priority:
+        self.result.is_skipped(f"{self.__class__.name} did not run because priority was not supplied")
+        return
+
+    command_output = self.instance_commands[0].json_output
+
+    if not (stp_instances := command_output["instances"]):
+        self.result.is_failure("No STP instances configured")
+        return
+
+    for instance in stp_instances:
+        if instance.startswith("MST"):
+            prefix = "MST"
+            break
+        if instance.startswith("VL"):
+            prefix = "VL"
+            break
+
+    check_instances = [f"{prefix}{instance_id}" for instance_id in instances] if instances else command_output["instances"].keys()
+
+    wrong_priority_instances = [instance for instance in check_instances if get_value(command_output, f"instances.{instance}.rootBridge.priority") != priority]
+
+    if wrong_priority_instances:
+        self.result.is_failure(f"The following instance(s) have the wrong STP root priority configured: {wrong_priority_instances}")
+    else:
+        self.result.is_success()
\ No newline at end of file
diff --git a/0.6.0/api/tests.system/index.html b/0.6.0/api/tests.system/index.html
new file mode 100644
index 000000000..ea8708355
--- /dev/null
+++ b/0.6.0/api/tests.system/index.html
@@ -0,0 +1,3206 @@

System - Arista Network Test Automation - ANTA

System

ANTA catalog for system tests

Test functions related to system-level features and protocols

VerifyAgentLogs

Bases: AntaTest

Verifies there is no agent crash reported on the device.

Source code in anta/tests/system.py
class VerifyAgentLogs(AntaTest):
+    """
+    Verifies there is no agent crash reported on the device.
+    """
+
+    name = "VerifyAgentLogs"
+    description = "Verifies there is no agent crash reported on the device."
+    categories = ["system"]
+    commands = [AntaCommand(command="show agent logs crash", ofmt="text")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyAgentLogs validation
+        """
+        command_output = self.instance_commands[0].text_output
+
+        if len(command_output) == 0:
+            self.result.is_success()
+        else:
+            pattern = re.compile(r"^===> (.*?) <===$", re.MULTILINE)
+            agents = "\n * ".join(pattern.findall(command_output))
+            self.result.is_failure(f"device reported some agent logs:\n * {agents}")

test

test() -> None

Run VerifyAgentLogs validation

Source code in anta/tests/system.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyAgentLogs validation
+    """
+    command_output = self.instance_commands[0].text_output
+
+    if len(command_output) == 0:
+        self.result.is_success()
+    else:
+        pattern = re.compile(r"^===> (.*?) <===$", re.MULTILINE)
+        agents = "\n * ".join(pattern.findall(command_output))
+        self.result.is_failure(f"device reported some agent logs:\n * {agents}")

VerifyCPUUtilization

Bases: AntaTest

Verifies the CPU utilization is less than 75%.

Source code in anta/tests/system.py
class VerifyCPUUtilization(AntaTest):
+    """
+    Verifies the CPU utilization is less than 75%.
+    """
+
+    name = "VerifyCPUUtilization"
+    description = "Verifies the CPU utilization is less than 75%."
+    categories = ["system"]
+    commands = [AntaCommand(command="show processes top once")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyCPUUtilization validation
+        """
+        command_output = self.instance_commands[0].json_output
+        command_output_data = command_output["cpuInfo"]["%Cpu(s)"]["idle"]
+
+        if command_output_data > 25:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"device reported a high CPU utilization ({100 - command_output_data}%)")

test

test() -> None

Run VerifyCPUUtilization validation

Source code in anta/tests/system.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyCPUUtilization validation
+    """
+    command_output = self.instance_commands[0].json_output
+    command_output_data = command_output["cpuInfo"]["%Cpu(s)"]["idle"]
+
+    if command_output_data > 25:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"device reported a high CPU utilization ({100 - command_output_data}%)")
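
Note the inversion: the command reports the idle percentage, so the test passes while idle > 25, i.e. utilization below 75%. A minimal sketch with an assumed payload:

# Hypothetical "show processes top once" JSON (assumed values).
sample_output = {"cpuInfo": {"%Cpu(s)": {"idle": 82.3}}}

idle = sample_output["cpuInfo"]["%Cpu(s)"]["idle"]
print("PASS" if idle > 25 else f"FAIL: high CPU utilization ({100 - idle}%)")  # -> PASS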

VerifyCoredump

Bases: AntaTest

Verifies there is no core file.

Source code in anta/tests/system.py
class VerifyCoredump(AntaTest):
+    """
+    Verifies there is no core file.
+    """
+
+    name = "VerifyCoredump"
+    description = "Verifies there is no core file."
+    categories = ["system"]
+    commands = [AntaCommand(command="bash timeout 10 ls /var/core", ofmt="text")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyCoredump validation
+        """
+        command_output = self.instance_commands[0].text_output
+
+        if len(command_output) == 0:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Core-dump(s) have been found: {command_output}")

test

test() -> None

Run VerifyCoredump validation

Source code in anta/tests/system.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyCoredump validation
+    """
+    command_output = self.instance_commands[0].text_output
+
+    if len(command_output) == 0:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"Core-dump(s) have been found: {command_output}")

VerifyFileSystemUtilization

Bases: AntaTest

Verifies each partition on the disk is used less than 75%.

Source code in anta/tests/system.py
class VerifyFileSystemUtilization(AntaTest):
+    """
+    Verifies each partition on the disk is used less than 75%.
+    """
+
+    name = "VerifyFileSystemUtilization"
+    description = "Verifies each partition on the disk is used less than 75%."
+    categories = ["system"]
+    commands = [AntaCommand(command="bash timeout 10 df -h", ofmt="text")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyFileSystemUtilization validation
+        """
+        command_output = self.instance_commands[0].text_output
+
+        self.result.is_success()
+
+        for line in command_output.split("\n")[1:]:
+            if "loop" not in line and len(line) > 0 and (percentage := int(line.split()[4].replace("%", ""))) > 75:
+                self.result.is_failure(f"mount point {line} is higher than 75% (reported {percentage})")

test

test() -> None

Run VerifyFileSystemUtilization validation

Source code in anta/tests/system.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyFileSystemUtilization validation
+    """
+    command_output = self.instance_commands[0].text_output
+
+    self.result.is_success()
+
+    for line in command_output.split("\n")[1:]:
+        if "loop" not in line and len(line) > 0 and (percentage := int(line.split()[4].replace("%", ""))) > 75:
+            self.result.is_failure(f"mount point {line} is higher than 75% (reported {percentage})")
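
The test parses the text output of df: it skips the header and loopback mounts, then reads the fifth column (Use%) of each remaining line. A minimal sketch with a hypothetical df output (the sample rows are assumptions):

# Hypothetical "df -h" text output (assumed values); column 5 is Use%.
sample_output = """Filesystem      Size  Used Avail Use% Mounted on
/dev/sda2        30G   12G   18G  40% /mnt/flash
/dev/loop0      500M  500M     0 100% /rootfs-i386
"""

for line in sample_output.split("\n")[1:]:
    if "loop" not in line and len(line) > 0 and (percentage := int(line.split()[4].rstrip("%"))) > 75:
        print(f"FAIL: mount point {line} is higher than 75% (reported {percentage})")
# No output here: the loop device is ignored and /mnt/flash is only at 40%.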

VerifyMemoryUtilization

Bases: AntaTest

Verifies the Memory utilization is less than 75%.

Source code in anta/tests/system.py
class VerifyMemoryUtilization(AntaTest):
+    """
+    Verifies the Memory utilization is less than 75%.
+    """
+
+    name = "VerifyMemoryUtilization"
+    description = "Verifies the Memory utilization is less than 75%."
+    categories = ["system"]
+    commands = [AntaCommand(command="show version")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyMemoryUtilization validation
+        """
+        command_output = self.instance_commands[0].json_output
+
+        memory_usage = command_output["memFree"] / command_output["memTotal"]
+        if memory_usage > 0.25:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"device report a high memory usage: {(1 - memory_usage)*100:.2f}%")

test

test() -> None

Run VerifyMemoryUtilization validation

Source code in anta/tests/system.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyMemoryUtilization validation
+    """
+    command_output = self.instance_commands[0].json_output
+
+    memory_usage = command_output["memFree"] / command_output["memTotal"]
+    if memory_usage > 0.25:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"device report a high memory usage: {(1 - memory_usage)*100:.2f}%")
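
The test computes the free-memory ratio and passes while more than 25% of total memory is still free. A minimal sketch with assumed "show version" values (the memTotal/memFree figures, assumed here to be kB, are hypothetical):

# Hypothetical memory figures from "show version" (assumed values).
sample_output = {"memTotal": 8099732, "memFree": 4989568}

free_ratio = sample_output["memFree"] / sample_output["memTotal"]
if free_ratio > 0.25:
    print("PASS")
else:
    print(f"FAIL: device reports a high memory usage: {(1 - free_ratio) * 100:.2f}%")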

VerifyNTP

Bases: AntaTest

Verifies NTP is synchronised.

Source code in anta/tests/system.py
class VerifyNTP(AntaTest):
+    """
+    Verifies NTP is synchronised.
+    """
+
+    name = "VerifyNTP"
+    description = "Verifies NTP is synchronised."
+    categories = ["system"]
+    commands = [AntaCommand(command="show ntp status", ofmt="text")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyNTP validation
+        """
+        command_output = self.instance_commands[0].text_output
+
+        if command_output.split("\n")[0].split(" ")[0] == "synchronised":
+            self.result.is_success()
+        else:
+            data = command_output.split("\n")[0]
+            self.result.is_failure(f"not sync with NTP server ({data})")

test

test() -> None

Run VerifyNTP validation

Source code in anta/tests/system.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyNTP validation
+    """
+    command_output = self.instance_commands[0].text_output
+
+    if command_output.split("\n")[0].split(" ")[0] == "synchronised":
+        self.result.is_success()
+    else:
+        data = command_output.split("\n")[0]
+        self.result.is_failure(f"not sync with NTP server ({data})")

VerifyReloadCause

Bases: AntaTest

Verifies the last reload of the device was requested by a user.

The test considers the following messages as normal and returns success; any other message is a failure:
  • Reload requested by the user.
  • Reload requested after FPGA upgrade

Source code in anta/tests/system.py
class VerifyReloadCause(AntaTest):
+    """
+    Verifies the last reload of the device was requested by a user.
+
+    Test considers the following messages as normal and will return success. Failure is for other messages
+    * Reload requested by the user.
+    * Reload requested after FPGA upgrade
+    """
+
+    name = "VerifyReloadCause"
+    description = "Verifies the device uptime is higher than a value."
+    categories = ["system"]
+    commands = [AntaCommand(command="show reload cause")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifyReloadCause validation
+        """
+
+        command_output = self.instance_commands[0].json_output
+
+        if "resetCauses" not in command_output.keys():
+            self.result.is_error("no reload cause available")
+            return
+
+        if len(command_output["resetCauses"]) == 0:
+            # No reload causes
+            self.result.is_success()
+            return
+
+        reset_causes = command_output["resetCauses"]
+        command_output_data = reset_causes[0].get("description")
+        if command_output_data in [
+            "Reload requested by the user.",
+            "Reload requested after FPGA upgrade",
+        ]:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Reload cause is {command_output_data}")

test

test() -> None

Run VerifyReloadCause validation

Source code in anta/tests/system.py
@AntaTest.anta_test
+def test(self) -> None:
+    """
+    Run VerifyReloadCause validation
+    """
+
+    command_output = self.instance_commands[0].json_output
+
+    if "resetCauses" not in command_output.keys():
+        self.result.is_error("no reload cause available")
+        return
+
+    if len(command_output["resetCauses"]) == 0:
+        # No reload causes
+        self.result.is_success()
+        return
+
+    reset_causes = command_output["resetCauses"]
+    command_output_data = reset_causes[0].get("description")
+    if command_output_data in [
+        "Reload requested by the user.",
+        "Reload requested after FPGA upgrade",
+    ]:
+        self.result.is_success()
+    else:
+        self.result.is_failure(f"Reload cause is {command_output_data}")

VerifySyslog

Bases: AntaTest

Verifies the device had no syslog message with a severity of warning (or a more severe message) during the last 7 days.

Source code in anta/tests/system.py
class VerifySyslog(AntaTest):
+    """
+    Verifies the device had no syslog message with a severity of warning (or a more severe message) during the last 7 days.
+    """
+
+    name = "VerifySyslog"
+    description = "Verifies the device had no syslog message with a severity of warning (or a more severe message) during the last 7 days."
+    categories = ["system"]
+    commands = [AntaCommand(command="show logging last 7 days threshold warnings", ofmt="text")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """
+        Run VerifySyslog validation
+        """
+        command_output = self.instance_commands[0].text_output
+
+        if len(command_output) == 0:
+            self.result.is_success()
+        else:
+            self.result.is_failure("Device has some log messages with a severity WARNING or higher")
+
+

+ test

+
test() -> None
+

Run VerifySyslog validation


+ VerifyUptime

+ Bases: AntaTest

Verifies the device uptime is higher than a value.

+ Source code in anta/tests/system.py
class VerifyUptime(AntaTest):
+    """
+    Verifies the device uptime is higher than a value.
+    """
+
+    name = "VerifyUptime"
+    description = "Verifies the device uptime is higher than a value."
+    categories = ["system"]
+    commands = [AntaCommand(command="show uptime")]
+
+    @AntaTest.anta_test
+    def test(self, minimum: Optional[int] = None) -> None:
+        """
+        Run VerifyUptime validation
+
+        Args:
+            minimum: Minimum uptime in seconds.
+        """
+
+        command_output = self.instance_commands[0].json_output
+
+        if not (isinstance(minimum, (int, float))) or minimum < 0:
+            self.result.is_skipped("VerifyUptime was not run as incorrect minimum uptime was given")
+            return
+
+        if command_output["upTime"] > minimum:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Uptime is {command_output['upTime']}")
+
+

+ test

+
test(minimum: Optional[int] = None) -> None
+

Run VerifyUptime validation


Parameters:

Name      Type            Description                  Default
minimum   Optional[int]   Minimum uptime in seconds.   None
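As a hedged sketch (not the official runner workflow), a single instance of this test can be bound to an already-connected device and awaited directly. Only the test(minimum=...) signature is taken from the source above; the constructor call and the result attribute are assumptions about the AntaTest base class:

from anta.tests.system import VerifyUptime

async def check_uptime(device) -> None:
    # "device" is assumed to be an already-connected AntaDevice (e.g. AsyncEOSDevice).
    test = VerifyUptime(device)      # assumed: the test is bound to a single device
    await test.test(minimum=86400)   # signature from the source above: one day minimum
    print(test.result)               # assumed: a TestResult (success/failure/skipped)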
\ No newline at end of file
diff --git a/0.6.0/api/tests.vxlan/index.html b/0.6.0/api/tests.vxlan/index.html
new file mode 100644
index 000000000..7dcff36f3
VxLAN

ANTA catalog for VxLAN tests

Test functions related to VXLAN

+ VerifyVxlan

+ Bases: AntaTest

Verifies that the Vxlan1 interface is configured and is up/up

+ Source code in anta/tests/vxlan.py
class VerifyVxlan(AntaTest):
+    """
+    Verifies that the Vxlan1 interface is configured and is up/up
+    """
+
+    name = "VerifyVxlan"
+    description = "Verifies Vxlan1 status"
+    categories = ["vxlan"]
+    commands = [AntaCommand(command="show interfaces description", ofmt="json")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyVxlan validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        if "Vxlan1" not in command_output["interfaceDescriptions"]:
+            self.result.is_skipped("Vxlan1 interface is not configured")
+        elif (
+            command_output["interfaceDescriptions"]["Vxlan1"]["lineProtocolStatus"] == "up"
+            and command_output["interfaceDescriptions"]["Vxlan1"]["interfaceStatus"] == "up"
+        ):
+            self.result.is_success()
+        else:
+            self.result.is_failure(
+                f"Vxlan1 interface is {command_output['interfaceDescriptions']['Vxlan1']['lineProtocolStatus']}"
+                f"/{command_output['interfaceDescriptions']['Vxlan1']['interfaceStatus']}"
+            )
+
+

+ test

+
test() -> None
+

Run VerifyVxlan validation
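To make the three possible outcomes concrete, here is the same decision applied to a hypothetical payload (field names taken from the source above):

# Hypothetical "show interfaces description" output, reduced to the fields the test reads.
output = {"interfaceDescriptions": {"Vxlan1": {"lineProtocolStatus": "up", "interfaceStatus": "down"}}}

intf = output["interfaceDescriptions"].get("Vxlan1")
if intf is None:
    print("skipped: Vxlan1 interface is not configured")
elif intf["lineProtocolStatus"] == "up" and intf["interfaceStatus"] == "up":
    print("success")
else:
    print(f"failure: Vxlan1 interface is {intf['lineProtocolStatus']}/{intf['interfaceStatus']}")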


+ VerifyVxlanConfigSanity

+ Bases: AntaTest

Verifies that there are no VXLAN config-sanity issues flagged

+ Source code in anta/tests/vxlan.py
class VerifyVxlanConfigSanity(AntaTest):
+    """
+    Verifies that there are no VXLAN config-sanity issues flagged
+    """
+
+    name = "VerifyVxlanConfigSanity"
+    description = "Verifies VXLAN config-sanity"
+    categories = ["vxlan"]
+    commands = [AntaCommand(command="show vxlan config-sanity", ofmt="json")]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run VerifyVxlanConfigSanity validation"""
+
+        command_output = self.instance_commands[0].json_output
+
+        if "categories" not in command_output or len(command_output["categories"]) == 0:
+            self.result.is_skipped("VXLAN is not configured on this device")
+            return
+
+        failed_categories = {
+            category: content
+            for category, content in command_output["categories"].items()
+            if category in ["localVtep", "mlag", "pd"] and content["allCheckPass"] is not True
+        }
+
+        if len(failed_categories) > 0:
+            self.result.is_failure(f"Vxlan config sanity check is not passing: {failed_categories}")
+        else:
+            self.result.is_success()
+
+

+ test

+
test() -> None
+

Run VerifyVxlanConfigSanity validation
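The core of the test is the dictionary comprehension: only the localVtep, mlag and pd categories are considered, and any of them whose allCheckPass is not strictly True is reported. A sketch with a hypothetical payload:

# Hypothetical "show vxlan config-sanity" categories, reduced to the checked field.
categories = {
    "localVtep": {"allCheckPass": True},
    "mlag": {"allCheckPass": False},        # flagged: check did not pass
    "remoteVtep": {"allCheckPass": False},  # ignored: not in the checked list
}

failed = {
    cat: content
    for cat, content in categories.items()
    if cat in ["localVtep", "mlag", "pd"] and content["allCheckPass"] is not True
}
print(failed)  # {'mlag': {'allCheckPass': False}}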

\ No newline at end of file
diff --git a/0.6.0/api/tests/index.html b/0.6.0/api/tests/index.html
new file mode 100644
index 000000000..852a8ec7f
(new page: Overview - Arista Network Test Automation - ANTA; navigation skeleton only)
diff --git a/0.6.0/assets/_mkdocstrings.css b/0.6.0/assets/_mkdocstrings.css
new file mode 100644
index 000000000..049a254b9
(mkdocstrings stylesheet: docstring table layout, parameter defaults, doc-heading sizes)
diff --git a/0.6.0/assets/images/favicon.png b/0.6.0/assets/images/favicon.png
new file mode 100644
index 000000000..1cf13b9f9
Binary files /dev/null and b/0.6.0/assets/images/favicon.png differ
diff --git a/0.6.0/assets/javascripts/bundle.220ee61c.min.js b/0.6.0/assets/javascripts/bundle.220ee61c.min.js
new file mode 100644
index 000000000..116072a11
(minified Material for MkDocs JavaScript bundle, including clipboard.js v2.0.11)
diff --git a/0.6.0/cli/debug/index.html b/0.6.0/cli/debug/index.html
new file mode 100644

Helpers


ANTA debug commands

+

The ANTA CLI includes a set of debugging tools, making it easier to build and test ANTA content. This functionality is accessed via the debug subcommand and offers the following options:

+
  • Executing a command on a device from your inventory and retrieving the result.
  • Running a templated command on a device from your inventory and retrieving the result.

These tools are especially helpful when building tests, as they give visual access to the output received from the eAPI. They also facilitate extracting output content for use in unit tests, as described in our contribution guide.
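For instance, JSON captured with run-cmd (shown in the next section) can be saved verbatim and replayed in a unit test. A minimal sketch, with a hypothetical file name and assertion:

import json

def test_all_interfaces_up() -> None:
    # Hypothetical fixture captured with:
    #   anta debug run-cmd --command "show interfaces description" --device DC1-SPINE1
    with open("show_interfaces_description.json") as fd:
        output = json.load(fd)
    for name, intf in output["interfaceDescriptions"].items():
        assert intf["interfaceStatus"] == "up", f"{name} is not up"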

+
+

Warning

+

The debug tools require a device from your inventory. Thus, you MUST use a valid ANTA Inventory.

+
+

Executing an EOS command

+

You can use the run-cmd entrypoint to run a command, which includes the following options:

+
Command overview
+
$ anta debug run-cmd --help
+Usage: anta debug run-cmd [OPTIONS]
+
+  Run arbitrary command to an ANTA device
+
+Options:
+  -c, --command TEXT        Command to run  [required]
+  --ofmt [json|text]        EOS eAPI format to use. can be text or json
+  -v, --version [1|latest]  EOS eAPI version
+  -r, --revision INTEGER    eAPI command revision
+  -d, --device TEXT         Device from inventory to use  [required]
+  --help                    Show this message and exit.
+
+
Example
+

This example illustrates how to run the show interfaces description command in JSON format (the default):

+
anta debug run-cmd --command "show interfaces description" --device DC1-SPINE1
+Run command show interfaces description on DC1-SPINE1
+{
+    'interfaceDescriptions': {
+        'Ethernet1': {'lineProtocolStatus': 'up', 'description': 'P2P_LINK_TO_DC1-LEAF1A_Ethernet1', 'interfaceStatus': 'up'},
+        'Ethernet2': {'lineProtocolStatus': 'up', 'description': 'P2P_LINK_TO_DC1-LEAF1B_Ethernet1', 'interfaceStatus': 'up'},
+        'Ethernet3': {'lineProtocolStatus': 'up', 'description': 'P2P_LINK_TO_DC1-BL1_Ethernet1', 'interfaceStatus': 'up'},
+        'Ethernet4': {'lineProtocolStatus': 'up', 'description': 'P2P_LINK_TO_DC1-BL2_Ethernet1', 'interfaceStatus': 'up'},
+        'Loopback0': {'lineProtocolStatus': 'up', 'description': 'EVPN_Overlay_Peering', 'interfaceStatus': 'up'},
+        'Management0': {'lineProtocolStatus': 'up', 'description': 'oob_management', 'interfaceStatus': 'up'}
+    }
+}
+
+

Executing an EOS command using templates

+

The run-template entrypoint allows the user to provide an f-string templated command. It is followed by a list of arguments (key-value pairs) that build a dictionary used as template parameters.
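In miniature, this is plain str.format() substitution: the trailing PARAMS are paired into a dictionary that fills the template. An illustrative sketch, not the actual implementation:

template = "show vlan {vlan_id}"
params_list = ["vlan_id", "10"]                          # as typed after the options
params = dict(zip(params_list[::2], params_list[1::2]))  # {'vlan_id': '10'}
print(template.format(**params))                         # show vlan 10

Because the pairs end up in a dictionary, a repeated key silently keeps only its last value, which is what the warning further down illustrates.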

+
Command overview
+
$ anta debug run-template --help
+Usage: anta debug run-template [OPTIONS] PARAMS...
+
+  Run arbitrary templated command to an ANTA device.
+
+  Takes a list of arguments (keys followed by a value) to build a dictionary
+  used as template parameters. Example:
+
+  anta debug run-template -d leaf1a -t 'show vlan {vlan_id}' vlan_id 1
+
+Options:
+  -t, --template TEXT       Command template to run. E.g. 'show vlan
+                            {vlan_id}'  [required]
+  --ofmt [json|text]        EOS eAPI format to use. can be text or json
+  -v, --version [1|latest]  EOS eAPI version
+  -r, --revision INTEGER    eAPI command revision
+  -d, --device TEXT         Device from inventory to use  [required]
+  --help                    Show this message and exit.
+
+
Example
+

This example uses the show vlan {vlan_id} command in JSON format:

+
anta debug run-template --template "show vlan {vlan_id}" vlan_id 10 --device DC1-LEAF1A
+Run templated command 'show vlan {vlan_id}' with {'vlan_id': '10'} on DC1-LEAF1A
+{
+    'vlans': {
+        '10': {
+            'name': 'VRFPROD_VLAN10',
+            'dynamic': False,
+            'status': 'active',
+            'interfaces': {
+                'Cpu': {'privatePromoted': False, 'blocked': None},
+                'Port-Channel11': {'privatePromoted': False, 'blocked': None},
+                'Vxlan1': {'privatePromoted': False, 'blocked': None}
+            }
+        }
+    },
+    'sourceDetail': ''
+}
+
+
+

Warning

+

If multiple arguments of the same key are provided, only the last argument value will be kept in the template parameters.

+
+
Example of multiple arguments
+
anta --log DEBUG debug run-template --template "ping {dst} source {src}" dst "8.8.8.8" src Loopback0 --device DC1-SPINE1    
+> {'dst': '8.8.8.8', 'src': 'Loopback0'}
+
+anta --log DEBUG debug run-template --template "ping {dst} source {src}" dst "8.8.8.8" src Loopback0 dst "1.1.1.1" src Loopback1 --device DC1-SPINE1           
+> {'dst': '1.1.1.1', 'src': 'Loopback1'}
+# Notice how `src` and `dst` keep only the latest value
+
\ No newline at end of file
diff --git a/0.6.0/cli/exec/index.html b/0.6.0/cli/exec/index.html
new file mode 100644
index 000000000..09c2de5f7

Execute commands


Executing Commands on Devices

+

The ANTA CLI provides a set of entrypoints that facilitate remote command execution on EOS devices.

+
EXEC Command overview
+
anta exec --help
+Usage: anta exec [OPTIONS] COMMAND [ARGS]...
+
+  Execute commands to inventory devices
+
+Options:
+  --help  Show this message and exit.
+
+Commands:
+  clear-counters        Clear counter statistics on EOS devices
+  collect-tech-support  Collect scheduled tech-support from EOS devices
+  snapshot              Collect commands output from devices in inventory
+
+

Clear interface counters

+

This command clears interface counters on EOS devices specified in your inventory.

+
Command overview
+
anta exec clear-counters --help
+Usage: anta exec clear-counters [OPTIONS]
+
+  Clear counter statistics on EOS devices
+
+Options:
+  -t, --tags TEXT  List of tags using comma as separator: tag1,tag2,tag3
+  --help           Show this message and exit.
+
+
Example
+
anta exec clear-counters --tags SPINE
+[20:19:13] INFO     Connecting to devices...                                                                                                                         utils.py:43
+           INFO     Clearing counters on remote devices...                                                                                                           utils.py:46
+           INFO     Cleared counters on DC1-SPINE2 (cEOSLab)                                                                                                         utils.py:41
+           INFO     Cleared counters on DC2-SPINE1 (cEOSLab)                                                                                                         utils.py:41
+           INFO     Cleared counters on DC1-SPINE1 (cEOSLab)                                                                                                         utils.py:41
+           INFO     Cleared counters on DC2-SPINE2 (cEOSLab)
+
+

Collect a set of commands

+

This command collects the output of all commands specified in a commands-list file; each command's output can be collected in either json or text format.

+
Command overview
+
anta exec snapshot --help
+Usage: anta exec snapshot [OPTIONS]
+
+  Collect commands output from devices in inventory
+
+Options:
+  -t, --tags TEXT           List of tags using comma as separator:
+                            tag1,tag2,tag3
+  -c, --commands-list FILE  File with list of commands to collect  [env var:
+                            ANTA_EXEC_SNAPSHOT_COMMANDS_LIST; required]
+  -o, --output DIRECTORY    Directory to save commands output. Will have a
+                            suffix with the format _YEAR-MONTH-DAY_HOUR-
+                            MINUTES-SECONDS'  [env var:
+                            ANTA_EXEC_SNAPSHOT_OUTPUT; default: anta_snapshot]
+  --help                    Show this message and exit.
+
+

The commands-list file should follow this structure:

+
---
+json_format:
+  - show version
+text_format:
+  - show bfd peers
+
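As a small sketch (assuming PyYAML is available), the two top-level keys decide how each command's output is collected and saved:

import yaml

with open("commands.yaml") as fd:
    commands = yaml.safe_load(fd)

print(commands["json_format"])  # ['show version']    -> saved under json/ as .json
print(commands["text_format"])  # ['show bfd peers']  -> saved under text/ as .log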
+
Example
+
anta exec snapshot --tags SPINE --commands-list ./commands.yaml --output ./
+[20:25:15] INFO     Connecting to devices...                                                                                                                         utils.py:78
+           INFO     Collecting commands from remote devices                                                                                                          utils.py:81
+           INFO     Collected command 'show version' from device DC2-SPINE1 (cEOSLab)                                                                                utils.py:76
+           INFO     Collected command 'show version' from device DC2-SPINE2 (cEOSLab)                                                                                utils.py:76
+           INFO     Collected command 'show version' from device DC1-SPINE1 (cEOSLab)                                                                                utils.py:76
+           INFO     Collected command 'show version' from device DC1-SPINE2 (cEOSLab)                                                                                utils.py:76
+[20:25:16] INFO     Collected command 'show bfd peers' from device DC2-SPINE2 (cEOSLab)                                                                              utils.py:76
+           INFO     Collected command 'show bfd peers' from device DC2-SPINE1 (cEOSLab)                                                                              utils.py:76
+           INFO     Collected command 'show bfd peers' from device DC1-SPINE1 (cEOSLab)                                                                              utils.py:76
+           INFO     Collected command 'show bfd peers' from device DC1-SPINE2 (cEOSLab)
+
+

The results of the executed commands will be stored in the output directory specified during command execution:

+
tree _2023-07-14_20_25_15
+_2023-07-14_20_25_15
+├── DC1-SPINE1
+│   ├── json
+│   │   └── show version.json
+│   └── text
+│       └── show bfd peers.log
+├── DC1-SPINE2
+│   ├── json
+│   │   └── show version.json
+│   └── text
+│       └── show bfd peers.log
+├── DC2-SPINE1
+│   ├── json
+│   │   └── show version.json
+│   └── text
+│       └── show bfd peers.log
+└── DC2-SPINE2
+    ├── json
+    │   └── show version.json
+    └── text
+        └── show bfd peers.log
+
+12 directories, 8 files
+

Get Scheduled tech-support

+

EOS offers a feature that automatically creates a tech-support archive every hour by default. These archives are stored under /mnt/flash/schedule/tech-support.

+
leaf1#show schedule summary
+Maximum concurrent jobs  1
+Prepend host name to logfile: Yes
+Name                 At Time       Last        Interval       Timeout        Max        Max     Logfile Location                  Status
+                                   Time         (mins)        (mins)         Log        Logs
+                                                                            Files       Size
+----------------- ------------- ----------- -------------- ------------- ----------- ---------- --------------------------------- ------
+tech-support           now         08:37          60            30           100         -      flash:schedule/tech-support/      Success
+
+
+leaf1#bash ls /mnt/flash/schedule/tech-support
+leaf1_tech-support_2023-03-09.1337.log.gz  leaf1_tech-support_2023-03-10.0837.log.gz  leaf1_tech-support_2023-03-11.0337.log.gz
+
+

For Network Readiness for Use (NRFU) tests and to keep a comprehensive report of the system state before going live, ANTA provides a command-line interface that efficiently retrieves these files.

+
Command overview
+
anta exec collect-tech-support --help
+Usage: anta exec collect-tech-support [OPTIONS]
+
+  Collect scheduled tech-support from EOS devices
+
+Options:
+  -o, --output PATH              Path for tests catalog  [default: ./tech-
+                                 support]
+  --latest INTEGER               Number of scheduled show-tech to retrieve
+  --configure / --not-configure  Ensure device has 'aaa authorization exec
+                                 default local' configured (required for SCP)
+                                 [default: not-configure]
+  -t, --tags TEXT                List of tags using comma as separator:
+                                 tag1,tag2,tag3
+  --help                         Show this message and exit.
+
+

When executed, this command fetches tech-support files and downloads them locally into a device-specific subfolder within the designated folder. You can specify the output folder with the --output option.

+

ANTA uses SCP to download files from devices; ensure that all SSH Host Keys are trusted before running the command. Use the anta --insecure option if they are not.

+

The configuration aaa authorization exec default local must be enabled on devices for SCP to function. ANTA will not automatically configure this unless --configure is specified.

+

The --latest option allows retrieval of a specific number of the most recent tech-support files.

+
+

Warning

+

By default all the tech-support files present on the devices are retrieved.

+
+
Example
+
anta --insecure exec collect-tech-support
+[15:27:19] INFO     Connecting to devices...
+INFO     Copying '/mnt/flash/schedule/tech-support/spine1_tech-support_2023-06-09.1315.log.gz' from device spine1 to 'tech-support/spine1' locally
+INFO     Copying '/mnt/flash/schedule/tech-support/leaf3_tech-support_2023-06-09.1315.log.gz' from device leaf3 to 'tech-support/leaf3' locally
+INFO     Copying '/mnt/flash/schedule/tech-support/leaf1_tech-support_2023-06-09.1315.log.gz' from device leaf1 to 'tech-support/leaf1' locally
+INFO     Copying '/mnt/flash/schedule/tech-support/leaf2_tech-support_2023-06-09.1315.log.gz' from device leaf2 to 'tech-support/leaf2' locally
+INFO     Copying '/mnt/flash/schedule/tech-support/spine2_tech-support_2023-06-09.1315.log.gz' from device spine2 to 'tech-support/spine2' locally
+INFO     Copying '/mnt/flash/schedule/tech-support/leaf4_tech-support_2023-06-09.1315.log.gz' from device leaf4 to 'tech-support/leaf4' locally
+INFO     Collected 1 scheduled tech-support from leaf2
+INFO     Collected 1 scheduled tech-support from spine2
+INFO     Collected 1 scheduled tech-support from leaf3
+INFO     Collected 1 scheduled tech-support from spine1
+INFO     Collected 1 scheduled tech-support from leaf1
+INFO     Collected 1 scheduled tech-support from leaf4
+
+

The output folder structure is as follows:

+
tree tech-support/
+tech-support/
+├── leaf1
+│   └── leaf1_tech-support_2023-06-09.1315.log.gz
+├── leaf2
+│   └── leaf2_tech-support_2023-06-09.1315.log.gz
+├── leaf3
+│   └── leaf3_tech-support_2023-06-09.1315.log.gz
+├── leaf4
+│   └── leaf4_tech-support_2023-06-09.1315.log.gz
+├── spine1
+│   └── spine1_tech-support_2023-06-09.1315.log.gz
+└── spine2
+    └── spine2_tech-support_2023-06-09.1315.log.gz
+
+6 directories, 6 files
+
+

Each device has its own subdirectory containing the collected tech-support files.
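If you want to post-process the collected archives, the folder layout shown above is easy to walk. A minimal sketch (assuming the default tech-support output directory):

from pathlib import Path

root = Path("tech-support")  # default output folder; adjust if you used --output
for device_dir in sorted(p for p in root.iterdir() if p.is_dir()):
    archives = sorted(device_dir.glob("*.log.gz"))
    print(f"{device_dir.name}: {len(archives)} archive(s)")
    for archive in archives:
        print(f"  {archive.name}")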

diff --git a/0.6.0/cli/get-inventory-information/index.html b/0.6.0/cli/get-inventory-information/index.html

Get Inventory Information

+ +

Retrieving Inventory Information

+

The ANTA CLI offers multiple entrypoints to access data from your local inventory.

+

Inventory used for examples

+

Let’s consider the following inventory:

+
---
+anta_inventory:
+  hosts:
+    - host: 172.20.20.101
+      name: DC1-SPINE1
+      tags: ["SPINE", "DC1"]
+
+    - host: 172.20.20.102
+      name: DC1-SPINE2
+      tags: ["SPINE", "DC1"]
+
+    - host: 172.20.20.111
+      name: DC1-LEAF1A
+      tags: ["LEAF", "DC1"]
+
+    - host: 172.20.20.112
+      name: DC1-LEAF1B
+      tags: ["LEAF", "DC1"]
+
+    - host: 172.20.20.121
+      name: DC1-BL1
+      tags: ["BL", "DC1"]
+
+    - host: 172.20.20.122
+      name: DC1-BL2
+      tags: ["BL", "DC1"]
+
+    - host: 172.20.20.201
+      name: DC2-SPINE1
+      tags: ["SPINE", "DC2"]
+
+    - host: 172.20.20.202
+      name: DC2-SPINE2
+      tags: ["SPINE", "DC2"]
+
+    - host: 172.20.20.211
+      name: DC2-LEAF1A
+      tags: ["LEAF", "DC2"]
+
+    - host: 172.20.20.212
+      name: DC2-LEAF1B
+      tags: ["LEAF", "DC2"]
+
+    - host: 172.20.20.221
+      name: DC2-BL1
+      tags: ["BL", "DC2"]
+
+    - host: 172.20.20.222
+      name: DC2-BL2
+      tags: ["BL", "DC2"]
+
+

Obtaining all configured tags

+

Since most of ANTA’s commands accommodate tag filtering, this command is useful for enumerating all tags configured in the inventory. Running anta get tags returns the list of configured tags.

+
Command overview
+
anta get tags --help
+Usage: anta get tags [OPTIONS]
+
+  Get list of configured tags in user inventory.
+
+Options:
+  --help  Show this message and exit.
+
+
Example
+

To get the list of all configured tags in the inventory, run the following command:

+
anta get tags
+Tags found:
+[
+  "BL",
+  "DC1",
+  "DC2",
+  "LEAF",
+  "SPINE",
+  "all"
+]
+
+* note that tag all has been added by anta
+
+
+

Note

+

Even if you haven’t explicitly configured the all tag in the inventory, it is automatically added. This default tag allows commands to be executed on all devices in the inventory when no tag is specified.

+
+
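If you are scripting around ANTA, the same tag list can be derived directly from the inventory file. A minimal sketch (assuming PyYAML and the inventory format shown above, saved as inventory.yml):

import yaml  # assumes PyYAML (pip install pyyaml)

with open("inventory.yml", "r", encoding="utf-8") as f:
    hosts = yaml.safe_load(f)["anta_inventory"]["hosts"]

# The implicit "all" tag is added by ANTA at runtime; it is not stored in the file.
tags = sorted({tag for host in hosts for tag in host.get("tags", [])})
print(tags)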

List devices in inventory

+

This command will list all devices available in the inventory. Using the --tags option, you can filter this list to only include devices with specific tags. The --connected option restricts the list to devices where a connection has been established.

+
Command overview
+
anta get inventory --help
+Usage: anta get inventory [OPTIONS]
+
+  Show inventory loaded in ANTA.
+
+Options:
+  -t, --tags TEXT                List of tags using comma as separator:
+                                 tag1,tag2,tag3
+  --connected / --not-connected  Display inventory after connection has been
+                                 created
+  --help                         Show this message and exit.
+
+
+

Tip

+

In its default mode, anta get inventory provides only information that doesn’t rely on a device connection. If you are interested in obtaining connection-dependent details, like the hardware model, please use the --connected option.

+
+
Example
+

To retrieve a comprehensive list of all devices along with their details, execute the following command. It will provide all the data loaded into the ANTA inventory from your inventory file.

+
anta get inventory --tags SPINE
+Current inventory content is:
+{
+    'DC1-SPINE1': AsyncEOSDevice(
+        name='DC1-SPINE1',
+        tags=['SPINE', 'DC1', 'all'],
+        hw_model=None,
+        is_online=False,
+        established=False,
+        host='172.20.20.101',
+        eapi_port=443,
+        username='arista',
+        password='arista',
+        enable_password='arista',
+        insecure=False
+    ),
+    'DC1-SPINE2': AsyncEOSDevice(
+        name='DC1-SPINE2',
+        tags=['SPINE', 'DC1', 'all'],
+        hw_model=None,
+        is_online=False,
+        established=False,
+        host='172.20.20.102',
+        eapi_port=443,
+        username='arista',
+        password='arista',
+        enable_password='arista',
+        insecure=False
+    ),
+    'DC2-SPINE1': AsyncEOSDevice(
+        name='DC2-SPINE1',
+        tags=['SPINE', 'DC2', 'all'],
+        hw_model=None,
+        is_online=False,
+        established=False,
+        host='172.20.20.201',
+        eapi_port=443,
+        username='arista',
+        password='arista',
+        enable_password='arista',
+        insecure=False
+    ),
+    'DC2-SPINE2': AsyncEOSDevice(
+        name='DC2-SPINE2',
+        tags=['SPINE', 'DC2', 'all'],
+        hw_model=None,
+        is_online=False,
+        established=False,
+        host='172.20.20.202',
+        eapi_port=443,
+        username='arista',
+        password='arista',
+        enable_password='arista',
+        insecure=False
+    )
+}
+
diff --git a/0.6.0/cli/inv-from-cvp/index.html b/0.6.0/cli/inv-from-cvp/index.html

Inventory from CVP

+ +

Create an Inventory from CloudVision

+

In large setups, it might be beneficial to construct your inventory based on CloudVision. The from-cvp entrypoint of the get command enables the user to create an ANTA inventory from CloudVision.

+
Command overview
+
anta get from-cvp --help
+Usage: anta get from-cvp [OPTIONS]
+
+  Build ANTA inventory from Cloudvision
+
+Options:
+  -ip, --cvp-ip TEXT              CVP IP Address  [required]
+  -u, --cvp-username TEXT         CVP Username  [required]
+  -p, --cvp-password TEXT         CVP Password / token  [required]
+  -c, --cvp-container TEXT        Container where devices are configured
+  -d, --inventory-directory PATH  Path to save inventory file
+  --help                          Show this message and exit.
+
+

The output is an inventory where the name of the container is added as a tag for each host:

+
anta_inventory:
+  hosts:
+  - host: 192.168.0.13
+    name: leaf2
+    tags:
+    - pod1
+  - host: 192.168.0.15
+    name: leaf4
+    tags:
+    - pod2
+
+
+

Warning

+

The current implementation only considers devices directly attached to a specific container when using the --cvp-container option.

+
+
Creating an inventory from multiple containers
+

If you need to create an inventory from multiple containers, you can run the command in a bash loop and then concatenate the resulting files into a single inventory file:

+
$ for container in pod01 pod02 spines; do anta get from-cvp -ip <cvp-ip> -u cvpadmin -p cvpadmin -c $container -d test-inventory; done
+
+[12:25:35] INFO     Getting auth token from cvp.as73.inetsix.net for user tom
+[12:25:36] INFO     Creating inventory folder /home/tom/Projects/arista/network-test-automation/test-inventory
+           WARNING  Using the new api_token parameter. This will override usage of the cvaas_token parameter if both are provided. This is because api_token and cvaas_token parameters
+                    are for the same use case and api_token is more generic
+           INFO     Connected to CVP cvp.as73.inetsix.net
+
+
+[12:25:37] INFO     Getting auth token from cvp.as73.inetsix.net for user tom
+[12:25:38] WARNING  Using the new api_token parameter. This will override usage of the cvaas_token parameter if both are provided. This is because api_token and cvaas_token parameters
+                    are for the same use case and api_token is more generic
+           INFO     Connected to CVP cvp.as73.inetsix.net
+
+
+[12:25:38] INFO     Getting auth token from cvp.as73.inetsix.net for user tom
+[12:25:39] WARNING  Using the new api_token parameter. This will override usage of the cvaas_token parameter if both are provided. This is because api_token and cvaas_token parameters
+                    are for the same use case and api_token is more generic
+           INFO     Connected to CVP cvp.as73.inetsix.net
+
+           INFO     Inventory file has been created in /home/tom/Projects/arista/network-test-automation/test-inventory/inventory-spines.yml
+
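Instead of concatenating the files by hand, a small script can merge them. A minimal sketch (assuming PyYAML and the inventory-<container>.yml naming visible in the log above):

import yaml  # assumes PyYAML (pip install pyyaml)
from pathlib import Path

merged = {"anta_inventory": {"hosts": []}}
# anta get from-cvp writes one inventory-<container>.yml per container in the output directory
for inv_file in sorted(Path("test-inventory").glob("inventory-*.yml")):
    with inv_file.open(encoding="utf-8") as f:
        merged["anta_inventory"]["hosts"] += yaml.safe_load(f)["anta_inventory"]["hosts"]

with open("inventory.yml", "w", encoding="utf-8") as f:
    yaml.safe_dump(merged, f, sort_keys=False)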
diff --git a/0.6.0/cli/nrfu/index.html b/0.6.0/cli/nrfu/index.html

NRFU

+ +

Execute Network Readiness For Use (NRFU) Testing

+

ANTA provides a set of commands for performing NRFU tests on devices. These commands are under the anta nrfu namespace and offer multiple output format options:

+ +
NRFU Command overview
+
anta nrfu --help
+Usage: anta nrfu [OPTIONS] COMMAND [ARGS]...
+
+  Run NRFU against inventory devices
+
+Options:
+  -c, --catalog FILE  Path to the tests catalog YAML file  [env var:
+                      ANTA_NRFU_CATALOG; required]
+  --help              Show this message and exit.
+
+Commands:
+  json        ANTA command to check network state with JSON result
+  table       ANTA command to check network states with table result
+  text        ANTA command to check network states with text result
+  tpl-report  ANTA command to check network state with templated report
+
+

All commands under the anta nrfu namespace require a catalog YAML file, specified with the --catalog option.

+

Performing NRFU with text rendering

+

The text subcommand provides a straightforward text report for each test executed on all devices in your inventory.

+
Command overview
+
anta nrfu text --help
+Usage: anta nrfu text [OPTIONS]
+
+  ANTA command to check network states with text result
+
+Options:
+  -t, --tags TEXT                 List of tags using comma as separator:
+                                  tag1,tag2,tag3
+  -s, --search TEXT               Regular expression to search in both name
+                                  and test
+  --skip-error / --no-skip-error  Hide tests in errors due to connectivity
+                                  issue  [default: no-skip-error]
+  --help                          Show this message and exit.
+
+

The --tags option allows you to target specific devices in your inventory, while the --search option filters on a regular expression pattern matched against both the hostname and the test name.

+

The --skip-error option can be used to exclude tests that failed due to connectivity issues or unsupported commands.

+
Example
+

anta nrfu text --tags LEAF --search DC1-LEAF1A
+
+anta nrfu text results

+

Performing NRFU with table rendering

+

The table command under the anta nrfu namespace offers a clear and organized table view of the test results, suitable for filtering. It also has its own set of options for better control over the output.

+
Command overview
+
anta nrfu table --help
+Usage: anta nrfu table [OPTIONS]
+
+  ANTA command to check network states with table result
+
+Options:
+  -t, --tags TEXT    List of tags using comma as separator: tag1,tag2,tag3
+  -d, --device TEXT  Show a summary for this device
+  -t, --test TEXT    Show a summary for this test
+  --help             Show this message and exit.
+
+

The --tags option can be used to target specific devices in your inventory.

+

The --device and --test options show a summarized view of the test results for a specific host or test case, respectively.

+
Example
+

anta nrfu table --tags LEAF
+
+anta nrfu table results

+

For larger setups, you can also group the results by host or test to get a summarized view:

+

anta nrfu table --tags LEAF --device DC1-LEAF1A
+
+anta nrfu table per host results

+

Performing NRFU with JSON rendering

+

The json subcommand generates JSON output that can subsequently be passed to another tool for reporting purposes.

+
Command overview
+
anta nrfu json --help
+Usage: anta nrfu json [OPTIONS]
+
+  ANTA command to check network state with JSON result
+
+Options:
+  -t, --tags TEXT    List of tags using comma as separator: tag1,tag2,tag3
+  -o, --output FILE  Path to save report as a file  [env var:
+                     ANTA_NRFU_JSON_OUTPUT]
+  --help             Show this message and exit.
+
+

The --tags option can be used to target specific devices in your inventory.

+

The --output option allows you to save the JSON report as a file.

+
Example
+

anta nrfu json --tags LEAF
+
+anta nrfu json results

+
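As an example of such post-processing, a minimal sketch that summarizes failures (assuming the report was saved as report.json with --output; each entry carries name, test, result and messages fields, as shown in the Getting Started section):

import json

with open("report.json", "r", encoding="utf-8") as f:
    results = json.load(f)

# Keep only the entries whose result is "failure" and print their messages.
failed = [r for r in results if r["result"] == "failure"]
for r in failed:
    print(f"{r['name']}: {r['test']} -> {r['messages']}")
print(f"{len(failed)} failure(s) out of {len(results)} result(s)")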

Performing NRFU with custom reports

+

ANTA offers a CLI option for creating custom reports. This leverages the Jinja2 template system, allowing you to tailor reports to your specific needs.

+
Command overview
+

anta nrfu tpl-report --help
+Usage: anta nrfu tpl-report [OPTIONS]
+
+  ANTA command to check network state with templated report
+
+Options:
+  -tpl, --template FILE  Path to the template to use for the report  [env var:
+                         ANTA_NRFU_TPL_REPORT_TEMPLATE; required]
+  -o, --output FILE      Path to save report as a file  [env var:
+                         ANTA_NRFU_TPL_REPORT_OUTPUT]
+  -t, --tags TEXT        List of tags using comma as separator: tag1,tag2,tag3
+  --help                 Show this message and exit.
+
+The --template option is used to specify the Jinja2 template file for generating the custom report.

+

The --output option allows you to choose the path where the final report will be saved.

+

The --tags option can be used to target specific devices in your inventory.

+
Example
+

anta nrfu tpl-report --tags LEAF --template ./custom_template.j2
+
+anta nrfu json results

+

The template ./custom_template.j2 is a simple Jinja2 template:

+
{% for d in data %}
+* {{ d.test }} is [green]{{ d.result | upper}}[/green] for {{ d.name }}
+{% endfor %}
+
+

The Jinja2 template has access to all TestResult elements and their values, as described in this documentation.

+
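To iterate on a template without running tests against live devices, you can render it locally. A minimal sketch (assuming Jinja2 is installed; the SimpleNamespace objects are stand-ins for real TestResult instances):

from types import SimpleNamespace

from jinja2 import Template  # assumes Jinja2 (pip install jinja2)

# Fake results exposing the attributes used by the template above.
data = [
    SimpleNamespace(name="DC1-LEAF1A", test="VerifyMlagStatus", result="success"),
    SimpleNamespace(name="DC1-LEAF1A", test="VerifyMlagInterfaces", result="failure"),
]
with open("custom_template.j2", "r", encoding="utf-8") as f:
    print(Template(f.read()).render(data=data))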

You can also save the report result to a file using the --output option:

+
anta nrfu tpl-report --tags LEAF --template ./custom_template.j2 --output nrfu-tpl-report.txt
+
+

The resulting output might look like this:

+
cat nrfu-tpl-report.txt
+* VerifyMlagStatus is [green]SUCCESS[/green] for DC1-LEAF1A
+* VerifyMlagInterfaces is [green]SUCCESS[/green] for DC1-LEAF1A
+* VerifyMlagConfigSanity is [green]SUCCESS[/green] for DC1-LEAF1A
+* VerifyMlagReloadDelay is [green]SUCCESS[/green] for DC1-LEAF1A
+
diff --git a/0.6.0/cli/overview/index.html b/0.6.0/cli/overview/index.html

Overview

+ +

Overview of ANTA’s Command-Line Interface (CLI)

+

ANTA provides a powerful Command-Line Interface (CLI) to perform a wide range of operations. This document provides a comprehensive overview of ANTA CLI usage and its commands.

+

ANTA can also be used as a Python library, allowing you to build your own tools based on it. Visit this page for more details.

+

To start using the ANTA CLI, open your terminal and type anta.

+

Invoking ANTA CLI

+
anta --help
+Usage: anta [OPTIONS] COMMAND [ARGS]...
+
+  Arista Network Test Automation (ANTA) CLI
+
+Options:
+  --version                       Show the version and exit.
+  --username TEXT                 Username to connect to EOS  [env var:
+                                  ANTA_USERNAME; required]
+  --password TEXT                 Password to connect to EOS  [env var:
+                                  ANTA_PASSWORD; required]
+  --timeout INTEGER               Global connection timeout  [env var:
+                                  ANTA_TIMEOUT; default: 5]
+  --insecure                      Disable SSH Host Key validation  [env var:
+                                  ANTA_INSECURE]
+  --enable-password TEXT          Enable password if required to connect  [env
+                                  var: ANTA_ENABLE_PASSWORD]
+  -i, --inventory FILE            Path to the inventory YAML file  [env var:
+                                  ANTA_INVENTORY; required]
+  --log-level, --log [CRITICAL|ERROR|WARNING|INFO|DEBUG]
+                                  ANTA logging level  [env var:
+                                  ANTA_LOG_LEVEL; default: INFO]
+  --ignore-status                 Always exit with success  [env var:
+                                  ANTA_IGNORE_STATUS]
+  --ignore-error                  Only report failures and not errors  [env
+                                  var: ANTA_IGNORE_ERROR]
+  --help                          Show this message and exit.
+
+Commands:
+  debug  Debug commands for building ANTA
+  exec   Execute commands to inventory devices
+  get    Get data from/to ANTA
+  nrfu   Run NRFU against inventory devices
+
+

ANTA Global Parameters

+

Certain parameters are globally required and can be either passed to the ANTA CLI or set as an environment variable (ENV VAR).

+

To pass the parameters via the CLI:

+
anta --username tom --password arista123 --inventory inventory.yml <anta cli>
+
+

To set them as ENV VAR:

+
export ANTA_USERNAME=tom
+export ANTA_PASSWORD=arista123
+export ANTA_INVENTORY=inventory.yml
+
+

Then, run the CLI:

+
anta <anta cli>
+
+

ANTA Exit Codes

+

ANTA utilizes different exit codes to indicate the status of the test runs.

+

For all subcommands, ANTA returns exit code 0, indicating a successful operation, except for the nrfu command.

+

For the nrfu command, ANTA uses the following exit codes:

+
    +
  • Exit code 0 - All tests passed successfully.
  • +
  • Exit code 1 - Tests were run, but at least one test returned a failure.
  • +
  • Exit code 2 - Tests were run, but at least one test returned an error.
  • +
  • Exit code 3 - An internal error occurred while executing tests.
  • +
+

To ignore the test status, use anta --ignore-status nrfu, and the exit code will always be 0.

+

To ignore errors, use anta --ignore-error nrfu, and the exit code will be 0 if all tests succeeded or 1 if any test failed.

+
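These exit codes make nrfu easy to gate in a CI pipeline. A minimal wrapper sketch (catalog.yml is a placeholder; credentials and inventory are assumed to come from the ANTA_* environment variables described above):

import subprocess
import sys

# Exit code semantics are documented above: 0 pass, 1 failure, 2 error, 3 internal error.
proc = subprocess.run(["anta", "nrfu", "--catalog", "catalog.yml", "table"], check=False)
if proc.returncode == 0:
    print("NRFU passed")
elif proc.returncode in (1, 2):
    sys.exit("NRFU reported failing or erroring tests")
else:
    sys.exit(f"ANTA internal error (exit code {proc.returncode})")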

Shell Completion

+

You can enable shell completion for the ANTA CLI:

+
+
+
+

If you use the ZSH shell, add the following line to your ~/.zshrc:

+
eval "$(_ANTA_COMPLETE=zsh_source anta)" > /dev/null
+
+
+
+

With bash, add the following line to your ~/.bashrc:

+
eval "$(_ANTA_COMPLETE=bash_source anta)" > /dev/null
+
+
+
+
diff --git a/0.6.0/contribution/index.html b/0.6.0/contribution/index.html

Contributions

+ +

How to contribute to ANTA

+

The contribution model is fork-based. Don’t push to arista-netdevops-community/anta directly: always create a branch in your fork and open a PR.

+

To help development, open your PR as soon as possible, even in draft mode. It lets others know what you are working on and avoids duplicate PRs.

+

Install repository

+

Run these commands to install:

+
    +
  • The package ANTA and its dependencies
  • +
  • ANTA cli executable.
  • +
+
# Clone repository
+git clone https://github.com/arista-netdevops-community/anta.git
+cd anta
+
+# Install module in editable mode
+pip install -e .
+
+

Run these commands to verify:

+
# Check python installation
+$ pip list
+
+# Check version using cli
+$ anta --version
+anta, version 0.6.0
+
+
Install development requirements
+

Run pip to install ANTA and its development tools.

+
pip install 'anta[dev]'
+
+
+

Run this command after installing the repository with the commands provided in the previous section.

+
+

Then, tox is configured with a few environments to run CI locally:

+
default environments:
+clean  -> Erase previous coverage reports
+lint   -> Check the code style
+type   -> Check typing
+py38   -> Run pytest with py38
+py39   -> Run pytest with py39
+py310  -> Run pytest with py310
+py311  -> Run pytest with py311
+report -> Generate coverage report
+
+additional environments:
+3.8    -> Run pytest with 3.8
+3.9    -> Run pytest with 3.9
+3.10   -> Run pytest with 3.10
+3.11   -> Run pytest with 3.11
+
+

Code linting

+
tox -e lint
+[...]
+lint: commands[0]> black --check --diff --color .
+All done!  🍰 ✨
+104 files would be left unchanged.
+lint: commands[1]> isort --check --diff --color .
+Skipped 7 files
+lint: commands[2]> flake8 --max-line-length=165 --config=/dev/null anta
+lint: commands[3]> flake8 --max-line-length=165 --config=/dev/null tests
+lint: commands[4]> pylint anta
+
+--------------------------------------------------------------------
+Your code has been rated at 10.00/10 (previous run: 10.00/10, +0.00)
+
+.pkg: _exit> python /Users/guillaumemulocher/.pyenv/versions/3.8.13/envs/anta/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+  lint: OK (19.26=setup[5.83]+cmd[1.50,0.76,1.19,1.20,8.77] seconds)
+  congratulations :) (19.56 seconds)
+
+

Code Typing

+
tox -e type
+
+[...]
+type: commands[0]> mypy --config-file=pyproject.toml anta
+Success: no issues found in 52 source files
+.pkg: _exit> python /Users/guillaumemulocher/.pyenv/versions/3.8.13/envs/anta/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+  type: OK (46.66=setup[24.20]+cmd[22.46] seconds)
+  congratulations :) (47.01 seconds)
+
+
+

NOTE: Typing is configured quite strictly, do not hesitate to reach out if you have any questions, struggles, nightmares.

+
+

Unit tests

+

To keep code quality high, we require a Pytest test for every test implemented in ANTA.

+

Every submodule should have its own pytest section under tests/units/anta_tests/<submodule-name>. This directory should contain 3 files:

+
    +
  • __init__.py: needed so the directory is treated as a Python module
  • +
  • data.py: where all your parametrize data goes; all your test information should be located here
  • +
  • test_exc.py: Pytest file with the test definitions.
  • +
+

A pytest definition should be similar to this template:

+
"""
+Tests for anta.tests.hardware.py
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+from typing import Any
+from unittest.mock import MagicMock
+
+import pytest
+
+from anta.tests.hardware import VerifyAdverseDrops
+from tests.lib.utils import generate_test_ids_list
+
+from .data import INPUT_<TEST_NAME>
+
+@pytest.mark.parametrize("test_data", INPUT_<TEST_NAME>, ids=generate_test_ids_list(INPUT_<TEST_NAME>))
+def test_<TEST_CASE>(mocked_device: MagicMock, test_data: Any) -> None:
+    """Check <TEST_CASE>."""
+
+    test = <TEST_CASE>(mocked_device, eos_data=test_data["eos_data"])
+    asyncio.run(test.test())
+
+    logging.debug(f"test result is: {test.result}")
+
+    assert str(test.result.name) == mocked_device.name
+    assert test.result.result == test_data["expected_result"]
+
+

The mocked_device object is a Pytest fixture representing an InventoryDevice, and the parametrized test_data is a list of dictionaries with the following structure:

+
INPUT_RUNNING_CONFIG: List[Dict[str, Any]] = [
+  # Test Case #1
+    {
+        "name": "failure",
+        "eos_data": ["blah blah"],
+        "side_effect": None,
+        "expected_result": "failure",
+        "expected_messages": ["blah blah"]
+    },
+    # Test Case #2
+    {
+      ...
+    },
+]
+
+

Where we have:

+
    +
  • name: Name of the test displayed by Pytest
  • +
  • eos_data: a list of data coming from EOS.
  • +
  • side_effect: used to inject template and test parameters (look for some examples in the existing tests)
  • +
  • expected_result: Result we expect for this test
  • +
  • expected_messages: Optional messages we expect for the test.
  • +
+
+

Use Anta CLI to get test data

+

To complete this block, you can use anta debug commands to get AntaCommand output to use in your test.

+
+

Git Pre-commit hook

+
pip install pre-commit
+pre-commit install
+
+

When running a commit or a pre-commit check:

+
❯ echo "import foobaz" > test.py && git add test.py
+❯ pre-commit
+pylint...................................................................Failed
+- hook id: pylint
+- exit code: 22
+
+************* Module test
+test.py:1:0: C0114: Missing module docstring (missing-module-docstring)
+test.py:1:0: E0401: Unable to import 'foobaz' (import-error)
+test.py:1:0: W0611: Unused import foobaz (unused-import)
+
+
+

NOTE: It can happen that pre-commit and tox disagree on something; in that case, please open an issue on GitHub so we can take a look. It is most probably a wrong configuration on our side.

+
+

Documentation

+

mkdocs is used to generate the documentation. A PR should always update the documentation to avoid documentation debt.

+
Install documentation requirements
+

Run pip to install the documentation requirements from the root of the repo:

+
pip install -r docs/requirements.txt
+
+
Testing documentation
+

You can then check locally the documentation using the following command from the root of the repo:

+
mkdocs serve
+
+ +

Writing documentation is crucial, but managing links can be cumbersome. To make sure there are no 404s, you can check the site with muffet:

+
muffet -c 2 --color=always http://127.0.0.1:8000 -e fonts.gstatic.com
+
+

Continuous Integration

+

GitHub Actions is used to test git pushes and pull requests. The workflows are defined in this directory. The results can be viewed here.

diff --git a/0.6.0/getting-started/index.html b/0.6.0/getting-started/index.html

Getting Started

+ +

Getting Started

+

This section shows how to use ANTA with a basic configuration. All examples are based on the Arista Test Drive (ATD) topology, which you can access by reaching out to your preferred SE.

+

Installation

+

The easiest way to install the ANTA package is to use pip with Python 3.8 or later:

+
pip install anta
+
+

For more details about how to install the package, please see the requirements and installation section.

+

Configure Arista EOS devices

+

For ANTA to be able to connect to your target devices, you need to configure your management interface:

+
vrf instance MGMT
+!
+interface Management0
+   description oob_management
+   vrf MGMT
+   ip address 192.168.0.10/24
+!
+
+

Then, configure access to eAPI:

+
!
+management api http-commands
+   protocol https port 443
+   no shutdown
+   vrf MGMT
+      no shutdown
+   !
+!
+
+

Create your inventory

+

ANTA uses an inventory to list the target devices for the tests. You can create a file manually with this format:

+
anta_inventory:
+  hosts:
+  - host: 192.168.0.10
+    name: spine01
+    tags: ['fabric', 'spine']
+  - host: 192.168.0.11
+    name: spine02
+    tags: ['fabric', 'spine']
+  - host: 192.168.0.12
+    name: leaf01
+    tags: ['fabric', 'leaf']
+  - host: 192.168.0.13
+    name: leaf02
+    tags: ['fabric', 'leaf']
+  - host: 192.168.0.14
+    name: leaf03
+    tags: ['fabric', 'leaf']
+  - host: 192.168.0.15
+    name: leaf04
+    tags: ['fabric', 'leaf']
+
+
+

You can read more details about how to build your inventory here.

+
+

Test Catalog

+

To test your network, ANTA relies on a test catalog listing all the tests to run against your inventory. A test catalog references Python test functions in a YAML file.

+

The structure to follow is:

+
<anta_tests_submodule>:
+  - <anta_tests_submodule function name>:
+      <test function option>:
+        <test function option value>
+
+
+

You can read more details about how to build your catalog here.

+
+

Here is an example for basic tests:

+
# Load anta.tests.software
+anta.tests.software:
+  - VerifyEOSVersion: # Verifies the device is running one of the allowed EOS version.
+      versions: # List of allowed EOS versions.
+        - 4.25.4M
+        - 4.26.1F
+        - '4.28.3M-28837868.4283M (engineering build)'
+  - VerifyTerminAttrVersion:
+      versions:
+        - v1.22.1
+
+anta.tests.system:
+  - VerifyUptime: # Verifies the device uptime is higher than a value.
+      minimum: 1
+  - VerifyNTP:
+  - VerifySyslog:
+
+anta.tests.mlag:
+  - VerifyMlagStatus:
+  - VerifyMlagInterfaces:
+  - VerifyMlagConfigSanity:
+
+anta.tests.configuration:
+  - VerifyZeroTouch: # Verifies ZeroTouch is disabled.
+  - VerifyRunningConfigDiffs:
+
+

Test your network

+

ANTA comes with a generic CLI entrypoint to run tests in your network. It requires an inventory file as well as a test catalog.

+

This entrypoint has multiple options to manage test coverage and reporting.

+
# Generic ANTA options
+$ anta
+Usage: anta [OPTIONS] COMMAND [ARGS]...
+
+  Arista Network Test Automation (ANTA) CLI
+
+Options:
+  --version                       Show the version and exit.
+  --username TEXT                 Username to connect to EOS  [env var:
+                                  ANTA_USERNAME; required]
+  --password TEXT                 Password to connect to EOS  [env var:
+                                  ANTA_PASSWORD; required]
+  --timeout INTEGER               Global connection timeout  [env var:
+                                  ANTA_TIMEOUT; default: 5]
+  --insecure                      Disable SSH Host Key validation  [env var:
+                                  ANTA_INSECURE]
+  --enable-password TEXT          Enable password if required to connect  [env
+                                  var: ANTA_ENABLE_PASSWORD]
+  -i, --inventory FILE            Path to the inventory YAML file  [env var:
+                                  ANTA_INVENTORY; required]
+  --log-level, --log [CRITICAL|ERROR|WARNING|INFO|DEBUG]
+                                  ANTA logging level  [env var:
+                                  ANTA_LOG_LEVEL; default: INFO]
+  --ignore-status                 Always exit with success  [env var:
+                                  ANTA_IGNORE_STATUS]
+  --ignore-error                  Only report failures and not errors  [env
+                                  var: ANTA_IGNORE_ERROR]
+  --help                          Show this message and exit.
+
+Commands:
+  debug  Debug commands for building ANTA
+  exec   Execute commands to inventory devices
+  get    Get data from/to ANTA
+  nrfu   Run NRFU against inventory devices
+
+
# NRFU part of ANTA
+$ anta nrfu --help
+Usage: anta nrfu [OPTIONS] COMMAND [ARGS]...
+
+  Run NRFU against inventory devices
+
+Options:
+  -c, --catalog FILE  Path to the tests catalog YAML file  [env var:
+                      ANTA_NRFU_CATALOG; required]
+  --help              Show this message and exit.
+
+Commands:
+  json        ANTA command to check network state with JSON result
+  table       ANTA command to check network states with table result
+  text        ANTA command to check network states with text result
+  tpl-report  ANTA command to check network state with templated report
+
+
+

Currently, to be able to run anta nrfu --help, you need to provide ANTA with the mandatory input parameters (username, password, and inventory); otherwise the CLI will report an error. This is tracked in: https://github.com/arista-netdevops-community/anta/issues/263

+
+

To run the NRFU, you need to select an output format among [“json”, “table”, “text”, “tpl-report”]. For a first usage, table is recommended. By default, all test results for all devices are rendered, but this can be changed to a report per test case or per host.

+
Default report using table
+
anta \
+    --username tom \
+    --password arista123 \
+    --enable-password t \
+    --inventory .personal/inventory_atd.yml \
+    nrfu --catalog .personal/tests-bases.yml table --tags leaf
+
+
+╭────────────────────── Settings ──────────────────────╮
+│ Running ANTA tests:                                  │
+│ - ANTA Inventory contains 6 devices (AsyncEOSDevice) │
+│ - Tests catalog contains 10 tests                    │
+╰──────────────────────────────────────────────────────╯
+[10:17:24] INFO     Running ANTA tests...                                                                                                           runner.py:75
+   Running NRFU Tests...100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 40/40  0:00:02  0:00:00
+
+                                                                       All tests results                                                                        
+┏━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
+┃ Device IP  Test Name                 Test Status  Message(s)        Test description                                                      Test category ┃
+┡━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
+│ leaf01     VerifyEOSVersion          success                        Verifies the device is running one of the allowed EOS version.        software      │
+│ leaf01     VerifyTerminAttrVersion   success                        Verifies the device is running one of the allowed TerminAttr          software      │
+│                                                                     version.                                                                            │
+│ leaf01     VerifyUptime              success                        Verifies the device uptime is higher than a value.                    system        │
+│ leaf01     VerifyNTP                 success                        Verifies NTP is synchronised.                                         system        │
+│ leaf01     VerifySyslog              success                        Verifies the device had no syslog message with a severity of warning  system        │
+│                                                                     (or a more severe message) during the last 7 days.                                  │
+│ leaf01     VerifyMlagStatus          skipped      MLAG is disabled  This test verifies the health status of the MLAG configuration.       mlag          │
+│ leaf01     VerifyMlagInterfaces      skipped      MLAG is disabled  This test verifies there are no inactive or active-partial MLAG       mlag          │
+[...]
+│ leaf04     VerifyMlagConfigSanity    skipped      MLAG is disabled  This test verifies there are no MLAG config-sanity inconsistencies.   mlag          │
+│ leaf04     VerifyZeroTouch           success                        Verifies ZeroTouch is disabled.                                       configuration │
+│ leaf04     VerifyRunningConfigDiffs  success                                                                                              configuration │
+└───────────┴──────────────────────────┴─────────────┴──────────────────┴──────────────────────────────────────────────────────────────────────┴───────────────┘
+
+
Report in text mode
+
$ anta \
+    --username tom \
+    --password arista123 \
+    --enable-password t \
+    --inventory .personal/inventory_atd.yml \
+    nrfu --catalog .personal/tests-bases.yml text --tags leaf
+
+╭────────────────────── Settings ──────────────────────╮
+│ Running ANTA tests:                                  │
+│ - ANTA Inventory contains 6 devices (AsyncEOSDevice) │
+│ - Tests catalog contains 10 tests                    │
+╰──────────────────────────────────────────────────────╯
+[10:20:47] INFO     Running ANTA tests...                                                                                                           runner.py:75
+   Running NRFU Tests...100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 40/40  0:00:01  0:00:00
+leaf01 :: VerifyEOSVersion :: SUCCESS
+leaf01 :: VerifyTerminAttrVersion :: SUCCESS
+leaf01 :: VerifyUptime :: SUCCESS
+leaf01 :: VerifyNTP :: SUCCESS
+leaf01 :: VerifySyslog :: SUCCESS
+leaf01 :: VerifyMlagStatus :: SKIPPED (MLAG is disabled)
+leaf01 :: VerifyMlagInterfaces :: SKIPPED (MLAG is disabled)
+leaf01 :: VerifyMlagConfigSanity :: SKIPPED (MLAG is disabled)
+[...]
+
+
Report per host
+
$ anta \
+    --username tom \
+    --password arista123 \
+    --enable-password t \
+    --inventory .personal/inventory_atd.yml \
+    nrfu --catalog .personal/tests-bases.yml json --tags leaf
+
+╭────────────────────── Settings ──────────────────────╮
+│ Running ANTA tests:                                  │
+│ - ANTA Inventory contains 6 devices (AsyncEOSDevice) │
+│ - Tests catalog contains 10 tests                    │
+╰──────────────────────────────────────────────────────╯
+[10:21:51] INFO     Running ANTA tests...                                                                                                           runner.py:75
+   Running NRFU Tests...100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 40/40  0:00:02  0:00:00
+╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ JSON results of all tests                                                                                                                                    │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+[
+  {
+    "name": "leaf01",
+    "test": "VerifyEOSVersion",
+    "test_category": [
+      "software"
+    ],
+    "test_description": "Verifies the device is running one of the allowed EOS version.",
+    "result": "success",
+    "messages": []
+  },
+  {
+    "name": "leaf01",
+    "test": "VerifyTerminAttrVersion",
+    "test_category": [
+      "software"
+    ],
+    "test_description": "Verifies the device is running one of the allowed TerminAttr version.",
+    "result": "success",
+    "messages": []
+  },
+[...]
+]
+
+

You can find more information under the usage section of the website.

diff --git a/0.6.0/index.html b/0.6.0/index.html

Home

+ +

ANTA Documentation

+

This website provides generic documentation related to the Arista Network Test Automation framework (ANTA).

+

+

Arista Network Test Automation (ANTA) Framework

+

This repository is a Python package to automate tests on Arista devices.

+
    +
  • The package name is ANTA, which stands for Arista Network Test Automation.
  • +
  • This package provides a set of tests to validate the state of your network.
  • +
  • This package can be imported in Python scripts:
      +
    • To automate NRFU (Network Ready For Use) test on a preproduction network
    • +
    • To automate tests on a live network (periodically or on demand)
    • +
    +
  • +
+

This repository comes with a CLI to run the Arista Network Test Automation (ANTA) framework from your preferred shell:

+
# Install ANTA
+pip install anta
+
+
# Run ANTA cli
+$ anta
+Usage: anta [OPTIONS] COMMAND [ARGS]...
+
+  Arista Network Test Automation (ANTA) CLI
+
+Options:
+  --version                       Show the version and exit.
+  --username TEXT                 Username to connect to EOS  [env var:
+                                  ANTA_USERNAME; required]
+  --password TEXT                 Password to connect to EOS  [env var:
+                                  ANTA_PASSWORD; required]
+  --timeout INTEGER               Global connection timeout  [env var:
+                                  ANTA_TIMEOUT; default: 5]
+  --insecure                      Disable SSH Host Key validation  [env var:
+                                  ANTA_INSECURE]
+  --enable-password TEXT          Enable password if required to connect  [env
+                                  var: ANTA_ENABLE_PASSWORD]
+  -i, --inventory FILE            Path to the inventory YAML file  [env var:
+                                  ANTA_INVENTORY; required]
+  --log-level, --log [CRITICAL|ERROR|WARNING|INFO|DEBUG]
+                                  ANTA logging level  [env var:
+                                  ANTA_LOG_LEVEL; default: INFO]
+  --ignore-status                 Always exit with success  [env var:
+                                  ANTA_IGNORE_STATUS]
+  --ignore-error                  Only report failures and not errors  [env
+                                  var: ANTA_IGNORE_ERROR]
+  --help                          Show this message and exit.
+
+Commands:
+  debug  Debug commands for building ANTA
+  exec   Execute commands to inventory devices
+  get    Get data from/to ANTA
+  nrfu   Run NRFU against inventory devices
+
diff --git a/0.6.0/requirements-and-installation/index.html b/0.6.0/requirements-and-installation/index.html

Installation

+ +

ANTA Requirements

+

Python version

+

Python 3 (>=3.8) is required:

+
python --version
+Python 3.9.9
+
+
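The same check can be done from within Python, which is handy in setup scripts:

import sys

# ANTA requires Python 3.8 or later.
assert sys.version_info >= (3, 8), "ANTA requires Python >= 3.8"
print(f"Python {sys.version.split()[0]} is supported")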

Install ANTA package

+

This installation will deploy the test collection, the scripts, and all their Python requirements.

+

The ANTA package and the CLI require some packages that are not part of the Python standard library. They are listed in the pyproject.toml file, under dependencies.

+
Install from the PyPI server
+
pip install anta
+
+
Install ANTA from GitHub
+
pip install git+https://github.com/arista-netdevops-community/anta.git
+
+# You can even specify the branch, tag or commit:
+pip install git+https://github.com/arista-netdevops-community/anta.git@<cool-feature-branch>
+pip install git+https://github.com/arista-netdevops-community/anta.git@<cool-tag>
+pip install git+https://github.com/arista-netdevops-community/anta.git@<more-or-less-cool-hash>
+
+
Check installation
+

Run these commands to verify:

+
# Check ANTA has been installed in your python path
+pip list | grep anta
+
+# Check scripts are in your $PATH
+# The path may differ, but this confirms the CLI is in your PATH
+which anta
+/home/tom/.pyenv/shims/anta
+
+# Check ANTA version
+anta --version
+anta, version 0.6.0
+
+

EOS Requirements

+

To get ANTA working, the targeted Arista EOS devices must have the following configuration (assuming you connect to the device using the Management interface in the MGMT VRF):

+
configure
+!
+vrf instance MGMT
+!
+interface Management1
+   description oob_management
+   vrf MGMT
+   ip address 10.73.1.105/24
+!
+end
+
+

Enable eAPI in the MGMT VRF:

+
configure
+!
+management api http-commands
+   protocol https port 443
+   no shutdown
+   vrf MGMT
+      no shutdown
+!
+end
+
+

Now the switch accepts HTTPS requests containing a list of CLI commands on port 443 in the MGMT VRF.

+

Run these EOS commands to verify:

+
show management http-server
+show management api http-commands
+
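You can also sanity-check eAPI reachability from the ANTA host itself by testing the TCP connection to the eAPI port. A minimal sketch (the address and port match the example configuration above):

import socket

def eapi_reachable(host: str, port: int = 443, timeout: float = 5.0) -> bool:
    """Return True if a TCP connection to the eAPI port succeeds."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print(eapi_reachable("10.73.1.105"))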
diff --git a/0.6.0/requirements.txt b/0.6.0/requirements.txt
new file mode 100644
index 000000000..898b31fa9
--- /dev/null
+++ b/0.6.0/requirements.txt
@@ -0,0 +1,11 @@
+mkdocs>=1.3.1
+mkdocs-autorefs>=0.4.1
+mkdocs-bootswatch>=1.1
+mkdocs-git-revision-date-localized-plugin>=1.1.0
+mkdocs-git-revision-date-plugin>=0.3.2
+mkdocs-material>=8.3.9
+mkdocs-material-extensions>=1.0.3
+mkdocstrings[python]>=0.20.0
+mdx_truly_sane_lists
+fontawesome_markdown
+mike==1.1.2

This website provides generic documentation related to the Arista Network Test Automation framework (ANTA)

"},{"location":"#arista-network-test-automation-anta-framework","title":"Arista Network Test Automation (ANTA) Framework","text":"

This repository is a Python package to automate tests on Arista devices.

  • The package name is ANTA, which stands for Arista Network Test Automation.
  • This package provides a set of tests to validate the state of your network.
  • This package can be imported in Python scripts:
    • To automate NRFU (Network Ready For Use) test on a preproduction network
    • To automate tests on a live network (periodically or on demand)

This repository comes with a cli to run Arista Network Test Automation (ANTA) framework using your preferred shell:

# Install ANTA\npip install anta\n
# Run ANTA cli\n$ anta\nUsage: anta [OPTIONS] COMMAND [ARGS]...\n\n  Arista Network Test Automation (ANTA) CLI\n\nOptions:\n  --version                       Show the version and exit.\n  --username TEXT                 Username to connect to EOS  [env var:\n                                  ANTA_USERNAME; required]\n--password TEXT                 Password to connect to EOS  [env var:\n                                  ANTA_PASSWORD; required]\n--timeout INTEGER               Global connection timeout  [env var:\n                                  ANTA_TIMEOUT; default: 5]\n--insecure                      Disable SSH Host Key validation  [env var:\n                                  ANTA_INSECURE]\n--enable-password TEXT          Enable password if required to connect  [env\n                                  var: ANTA_ENABLE_PASSWORD]\n-i, --inventory FILE            Path to the inventory YAML file  [env var:\n                                  ANTA_INVENTORY; required]\n--log-level, --log [CRITICAL|ERROR|WARNING|INFO|DEBUG]\nANTA logging level  [env var:\n                                  ANTA_LOG_LEVEL; default: INFO]\n--ignore-status                 Always exit with success  [env var:\n                                  ANTA_IGNORE_STATUS]\n--ignore-error                  Only report failures and not errors  [env\n                                  var: ANTA_IGNORE_ERROR]\n--help                          Show this message and exit.\n\nCommands:\n  debug  Debug commands for building ANTA\n  exec   Execute commands to inventory devices\n  get    Get data from/to ANTA\n  nrfu   Run NRFU against inventory devices\n
"},{"location":"contribution/","title":"Contributions","text":""},{"location":"contribution/#how-to-contribute-to-anta","title":"How to contribute to ANTA","text":"

The contribution model is fork-based: don't push to arista-netdevops-community/anta directly. Always create a branch in your fork and open a PR.

To help development, open your PR as soon as possible, even in draft mode. It lets others know what you are working on and avoids duplicate PRs.

"},{"location":"contribution/#install-repository","title":"Install repository","text":"

Run these commands to install:

  • The ANTA package and its dependencies
  • The ANTA CLI executable.
# Clone repository\ngit clone https://github.com/arista-netdevops-community/anta.git\ncd anta\n\n# Install module in editable mode\npip install -e .\n

Run these commands to verify:

# Check python installation\n$ pip list\n\n# Check version using cli\n$ anta --version\nanta, version 0.6.0\n
"},{"location":"contribution/#install-development-requirements","title":"Install development requirements","text":"

Run pip to install ANTA along with its development tools.

pip install 'anta[dev]'\n

Run this command after installing the repository with the commands provided in the previous section.

tox is then configured with several environments to run the CI locally:

default environments:\nclean  -> Erase previous coverage reports\nlint   -> Check the code style\ntype   -> Check typing\npy38   -> Run pytest with py38\npy39   -> Run pytest with py39\npy310  -> Run pytest with py310\npy311  -> Run pytest with py311\nreport -> Generate coverage report\n\nadditional environments:\n3.8    -> Run pytest with 3.8\n3.9    -> Run pytest with 3.9\n3.10   -> Run pytest with 3.10\n3.11   -> Run pytest with 3.11\n
"},{"location":"contribution/#code-linting","title":"Code linting","text":"
tox -e lint\n[...]\nlint: commands[0]> black --check --diff --color .\nAll done! \u2728 \ud83c\udf70 \u2728\n104 files would be left unchanged.\nlint: commands[1]> isort --check --diff --color .\nSkipped 7 files\nlint: commands[2]> flake8 --max-line-length=165 --config=/dev/null anta\nlint: commands[3]> flake8 --max-line-length=165 --config=/dev/null tests\nlint: commands[4]> pylint anta\n\n--------------------------------------------------------------------\nYour code has been rated at 10.00/10 (previous run: 10.00/10, +0.00)\n\n.pkg: _exit> python /Users/guillaumemulocher/.pyenv/versions/3.8.13/envs/anta/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta\n  lint: OK (19.26=setup[5.83]+cmd[1.50,0.76,1.19,1.20,8.77] seconds)\ncongratulations :) (19.56 seconds)\n
"},{"location":"contribution/#code-typing","title":"Code Typing","text":"
tox -e type\n\n[...]\ntype: commands[0]> mypy --config-file=pyproject.toml anta\nSuccess: no issues found in 52 source files\n.pkg: _exit> python /Users/guillaumemulocher/.pyenv/versions/3.8.13/envs/anta/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta\n  type: OK (46.66=setup[24.20]+cmd[22.46] seconds)\ncongratulations :) (47.01 seconds)\n

NOTE: Typing is configured quite strictly; do not hesitate to reach out if you have any questions, struggles or nightmares.

"},{"location":"contribution/#unit-tests","title":"Unit tests","text":"

To keep the code quality high, we require a Pytest test for every test implemented in ANTA.

Each submodule should have its own pytest section under tests/units/anta_tests/<submodule-name>. This directory should contain 3 files:

  • __init__.py: required because the directory is used as a Python module
  • data.py: where all your parametrize data goes, so all your test information should be located here
  • test_exc.py: the Pytest file with the test definitions.

A pytest definition should be similar to this template:

\"\"\"\nTests for anta.tests.hardware.py\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport logging\nfrom typing import Any\nfrom unittest.mock import MagicMock\n\nimport pytest\n\nfrom anta.tests.hardware import VerifyAdverseDrops\nfrom tests.lib.utils import generate_test_ids_list\n\nfrom .data import INPUT_<TEST_NAME>\n\n@pytest.mark.parametrize(\"test_data\", INPUT_<TEST_NAME>, ids=generate_test_ids_list(INPUT_<TEST_NAME>))\ndef test_<TEST_CASE>(mocked_device: MagicMock, test_data: Any) -> None:\n\"\"\"Check <TEST_CASE>.\"\"\"\n\n    test = <TEST_CASE>(mocked_device, eos_data=test_data[\"eos_data\"])\n    asyncio.run(test.test())\n\n    logging.debug(f\"test result is: {test.result}\")\n\n    assert str(test.result.name) == mocked_device.name\n    assert test.result.result == test_data[\"expected_result\"]\n

The mocked_device object is a Pytest fixture representing an InventoryDevice, and the parametrized test_data is a list of dictionaries with the following structure:

INPUT_RUNNING_CONFIG: List[Dict[str, Any]] = [\n  # Test Case #1\n    {\n        \"name\": \"failure\",\n        \"eos_data\": [\"blah blah\"],\n        \"side_effect\": None,\n        \"expected_result\": \"failure\",\n        \"expected_messages\": [\"blah blah\"]\n    },\n    # Test Case #2\n    {\n      ...\n    },\n]\n

Where we have:

  • name: Name of the test displayed by Pytest
  • eos_data: a list of data coming from EOS.
  • side_effect: used to inject template and test parameters (look for some examples in the existing tests)
  • expected_result: Result we expect for this test
  • expected_messages: Optional messages we expect for the test.

Use the ANTA CLI to get test data

To complete this block, you can use the anta debug commands to get the AntaCommand output to use in your test.

"},{"location":"contribution/#git-pre-commit-hook","title":"Git Pre-commit hook","text":"
pip install pre-commit\npre-commit install\n

When running a commit or a pre-commit check:

\u276f echo \"import foobaz\" > test.py && git add test.py\n\u276f pre-commit\npylint...................................................................Failed\n- hook id: pylint\n- exit code: 22\n\n************* Module test\ntest.py:1:0: C0114: Missing module docstring (missing-module-docstring)\ntest.py:1:0: E0401: Unable to import 'foobaz' (import-error)\ntest.py:1:0: W0611: Unused import foobaz (unused-import)\n

NOTE: pre-commit and tox may occasionally disagree on something; in that case, please open an issue on GitHub so we can take a look. It is most probably a misconfiguration on our side.

"},{"location":"contribution/#documentation","title":"Documentation","text":"

mkdocs is used to generate the documentation. A PR should always update the documentation to avoid documentation debt.

"},{"location":"contribution/#install-documentation-requirements","title":"Install documentation requirements","text":"

Run pip to install the documentation requirements from the root of the repo:

pip install -r docs/requirements.txt\n
"},{"location":"contribution/#testing-documentation","title":"Testing documentation","text":"

You can then check the documentation locally using the following command from the root of the repo:

mkdocs serve\n
"},{"location":"contribution/#checking-links","title":"Checking links","text":"

Writing documentation is crucial, but managing links can be cumbersome. To make sure there are no 404s, you can use muffet with this CLI:

muffet -c 2 --color=always http://127.0.0.1:8000 -e fonts.gstatic.com\n
"},{"location":"contribution/#continuous-integration","title":"Continuous Integration","text":"

GitHub Actions is used to test git pushes and pull requests. The workflows are defined in the repository's workflows directory, and the results are visible from the repository's Actions page.

"},{"location":"getting-started/","title":"Getting Started","text":""},{"location":"getting-started/#getting-started","title":"Getting Started","text":"

This section shows how to use ANTA with a basic configuration. All examples are based on the Arista Test Drive (ATD) topology, which you can access by reaching out to your preferred SE.

"},{"location":"getting-started/#installation","title":"Installation","text":"

The easiest way to install the ANTA package is with Python (>=3.8) and pip:

pip install anta\n

For more details about how to install the package, please see the requirements and installation section.

"},{"location":"getting-started/#configure-arista-eos-devices","title":"Configure Arista EOS devices","text":"

For ANTA to be able to connect to your target devices, you need to configure your management interface:

vrf instance MGMT\n!\ninterface Management0\n   description oob_management\n   vrf MGMT\n   ip address 192.168.0.10/24\n!\n

Then, configure access to eAPI:

!\nmanagement api http-commands\n   protocol https port 443\n   no shutdown\n   vrf MGMT\n      no shutdown\n   !\n!\n
"},{"location":"getting-started/#create-your-inventory","title":"Create your inventory","text":"

ANTA uses an inventory to list the target devices for the tests. You can create a file manually with this format:

anta_inventory:\n  hosts:\n    - host: 192.168.0.10\n      name: spine01\n      tags: ['fabric', 'spine']\n    - host: 192.168.0.11\n      name: spine02\n      tags: ['fabric', 'spine']\n    - host: 192.168.0.12\n      name: leaf01\n      tags: ['fabric', 'leaf']\n    - host: 192.168.0.13\n      name: leaf02\n      tags: ['fabric', 'leaf']\n    - host: 192.168.0.14\n      name: leaf03\n      tags: ['fabric', 'leaf']\n    - host: 192.168.0.15\n      name: leaf04\n      tags: ['fabric', 'leaf']\n

You can read more details about how to build your inventory here

"},{"location":"getting-started/#test-catalog","title":"Test Catalog","text":"

To test your network, ANTA relies on a test catalog listing all the tests to run against your inventory. A test catalog references Python test functions in a YAML file.

The structure is as follows:

<anta_tests_submodule>:\n  - <anta_tests_submodule function name>:\n      <test function option>:\n        <test function option value>\n

You can read more details about how to build your catalog here

Here is an example for basic tests:

# Load anta.tests.software\nanta.tests.software:\n  - VerifyEOSVersion: # Verifies the device is running one of the allowed EOS version.\n      versions: # List of allowed EOS versions.\n        - 4.25.4M\n        - 4.26.1F\n        - '4.28.3M-28837868.4283M (engineering build)'\n  - VerifyTerminAttrVersion:\n      versions:\n        - v1.22.1\n\nanta.tests.system:\n  - VerifyUptime: # Verifies the device uptime is higher than a value.\n      minimum: 1\n  - VerifyNTP:\n  - VerifySyslog:\n\nanta.tests.mlag:\n  - VerifyMlagStatus:\n  - VerifyMlagInterfaces:\n  - VerifyMlagConfigSanity:\n\nanta.tests.configuration:\n  - VerifyZeroTouch: # Verifies ZeroTouch is disabled.\n  - VerifyRunningConfigDiffs:\n
"},{"location":"getting-started/#test-your-network","title":"Test your network","text":"

ANTA comes with a generic CLI entrypoint to run tests in your network. It requires an inventory file as well as a test catalog.

This entrypoint has multiple options to manage test coverage and reporting.

# Generic ANTA options\n$ anta\nUsage: anta [OPTIONS] COMMAND [ARGS]...\n\n  Arista Network Test Automation (ANTA) CLI\n\nOptions:\n  --version                       Show the version and exit.\n  --username TEXT                 Username to connect to EOS  [env var:\n                                  ANTA_USERNAME; required]\n--password TEXT                 Password to connect to EOS  [env var:\n                                  ANTA_PASSWORD; required]\n--timeout INTEGER               Global connection timeout  [env var:\n                                  ANTA_TIMEOUT; default: 5]\n--insecure                      Disable SSH Host Key validation  [env var:\n                                  ANTA_INSECURE]\n--enable-password TEXT          Enable password if required to connect  [env\n                                  var: ANTA_ENABLE_PASSWORD]\n-i, --inventory FILE            Path to the inventory YAML file  [env var:\n                                  ANTA_INVENTORY; required]\n--log-level, --log [CRITICAL|ERROR|WARNING|INFO|DEBUG]\nANTA logging level  [env var:\n                                  ANTA_LOG_LEVEL; default: INFO]\n--ignore-status                 Always exit with success  [env var:\n                                  ANTA_IGNORE_STATUS]\n--ignore-error                  Only report failures and not errors  [env\n                                  var: ANTA_IGNORE_ERROR]\n--help                          Show this message and exit.\n\nCommands:\n  debug  Debug commands for building ANTA\n  exec   Execute commands to inventory devices\n  get    Get data from/to ANTA\n  nrfu   Run NRFU against inventory devices\n
# NRFU part of ANTA\n$ anta nrfu --help\nUsage: anta nrfu [OPTIONS] COMMAND [ARGS]...\n\n  Run NRFU against inventory devices\n\nOptions:\n  -c, --catalog FILE  Path to the tests catalog YAML file  [env var:\n                      ANTA_NRFU_CATALOG; required]\n--help              Show this message and exit.\n\nCommands:\n  json        ANTA command to check network state with JSON result\n  table       ANTA command to check network states with table result\n  text        ANTA command to check network states with text result\n  tpl-report  ANTA command to check network state with templated report\n

Currently, to run anta nrfu --help you must have provided ANTA the mandatory input parameters (username, password and inventory), otherwise the CLI will report an error. This is tracked in: https://github.com/arista-netdevops-community/anta/issues/263

To run the NRFU, you need to select an output format amongst ["json", "table", "text", "tpl-report"]. For a first usage, table is recommended. By default, all test results for all devices are rendered, but this can be changed to a report per test case or per host.

"},{"location":"getting-started/#default-report-using-table","title":"Default report using table","text":"
anta \\\n--username tom \\\n--password arista123 \\\n--enable-password t \\\n--inventory .personal/inventory_atd.yml \\\nnrfu --catalog .personal/tests-bases.yml table --tags leaf\n\n\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 Settings \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\n\u2502 Running ANTA tests:                                  \u2502\n\u2502 - ANTA Inventory contains 6 devices (AsyncEOSDevice) \u2502\n\u2502 - Tests catalog contains 10 tests                    \u2502\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\n[10:17:24] INFO     Running ANTA tests...                                                                                                           runner.py:75\n  \u2022 Running NRFU Tests...100% \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 40/40 \u2022 0:00:02 \u2022 0:00:00\n\n                                                                       All tests results                                                                        \n\u250f\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2533\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2513\n\u2503 Device IP \u2503 Test Name                \u2503 Test Status \u2503 Message(s)       \u2503 Test description                                                     \u2503 Test category 
\u2503\n\u2521\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2547\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2529\n\u2502 leaf01    \u2502 VerifyEOSVersion         \u2502 success     \u2502                  \u2502 Verifies the device is running one of the allowed EOS version.       \u2502 software      \u2502\n\u2502 leaf01    \u2502 VerifyTerminAttrVersion  \u2502 success     \u2502                  \u2502 Verifies the device is running one of the allowed TerminAttr         \u2502 software      \u2502\n\u2502           \u2502                          \u2502             \u2502                  \u2502 version.                                                             \u2502               \u2502\n\u2502 leaf01    \u2502 VerifyUptime             \u2502 success     \u2502                  \u2502 Verifies the device uptime is higher than a value.                   \u2502 system        \u2502\n\u2502 leaf01    \u2502 VerifyNTP                \u2502 success     \u2502                  \u2502 Verifies NTP is synchronised.                                        \u2502 system        \u2502\n\u2502 leaf01    \u2502 VerifySyslog             \u2502 success     \u2502                  \u2502 Verifies the device had no syslog message with a severity of warning \u2502 system        \u2502\n\u2502           \u2502                          \u2502             \u2502                  \u2502 (or a more severe message) during the last 7 days.                   \u2502               \u2502\n\u2502 leaf01    \u2502 VerifyMlagStatus         \u2502 skipped     \u2502 MLAG is disabled \u2502 This test verifies the health status of the MLAG configuration.      \u2502 mlag          \u2502\n\u2502 leaf01    \u2502 VerifyMlagInterfaces     \u2502 skipped     \u2502 MLAG is disabled \u2502 This test verifies there are no inactive or active-partial MLAG      \u2502 mlag          \u2502\n[...]\n\u2502 leaf04    \u2502 VerifyMlagConfigSanity   \u2502 skipped     \u2502 MLAG is disabled \u2502 This test verifies there are no MLAG config-sanity inconsistencies.  \u2502 mlag          \u2502\n\u2502 leaf04    \u2502 VerifyZeroTouch          \u2502 success     \u2502                  \u2502 Verifies ZeroTouch is disabled.                                      
\u2502 configuration \u2502\n\u2502 leaf04    \u2502 VerifyRunningConfigDiffs \u2502 success     \u2502                  \u2502                                                                      \u2502 configuration \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"getting-started/#report-in-text-mode","title":"Report in text mode","text":"
$ anta \\\n--username tom \\\n--password arista123 \\\n--enable-password t \\\n--inventory .personal/inventory_atd.yml \\\nnrfu --catalog .personal/tests-bases.yml text --tags leaf\n\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 Settings \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\n\u2502 Running ANTA tests:                                  \u2502\n\u2502 - ANTA Inventory contains 6 devices (AsyncEOSDevice) \u2502\n\u2502 - Tests catalog contains 10 tests                    \u2502\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\n[10:20:47] INFO     Running ANTA tests...                                                                                                           runner.py:75\n  \u2022 Running NRFU Tests...100% \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 40/40 \u2022 0:00:01 \u2022 0:00:00\nleaf01 :: VerifyEOSVersion :: SUCCESS\nleaf01 :: VerifyTerminAttrVersion :: SUCCESS\nleaf01 :: VerifyUptime :: SUCCESS\nleaf01 :: VerifyNTP :: SUCCESS\nleaf01 :: VerifySyslog :: SUCCESS\nleaf01 :: VerifyMlagStatus :: SKIPPED (MLAG is disabled)\nleaf01 :: VerifyMlagInterfaces :: SKIPPED (MLAG is disabled)\nleaf01 :: VerifyMlagConfigSanity :: SKIPPED (MLAG is disabled)\n[...]\n
"},{"location":"getting-started/#report-per-host","title":"Report per host","text":"
$ anta \\\n--username tom \\\n--password arista123 \\\n--enable-password t \\\n--inventory .personal/inventory_atd.yml \\\nnrfu --catalog .personal/tests-bases.yml json --tags leaf\n\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 Settings \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\n\u2502 Running ANTA tests:                                  \u2502\n\u2502 - ANTA Inventory contains 6 devices (AsyncEOSDevice) \u2502\n\u2502 - Tests catalog contains 10 tests                    \u2502\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\n[10:21:51] INFO     Running ANTA tests...                                                                                                           runner.py:75\n  \u2022 Running NRFU Tests...100% \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 40/40 \u2022 0:00:02 \u2022 0:00:00\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\n\u2502 JSON results of all tests                                                                                                                                    
\u2502\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\n[\n{\n\"name\": \"leaf01\",\n    \"test\": \"VerifyEOSVersion\",\n    \"test_category\": [\n\"software\"\n],\n    \"test_description\": \"Verifies the device is running one of the allowed EOS version.\",\n    \"result\": \"success\",\n    \"messages\": []\n},\n  {\n\"name\": \"leaf01\",\n    \"test\": \"VerifyTerminAttrVersion\",\n    \"test_category\": [\n\"software\"\n],\n    \"test_description\": \"Verifies the device is running one of the allowed TerminAttr version.\",\n    \"result\": \"success\",\n    \"messages\": []\n},\n[...]\n]\n

You can find more information under the usage section of the website.

"},{"location":"requirements-and-installation/","title":"Installation","text":""},{"location":"requirements-and-installation/#anta-requirements","title":"ANTA Requirements","text":""},{"location":"requirements-and-installation/#python-version","title":"Python version","text":"

Python 3 (>=3.8) is required:

python --version\nPython 3.9.9\n
"},{"location":"requirements-and-installation/#install-anta-package","title":"Install ANTA package","text":"

This installation deploys the test collection, the scripts and all their Python requirements.

The ANTA package and the CLI require some packages that are not part of the Python standard library. They are listed in the pyproject.toml file, under dependencies.

"},{"location":"requirements-and-installation/#install-from-pypi-server","title":"Install from Pypi server","text":"
pip install anta\n
"},{"location":"requirements-and-installation/#install-anta-from-github","title":"Install ANTA from github","text":"
pip install git+https://github.com/arista-netdevops-community/anta.git\n\n# You can even specify the branch, tag or commit:\npip install git+https://github.com/arista-netdevops-community/anta.git@<cool-feature-branch>\npip install git+https://github.com/arista-netdevops-community/anta.git@<cool-tag>\npip install git+https://github.com/arista-netdevops-community/anta.git@<more-or-less-cool-hash>\n
"},{"location":"requirements-and-installation/#check-installation","title":"Check installation","text":"

Run these commands to verify:

# Check ANTA has been installed in your python path\npip list | grep anta\n\n# Check scripts are in your $PATH\n# Path may differ but it means CLI is in your path\nwhich anta\n/home/tom/.pyenv/shims/anta\n\n# Check ANTA version\nanta --version\nanta, version 0.6.0\n
"},{"location":"requirements-and-installation/#eos-requirements","title":"EOS Requirements","text":"

To get ANTA working, the targeted Arista EOS devices must have the following configuration (assuming you connect to the device using the Management interface in the MGMT VRF):

configure\n!\nvrf instance MGMT\n!\ninterface Management1\n   description oob_management\n   vrf MGMT\n   ip address 10.73.1.105/24\n!\nend\n

Enable eAPI on the MGMT vrf:

configure\n!\nmanagement api http-commands\n   protocol https port 443\n   no shutdown\n   vrf MGMT\n      no shutdown\n!\nend\n

The switch now accepts HTTPS requests on port 443 in the MGMT VRF containing a list of CLI commands.

Run these EOS commands to verify:

show management http-server\nshow management api http-commands\n
"},{"location":"usage-inventory-catalog/","title":"Inventory & Tests catalog","text":""},{"location":"usage-inventory-catalog/#inventory-and-catalog-definition","title":"Inventory and Catalog definition","text":"

This page describes how to create an inventory and a test catalog.

"},{"location":"usage-inventory-catalog/#create-an-inventory-file","title":"Create an inventory file","text":"

The anta CLI needs an inventory file listing all the devices to test. This inventory is a YAML file with the following keys:

anta_inventory:\n  hosts:\n    - host: < ip address value >\n      port: < TCP port for eAPI. Default is 443 (Optional) >\n      name: < name to display in report. Default is host:port (Optional) >\n      tags: < list of tags to use to filter inventory during tests. Default is ['all']. (Optional) >\n  networks:\n    - network: < network using CIDR notation >\n      tags: < list of tags to use to filter inventory during tests. Default is ['all']. (Optional) >\n  ranges:\n    - start: < first ip address value of the range >\n      end: < last ip address value of the range >\n      tags: < list of tags to use to filter inventory during tests. Default is ['all']. (Optional) >\n

Your inventory file can be based on any of these 3 keys and MUST start with the anta_inventory key. A full description of the inventory model is available in the API documentation.

An inventory example:

---\nanta_inventory:\n  hosts:\n    - host: 192.168.0.10\n      name: spine01\n      tags: ['fabric', 'spine']\n    - host: 192.168.0.11\n      name: spine02\n      tags: ['fabric', 'spine']\n  networks:\n    - network: '192.168.110.0/24'\n      tags: ['fabric', 'leaf']\n  ranges:\n    - start: 10.0.0.9\n      end: 10.0.0.11\n      tags: ['fabric', 'l2leaf']\n
"},{"location":"usage-inventory-catalog/#test-catalog","title":"Test Catalog","text":"

In addition to your inventory file, you also have to define a catalog of tests to execute against all your devices. This catalog lists all your tests and their parameters. It is a YAML file whose keys are test functions resolved from the Python path.

"},{"location":"usage-inventory-catalog/#default-tests-catalog","title":"Default tests catalog","text":"

All tests are located under the anta.tests module and are categorized per family (one submodule each). So to run the software version test, you can do:

anta.tests.software:\n  - VerifyEosVersion:\n

This loads the VerifyEosVersion test located in anta.tests.software. Since this test takes parameters, we create a catalog with the following structure:

anta.tests.software:\n  - VerifyEosVersion:\n      # List of allowed EOS versions.\n      versions:\n        - 4.25.4M\n        - 4.26.1F\n

To get a list of all available tests and their respective parameters, you can read the tests section of this website.

The following example gives a very minimal test catalog you can use in almost any situation:

---\n# Load anta.tests.software\nanta.tests.software:\n  # Verifies the device is running one of the allowed EOS version.\n  - VerifyEosVersion:\n      # List of allowed EOS versions.\n      versions:\n        - 4.25.4M\n        - 4.26.1F\n\n# Load anta.tests.system\nanta.tests.system:\n  # Verifies the device uptime is higher than a value.\n  - VerifyUptime:\n      minimum: 1\n\n# Load anta.tests.configuration\nanta.tests.configuration:\n  # Verifies ZeroTouch is disabled.\n  - VerifyZeroTouch:\n  - VerifyRunningConfigDiffs:\n

If your test is based on AntaTemplate, you have to provide the inputs for the EOS CLI template via the template_params list:

anta.tests.routing.bgp:\n  - VerifyBGPIPv4UnicastCount:\n      number: 3\n      template_params:\n        - vrf: default\n        - vrf: customer-01\n

This is required for the following test definition:

class VerifyBGPIPv4UnicastCount(AntaTest):\n\"\"\"\n    ...\n    \"\"\"\n\n    name = \"VerifyBGPIPv4UnicastCount\"\n    description = \"...\"\n    categories = [\"routing\", \"bgp\"]\n    template = AntaTemplate(template=\"show bgp ipv4 unicast summary vrf {vrf}\")\n\n    @check_bgp_family_enable(\"ipv4\")\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None) -> None:\n        pass\n

If you need to run the same test but with a different number of neighbors, you can write it as follows:

anta.tests.routing.bgp:\n  - VerifyBGPIPv4UnicastCount:\n      number: 2\n      template_params:\n        - vrf: default\nanta.tests.routing.bgp:\n  - VerifyBGPIPv4UnicastCount:\n      number: 3\n      template_params:\n        - vrf: customer-01\n
"},{"location":"usage-inventory-catalog/#custom-tests-catalog","title":"Custom tests catalog","text":"

In case you want to leverage your own tests collection, you can use the following syntax:

<your package name>:\n  - <your test in your package name>:\n

So for instance, it could be:

titom73.tests.system:\n  - VerifyPlatform:\n      type: ['cEOS-LAB']\n

How to create custom tests

To create your custom tests, you should refer to the following documentation.

"},{"location":"advanced_usages/as-python-lib/","title":"ANTA as a Python Library","text":"

ANTA is a Python library that can be used in user applications. This section describes how you can leverage ANTA Python modules to help you create your own NRFU solution.

Tip

If you are unfamiliar with asyncio, refer to the Python documentation relevant to your Python version - https://docs.python.org/3/library/asyncio.html

"},{"location":"advanced_usages/as-python-lib/#antadevice-abstract-class","title":"AntaDevice Abstract Class","text":"

A device is represented in ANTA as an instance of a subclass of the AntaDevice abstract class. There are a few abstract methods that need to be implemented by child classes:

  • The collect() coroutine is in charge of collecting outputs of AntaCommand instances.
  • The refresh() coroutine is in charge of updating attributes of the AntaDevice instance. These attributes are used by AntaInventory to filter out unreachable devices or by AntaTest to skip devices based on their hardware models.

The copy() coroutine is used to copy files to and from the device. It does not need to be implemented if tests are not using it.

"},{"location":"advanced_usages/as-python-lib/#asynceosdevice-class","title":"AsyncEOSDevice Class","text":"

The AsyncEOSDevice class is an implementation of AntaDevice for Arista EOS. It uses the aio-eapi eAPI client and the AsyncSSH library.

  • The collect() coroutine collects AntaCommand outputs using eAPI.
  • The refresh() coroutine tries to open a TCP connection on the eAPI port and update the is_online attribute accordingly. If the TCP connection succeeds, it sends a show version command to gather the hardware model of the device and updates the established and hw_model attributes.
  • The copy() coroutine copies files to and from the device using the SCP protocol.
"},{"location":"advanced_usages/as-python-lib/#antainventory-class","title":"AntaInventory Class","text":"

The AntaInventory class is a subclass of the standard Python type dict. The keys of this dictionary are the device names, the values are AntaDevice instances.

AntaInventory provides methods to interact with the ANTA inventory:

  • The add_device() method adds an AntaDevice instance to the inventory. Adding an entry to AntaInventory with a key different from the device name is not allowed.
  • The get_inventory() method returns a new AntaInventory instance with devices filtered based on the method inputs.
  • The connect_inventory() coroutine will execute the refresh() coroutines of all the devices in the inventory.
  • The parse() static method creates an AntaInventory instance from a YAML file and returns it. The devices are AsyncEOSDevice instances.

To parse a YAML inventory file and print the devices connection status:

\"\"\"\nExample\n\"\"\"\nimport asyncio\n\nfrom anta.inventory import AntaInventory\n\n\nasync def main(inv: AntaInventory) -> None:\n\"\"\"\n    Take an AntaInventory and:\n    1. try to connect to every device in the inventory\n    2. print a message for every device connection status\n    \"\"\"\n    await inv.connect_inventory()\n\n    for device in inv.values():\n        if device.established:\n            print(f\"Device {device.name} is online\")\n        else:\n            print(f\"Could not connect to device {device.name}\")\n\nif __name__ == \"__main__\":\n    # Create the AntaInventory instance\n    inventory = AntaInventory.parse(\n        inventory_file=\"inv.yml\",\n        username=\"arista\",\n        password=\"@rista123\",\n        timeout=15,\n    )\n\n    # Run the main coroutine\n    res = asyncio.run(main(inventory))\n
How to create your inventory file

Please visit this dedicated section for how to use inventory and catalog files.

To run a list of EOS commands on the reachable devices from the inventory:

\"\"\"\nExample\n\"\"\"\n# This is needed to run the script for python < 3.10 for typing annotations\nfrom __future__ import annotations\n\nimport asyncio\nfrom pprint import pprint\n\nfrom anta.inventory import AntaInventory\nfrom anta.models import AntaCommand\n\n\nasync def main(inv: AntaInventory, commands: list[str]) -> dict[str, list[AntaCommand]]:\n\"\"\"\n    Take an AntaInventory and a list of commands as string and:\n    1. try to connect to every device in the inventory\n    2. collect the results of the commands from each device\n\n    Returns:\n      a dictionary where key is the device name and the value is the list of AntaCommand ran towards the device\n    \"\"\"\n    await inv.connect_inventory()\n\n    # Make a list of coroutine to run commands towards each connected device\n    coros = []\n    # dict to keep track of the commands per device\n    result_dict = {}\n    for name, device in inv.get_inventory(established_only=True).items():\n        anta_commands = [AntaCommand(command=command, ofmt=\"json\") for command in commands]\n        result_dict[name] = anta_commands\n        coros.append(device.collect_commands(anta_commands))\n\n    # Run the coroutines\n    await asyncio.gather(*coros)\n\n    return result_dict\n\n\nif __name__ == \"__main__\":\n    # Create the AntaInventory instance\n    inventory = AntaInventory.parse(\n        inventory_file=\"inv.yml\",\n        username=\"arista\",\n        password=\"@rista123\",\n        timeout=15,\n    )\n\n    # Create a list of commands with json output\n    commands = [\"show version\", \"show ip bgp summary\"]\n\n    # Run the main asyncio  entry point\n    res = asyncio.run(main(inventory, commands))\n\n    pprint(res)\n

"},{"location":"advanced_usages/as-python-lib/#use-tests-from-anta","title":"Use tests from ANTA","text":"

All the test classes inherit from the same abstract base class AntaTest. The class definition indicates which commands are required for the test, so the user can focus only on writing the test function with optional keyword arguments. Upon creation, the class instance instantiates a TestResult object that can be accessed later on to check the status of the test ([unset, skipped, success, failure, error]). A minimal end-to-end sketch follows.
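As a minimal, hedged sketch (the inventory file inv.yml and the device name spine01 are placeholders; test parameters are passed as keyword arguments to test(), as described below):

import asyncio

from anta.inventory import AntaInventory
from anta.tests.software import VerifyEOSVersion

# Parse the inventory and connect to the devices (see the AntaInventory section above)
inventory = AntaInventory.parse(inventory_file="inv.yml", username="arista", password="@rista123", timeout=15)
asyncio.run(inventory.connect_inventory())

# Pick a connected device by name (spine01 is a placeholder) and run a test against it
device = inventory["spine01"]
test = VerifyEOSVersion(device)
asyncio.run(test.test(versions=["4.25.4M", "4.26.1F"]))

# The TestResult is exposed on the test object
print(test.result)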

"},{"location":"advanced_usages/as-python-lib/#test-structure","title":"Test structure","text":"

All tests are built on a class named AntaTest which provides a complete toolset for a test:

  • Object creation
  • Test definition
  • TestResult definition
  • Abstracted method to collect data

This means that every test you create is based on the AntaTest class. In addition, you have to provide some elements:

  • name: Name of the test
  • description: A human readable description of your test
  • categories: a list of categories to sort tests.
  • commands: a list of commands to run. This must be a list of AntaCommand instances, described in the next part of this document.

Here is an example of a hardware test related to device temperature:

from __future__ import annotations\n\nimport logging\nfrom typing import Any, Dict, List, Optional, cast\n\nfrom anta.models import AntaTest, AntaCommand\n\n\nclass VerifyTemperature(AntaTest):\n    \"\"\"\n    Verifies device temperature is currently OK.\n    \"\"\"\n\n    # The test name\n    name = \"VerifyTemperature\"\n    # A small description of the test, usually the first line of the class docstring\n    description = \"Verifies device temperature is currently OK\"\n    # The category of the test, usually the module name\n    categories = [\"hardware\"]\n    # The command(s) used for the test. Could be a template instead\n    commands = [AntaCommand(command=\"show system environment temperature\", ofmt=\"json\")]\n\n    # Decorator\n    @AntaTest.anta_test\n    # abstract method that must be defined by the child Test class\n    def test(self) -> None:\n        \"\"\"Run VerifyTemperature validation\"\"\"\n        command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)\n        temperature_status = command_output[\"systemStatus\"] if \"systemStatus\" in command_output.keys() else \"\"\n        if temperature_status == \"temperatureOk\":\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Device temperature is not OK, systemStatus: {temperature_status}\")\n

When you run the test, the object automatically calls its anta.models.AntaTest.collect() method to get the device output for each command, unless pre-collected data was given to the test. This method loops over calls to anta.inventory.models.InventoryDevice.collect(), which is in charge of managing the device connection and retrieving the data.

run test offline

You can also pass eos data directly to your test if you want to validate data collected in a different workflow. An example is provided below just for information:

test = VerifyTemperature(mocked_device, eos_data=test_data[\"eos_data\"])\nasyncio.run(test.test())\n

The test function is always named the same and must be defined with the @AntaTest.anta_test decorator. This function takes at least one argument, which is an anta.inventory.models.InventoryDevice object. In some cases, a test relies on additional inputs from the user, for instance the number of expected peers. All parameters must come with a default value, and the test function should validate the parameter values (at this stage, this is the only place where validation can be done, but there are future plans to make this better).

class VerifyTemperature(AntaTest):\n    ...\n    @AntaTest.anta_test\n    def test(self) -> None:\n        pass\n\nclass VerifyTransceiversManufacturers(AntaTest):\n    ...\n    @AntaTest.anta_test\n    def test(self, manufacturers: Optional[List[str]] = None) -> None:\n        # validate the manufactures parameter\n        pass\n

The test itself does not return any value; the result is directly available from your AntaTest object, which exposes an anta.result_manager.models.TestResult object with the result, the name of the test and optional messages:

  • name (str): Device name where the test has run.
  • test (str): Test name runs on the device.
  • test_category (List[str]): List of test categories the test belongs to.
  • test_description (str): Test description.
  • result (str): Result of the test. Can be one of ["unset", "success", "failure", "error", "skipped"].
  • messages (List[str], optional): Messages to report after the test if any.
from anta.tests.hardware import VerifyTemperature\n\ntest = VerifyTemperature(mocked_device, eos_data=test_data[\"eos_data\"])\nasyncio.run(test.test())\nassert test.result.result == \"success\"\n
"},{"location":"advanced_usages/as-python-lib/#commands-for-test","title":"Commands for test","text":"

To make it easier to get data, ANTA defines 2 different classes to manage the commands to send to a device:

"},{"location":"advanced_usages/as-python-lib/#antamodelsantacommand","title":"anta.models.AntaCommand","text":"

Abstracts a command with the following information:

  • The command to run
  • The expected output format
  • The eAPI version
  • The output of the command

Usage example:

from anta.models import AntaCommand\n\ncmd1 = AntaCommand(command=\"show zerotouch\")\ncmd2 = AntaCommand(command=\"show running-config diffs\", ofmt=\"text\")\n

Command revision and version

  • Most EOS commands return a JSON structure according to a model (some commands may not be modeled, hence the need to sometimes use the text output format).
  • The model can change over time (adding features, …) and when the model is changed in a non-backward-compatible way, the revision number is bumped. The initial model starts with revision 1.
  • A revision applies to a particular CLI command, whereas a version is global to an eAPI call. The version is internally translated to a specific revision for each CLI command in the RPC call. The currently supported version values are 1 and latest.
  • A revision takes precedence over a version (e.g. if a command is run with version="latest" and revision=1, the first revision of the model is returned).
  • By default, eAPI returns the first revision of each model to ensure that integration with existing tools is not broken when upgrading. This is done by defaulting to version=1 in eAPI calls.

ANTA uses version="latest" by default in AntaCommand. For some commands, you may want to run them with a different revision or version, as shown in the examples below.

For instance the VerifyRoutingTableSize test leverages the first revision of show bfd peers:

# revision 1 as later revision introduce additional nesting for type\ncommands = [AntaCommand(command=\"show bfd peers\", revision=1)]\n
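Similarly, a hedged sketch of requesting a whole eAPI call at version 1, using the version field shown in the class template later in this document:

from anta.models import AntaCommand

# Request eAPI version 1 behavior, i.e. the first revision of each model
cmd = AntaCommand(command="show version", version=1)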
"},{"location":"advanced_usages/as-python-lib/#antamodelsantatemplate","title":"anta.models.AntaTemplate","text":"

Because some commands require more dynamic input than a fixed string, ANTA supports command templates: you define a template in your test class, and the user provides the parameters when creating the test object.

Warning on AntaTemplate

  • In its current version, an AntaTest class supports only ONE AntaTemplate.
  • The current interface for passing parameters to a template is an area of future improvement. Feedback is welcome.
class RunArbitraryTemplateCommand(AntaTest):\n    \"\"\"\n    Run an EOS command and return result\n    Based on AntaTest to build relevant output for pytest\n    \"\"\"\n\n    name = \"Run arbitrary EOS command\"\n    description = \"To be used only with anta debug commands\"\n    template = AntaTemplate(template=\"show interfaces {ifd}\")\n    categories = [\"debug\"]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n        response = self.instance_commands[0].output\n        errdisabled_interfaces = [interface for interface, value in response[\"interfaceStatuses\"].items() if value[\"linkStatus\"] == \"errdisabled\"]\n        ...\n\n\nparams = [{\"ifd\": \"Ethernet2\"}, {\"ifd\": \"Ethernet49/1\"}]\nrun_command1 = RunArbitraryTemplateCommand(device_anta, params)\n

In this example, the test expects the interfaces to check from the user's setup and will only check the interfaces listed in params.

"},{"location":"advanced_usages/custom-tests/","title":"Create your own Library","text":""},{"location":"advanced_usages/custom-tests/#create-your-own-custom-tests","title":"Create your own custom tests","text":"

This documentation applies both to creating tests in the ANTA package and in your own custom package.

ANTA is not only a CLI with a collection of built-in tests, it is also a framework you can extend by building your own test library.

For that, you need to create your own Python package as described in the hitchhiker's guide to packaging Python code. We assume this is well known and won't focus on it. Your package must be importable by ANTA, hence available in $PYTHONPATH by whatever method you choose.

"},{"location":"advanced_usages/custom-tests/#generic-approach","title":"Generic approach","text":"

ANTA comes with a class for building tests. This class provides all the tooling required to define, collect and test data. The following code is an example of how to use ANTA to build a test:

from __future__ import annotations\n\nimport logging\nfrom typing import Any, Dict, List, Optional, cast\n\nfrom anta.models import AntaTest, AntaCommand\n\n\nclass VerifyTemperature(AntaTest):\n    \"\"\"\n    Verifies device temperature is currently OK.\n    \"\"\"\n\n    name = \"VerifyTemperature\"\n    description = \"Verifies device temperature is currently OK\"\n    categories = [\"hardware\"]\n    commands = [AntaCommand(command=\"show system environment temperature\", ofmt=\"json\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n        \"\"\"Run VerifyTemperature validation\"\"\"\n        command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)\n        temperature_status = command_output[\"systemStatus\"] if \"systemStatus\" in command_output.keys() else \"\"\n        if temperature_status == \"temperatureOk\":\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Device temperature is not OK, systemStatus: {temperature_status}\")\n
"},{"location":"advanced_usages/custom-tests/#python-imports","title":"Python imports","text":""},{"location":"advanced_usages/custom-tests/#mandatory-imports","title":"Mandatory imports","text":"

The following elements have to be imported:

  • InventoryDevice: where the eAPI session lives. It is used to send the commands defined in your test over HTTP/HTTPS.
  • anta.models.AntaTest: the class that gives you all the tooling for your test
  • anta.models.AntaCommand: a class to abstract an Arista EOS command
from anta.models import AntaTest, AntaCommand\n\n\nclass VerifyTemperature(AntaTest):\n    \"\"\"\n    Verifies device temperature is currently OK.\n    \"\"\"\n    ...\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n        pass\n
"},{"location":"advanced_usages/custom-tests/#optional-anta-imports","title":"Optional ANTA imports","text":"

Besides these 3 main imports, ANTA provides some additional, optional decorators:

  • anta.decorators.skip_on_platforms: to skip a test on platforms where the feature is not available
  • anta.decorators.check_bgp_family_enable: to run a test only if the specified BGP address family is active.
from anta.decorators import skip_on_platforms\n\n\nclass VerifyTransceiversManufacturers(AntaTest):\n    ...\n    @skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n    @AntaTest.anta_test\n    def test(self, manufacturers: Optional[List[str]] = None) -> None:\n        pass\n
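Similarly, a hedged sketch of check_bgp_family_enable, mirroring its usage in the VerifyBGPIPv4UnicastCount example shown earlier in this documentation (the test body is elided):

from typing import Optional

from anta.decorators import check_bgp_family_enable
from anta.models import AntaTest


class VerifyBGPIPv4UnicastCount(AntaTest):
    ...
    @check_bgp_family_enable("ipv4")
    @AntaTest.anta_test
    def test(self, number: Optional[int] = None) -> None:
        pass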
"},{"location":"advanced_usages/custom-tests/#optional-python-imports","title":"Optional python imports","text":"

And finally, you are free to import any other Python library you may want to use in your package.

logging function

It is strongly recommended to import logging to help the development process and to be able to log outputs useful for test development, as sketched after this section.

If your test development is part of a pull request for ANTA, it is strongly advised to also import typing, since our code testing requires compatibility with Mypy.
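As a hedged illustration of the logging recommendation above (the logger name and message are arbitrary):

import logging

from anta.models import AntaTest

logger = logging.getLogger(__name__)


class VerifyTemperature(AntaTest):
    ...
    @AntaTest.anta_test
    def test(self) -> None:
        # Log the raw command output at DEBUG level to help test development
        logger.debug("Raw output: %s", self.instance_commands[0].output)
        ...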

"},{"location":"advanced_usages/custom-tests/#code-for-a-test","title":"Code for a test","text":"

A test is a Python class in which a test function is defined and run by the framework. So first you need to declare your class, and then define your test function.

"},{"location":"advanced_usages/custom-tests/#create-test-class","title":"Create Test Class","text":"

To create the class, you have to provide the following elements:

Metadata information

  • name: Name of the test
  • description: A human readable description of your test
  • categories: a list of categories to sort tests.

Commands to run

  • commands: a list of commands to run. This must be a list of AntaCommand instances, described in the next part of this document.
  • template: a command template (AntaTemplate) to run, whose variables are provided during test execution.
from __future__ import annotations\n\nimport logging\nfrom typing import Any, Dict, List, Optional, cast\n\nfrom anta.models import AntaTest, AntaCommand\n\n\nclass <YourTestName>(AntaTest):\n    \"\"\"\n    <a docstring description of your test>\n    \"\"\"\n\n    name = \"YourTestName\"                                           # should be your class name\n    description = \"<test description in human reading format>\"\n    categories = [\"<a list of arbitrary categories>\"]\n    commands = [\n        AntaCommand(\n            command=\"<eos command to run>\",\n            ofmt=\"<command format output>\",\n            version=\"<eapi version to use>\",\n            revision=\"<revision to use for the command>\",           # revision has precedence over version\n        )\n    ]\n

This class inherits methods from AntaTest, specifically the __init__(self, ...) method used to build your object. This function takes the following arguments when you instantiate an object (see the sketch after this list):

  • device (InventoryDevice): the device object the test runs against.
  • template_params: if a template is used in the test definition, the data provided here builds the list of commands.
  • eos_data: optional EOS data to pass if we don't want to connect to the device to grab the data.
  • labels: a list of labels; not used yet, reserved for future use.
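A hedged instantiation sketch combining these arguments (the class and device names reuse examples from this documentation; the exact keyword usage and eos_data shape are assumptions):

from anta.tests.hardware import VerifyTemperature
from anta.tests.routing.bgp import VerifyBGPIPv4UnicastCount

# device: an InventoryDevice taken from the inventory (placeholder)
# Render the class template once per parameter set
test = VerifyBGPIPv4UnicastCount(device, template_params=[{"vrf": "default"}, {"vrf": "customer-01"}])

# Or bypass collection entirely by passing pre-collected data (eos_data shape is an assumption)
offline_test = VerifyTemperature(device, eos_data=[{"systemStatus": "temperatureOk"}])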
"},{"location":"advanced_usages/custom-tests/#function-definition","title":"Function definition","text":"

The code here can be very simple or very complex, depending on what you expect to do, but in all situations the same baseline can be leveraged:

class <YourTestName>(AntaTest):\n    ...\n    @AntaTest.anta_test\n    def test(self) -> None:\n        pass\n

If you want to support options in your test, just declare them in your test method:

class <YourTestName>(AntaTest):\n    ...\n    @AntaTest.anta_test\n    def test(self, my_param1: Optional[str] = None) -> None:\n        pass\n

The options must be optional keyword arguments.

"},{"location":"advanced_usages/custom-tests/#check-inputs","title":"Check inputs","text":"

If your test has user inputs, you first have to validate that the supplied values are valid. If they are not, the TestResult is expected to be set to skipped with a custom message.

class <YourTestName>(AntaTest):\n    ...\n    @AntaTest.anta_test\n    def test(self, minimum: Optional[int] = None) -> None:\n        # Check if test option is correct\n        if not minimum:\n            self.result.is_skipped(\"verify_dynamic_vlan was run without minimum value set\")\n            return\n        # continue test..\n        ...\n
"},{"location":"advanced_usages/custom-tests/#implement-your-logic","title":"Implement your logic","text":"

Here you implement your own logic. In general, the first action is to send commands to the device and capture the response.

In the example below, we request the list of VLANs configured on the device and then count all the VLANs marked as dynamic:

class <YourTestName>(AntaTest):\n    ...\n    @AntaTest.anta_test\n    def test(self, minimum: Optional[int] = None) -> None:\n        # Check if test option is correct\n        if not minimum:\n            self.result.is_skipped(\"verify_dynamic_vlan was run without minimum value set\")\n            return\n\n        # Grab data for your command\n        command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)\n\n        # Do your test: in this example we count the number of VLANs with the field 'dynamic' set to true\n        num_dyn_vlan = len([vlan for vlan, data in command_output['vlans'].items() if data['dynamic'] is True])\n        if num_dyn_vlan >= minimum:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Device has {num_dyn_vlan} configured, we expect at least {minimum}\")\n

As you can see, there is no error management to do in your code: everything is handled by the anta_test decorator. Below is a simple example of an error captured when using an incorrect JSON key in the code above:

ERROR    Exception raised for test verify_dynamic_vlan (on device 192.168.0.10) - KeyError ('vlans')\n

Get stack trace for debugging

If you want access to the full exception stack trace, run your test with the logging level set to DEBUG. With the ANTA CLI, this is available with the following option:

$ ANTA_DEBUG=True anta nrfu text --catalog test_custom.yml --log-level debug\n

"},{"location":"advanced_usages/custom-tests/#create-your-catalog","title":"Create your catalog","text":"

This section is required only if you are not merging your development into ANTA. Otherwise, just follow the contribution guide.

It is very similar to what is documented in the catalog section, but you have to use your own package name.

Let's say the custom package is anta_titom73 and the test is defined in anta_titom73.dc_project; the test catalog would look like:

anta_titom73.dc_project:\n  - VerifyFeatureX:\n      minimum: 1\n
And now you can run your NRFU tests with the CLI:

anta nrfu text --catalog test_custom.yml\nspine01 :: verify_dynamic_vlan :: FAILURE (Device has 0 configured, we expect at least 1)\nspine02 :: verify_dynamic_vlan :: FAILURE (Device has 0 configured, we expect at least 1)\nleaf01 :: verify_dynamic_vlan :: SUCCESS\nleaf02 :: verify_dynamic_vlan :: SUCCESS\nleaf03 :: verify_dynamic_vlan :: SUCCESS\nleaf04 :: verify_dynamic_vlan :: SUCCESS\n

Install your Python package

ANTA uses the Python path to access your tests, so it is critical to have your test library installed correctly, as explained at the beginning of this page (in short, your module must be in your PYTHONPATH to be loaded).
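
For example, a minimal sketch assuming your tests live in a standard Python package in the current directory:

$ pip install -e .\n# or simply expose the module on your PYTHONPATH\n$ export PYTHONPATH=${PYTHONPATH}:$(pwd)\n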

"},{"location":"api/device/","title":"Device models","text":""},{"location":"api/device/#anta.device.AntaDevice","title":"AntaDevice","text":"
AntaDevice(name: str, tags: Optional[List[str]] = None)\n

Bases: ABC

Abstract class representing a device in ANTA. An implementation of this class must override the abstract coroutines collect() and refresh().
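
Below is a minimal sketch of such a subclass; the MyDevice class name and its internals are hypothetical:

from anta.device import AntaDevice\nfrom anta.models import AntaCommand\n\n\nclass MyDevice(AntaDevice):\n    async def collect(self, command: AntaCommand) -> None:\n        # Populate command.output; on failure, catch the exception,\n        # log it and leave command.output as None\n        ...\n\n    async def refresh(self) -> None:\n        # Update self.is_online, self.established and self.hw_model\n        ...\n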

Attributes:

  • name (str): Device name
  • is_online (bool): True if the device IP is reachable and a port can be open
  • established (bool): True if remote command execution succeeds
  • hw_model (Optional[str]): Hardware model of the device
  • tags (List[str]): List of tags for this device

Parameters:

  • name (str, required): Device name
  • tags (Optional[List[str]], default None): List of tags for this device

Source code in anta/device.py
def __init__(self, name: str, tags: Optional[List[str]] = None) -> None:\n\"\"\"\n    Constructor of AntaDevice\n\n    Args:\n        name: Device name\n        tags: List of tags for this device\n    \"\"\"\n    self.name: str = name\n    self.hw_model: Optional[str] = None\n    self.tags: List[str] = tags if tags is not None else []\n    self.is_online: bool = False\n    self.established: bool = False\n\n    # Ensure tag 'all' is always set\n    if DEFAULT_TAG not in self.tags:\n        self.tags.append(DEFAULT_TAG)\n
"},{"location":"api/device/#anta.device.AntaDevice.collect","title":"collect abstractmethod async","text":"
collect(command: AntaCommand) -> None\n

Collect device command output. This abstract coroutine can be used to implement any command collection method for a device in ANTA.

The collect() implementation needs to populate the output attribute of the AntaCommand object passed as argument.

If a failure occurs, the collect() implementation is expected to catch the exception and implement proper logging; in this case, the output attribute of the AntaCommand object passed as argument will be None.

Parameters:

  • command (AntaCommand, required): the command to collect

Source code in anta/device.py
@abstractmethod\nasync def collect(self, command: AntaCommand) -> None:\n\"\"\"\n    Collect device command output.\n    This abstract coroutine can be used to implement any command collection method\n    for a device in ANTA.\n\n    The `collect()` implementation needs to populate the `output` attribute\n    of the `AntaCommand` object passed as argument.\n\n    If a failure occurs, the `collect()` implementation is expected to catch the\n    exception and implement proper logging, the `output` attribute of the\n    `AntaCommand` object passed as argument would be `None` in this case.\n\n    Args:\n        command: the command to collect\n    \"\"\"\n
"},{"location":"api/device/#anta.device.AntaDevice.collect_commands","title":"collect_commands async","text":"
collect_commands(commands: List[AntaCommand]) -> None\n

Collect multiple commands.

Parameters:

  • commands (List[AntaCommand], required): the commands to collect

Source code in anta/device.py
async def collect_commands(self, commands: List[AntaCommand]) -> None:\n\"\"\"\n    Collect multiple commands.\n\n    Args:\n        commands: the commands to collect\n    \"\"\"\n    await asyncio.gather(*(self.collect(command=command) for command in commands))\n
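
For example, a minimal sketch collecting two commands concurrently (host and credentials are placeholders):

import asyncio\n\nfrom anta.device import AsyncEOSDevice\nfrom anta.models import AntaCommand\n\n\nasync def main() -> None:\n    device = AsyncEOSDevice(host='10.0.0.1', username='admin', password='admin')\n    commands = [AntaCommand(command='show version'), AntaCommand(command='show hostname')]\n    await device.collect_commands(commands)\n    for command in commands:\n        print(command.command, command.output)\n\n\nasyncio.run(main())\n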
"},{"location":"api/device/#anta.device.AntaDevice.copy","title":"copy async","text":"
copy(\n    sources: List[Path],\n    destination: Path,\n    direction: Literal[\"to\", \"from\"] = \"from\",\n) -> None\n

Copy files to and from the device, usually through SCP. It is not mandatory to implement this for a valid AntaDevice subclass.

Parameters:

  • sources (List[Path], required): List of files to copy to or from the device.
  • destination (Path, required): Local or remote destination when copying the files. Can be a folder.
  • direction (Literal['to', 'from'], default 'from'): Defines if this coroutine copies files to or from the device.

Source code in anta/device.py
async def copy(self, sources: List[Path], destination: Path, direction: Literal[\"to\", \"from\"] = \"from\") -> None:\n\"\"\"\n    Copy files to and from the device, usually through SCP.\n    It is not mandatory to implement this for a valid AntaDevice subclass.\n\n    Args:\n        sources: List of files to copy to or from the device.\n        destination: Local or remote destination when copying the files. Can be a folder.\n        direction: Defines if this coroutine copies files to or from the device.\n    \"\"\"\n    raise NotImplementedError(f\"copy() method has not been implemented in {self.__class__.__name__} definition\")\n
"},{"location":"api/device/#anta.device.AntaDevice.refresh","title":"refresh abstractmethod async","text":"
refresh() -> None\n

Update attributes of an AntaDevice instance.

This coroutine must update the following attributes of AntaDevice
  • is_online: When the device IP is reachable and a port can be open
  • established: When a command execution succeeds
  • hw_model: The hardware model of the device
Source code in anta/device.py
@abstractmethod\nasync def refresh(self) -> None:\n\"\"\"\n    Update attributes of an AntaDevice instance.\n\n    This coroutine must update the following attributes of AntaDevice:\n        - `is_online`: When the device IP is reachable and a port can be open\n        - `established`: When a command execution succeeds\n        - `hw_model`: The hardware model of the device\n    \"\"\"\n
"},{"location":"api/device/#anta.device.AsyncEOSDevice","title":"AsyncEOSDevice","text":"
AsyncEOSDevice(\n    host: str,\n    username: str,\n    password: str,\n    name: Optional[str] = None,\n    enable_password: Optional[str] = None,\n    port: Optional[int] = None,\n    ssh_port: Optional[int] = 22,\n    tags: Optional[List[str]] = None,\n    timeout: Optional[float] = None,\n    insecure: bool = False,\n    proto: Literal[\"http\", \"https\"] = \"https\",\n)\n

Bases: AntaDevice

Implementation of AntaDevice for EOS using aio-eapi.

Attributes:

  • name: Device name
  • is_online: True if the device IP is reachable and a port can be open
  • established: True if remote command execution succeeds
  • hw_model: Hardware model of the device
  • tags: List of tags for this device

Parameters:

  • host (str, required): Device FQDN or IP
  • username (str, required): Username to connect to eAPI and SSH
  • password (str, required): Password to connect to eAPI and SSH
  • name (Optional[str], default None): Device name
  • enable_password (Optional[str], default None): Password used to gain privileged access on EOS
  • port (Optional[int], default None): eAPI port. Defaults to 80 if proto is 'http' or 443 if proto is 'https'.
  • ssh_port (Optional[int], default 22): SSH port
  • tags (Optional[List[str]], default None): List of tags for this device
  • timeout (Optional[float], default None): Timeout value in seconds for outgoing connections. Defaults to 10 seconds.
  • insecure (bool, default False): Disable SSH host key validation
  • proto (Literal['http', 'https'], default 'https'): eAPI protocol. Value can be 'http' or 'https'

Source code in anta/device.py
def __init__(  # pylint: disable=R0913\n    self,\n    host: str,\n    username: str,\n    password: str,\n    name: Optional[str] = None,\n    enable_password: Optional[str] = None,\n    port: Optional[int] = None,\n    ssh_port: Optional[int] = 22,\n    tags: Optional[List[str]] = None,\n    timeout: Optional[float] = None,\n    insecure: bool = False,\n    proto: Literal[\"http\", \"https\"] = \"https\",\n) -> None:\n\"\"\"\n    Constructor of AsyncEOSDevice\n\n    Args:\n        host: Device FQDN or IP\n        username: Username to connect to eAPI and SSH\n        password: Password to connect to eAPI and SSH\n        name: Device name\n        enable_password: Password used to gain privileged access on EOS\n        port: eAPI port. Defaults to 80 is proto is 'http' or 443 if proto is 'https'.\n        ssh_port: SSH port\n        tags: List of tags for this device\n        timeout: Timeout value in seconds for outgoing connections. Default to 10 secs.\n        insecure: Disable SSH Host Key validation\n        proto: eAPI protocol. Value can be 'http' or 'https'\n    \"\"\"\n    if name is None:\n        name = f\"{host}:{port}\"\n    super().__init__(name, tags)\n    self._enable_password = enable_password\n    self._session: Device = Device(host=host, port=port, username=username, password=password, proto=proto, timeout=timeout)\n    ssh_params: Dict[str, Any] = {}\n    if insecure:\n        ssh_params.update({\"known_hosts\": None})\n    self._ssh_opts: SSHClientConnectionOptions = SSHClientConnectionOptions(host=host, port=ssh_port, username=username, password=password, **ssh_params)\n
"},{"location":"api/device/#anta.device.AsyncEOSDevice.collect","title":"collect async","text":"
collect(command: AntaCommand) -> None\n

Collect device command output from EOS using aio-eapi.

Supports json and text output formats. Gains privileged access using the enable_password attribute of the AntaDevice instance if populated.

Parameters:

  • command (AntaCommand, required): the command to collect

Source code in anta/device.py
async def collect(self, command: AntaCommand) -> None:\n\"\"\"\n    Collect device command output from EOS using aio-eapi.\n\n    Supports outformat `json` and `text` as output structure.\n    Gain privileged access using the `enable_password` attribute\n    of the `AntaDevice` instance if populated.\n\n    Args:\n        command: the command to collect\n    \"\"\"\n    try:\n        commands = []\n        if self._enable_password is not None:\n            commands.append(\n                {\n                    \"cmd\": \"enable\",\n                    \"input\": str(self._enable_password),\n                }\n            )\n        else:\n            commands.append({\"cmd\": \"enable\"})\n        if command.revision:\n            commands.append({\"cmd\": command.command, \"revision\": command.revision})\n        else:\n            commands.append({\"cmd\": command.command})\n        response = await self._session.cli(\n            commands=commands,\n            ofmt=command.ofmt,\n            version=command.version,\n        )\n        # remove first dict related to enable command\n        # only applicable to json output\n        if command.ofmt in [\"json\", \"text\"]:\n            # selecting only our command output\n            response = response[1]\n        command.output = response\n        logger.debug(f\"{self.name}: {command}\")\n\n    except EapiCommandError as e:\n        message = f\"Command '{command.command}' failed on {self.name}\"\n        anta_log_exception(e, message, logger)\n        command.failed = e\n    except (HTTPError, ConnectError) as e:\n        message = f\"Cannot connect to device {self.name}\"\n        anta_log_exception(e, message, logger)\n        command.failed = e\n    except Exception as e:  # pylint: disable=broad-exception-caught\n        message = f\"Exception raised while collecting command '{command.command}' on device {self.name}\"\n        anta_log_exception(e, message, logger)\n        command.failed = e\n        logger.debug(command)\n
"},{"location":"api/device/#anta.device.AsyncEOSDevice.copy","title":"copy async","text":"
copy(\n    sources: List[Path],\n    destination: Path,\n    direction: Literal[\"to\", \"from\"] = \"from\",\n) -> None\n

Copy files to and from the device using asyncssh.scp().

Parameters:

  • sources (List[Path], required): List of files to copy to or from the device.
  • destination (Path, required): Local or remote destination when copying the files. Can be a folder.
  • direction (Literal['to', 'from'], default 'from'): Defines if this coroutine copies files to or from the device.

Source code in anta/device.py
async def copy(self, sources: List[Path], destination: Path, direction: Literal[\"to\", \"from\"] = \"from\") -> None:\n\"\"\"\n    Copy files to and from the device using asyncssh.scp().\n\n    Args:\n        sources: List of files to copy to or from the device.\n        destination: Local or remote destination when copying the files. Can be a folder.\n        direction: Defines if this coroutine copies files to or from the device.\n    \"\"\"\n    async with asyncssh.connect(\n        host=self._ssh_opts.host,\n        port=self._ssh_opts.port,\n        tunnel=self._ssh_opts.tunnel,\n        family=self._ssh_opts.family,\n        local_addr=self._ssh_opts.local_addr,\n        options=self._ssh_opts,\n    ) as conn:\n        src: Union[List[Tuple[SSHClientConnection, Path]], List[Path]]\n        dst: Union[Tuple[SSHClientConnection, Path], Path]\n        if direction == \"from\":\n            src = [(conn, file) for file in sources]\n            dst = destination\n            for file in sources:\n                logger.info(f\"Copying '{file}' from device {self.name} to '{destination}' locally\")\n        elif direction == \"to\":\n            src = sources\n            dst = (conn, destination)\n            for file in sources:\n                logger.info(f\"Copying '{file}' to device {self.name} to '{destination}' remotely\")\n        else:\n            logger.critical(f\"'direction' argument to copy() fonction is invalid: {direction}\")\n            return\n        await asyncssh.scp(src, dst)\n
"},{"location":"api/device/#anta.device.AsyncEOSDevice.refresh","title":"refresh async","text":"
refresh() -> None\n

Update attributes of an AsyncEOSDevice instance.

This coroutine must update the following attributes of AsyncEOSDevice:
  • is_online: When a device IP is reachable and a port can be open
  • established: When a command execution succeeds
  • hw_model: The hardware model of the device

Source code in anta/device.py
async def refresh(self) -> None:\n\"\"\"\n    Update attributes of an AsyncEOSDevice instance.\n\n    This coroutine must update the following attributes of AsyncEOSDevice:\n    - is_online: When a device IP is reachable and a port can be open\n    - established: When a command execution succeeds\n    - hw_model: The hardware model of the device\n    \"\"\"\n    # Refresh command\n    COMMAND: str = \"show version\"\n    # Hardware model definition in show version\n    HW_MODEL_KEY: str = \"modelName\"\n    logger.debug(f\"Refreshing device {self.name}\")\n    self.is_online = await self._session.check_connection()\n    if self.is_online:\n        try:\n            response = await self._session.cli(command=COMMAND)\n        except EapiCommandError as e:\n            logger.warning(f\"Cannot get hardware information from device {self.name}: {e.errmsg}\")\n        except (HTTPError, ConnectError) as e:\n            logger.warning(f\"Cannot get hardware information from device {self.name}: {exc_to_str(e)}\")\n        else:\n            if HW_MODEL_KEY in response:\n                self.hw_model = response[HW_MODEL_KEY]\n            else:\n                logger.warning(f\"Cannot get hardware information from device {self.name}: cannot parse '{COMMAND}'\")\n    else:\n        logger.warning(f\"Could not connect to device {self.name}: cannot open eAPI port\")\n    self.established = bool(self.is_online and self.hw_model)\n
"},{"location":"api/inventory/","title":"Inventory module","text":""},{"location":"api/inventory/#anta.inventory.AntaInventory","title":"AntaInventory","text":"

Bases: dict

Inventory abstraction for ANTA framework.

"},{"location":"api/inventory/#anta.inventory.AntaInventory.add_device","title":"add_device","text":"
add_device(device: AntaDevice) -> None\n

Add a device to final inventory.

Parameters:

  • device (AntaDevice, required): Device object to be added

Source code in anta/inventory/__init__.py
def add_device(self, device: AntaDevice) -> None:\n\"\"\"Add a device to final inventory.\n\n    Args:\n        device: Device object to be added\n    \"\"\"\n    self[device.name] = device\n
"},{"location":"api/inventory/#anta.inventory.AntaInventory.connect_inventory","title":"connect_inventory async","text":"
connect_inventory() -> None\n

Run refresh() coroutines for all AntaDevice objects in this inventory.

Source code in anta/inventory/__init__.py
async def connect_inventory(self) -> None:\n\"\"\"Run `refresh()` coroutines for all AntaDevice objects in this inventory.\"\"\"\n    logger.debug(\"Refreshing devices...\")\n    results = await asyncio.gather(\n        *(device.refresh() for device in self.values()),\n        return_exceptions=True,\n    )\n    for r in results:\n        if isinstance(r, Exception):\n            message = \"Error when refreshing inventory\"\n            anta_log_exception(r, message, logger)\n
"},{"location":"api/inventory/#anta.inventory.AntaInventory.get_inventory","title":"get_inventory","text":"
get_inventory(\n    established_only: bool = False,\n    tags: Optional[List[str]] = None,\n) -> AntaInventory\n

Returns a filtered inventory.

Parameters:

  • established_only (bool, default False): Whether or not to include only established devices.
  • tags (Optional[List[str]], default None): List of tags to filter devices.

Returns:

  • AntaInventory: An inventory with filtered AntaDevice objects.

Source code in anta/inventory/__init__.py
def get_inventory(self, established_only: bool = False, tags: Optional[List[str]] = None) -> AntaInventory:\n\"\"\"\n    Returns a filtered inventory.\n\n    Args:\n        established_only: Whether or not to include only established devices. Default False.\n        tags: List of tags to filter devices.\n\n    Returns:\n        AntaInventory: An inventory with filtered AntaDevice objects.\n    \"\"\"\n\n    def _filter_devices(device: AntaDevice) -> bool:\n\"\"\"\n        Helper function to select the devices based on the input tags\n        and the requirement for an established connection.\n        \"\"\"\n        if tags is not None and all(tag not in tags for tag in device.tags):\n            return False\n        return bool(not established_only or device.established)\n\n    devices: List[AntaDevice] = list(filter(_filter_devices, self.values()))\n    result = AntaInventory()\n    for device in devices:\n        result.add_device(device)\n    return result\n
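
For example, a minimal sketch assuming inventory is a populated AntaInventory and some devices carry the tag 'spine':

spines = inventory.get_inventory(established_only=True, tags=['spine'])\nfor device in spines.values():\n    print(device.name)\n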
"},{"location":"api/inventory/#anta.inventory.AntaInventory.parse","title":"parse staticmethod","text":"
parse(\n    inventory_file: str,\n    username: str,\n    password: str,\n    enable_password: Optional[str] = None,\n    timeout: Optional[float] = None,\n    insecure: bool = False,\n) -> AntaInventory\n

Create an AntaInventory instance from an inventory file. The inventory devices are AsyncEOSDevice instances.

Parameters:

  • inventory_file (str, required): Path to the inventory YAML file where the user has described the inputs
  • username (str, required): Username to use to connect to devices
  • password (str, required): Password to use to connect to devices
  • enable_password (Optional[str], default None): Password used to gain privileged access on EOS
  • timeout (Optional[float], default None): Timeout in seconds for every API call.
  • insecure (bool, default False): Disable SSH host key validation

Raises:

  • InventoryRootKeyError: Root key of inventory is missing.
  • InventoryIncorrectSchema: Inventory file is not following the AntaInventory schema.
  • InventoryUnknownFormat: Output format is not supported.

Source code in anta/inventory/__init__.py
@staticmethod\ndef parse(\n    inventory_file: str, username: str, password: str, enable_password: Optional[str] = None, timeout: Optional[float] = None, insecure: bool = False\n) -> AntaInventory:\n    # pylint: disable=too-many-arguments\n\"\"\"\n    Create an AntaInventory instance from an inventory file.\n    The inventory devices are AsyncEOSDevice instances.\n\n    Args:\n        inventory_file (str): Path to inventory YAML file where user has described his inputs\n        username (str): Username to use to connect to devices\n        password (str): Password to use to connect to devices\n        timeout (float, optional): timeout in seconds for every API call.\n\n    Raises:\n        InventoryRootKeyError: Root key of inventory is missing.\n        InventoryIncorrectSchema: Inventory file is not following AntaInventory Schema.\n        InventoryUnknownFormat: Output format is not supported.\n    \"\"\"\n\n    inventory = AntaInventory()\n    kwargs: Dict[str, Any] = {\"username\": username, \"password\": password, \"enable_password\": enable_password, \"timeout\": timeout, \"insecure\": insecure}\n    kwargs = {k: v for k, v in kwargs.items() if v is not None}\n\n    with open(inventory_file, \"r\", encoding=\"UTF-8\") as file:\n        data = safe_load(file)\n\n    # Load data using Pydantic\n    try:\n        inventory_input = AntaInventoryInput(**data[AntaInventory.INVENTORY_ROOT_KEY])\n    except KeyError as exc:\n        logger.error(f\"Inventory root key is missing: {AntaInventory.INVENTORY_ROOT_KEY}\")\n        raise InventoryRootKeyError(f\"Inventory root key ({AntaInventory.INVENTORY_ROOT_KEY}) is not defined in your inventory\") from exc\n    except ValidationError as exc:\n        logger.error(\"Inventory data are not compliant with inventory models\")\n        raise InventoryIncorrectSchema(f\"Inventory is not following the schema: {str(exc)}\") from exc\n\n    # Read data from input\n    AntaInventory._parse_hosts(inventory_input, inventory, **kwargs)\n    AntaInventory._parse_networks(inventory_input, inventory, **kwargs)\n    AntaInventory._parse_ranges(inventory_input, inventory, **kwargs)\n\n    return inventory\n
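
For example, a minimal sketch (the file path and credentials are placeholders):

from anta.inventory import AntaInventory\n\ninventory = AntaInventory.parse(\n    inventory_file='inventory.yml',\n    username='admin',\n    password='admin',\n    timeout=10,\n)\n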
"},{"location":"api/inventory/#anta.inventory.exceptions","title":"exceptions","text":"

Manage Exception in Inventory module.

"},{"location":"api/inventory/#anta.inventory.exceptions.InventoryIncorrectSchema","title":"InventoryIncorrectSchema","text":"

Bases: Exception

Error when user data does not follow ANTA schema.

"},{"location":"api/inventory/#anta.inventory.exceptions.InventoryRootKeyError","title":"InventoryRootKeyError","text":"

Bases: Exception

Error raised when inventory root key is not found.

"},{"location":"api/inventory.models.input/","title":"Inventory models","text":""},{"location":"api/inventory.models.input/#anta.inventory.models.AntaInventoryInput","title":"AntaInventoryInput","text":"

Bases: BaseModel

User\u2019s inventory model.

Attributes:

  • networks (List[AntaInventoryNetwork], optional): List of AntaInventoryNetwork objects for networks.
  • hosts (List[AntaInventoryHost], optional): List of AntaInventoryHost objects for hosts.
  • range (List[AntaInventoryRange], optional): List of AntaInventoryRange objects for ranges.

"},{"location":"api/inventory.models.input/#anta.inventory.models.AntaInventoryHost","title":"AntaInventoryHost","text":"

Bases: BaseModel

Host definition for user\u2019s inventory.

Attributes:

  • host (IPvAnyAddress): IPv4 or IPv6 address of the device
  • port (int, optional): eAPI port to use. Defaults to 443.
  • name (str, optional): Name to display in the test report. Defaults to hostname:port.
  • tags (List[str]): List of attached tags read from the inventory file.

"},{"location":"api/inventory.models.input/#anta.inventory.models.AntaInventoryNetwork","title":"AntaInventoryNetwork","text":"

Bases: BaseModel

Network definition for user\u2019s inventory.

Attributes:

  • network (IPvAnyNetwork): Subnet to use for testing.
  • tags (List[str]): List of attached tags read from the inventory file.

"},{"location":"api/inventory.models.input/#anta.inventory.models.AntaInventoryRange","title":"AntaInventoryRange","text":"

Bases: BaseModel

IP Range definition for user\u2019s inventory.

Attributes:

  • start (IPvAnyAddress): IPv4 or IPv6 address for the beginning of the range.
  • stop (IPvAnyAddress): IPv4 or IPv6 address for the end of the range.
  • tags (List[str]): List of attached tags read from the inventory file.

"},{"location":"api/models/","title":"Test models","text":""},{"location":"api/models/#anta.models.AntaTest","title":"AntaTest","text":"
AntaTest(\n    device: AntaDevice,\n    template_params: list[dict[str, Any]] | None = None,\n    eos_data: list[dict[Any, Any] | str] | None = None,\n    labels: list[str] | None = None,\n)\n

Bases: ABC

Abstract class defining a test for ANTA

The goal of this class is to handle the heavy lifting and make writing a test as simple as possible.

TODO - complete doctstring with example

Source code in anta/models.py
def __init__(\n    self,\n    device: AntaDevice,\n    template_params: list[dict[str, Any]] | None = None,\n    # TODO document very well the order of eos_data\n    eos_data: list[dict[Any, Any] | str] | None = None,\n    labels: list[str] | None = None,\n):\n\"\"\"Class constructor\"\"\"\n    # Accept 6 input arguments\n    # pylint: disable=R0913\n    self.logger: logging.Logger = logging.getLogger(f\"{self.__module__}.{self.__class__.__name__}\")\n    self.device: AntaDevice = device\n    self.result: TestResult = TestResult(name=device.name, test=self.name, test_category=self.categories, test_description=self.description)\n    self.labels: List[str] = labels or []\n    self.instance_commands: List[AntaCommand] = []\n\n    # TODO - check optimization for deepcopy\n    # Generating instance_commands from list of commands and template\n    if hasattr(self.__class__, \"commands\") and (cmds := self.__class__.commands) is not None:\n        self.instance_commands.extend(deepcopy(cmds))\n    if hasattr(self.__class__, \"template\") and (tpl := self.__class__.template) is not None:\n        if template_params is None:\n            self.result.is_error(\"Command has template but no params were given\")\n            return\n        self.template_params = template_params\n        for param in template_params:\n            try:\n                self.instance_commands.append(tpl.render(param))\n            except KeyError:\n                self.result.is_error(f\"Cannot render template '{tpl.template}': wrong parameters\")\n                return\n\n    if eos_data is not None:\n        self.logger.debug(\"Test initialized with input data\")\n        self.save_commands_data(eos_data)\n
"},{"location":"api/models/#anta.models.AntaTest.all_data_collected","title":"all_data_collected","text":"
all_data_collected() -> bool\n

returns True if output is populated for every command

Source code in anta/models.py
def all_data_collected(self) -> bool:\n\"\"\"returns True if output is populated for every command\"\"\"\n    return all(command.collected for command in self.instance_commands)\n
"},{"location":"api/models/#anta.models.AntaTest.anta_test","title":"anta_test staticmethod","text":"
anta_test(\n    function: F,\n) -> Callable[..., Coroutine[Any, Any, TestResult]]\n

Decorator for anta_test that handles injecting test data if given and collecting it using asyncio if missing

Source code in anta/models.py
@staticmethod\ndef anta_test(function: F) -> Callable[..., Coroutine[Any, Any, TestResult]]:\n\"\"\"\n    Decorator for anta_test that handles injecting test data if given and collecting it using asyncio if missing\n    \"\"\"\n\n    @wraps(function)\n    async def wrapper(\n        self: AntaTest,\n        eos_data: list[dict[Any, Any] | str] | None = None,\n        **kwargs: Any,\n    ) -> TestResult:\n\"\"\"\n        Wraps the test function and implement (in this order):\n        1. Instantiate the command outputs if `eos_data` is provided\n        2. Collect missing command outputs from the device\n        3. Run the test function\n        4. Catches and set the result if the test function raises an exception\n\n        Returns:\n            TestResult: self.result, populated with the correct exit status\n        \"\"\"\n        if self.result.result != \"unset\":\n            return self.result\n\n        # TODO maybe_skip decorators\n\n        # Data\n        if eos_data is not None:\n            self.save_commands_data(eos_data)\n            self.logger.debug(f\"Test {self.name} initialized with input data {eos_data}\")\n\n        # If some data is missing, try to collect\n        if not self.all_data_collected():\n            await self.collect()\n            if self.result.result != \"unset\":\n                return self.result\n\n        try:\n            if cmds := self.get_failed_commands():\n                self.result.is_error(\n                    \"\\n\".join([f\"{cmd.command} has failed: {exc_to_str(cmd.failed)}\" if cmd.failed else f\"{cmd.command} has failed\" for cmd in cmds])\n                )\n                return self.result\n            function(self, **kwargs)\n        except Exception as e:  # pylint: disable=broad-exception-caught\n            message = f\"Exception raised for test {self.name} (on device {self.device.name})\"\n            anta_log_exception(e, message, self.logger)\n            self.result.is_error(exc_to_str(e))\n\n        AntaTest.update_progress()\n        return self.result\n\n    return wrapper\n
"},{"location":"api/models/#anta.models.AntaTest.collect","title":"collect async","text":"
collect() -> None\n

Method used to collect outputs of all commands of this test class from the device of this test instance.

Source code in anta/models.py
async def collect(self) -> None:\n\"\"\"\n    Method used to collect outputs of all commands of this test class from the device of this test instance.\n    \"\"\"\n    try:\n        await self.device.collect_commands(self.instance_commands)\n    except Exception as e:  # pylint: disable=broad-exception-caught\n        message = f\"Exception raised while collecting commands for test {self.name} (on device {self.device.name})\"\n        anta_log_exception(e, message, self.logger)\n        self.result.is_error(exc_to_str(e))\n
"},{"location":"api/models/#anta.models.AntaTest.get_failed_commands","title":"get_failed_commands","text":"
get_failed_commands() -> List[AntaCommand]\n

returns a list of all the commands that have a populated failed field

Source code in anta/models.py
def get_failed_commands(self) -> List[AntaCommand]:\n\"\"\"returns a list of all the commands that have a populated failed field\"\"\"\n    return [command for command in self.instance_commands if command.failed is not None]\n
"},{"location":"api/models/#anta.models.AntaTest.save_commands_data","title":"save_commands_data","text":"
save_commands_data(\n    eos_data: list[dict[Any, Any] | str]\n) -> None\n

Called at init or at test execution time

Source code in anta/models.py
def save_commands_data(self, eos_data: list[dict[Any, Any] | str]) -> None:\n\"\"\"Called at init or at test execution time\"\"\"\n    if len(eos_data) != len(self.instance_commands):\n        self.result.is_error(\"Test initialization error: Trying to save more data than there are commands for the test\")\n        return\n    for index, data in enumerate(eos_data or []):\n        self.instance_commands[index].output = data\n
"},{"location":"api/models/#anta.models.AntaTest.test","title":"test abstractmethod","text":"
test() -> Coroutine[Any, Any, TestResult]\n

This abstract method is the core of the test. It MUST set the correct status of self.result with the appropriate error messages

It must be implemented as follows:

@AntaTest.anta_test\ndef test(self) -> None:\n    '''\n    assert code\n    '''\n

Source code in anta/models.py
@abstractmethod\ndef test(self) -> Coroutine[Any, Any, TestResult]:\n\"\"\"\n    This abstract method is the core of the test.\n    It MUST set the correct status of self.result with the appropriate error messages\n\n    it must be implemented as follow\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n       '''\n       assert code\n       '''\n    \"\"\"\n
"},{"location":"api/models/#anta.models.AntaTest.update_progress","title":"update_progress classmethod","text":"
update_progress() -> None\n

Update progress bar for all AntaTest objects if it exists

Source code in anta/models.py
@classmethod\ndef update_progress(cls) -> None:\n\"\"\"\n    Update progress bar for all AntaTest objects if it exists\n    \"\"\"\n    if cls.progress and (cls.nrfu_task is not None):\n        cls.progress.update(cls.nrfu_task, advance=1)\n
"},{"location":"api/models/#anta.models.AntaCommand","title":"AntaCommand","text":"

Bases: BaseModel

Class to define a test command with its API version

Attributes:

  • command (str): Device command
  • version (Literal[1, 'latest']): eAPI version - valid values are 1 or "latest" - default is "latest"
  • revision (Optional[conint(ge=1, le=99)]): Revision of the command. Valid values are 1 to 99. Revision has precedence over version.
  • ofmt (Literal['json', 'text']): eAPI output - json or text - default is json
  • template (Optional[AntaTemplate]): AntaTemplate object used to render this command
  • params (Optional[Dict[str, Any]]): dictionary of variables with string values to render the template
  • failed (Optional[Exception]): If the command execution fails, the Exception object is stored in this field
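
For example, a minimal sketch building two commands (the commands themselves are arbitrary examples):

from anta.models import AntaCommand\n\n# defaults: ofmt='json', version='latest'\ncmd = AntaCommand(command='show version')\n# request text output instead\ncmd_text = AntaCommand(command='show running-config', ofmt='text')\n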

"},{"location":"api/models/#anta.models.AntaCommand.collected","title":"collected property","text":"
collected: bool\n

Return True if the command has been collected

"},{"location":"api/models/#anta.models.AntaCommand.json_output","title":"json_output property","text":"
json_output: Dict[str, Any]\n

Get the command output as JSON

"},{"location":"api/models/#anta.models.AntaCommand.text_output","title":"text_output property","text":"
text_output: str\n

Get the command output as a string

"},{"location":"api/models/#anta.models.AntaTemplate","title":"AntaTemplate","text":"

Bases: BaseModel

Class to define a test command template with its API version

Attributes:

  • template (str): Python f-string. Example: 'show vlan {vlan_id}'
  • version (Literal[1, 'latest']): eAPI version - valid values are 1 or "latest" - default is "latest"
  • revision (Optional[conint(ge=1, le=99)]): Revision of the command. Valid values are 1 to 99. Revision has precedence over version.
  • ofmt (Literal['json', 'text']): eAPI output - json or text - default is json

"},{"location":"api/models/#anta.models.AntaTemplate.render","title":"render","text":"
render(params: Dict[str, Any]) -> AntaCommand\n

Render an AntaCommand from an AntaTemplate instance. Keep the parameters used in the AntaTemplate instance.

Parameters:

  • params (Dict[str, Any]): dictionary of variables with string values to render the Python f-string

Returns:

  • AntaCommand: The rendered AntaCommand. This AntaCommand instance has a template attribute that references this AntaTemplate instance.

Source code in anta/models.py
def render(self, params: Dict[str, Any]) -> AntaCommand:\n\"\"\"Render an AntaCommand from an AntaTemplate instance.\n    Keep the parameters used in the AntaTemplate instance.\n\n     Args:\n         params: dictionary of variables with string values to render the Python f-string\n\n     Returns:\n         AntaCommand: The rendered AntaCommand.\n                      This AntaCommand instance have a template attribute that references this\n                      AntaTemplate instance.\n    \"\"\"\n    return AntaCommand(command=self.template.format(**params), ofmt=self.ofmt, version=self.version, revision=self.revision, template=self, params=params)\n
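
For example, a minimal sketch using the 'show vlan {vlan_id}' f-string from the attributes above:

from anta.models import AntaTemplate\n\ntpl = AntaTemplate(template='show vlan {vlan_id}')\ncmd = tpl.render({'vlan_id': '10'})\nprint(cmd.command)  # show vlan 10\n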
"},{"location":"api/report_manager/","title":"Report Manager module","text":""},{"location":"api/report_manager/#anta.reporter.ReportTable","title":"ReportTable","text":"
ReportTable()\n

Generate a table report based on TestResult instances.

Source code in anta/reporter/__init__.py
def __init__(self) -> None:\n\"\"\"\n    __init__ Class constructor\n    \"\"\"\n    self.colors = []\n    self.colors.append(ColorManager(level=\"success\", color=RICH_COLOR_PALETTE.SUCCESS))\n    self.colors.append(ColorManager(level=\"failure\", color=RICH_COLOR_PALETTE.FAILURE))\n    self.colors.append(ColorManager(level=\"error\", color=RICH_COLOR_PALETTE.ERROR))\n    self.colors.append(ColorManager(level=\"skipped\", color=RICH_COLOR_PALETTE.SKIPPED))\n
"},{"location":"api/report_manager/#anta.reporter.ReportTable.report_all","title":"report_all","text":"
report_all(\n    result_manager: ResultManager,\n    host: Optional[str] = None,\n    testcase: Optional[str] = None,\n    title: str = \"All tests results\",\n) -> Table\n

Create a table report with all tests for one or all devices.

Create table with full output: Host / Test / Status / Message

Parameters:

  • result_manager (ResultManager, required): A manager with a list of tests.
  • host (str, optional): IP address of a host to search for. Defaults to None.
  • testcase (str, optional): A test name to search for. Defaults to None.
  • title (str, optional): Title for the report. Defaults to 'All tests results'.

Returns:

  • Table: A fully populated rich Table

Source code in anta/reporter/__init__.py
def report_all(\n    self,\n    result_manager: ResultManager,\n    host: Optional[str] = None,\n    testcase: Optional[str] = None,\n    title: str = \"All tests results\",\n) -> Table:\n\"\"\"\n    Create a table report with all tests for one or all devices.\n\n    Create table with full output: Host / Test / Status / Message\n\n    Args:\n        result_manager (ResultManager): A manager with a list of tests.\n        host (str, optional): IP Address of a host to search for. Defaults to None.\n        testcase (str, optional): A test name to search for. Defaults to None.\n        title (str, optional): Title for the report. Defaults to 'All tests results'.\n\n    Returns:\n        Table: A fully populated rich Table\n    \"\"\"\n    table = Table(title=title)\n    headers = [\"Device IP\", \"Test Name\", \"Test Status\", \"Message(s)\", \"Test description\", \"Test category\"]\n    table = self._build_headers(headers=headers, table=table)\n\n    for result in result_manager.get_results(output_format=\"list\"):\n        # pylint: disable=R0916\n        if (host is None and testcase is None) or (host is not None and str(result.name) == host) or (testcase is not None and testcase == str(result.test)):\n            state = self._color_result(status=str(result.result), output_type=\"str\")\n            message = self._split_list_to_txt_list(result.messages) if len(result.messages) > 0 else \"\"\n            test_categories = \", \".join(result.test_category)\n            table.add_row(str(result.name), result.test, state, message, result.test_description, test_categories)\n    return table\n
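
For example, a minimal sketch assuming manager is a ResultManager already populated with test results:

from rich.console import Console\n\nfrom anta.reporter import ReportTable\n\nreport = ReportTable()\nConsole().print(report.report_all(result_manager=manager))\n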
"},{"location":"api/report_manager/#anta.reporter.ReportTable.report_summary_hosts","title":"report_summary_hosts","text":"
report_summary_hosts(\n    result_manager: ResultManager,\n    host: Optional[str] = None,\n    title: str = \"Summary per host\",\n) -> Table\n

Create a table report with results aggregated per host.

Create table with full output: Host / Number of success / Number of failure / Number of error / List of nodes in error or failure

Parameters:

  • result_manager (ResultManager, required): A manager with a list of tests.
  • host (str, optional): IP address of a host to search for. Defaults to None.
  • title (str, optional): Title for the report. Defaults to 'Summary per host'.

Returns:

  • Table: A fully populated rich Table

Source code in anta/reporter/__init__.py
def report_summary_hosts(\n    self,\n    result_manager: ResultManager,\n    host: Optional[str] = None,\n    title: str = \"Summary per host\",\n) -> Table:\n\"\"\"\n    Create a table report with result agregated per host.\n\n    Create table with full output: Host / Number of success / Number of failure / Number of error / List of nodes in error or failure\n\n    Args:\n        result_manager (ResultManager): A manager with a list of tests.\n        host (str, optional): IP Address of a host to search for. Defaults to None.\n        title (str, optional): Title for the report. Defaults to 'All tests results'.\n\n    Returns:\n        Table: A fully populated rich Table\n    \"\"\"\n    table = Table(title=title)\n    headers = [\n        \"Host IP\",\n        \"# of success\",\n        \"# of skipped\",\n        \"# of failure\",\n        \"# of errors\",\n        \"List of failed or error test cases\",\n    ]\n    table = self._build_headers(headers=headers, table=table)\n    for host_read in result_manager.get_hosts():\n        if host is None or str(host_read) == host:\n            results = result_manager.get_result_by_host(host_read)\n            logger.debug(\"data to use for computation\")\n            logger.debug(f\"{host}: {results}\")\n            nb_failure = len([result for result in results if result.result == \"failure\"])\n            nb_error = len([result for result in results if result.result == \"error\"])\n            list_failure = [str(result.test) for result in results if result.result in [\"failure\", \"error\"]]\n            nb_success = len([result for result in results if result.result == \"success\"])\n            nb_skipped = len([result for result in results if result.result == \"skipped\"])\n            table.add_row(\n                str(host_read),\n                str(nb_success),\n                str(nb_skipped),\n                str(nb_failure),\n                str(nb_error),\n                str(list_failure),\n            )\n    return table\n
"},{"location":"api/report_manager/#anta.reporter.ReportTable.report_summary_tests","title":"report_summary_tests","text":"
report_summary_tests(\n    result_manager: ResultManager,\n    testcase: Optional[str] = None,\n    title: str = \"Summary per test case\",\n) -> Table\n

Create a table report with results aggregated per test.

Create table with full output: Test / Number of success / Number of failure / Number of error / List of nodes in error or failure

Parameters:

  • result_manager (ResultManager, required): A manager with a list of tests.
  • testcase (str, optional): A test name to search for. Defaults to None.
  • title (str, optional): Title for the report. Defaults to 'Summary per test case'.

Returns:

  • Table: A fully populated rich Table

Source code in anta/reporter/__init__.py
def report_summary_tests(\n    self,\n    result_manager: ResultManager,\n    testcase: Optional[str] = None,\n    title: str = \"Summary per test case\",\n) -> Table:\n\"\"\"\n    Create a table report with result agregated per test.\n\n    Create table with full output: Test / Number of success / Number of failure / Number of error / List of nodes in error or failure\n\n    Args:\n        result_manager (ResultManager): A manager with a list of tests.\n        testcase (str, optional): A test name to search for. Defaults to None.\n        title (str, optional): Title for the report. Defaults to 'All tests results'.\n\n    Returns:\n        Table: A fully populated rich Table\n    \"\"\"\n    # sourcery skip: class-extract-method\n    table = Table(title=title)\n    headers = [\n        \"Test Case\",\n        \"# of success\",\n        \"# of skipped\",\n        \"# of failure\",\n        \"# of errors\",\n        \"List of failed or error nodes\",\n    ]\n    table = self._build_headers(headers=headers, table=table)\n    for testcase_read in result_manager.get_testcases():\n        if testcase is None or str(testcase_read) == testcase:\n            results = result_manager.get_result_by_test(testcase_read)\n            nb_failure = len([result for result in results if result.result == \"failure\"])\n            nb_error = len([result for result in results if result.result == \"error\"])\n            list_failure = [str(result.name) for result in results if result.result in [\"failure\", \"error\"]]\n            nb_success = len([result for result in results if result.result == \"success\"])\n            nb_skipped = len([result for result in results if result.result == \"skipped\"])\n            table.add_row(\n                testcase_read,\n                str(nb_success),\n                str(nb_skipped),\n                str(nb_failure),\n                str(nb_error),\n                str(list_failure),\n            )\n    return table\n
"},{"location":"api/report_manager_models/","title":"Report Manager models","text":""},{"location":"api/report_manager_models/#anta.reporter.models.ColorManager","title":"ColorManager","text":"

Bases: BaseModel

Color management for status report.

Attributes:

  • level (str): Test result value.
  • color (str): Associated color.

"},{"location":"api/report_manager_models/#anta.reporter.models.ColorManager.name_must_be_in","title":"name_must_be_in","text":"
name_must_be_in(v: str) -> str\n

Status validator

Validate that the status is a supported one

Parameters:

  • v (str, required): User-defined level

Raises:

  • ValueError: If level is unsupported

Returns:

  • str: level value

Source code in anta/reporter/models.py
@validator(\"level\", allow_reuse=True)\ndef name_must_be_in(cls, v: str) -> str:\n\"\"\"\n    Status validator\n\n    Validate status is a supported one\n\n    Args:\n        v (str): User defined level\n\n    Raises:\n        ValueError: If level is unsupported\n\n    Returns:\n        str: level value\n    \"\"\"\n    if v not in RESULT_OPTIONS:\n        raise ValueError(f\"must be one of {RESULT_OPTIONS}\")\n    return v\n
"},{"location":"api/report_manager_models/#anta.reporter.models.ColorManager.string","title":"string","text":"
string() -> str\n

Build a string with the color code

Returns:

  • str: String with level and its associated color

Source code in anta/reporter/models.py
def string(self) -> str:\n\"\"\"\n    Build an str with color code\n\n    Returns:\n        str: String with level and its associated color\n    \"\"\"\n    return f\"[{self.color}]{self.level}\"\n
"},{"location":"api/report_manager_models/#anta.reporter.models.ColorManager.style_rich","title":"style_rich","text":"
style_rich() -> Text\n

Build a rich Text syntax with color

Returns:

  • Text: object with level string and its associated color.

Source code in anta/reporter/models.py
def style_rich(self) -> Text:\n\"\"\"\n    Build a rich Text syntax with color\n\n    Returns:\n        Text: object with level string and its associated color.\n    \"\"\"\n    return Text(self.level, style=self.color)\n
"},{"location":"api/result_manager/","title":"Result Manager module","text":""},{"location":"api/result_manager/#anta.result_manager.ResultManager","title":"ResultManager","text":"
ResultManager()\n

Helper to manage Test Results and generate reports.

Examples:

Create Inventory:

inventory_anta = AntaInventory.parse(\n    inventory_file='examples/inventory.yml',\n    username='ansible',\n    password='ansible',\n    timeout=0.5\n)\n

Create Result Manager:

manager = ResultManager()\n

Run tests for all connected devices:

for device in inventory_anta.get_inventory():\n    manager.add_test_result(\n        VerifyNTP(device=device).test()\n    )\n    manager.add_test_result(\n        VerifyEOSVersion(device=device).test(version='4.28.3M')\n    )\n

Print result in native format:

manager.get_results()\n[\n    TestResult(\n        host=IPv4Address('192.168.0.10'),\n        test='VerifyNTP',\n        result='failure',\n        message=\"device is not running NTP correctly\"\n    ),\n    TestResult(\n        host=IPv4Address('192.168.0.10'),\n        test='VerifyEOSVersion',\n        result='success',\n        message=None\n    ),\n]\n

The status of the class is initialized to \u201cunset\u201d

Then when adding a test with a status that is NOT \u2018error\u2019 the following table shows the updated status:

| Current Status | Added test Status | Updated Status |
| -------------- | ------------------------------- | -------------- |
| unset | Any | Any |
| skipped | unset, skipped | skipped |
| skipped | success | success |
| skipped | failure | failure |
| success | unset, skipped, success | success |
| success | failure | failure |
| failure | unset, skipped, success, failure | failure |

If the status of the added test is error, the status is untouched and the error_status is set to True.

Source code in anta/result_manager/__init__.py
def __init__(self) -> None:\n\"\"\"\n    Class constructor.\n\n    The status of the class is initialized to \"unset\"\n\n    Then when adding a test with a status that is NOT 'error' the following\n    table shows the updated status:\n\n    | Current Status |         Added test Status       | Updated Status |\n    | -------------- | ------------------------------- | -------------- |\n    |      unset     |              Any                |       Any      |\n    |     skipped    |         unset, skipped          |     skipped    |\n    |     skipped    |            success              |     success    |\n    |     skipped    |            failure              |     failure    |\n    |     success    |     unset, skipped, success     |     success    |\n    |     success    |            failure              |     failure    |\n    |     failure    | unset, skipped success, failure |     failure    |\n\n    If the status of the added test is error, the status is untouched and the\n    error_status is set to True.\n    \"\"\"\n    logger.debug(\"Instantiate result-manager\")\n    self._result_entries = ListResult()\n    # Initialize status\n    self.status = \"unset\"\n    self.error_status = False\n
"},{"location":"api/result_manager/#anta.result_manager.ResultManager.__update_status","title":"__update_status","text":"
__update_status(test_status: str) -> None\n

Update ResultManager status based on the table above.

Source code in anta/result_manager/__init__.py
def __update_status(self, test_status: str) -> None:\n\"\"\"\n    Update ResultManager status based on the table above.\n    \"\"\"\n    if test_status not in RESULT_OPTIONS:\n        raise ValueError(\"{test_status} is not a valid result option\")\n    if test_status == \"error\":\n        self.error_status = True\n        return\n\n    if self.status == \"unset\":\n        self.status = test_status\n    elif self.status == \"skipped\" and test_status in {\"success\", \"failure\"}:\n        self.status = test_status\n    elif self.status == \"success\" and test_status == \"failure\":\n        self.status = \"failure\"\n
"},{"location":"api/result_manager/#anta.result_manager.ResultManager.add_test_result","title":"add_test_result","text":"
add_test_result(entry: TestResult) -> None\n

Add a result to the list

Parameters:

  • entry (TestResult, required): TestResult data to add to the report

Source code in anta/result_manager/__init__.py
def add_test_result(self, entry: TestResult) -> None:\n\"\"\"Add a result to the list\n\n    Args:\n        entry (TestResult): TestResult data to add to the report\n    \"\"\"\n    logger.debug(entry)\n    self._result_entries.append(entry)\n    self.__update_status(entry.result)\n
"},{"location":"api/result_manager/#anta.result_manager.ResultManager.add_test_results","title":"add_test_results","text":"
add_test_results(entries: List[TestResult]) -> None\n

Add a list of results to the list

Parameters:

  • entries (List[TestResult], required): list of TestResult data to add to the report

Source code in anta/result_manager/__init__.py
def add_test_results(self, entries: List[TestResult]) -> None:\n\"\"\"Add a list of results to the list\n\n    Args:\n        entries (List[TestResult]): list of TestResult data to add to the report\n    \"\"\"\n    for e in entries:\n        self.add_test_result(e)\n
"},{"location":"api/result_manager/#anta.result_manager.ResultManager.get_hosts","title":"get_hosts","text":"
get_hosts() -> List[str]\n

Get list of IP addresses in current manager.

Returns:

  • List[str]: List of IP addresses.

Source code in anta/result_manager/__init__.py
def get_hosts(self) -> List[str]:\n\"\"\"\n    Get list of IP addresses in current manager.\n\n    Returns:\n        List[str]: List of IP addresses.\n    \"\"\"\n    result_list = []\n    for testcase in self._result_entries:\n        if str(testcase.name) not in result_list:\n            result_list.append(str(testcase.name))\n    return result_list\n
"},{"location":"api/result_manager/#anta.result_manager.ResultManager.get_result_by_host","title":"get_result_by_host","text":"
get_result_by_host(\n    host_ip: str, output_format: str = \"native\"\n) -> Any\n

Get list of test result for a given host.

Parameters:

  • host_ip (str, required): IP address of the host used to filter results.
  • output_format (str, default 'native'): format selector. Can be either native/list.

Returns:

  • Any: List of results related to the host.

Source code in anta/result_manager/__init__.py
def get_result_by_host(self, host_ip: str, output_format: str = \"native\") -> Any:\n\"\"\"\n    Get list of test result for a given host.\n\n    Args:\n        host_ip (str): IP Address of the host to use to filter results.\n        output_format (str, optional): format selector. Can be either native/list. Defaults to 'native'.\n\n    Returns:\n        Any: List of results related to the host.\n    \"\"\"\n    if output_format == \"list\":\n        return [result for result in self._result_entries if str(result.name) == host_ip]\n\n    result_manager_filtered = ListResult()\n    for result in self._result_entries:\n        if str(result.name) == host_ip:\n            result_manager_filtered.append(result)\n    return result_manager_filtered\n
"},{"location":"api/result_manager/#anta.result_manager.ResultManager.get_result_by_test","title":"get_result_by_test","text":"
get_result_by_test(\n    test_name: str, output_format: str = \"native\"\n) -> Any\n

Get list of test result for a given test.

Parameters:

  • test_name (str, required): Test name used to filter results
  • output_format (str, default 'native'): format selector. Can be either native/list.

Returns:

  • list[TestResult]: List of results related to the test.

Source code in anta/result_manager/__init__.py
def get_result_by_test(self, test_name: str, output_format: str = \"native\") -> Any:\n\"\"\"\n    Get list of test result for a given test.\n\n    Args:\n        test_name (str): Test name to use to filter results\n        output_format (str, optional): format selector. Can be either native/list. Defaults to 'native'.\n\n    Returns:\n        list[TestResult]: List of results related to the test.\n    \"\"\"\n    if output_format == \"list\":\n        return [result for result in self._result_entries if str(result.test) == test_name]\n\n    result_manager_filtered = ListResult()\n    for result in self._result_entries:\n        if result.test == test_name:\n            result_manager_filtered.append(result)\n    return result_manager_filtered\n
"},{"location":"api/result_manager/#anta.result_manager.ResultManager.get_results","title":"get_results","text":"
get_results(output_format: str = 'native') -> Any\n

Expose the list of all test results in different formats.

Supported formats:
  • native: ListResults format
  • list: a list of TestResult
  • json: a native JSON format

Parameters:

    output_format (str): Format selector. Can be either native/list/json. Defaults to 'native'.

Returns:

    Any: List of results.

Source code in anta/result_manager/__init__.py

def get_results(self, output_format: str = "native") -> Any:
    """
    Expose list of all test results in different format

    Support multiple format:
      - native: ListResults format
      - list: a list of TestResult
      - json: a native JSON format

    Args:
        output_format (str, optional): format selector. Can be either native/list/json. Defaults to 'native'.

    Returns:
        any: List of results.
    """
    if output_format == "list":
        return list(self._result_entries)

    if output_format == "json":
        return json.dumps(pydantic_to_dict(self._result_entries), indent=4)

    # Default return for native format.
    return self._result_entries
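A short sketch of the three output formats (again assuming a populated manager):

from anta.result_manager import ResultManager

manager = ResultManager()
# ... results are added to the manager while tests run ...

native = manager.get_results()                       # ListResult (default)
as_list = manager.get_results(output_format="list")  # list of TestResult
as_json = manager.get_results(output_format="json")  # JSON string
print(as_json)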
"},{"location":"api/result_manager/#anta.result_manager.ResultManager.get_status","title":"get_status","text":"
get_status(ignore_error: bool = False) -> str\n

Returns the current status including error_status if ignore_error is False

Source code in anta/result_manager/__init__.py
def get_status(self, ignore_error: bool = False) -> str:
    """
    Returns the current status including error_status if ignore_error is False
    """
    return "error" if self.error_status and not ignore_error else self.status
"},{"location":"api/result_manager/#anta.result_manager.ResultManager.get_testcases","title":"get_testcases","text":"
get_testcases() -> List[str]\n

Get the list of names of all test cases in the current manager.

Returns:

    List[str]: List of names for all tests.

Source code in anta/result_manager/__init__.py

def get_testcases(self) -> List[str]:
    """
    Get list of name of all test cases in current manager.

    Returns:
        List[str]: List of names for all tests.
    """
    result_list = []
    for testcase in self._result_entries:
        if str(testcase.test) not in result_list:
            result_list.append(str(testcase.test))
    return result_list
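Putting the accessors together, a hedged sketch of summarizing a completed run:

from anta.result_manager import ResultManager

manager = ResultManager()
# ... results are added to the manager while tests run ...

print("Devices:", manager.get_hosts())
print("Tests:", manager.get_testcases())
print("Overall status:", manager.get_status())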
"},{"location":"api/result_manager_models/","title":"Result Manager models","text":""},{"location":"api/result_manager_models/#anta.result_manager.models.TestResult","title":"TestResult","text":"

Bases: BaseModel

Describe the result of a test from a single device.

Attributes:

    name (str): Device name where the test has run.
    test (str): Name of the test run on the device.
    test_category (List[str]): List of test categories the test belongs to.
    test_description (str): Test description.
    result (str): Result of the test. Can be one of ["unset", "success", "failure", "error", "skipped"].
    message (str): Message to report after the test if any.
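For illustration, a minimal sketch of creating a TestResult and setting its status with the helpers documented below (assuming name and test are the only required fields; the values are hypothetical):

from anta.result_manager.models import TestResult

result = TestResult(name="leaf1", test="VerifyZeroTouch")
result.is_success("ZTP is disabled")
print(result.result, result.message)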

"},{"location":"api/result_manager_models/#anta.result_manager.models.TestResult.is_error","title":"is_error","text":"
is_error(message: str = '') -> bool\n

Helper to set status to error

Parameters:

    message (str): Optional message related to the test. Defaults to ''.

Returns:

    bool: Always true.

Source code in anta/result_manager/models.py

def is_error(self, message: str = "") -> bool:
    """
    Helper to set status to error

    Args:
        message (str): Optional message related to the test

    Returns:
        bool: Always true
    """
    return self._set_status("error", message)
"},{"location":"api/result_manager_models/#anta.result_manager.models.TestResult.is_failure","title":"is_failure","text":"
is_failure(message: str = '') -> bool\n

Helper to set status to failure

Parameters:

    message (str): Optional message related to the test. Defaults to ''.

Returns:

    bool: Always true.

Source code in anta/result_manager/models.py

def is_failure(self, message: str = "") -> bool:
    """
    Helper to set status to failure

    Args:
        message (str): Optional message related to the test

    Returns:
        bool: Always true
    """
    return self._set_status("failure", message)
"},{"location":"api/result_manager_models/#anta.result_manager.models.TestResult.is_skipped","title":"is_skipped","text":"
is_skipped(message: str = '') -> bool\n

Helper to set status to skipped

Parameters:

    message (str): Optional message related to the test. Defaults to ''.

Returns:

    bool: Always true.

Source code in anta/result_manager/models.py

def is_skipped(self, message: str = "") -> bool:
    """
    Helper to set status to skipped

    Args:
        message (str): Optional message related to the test

    Returns:
        bool: Always true
    """
    return self._set_status("skipped", message)
"},{"location":"api/result_manager_models/#anta.result_manager.models.TestResult.is_success","title":"is_success","text":"
is_success(message: str = '') -> bool\n

Helper to set status to success

Parameters:

    message (str): Optional message related to the test. Defaults to ''.

Returns:

    bool: Always true.

Source code in anta/result_manager/models.py

def is_success(self, message: str = "") -> bool:
    """
    Helper to set status to success

    Args:
        message (str): Optional message related to the test

    Returns:
        bool: Always true
    """
    return self._set_status("success", message)
"},{"location":"api/result_manager_models/#anta.result_manager.models.TestResult.name_must_be_in","title":"name_must_be_in classmethod","text":"
name_must_be_in(v: str) -> str\n

Status validator

Validates that the status is a supported one.

Parameters:

    v (str): User defined status. Required.

Raises:

    ValueError: If status is unsupported.

Returns:

    str: status value

Source code in anta/result_manager/models.py

@classmethod
@field_validator("result")
def name_must_be_in(cls, v: str) -> str:
    """
    Status validator

    Validate status is a supported one

    Args:
        v (str): User defined status

    Raises:
        ValueError: If status is unsupported

    Returns:
        str: status value
    """
    if v not in RESULT_OPTIONS:
        raise ValueError(f"must be one of {RESULT_OPTIONS}")
    return v
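Because the validator guards the result field, an unsupported status raises a validation error at construction time. A hedged sketch (field values are hypothetical):

from anta.result_manager.models import TestResult

try:
    TestResult(name="leaf1", test="VerifyZeroTouch", result="bogus")
except ValueError as exc:
    # pydantic raises a ValidationError (a ValueError subclass) mentioning RESULT_OPTIONS
    print(exc)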
"},{"location":"api/result_manager_models/#anta.result_manager.models.ListResult","title":"ListResult","text":"

Bases: RootModel[List[TestResult]]

List result for all tests on all devices.

Attributes:

    __root__ (List[TestResult]): A list of TestResult objects.

"},{"location":"api/result_manager_models/#anta.result_manager.models.ListResult.append","title":"append","text":"
append(value: TestResult) -> None\n

Add support for append method.

Source code in anta/result_manager/models.py
def append(self, value: TestResult) -> None:
    """Add support for append method."""
    self.root.append(value)
"},{"location":"api/result_manager_models/#anta.result_manager.models.ListResult.extend","title":"extend","text":"
extend(values: List[TestResult]) -> None\n

Add support for extend method.

Source code in anta/result_manager/models.py
def extend(self, values: List[TestResult]) -> None:
    """Add support for extend method."""
    self.root.extend(values)
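A small sketch of using these helpers (assuming pydantic v2 RootModel construction via the root keyword, and that name and test suffice to build a TestResult; values are hypothetical):

from anta.result_manager.models import ListResult, TestResult

results = ListResult(root=[])
results.append(TestResult(name="leaf1", test="VerifyZeroTouch"))
results.extend([TestResult(name="leaf2", test="VerifyZeroTouch")])
print(len(results.root))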
"},{"location":"api/tests.aaa/","title":"AAA","text":""},{"location":"api/tests.aaa/#anta-catalog-for-interfaces-tests","title":"ANTA catalog for interfaces tests","text":"

Test functions related to various EOS AAA settings

"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyAcctConsoleMethods","title":"VerifyAcctConsoleMethods","text":"

Bases: AntaTest

Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x).

Expected Results
  • success: The test will pass if the provided AAA accounting console method list is matching in the configured accounting types.
  • failure: The test will fail if the provided AAA accounting console method list is NOT matching in the configured accounting types.
  • skipped: The test will be skipped if the AAA accounting console method list or accounting type list are not provided.
Source code in anta/tests/aaa.py
class VerifyAcctConsoleMethods(AntaTest):
    """
    Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x).

    Expected Results:
        * success: The test will pass if the provided AAA accounting console method list is matching in the configured accounting types.
        * failure: The test will fail if the provided AAA accounting console method list is NOT matching in the configured accounting types.
        * skipped: The test will be skipped if the AAA accounting console method list or accounting type list are not provided.
    """

    name = "VerifyAcctConsoleMethods"
    description = "Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x)."
    categories = ["aaa"]
    commands = [AntaCommand(command="show aaa methods accounting")]

    @AntaTest.anta_test
    def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
        """
        Run VerifyAcctConsoleMethods validation.

        Args:
            methods: List of AAA accounting console methods. Methods should be in the right order.
            auth_types: List of accounting types to verify. List elements must be: commands, exec, system, dot1x.
        """
        if not methods or not auth_types:
            self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
            return

        methods_with_group = _check_group_methods(methods)

        _check_auth_type(auth_types, ["system", "exec", "commands", "dot1x"])

        command_output = self.instance_commands[0].json_output

        not_matching = []
        not_configured = []

        for auth_type in auth_types:
            auth_type_key = f"{auth_type}AcctMethods"

            method_key = list(command_output[auth_type_key].keys())[0]

            if not command_output[auth_type_key][method_key].get("consoleAction"):
                not_configured.append(auth_type)

            if command_output[auth_type_key][method_key]["consoleMethods"] != methods_with_group:
                not_matching.append(auth_type)

        if not_configured:
            self.result.is_failure(f"AAA console accounting is not configured for {not_configured}")
            return

        if not not_matching:
            self.result.is_success()
        else:
            self.result.is_failure(f"AAA accounting console methods {methods} are not matching for {not_matching}")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyAcctConsoleMethods.test","title":"test","text":"
test(\n    methods: Optional[List[str]] = None,\n    auth_types: Optional[List[str]] = None,\n) -> None\n

Run VerifyAcctConsoleMethods validation.

Parameters:

    methods (Optional[List[str]]): List of AAA accounting console methods. Methods should be in the right order. Defaults to None.
    auth_types (Optional[List[str]]): List of accounting types to verify. List elements must be: commands, exec, system, dot1x. Defaults to None.

Source code in anta/tests/aaa.py

@AntaTest.anta_test
def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
    """
    Run VerifyAcctConsoleMethods validation.

    Args:
        methods: List of AAA accounting console methods. Methods should be in the right order.
        auth_types: List of accounting types to verify. List elements must be: commands, exec, system, dot1x.
    """
    if not methods or not auth_types:
        self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
        return

    methods_with_group = _check_group_methods(methods)

    _check_auth_type(auth_types, ["system", "exec", "commands", "dot1x"])

    command_output = self.instance_commands[0].json_output

    not_matching = []
    not_configured = []

    for auth_type in auth_types:
        auth_type_key = f"{auth_type}AcctMethods"

        method_key = list(command_output[auth_type_key].keys())[0]

        if not command_output[auth_type_key][method_key].get("consoleAction"):
            not_configured.append(auth_type)

        if command_output[auth_type_key][method_key]["consoleMethods"] != methods_with_group:
            not_matching.append(auth_type)

    if not_configured:
        self.result.is_failure(f"AAA console accounting is not configured for {not_configured}")
        return

    if not not_matching:
        self.result.is_success()
    else:
        self.result.is_failure(f"AAA accounting console methods {methods} are not matching for {not_matching}")
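As a hedged sketch of running this test from Python (assuming an initialized AsyncEOSDevice named device, and that test parameters are passed as keyword arguments as the signature above suggests; the method values are hypothetical):

import asyncio

from anta.tests.aaa import VerifyAcctConsoleMethods


async def run_check(device) -> None:
    # device is assumed to be an already-initialized AsyncEOSDevice
    test = VerifyAcctConsoleMethods(device)
    await test.test(methods=["tacacs+", "logging"], auth_types=["exec", "commands"])
    print(test.result.result, test.result.message)

# asyncio.run(run_check(device))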
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyAcctDefaultMethods","title":"VerifyAcctDefaultMethods","text":"

Bases: AntaTest

Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x).

Expected Results
  • success: The test will pass if the provided AAA accounting default method list is matching in the configured accounting types.
  • failure: The test will fail if the provided AAA accounting default method list is NOT matching in the configured accounting types.
  • skipped: The test will be skipped if the AAA accounting default method list or accounting type list are not provided.
Source code in anta/tests/aaa.py
class VerifyAcctDefaultMethods(AntaTest):
    """
    Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x).

    Expected Results:
        * success: The test will pass if the provided AAA accounting default method list is matching in the configured accounting types.
        * failure: The test will fail if the provided AAA accounting default method list is NOT matching in the configured accounting types.
        * skipped: The test will be skipped if the AAA accounting default method list or accounting type list are not provided.
    """

    name = "VerifyAcctDefaultMethods"
    description = "Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x)."
    categories = ["aaa"]
    commands = [AntaCommand(command="show aaa methods accounting")]

    @AntaTest.anta_test
    def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
        """
        Run VerifyAcctDefaultMethods validation.

        Args:
            methods: List of AAA accounting default methods. Methods should be in the right order.
            auth_types: List of accounting types to verify. List elements must be: commands, exec, system, dot1x.
        """
        if not methods or not auth_types:
            self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
            return

        methods_with_group = _check_group_methods(methods)

        _check_auth_type(auth_types, ["system", "exec", "commands", "dot1x"])

        command_output = self.instance_commands[0].json_output

        not_matching = []
        not_configured = []

        for auth_type in auth_types:
            auth_type_key = f"{auth_type}AcctMethods"

            method_key = list(command_output[auth_type_key].keys())[0]

            if not command_output[auth_type_key][method_key].get("defaultAction"):
                not_configured.append(auth_type)

            if command_output[auth_type_key][method_key]["defaultMethods"] != methods_with_group:
                not_matching.append(auth_type)

        if not_configured:
            self.result.is_failure(f"AAA default accounting is not configured for {not_configured}")
            return

        if not not_matching:
            self.result.is_success()
        else:
            self.result.is_failure(f"AAA accounting default methods {methods} are not matching for {not_matching}")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyAcctDefaultMethods.test","title":"test","text":"
test(\n    methods: Optional[List[str]] = None,\n    auth_types: Optional[List[str]] = None,\n) -> None\n

Run VerifyAcctDefaultMethods validation.

Parameters:

    methods (Optional[List[str]]): List of AAA accounting default methods. Methods should be in the right order. Defaults to None.
    auth_types (Optional[List[str]]): List of accounting types to verify. List elements must be: commands, exec, system, dot1x. Defaults to None.

Source code in anta/tests/aaa.py

@AntaTest.anta_test
def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
    """
    Run VerifyAcctDefaultMethods validation.

    Args:
        methods: List of AAA accounting default methods. Methods should be in the right order.
        auth_types: List of accounting types to verify. List elements must be: commands, exec, system, dot1x.
    """
    if not methods or not auth_types:
        self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
        return

    methods_with_group = _check_group_methods(methods)

    _check_auth_type(auth_types, ["system", "exec", "commands", "dot1x"])

    command_output = self.instance_commands[0].json_output

    not_matching = []
    not_configured = []

    for auth_type in auth_types:
        auth_type_key = f"{auth_type}AcctMethods"

        method_key = list(command_output[auth_type_key].keys())[0]

        if not command_output[auth_type_key][method_key].get("defaultAction"):
            not_configured.append(auth_type)

        if command_output[auth_type_key][method_key]["defaultMethods"] != methods_with_group:
            not_matching.append(auth_type)

    if not_configured:
        self.result.is_failure(f"AAA default accounting is not configured for {not_configured}")
        return

    if not not_matching:
        self.result.is_success()
    else:
        self.result.is_failure(f"AAA accounting default methods {methods} are not matching for {not_matching}")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyAuthenMethods","title":"VerifyAuthenMethods","text":"

Bases: AntaTest

Verifies the AAA authentication method lists for different authentication types (login, enable, dot1x).

Expected Results
  • success: The test will pass if the provided AAA authentication method list is matching in the configured authentication types.
  • failure: The test will fail if the provided AAA authentication method list is NOT matching in the configured authentication types.
  • skipped: The test will be skipped if the AAA authentication method list or authentication type list are not provided.
Source code in anta/tests/aaa.py
class VerifyAuthenMethods(AntaTest):
    """
    Verifies the AAA authentication method lists for different authentication types (login, enable, dot1x).

    Expected Results:
        * success: The test will pass if the provided AAA authentication method list is matching in the configured authentication types.
        * failure: The test will fail if the provided AAA authentication method list is NOT matching in the configured authentication types.
        * skipped: The test will be skipped if the AAA authentication method list or authentication type list are not provided.
    """

    name = "VerifyAuthenMethods"
    description = "Verifies the AAA authentication method lists for different authentication types (login, enable, dot1x)."
    categories = ["aaa"]
    commands = [AntaCommand(command="show aaa methods authentication")]

    @AntaTest.anta_test
    def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
        """
        Run VerifyAuthenMethods validation.

        Args:
            methods: List of AAA authentication methods. Methods should be in the right order.
            auth_types: List of authentication types to verify. List elements must be: login, enable, dot1x.
        """
        if not methods or not auth_types:
            self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
            return

        methods_with_group = _check_group_methods(methods)

        _check_auth_type(auth_types, ["login", "enable", "dot1x"])

        command_output = self.instance_commands[0].json_output

        not_matching = []

        for auth_type in auth_types:
            auth_type_key = f"{auth_type}AuthenMethods"

            if auth_type_key == "loginAuthenMethods":
                if not command_output[auth_type_key].get("login"):
                    self.result.is_failure("AAA authentication methods are not configured for login console")
                    return

                if command_output[auth_type_key]["login"]["methods"] != methods_with_group:
                    self.result.is_failure(f"AAA authentication methods {methods} are not matching for login console")
                    return

            if command_output[auth_type_key]["default"]["methods"] != methods_with_group:
                not_matching.append(auth_type)

        if not not_matching:
            self.result.is_success()
        else:
            self.result.is_failure(f"AAA authentication methods {methods} are not matching for {not_matching}")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyAuthenMethods.test","title":"test","text":"
test(\n    methods: Optional[List[str]] = None,\n    auth_types: Optional[List[str]] = None,\n) -> None\n

Run VerifyAuthenMethods validation.

Parameters:

    methods (Optional[List[str]]): List of AAA authentication methods. Methods should be in the right order. Defaults to None.
    auth_types (Optional[List[str]]): List of authentication types to verify. List elements must be: login, enable, dot1x. Defaults to None.

Source code in anta/tests/aaa.py

@AntaTest.anta_test
def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
    """
    Run VerifyAuthenMethods validation.

    Args:
        methods: List of AAA authentication methods. Methods should be in the right order.
        auth_types: List of authentication types to verify. List elements must be: login, enable, dot1x.
    """
    if not methods or not auth_types:
        self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
        return

    methods_with_group = _check_group_methods(methods)

    _check_auth_type(auth_types, ["login", "enable", "dot1x"])

    command_output = self.instance_commands[0].json_output

    not_matching = []

    for auth_type in auth_types:
        auth_type_key = f"{auth_type}AuthenMethods"

        if auth_type_key == "loginAuthenMethods":
            if not command_output[auth_type_key].get("login"):
                self.result.is_failure("AAA authentication methods are not configured for login console")
                return

            if command_output[auth_type_key]["login"]["methods"] != methods_with_group:
                self.result.is_failure(f"AAA authentication methods {methods} are not matching for login console")
                return

        if command_output[auth_type_key]["default"]["methods"] != methods_with_group:
            not_matching.append(auth_type)

    if not not_matching:
        self.result.is_success()
    else:
        self.result.is_failure(f"AAA authentication methods {methods} are not matching for {not_matching}")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyAuthzMethods","title":"VerifyAuthzMethods","text":"

Bases: AntaTest

Verifies the AAA authorization method lists for different authorization types (commands, exec).

Expected Results
  • success: The test will pass if the provided AAA authorization method list is matching in the configured authorization types.
  • failure: The test will fail if the provided AAA authorization method list is NOT matching in the configured authorization types.
  • skipped: The test will be skipped if the AAA authentication method list or authorization type list are not provided.
Source code in anta/tests/aaa.py
class VerifyAuthzMethods(AntaTest):
    """
    Verifies the AAA authorization method lists for different authorization types (commands, exec).

    Expected Results:
        * success: The test will pass if the provided AAA authorization method list is matching in the configured authorization types.
        * failure: The test will fail if the provided AAA authorization method list is NOT matching in the configured authorization types.
        * skipped: The test will be skipped if the AAA authentication method list or authorization type list are not provided.
    """

    name = "VerifyAuthzMethods"
    description = "Verifies the AAA authorization method lists for different authorization types (commands, exec)."
    categories = ["aaa"]
    commands = [AntaCommand(command="show aaa methods authorization")]

    @AntaTest.anta_test
    def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
        """
        Run VerifyAuthzMethods validation.

        Args:
            methods: List of AAA authorization methods. Methods should be in the right order.
            auth_types: List of authorization types to verify. List elements must be: commands, exec.
        """
        if not methods or not auth_types:
            self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
            return

        _check_auth_type(auth_types, ["commands", "exec"])

        methods_with_group = _check_group_methods(methods)

        command_output = self.instance_commands[0].json_output

        not_matching = []

        for auth_type in auth_types:
            auth_type_key = f"{auth_type}AuthzMethods"

            method_key = list(command_output[auth_type_key].keys())[0]

            if command_output[auth_type_key][method_key]["methods"] != methods_with_group:
                not_matching.append(auth_type)

        if not not_matching:
            self.result.is_success()
        else:
            self.result.is_failure(f"AAA authorization methods {methods} are not matching for {not_matching}")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyAuthzMethods.test","title":"test","text":"
test(\n    methods: Optional[List[str]] = None,\n    auth_types: Optional[List[str]] = None,\n) -> None\n

Run VerifyAuthzMethods validation.

Parameters:

    methods (Optional[List[str]]): List of AAA authorization methods. Methods should be in the right order. Defaults to None.
    auth_types (Optional[List[str]]): List of authorization types to verify. List elements must be: commands, exec. Defaults to None.

Source code in anta/tests/aaa.py

@AntaTest.anta_test
def test(self, methods: Optional[List[str]] = None, auth_types: Optional[List[str]] = None) -> None:
    """
    Run VerifyAuthzMethods validation.

    Args:
        methods: List of AAA authorization methods. Methods should be in the right order.
        auth_types: List of authorization types to verify. List elements must be: commands, exec.
    """
    if not methods or not auth_types:
        self.result.is_skipped(f"{self.__class__.name} did not run because methods or auth_types were not supplied")
        return

    _check_auth_type(auth_types, ["commands", "exec"])

    methods_with_group = _check_group_methods(methods)

    command_output = self.instance_commands[0].json_output

    not_matching = []

    for auth_type in auth_types:
        auth_type_key = f"{auth_type}AuthzMethods"

        method_key = list(command_output[auth_type_key].keys())[0]

        if command_output[auth_type_key][method_key]["methods"] != methods_with_group:
            not_matching.append(auth_type)

    if not not_matching:
        self.result.is_success()
    else:
        self.result.is_failure(f"AAA authorization methods {methods} are not matching for {not_matching}")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyTacacsServerGroups","title":"VerifyTacacsServerGroups","text":"

Bases: AntaTest

Verifies if the provided TACACS server group(s) are configured.

Expected Results
  • success: The test will pass if the provided TACACS server group(s) are configured.
  • failure: The test will fail if one or all the provided TACACS server group(s) are NOT configured.
  • skipped: The test will be skipped if TACACS server group(s) are not provided.
Source code in anta/tests/aaa.py
class VerifyTacacsServerGroups(AntaTest):
    """
    Verifies if the provided TACACS server group(s) are configured.

    Expected Results:
        * success: The test will pass if the provided TACACS server group(s) are configured.
        * failure: The test will fail if one or all the provided TACACS server group(s) are NOT configured.
        * skipped: The test will be skipped if TACACS server group(s) are not provided.
    """

    name = "VerifyTacacsServerGroups"
    description = "Verifies if the provided TACACS server group(s) are configured."
    categories = ["aaa"]
    commands = [AntaCommand(command="show tacacs")]

    @AntaTest.anta_test
    def test(self, groups: Optional[List[str]] = None) -> None:
        """
        Run VerifyTacacsServerGroups validation.

        Args:
            groups: List of TACACS server group.
        """
        if not groups:
            self.result.is_skipped(f"{self.__class__.name} did not run because groups were not supplied")
            return

        command_output = self.instance_commands[0].json_output

        tacacs_groups = command_output["groups"]

        if not tacacs_groups:
            self.result.is_failure("No TACACS server group(s) are configured")
            return

        not_configured = [group for group in groups if group not in tacacs_groups]

        if not not_configured:
            self.result.is_success()
        else:
            self.result.is_failure(f"TACACS server group(s) {not_configured} are not configured")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyTacacsServerGroups.test","title":"test","text":"
test(groups: Optional[List[str]] = None) -> None\n

Run VerifyTacacsServerGroups validation.

Parameters:

    groups (Optional[List[str]]): List of TACACS server groups. Defaults to None.

Source code in anta/tests/aaa.py

@AntaTest.anta_test
def test(self, groups: Optional[List[str]] = None) -> None:
    """
    Run VerifyTacacsServerGroups validation.

    Args:
        groups: List of TACACS server group.
    """
    if not groups:
        self.result.is_skipped(f"{self.__class__.name} did not run because groups were not supplied")
        return

    command_output = self.instance_commands[0].json_output

    tacacs_groups = command_output["groups"]

    if not tacacs_groups:
        self.result.is_failure("No TACACS server group(s) are configured")
        return

    not_configured = [group for group in groups if group not in tacacs_groups]

    if not not_configured:
        self.result.is_success()
    else:
        self.result.is_failure(f"TACACS server group(s) {not_configured} are not configured")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyTacacsServers","title":"VerifyTacacsServers","text":"

Bases: AntaTest

Verifies TACACS servers are configured for a specified VRF.

Expected Results
  • success: The test will pass if the provided TACACS servers are configured in the specified VRF.
  • failure: The test will fail if the provided TACACS servers are NOT configured in the specified VRF.
  • skipped: The test will be skipped if TACACS servers or VRF are not provided.
Source code in anta/tests/aaa.py
class VerifyTacacsServers(AntaTest):
    """
    Verifies TACACS servers are configured for a specified VRF.

    Expected Results:
        * success: The test will pass if the provided TACACS servers are configured in the specified VRF.
        * failure: The test will fail if the provided TACACS servers are NOT configured in the specified VRF.
        * skipped: The test will be skipped if TACACS servers or VRF are not provided.
    """

    name = "VerifyTacacsServers"
    description = "Verifies TACACS servers are configured for a specified VRF."
    categories = ["aaa"]
    commands = [AntaCommand(command="show tacacs")]

    @AntaTest.anta_test
    def test(self, servers: Optional[List[str]] = None, vrf: str = "default") -> None:
        """
        Run VerifyTacacsServers validation.

        Args:
            servers: List of TACACS servers IP addresses.
            vrf: The name of the VRF to transport TACACS messages. Defaults to 'default'.
        """
        if not servers or not vrf:
            self.result.is_skipped(f"{self.__class__.name} did not run because servers or vrf were not supplied")
            return

        command_output = self.instance_commands[0].json_output

        tacacs_servers = command_output["tacacsServers"]

        if not tacacs_servers:
            self.result.is_failure("No TACACS servers are configured")
            return

        not_configured = [
            server
            for server in servers
            if not any(server == tacacs_server["serverInfo"]["hostname"] and vrf == tacacs_server["serverInfo"]["vrf"] for tacacs_server in tacacs_servers)
        ]

        if not not_configured:
            self.result.is_success()
        else:
            self.result.is_failure(f"TACACS servers {not_configured} are not configured in VRF {vrf}")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyTacacsServers.test","title":"test","text":"
test(\n    servers: Optional[List[str]] = None,\n    vrf: str = \"default\",\n) -> None\n

Run VerifyTacacsServers validation.

Parameters:

    servers (Optional[List[str]]): List of TACACS server IP addresses. Defaults to None.
    vrf (str): The name of the VRF to transport TACACS messages. Defaults to 'default'.

Source code in anta/tests/aaa.py

@AntaTest.anta_test
def test(self, servers: Optional[List[str]] = None, vrf: str = "default") -> None:
    """
    Run VerifyTacacsServers validation.

    Args:
        servers: List of TACACS servers IP addresses.
        vrf: The name of the VRF to transport TACACS messages. Defaults to 'default'.
    """
    if not servers or not vrf:
        self.result.is_skipped(f"{self.__class__.name} did not run because servers or vrf were not supplied")
        return

    command_output = self.instance_commands[0].json_output

    tacacs_servers = command_output["tacacsServers"]

    if not tacacs_servers:
        self.result.is_failure("No TACACS servers are configured")
        return

    not_configured = [
        server
        for server in servers
        if not any(server == tacacs_server["serverInfo"]["hostname"] and vrf == tacacs_server["serverInfo"]["vrf"] for tacacs_server in tacacs_servers)
    ]

    if not not_configured:
        self.result.is_success()
    else:
        self.result.is_failure(f"TACACS servers {not_configured} are not configured in VRF {vrf}")
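Similarly, a hedged sketch for this test (the device, server IPs and VRF name are hypothetical values):

import asyncio

from anta.tests.aaa import VerifyTacacsServers


async def run_check(device) -> None:
    # device is assumed to be an already-initialized AsyncEOSDevice
    test = VerifyTacacsServers(device)
    await test.test(servers=["10.0.0.1", "10.0.0.2"], vrf="MGMT")
    print(test.result.result, test.result.message)

# asyncio.run(run_check(device))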
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyTacacsSourceIntf","title":"VerifyTacacsSourceIntf","text":"

Bases: AntaTest

Verifies TACACS source-interface for a specified VRF.

Expected Results
  • success: The test will pass if the provided TACACS source-interface is configured in the specified VRF.
  • failure: The test will fail if the provided TACACS source-interface is NOT configured in the specified VRF.
  • skipped: The test will be skipped if source-interface or VRF is not provided.
Source code in anta/tests/aaa.py
class VerifyTacacsSourceIntf(AntaTest):
    """
    Verifies TACACS source-interface for a specified VRF.

    Expected Results:
        * success: The test will pass if the provided TACACS source-interface is configured in the specified VRF.
        * failure: The test will fail if the provided TACACS source-interface is NOT configured in the specified VRF.
        * skipped: The test will be skipped if source-interface or VRF is not provided.
    """

    name = "VerifyTacacsSourceIntf"
    description = "Verifies TACACS source-interface for a specified VRF."
    categories = ["aaa"]
    commands = [AntaCommand(command="show tacacs")]

    @AntaTest.anta_test
    def test(self, intf: Optional[str] = None, vrf: str = "default") -> None:
        """
        Run VerifyTacacsSourceIntf validation.

        Args:
            intf: Source-interface to use as source IP of TACACS messages.
            vrf: The name of the VRF to transport TACACS messages. Defaults to 'default'.
        """
        if not intf or not vrf:
            self.result.is_skipped(f"{self.__class__.name} did not run because intf or vrf was not supplied")
            return

        command_output = self.instance_commands[0].json_output

        try:
            if command_output["srcIntf"][vrf] == intf:
                self.result.is_success()
            else:
                self.result.is_failure(f"Wrong source-interface configured in VRF {vrf}")

        except KeyError:
            self.result.is_failure(f"Source-interface {intf} is not configured in VRF {vrf}")
"},{"location":"api/tests.aaa/#anta.tests.aaa.VerifyTacacsSourceIntf.test","title":"test","text":"
test(\n    intf: Optional[str] = None, vrf: str = \"default\"\n) -> None\n

Run VerifyTacacsSourceIntf validation.

Parameters:

    intf (Optional[str]): Source-interface to use as source IP of TACACS messages. Defaults to None.
    vrf (str): The name of the VRF to transport TACACS messages. Defaults to 'default'.

Source code in anta/tests/aaa.py

@AntaTest.anta_test
def test(self, intf: Optional[str] = None, vrf: str = "default") -> None:
    """
    Run VerifyTacacsSourceIntf validation.

    Args:
        intf: Source-interface to use as source IP of TACACS messages.
        vrf: The name of the VRF to transport TACACS messages. Defaults to 'default'.
    """
    if not intf or not vrf:
        self.result.is_skipped(f"{self.__class__.name} did not run because intf or vrf was not supplied")
        return

    command_output = self.instance_commands[0].json_output

    try:
        if command_output["srcIntf"][vrf] == intf:
            self.result.is_success()
        else:
            self.result.is_failure(f"Wrong source-interface configured in VRF {vrf}")

    except KeyError:
        self.result.is_failure(f"Source-interface {intf} is not configured in VRF {vrf}")
"},{"location":"api/tests.configuration/","title":"Configuration","text":""},{"location":"api/tests.configuration/#anta-catalog-for-configuration-tests","title":"ANTA catalog for configuration tests","text":"

Test functions related to the device configuration

"},{"location":"api/tests.configuration/#anta.tests.configuration.VerifyRunningConfigDiffs","title":"VerifyRunningConfigDiffs","text":"

Bases: AntaTest

Verifies there is no difference between the running-config and the startup-config.

Source code in anta/tests/configuration.py
class VerifyRunningConfigDiffs(AntaTest):
    """
    Verifies there is no difference between the running-config and the startup-config.
    """

    name = "VerifyRunningConfigDiffs"
    description = ""
    categories = ["configuration"]
    commands = [AntaCommand(command="show running-config diffs", ofmt="text")]

    @AntaTest.anta_test
    def test(self) -> None:
        """Run VerifyRunningConfigDiffs validation"""
        command_output = self.instance_commands[0].output
        if command_output is None or command_output == "":
            self.result.is_success()
        else:
            self.result.is_failure()
            self.result.is_failure(str(command_output))
"},{"location":"api/tests.configuration/#anta.tests.configuration.VerifyRunningConfigDiffs.test","title":"test","text":"
test() -> None\n

Run VerifyRunningConfigDiffs validation

Source code in anta/tests/configuration.py
@AntaTest.anta_test
def test(self) -> None:
    """Run VerifyRunningConfigDiffs validation"""
    command_output = self.instance_commands[0].output
    if command_output is None or command_output == "":
        self.result.is_success()
    else:
        self.result.is_failure()
        self.result.is_failure(str(command_output))
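A hedged sketch of invoking this parameter-less test (assuming an already-initialized AsyncEOSDevice named device):

import asyncio

from anta.tests.configuration import VerifyRunningConfigDiffs


async def run_check(device) -> None:
    test = VerifyRunningConfigDiffs(device)
    await test.test()
    # "success" when running-config and startup-config match
    print(test.result.result, test.result.message)

# asyncio.run(run_check(device))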
"},{"location":"api/tests.configuration/#anta.tests.configuration.VerifyZeroTouch","title":"VerifyZeroTouch","text":"

Bases: AntaTest

Verifies ZeroTouch is disabled.

Source code in anta/tests/configuration.py
class VerifyZeroTouch(AntaTest):
    """
    Verifies ZeroTouch is disabled.
    """

    name = "VerifyZeroTouch"
    description = "Verifies ZeroTouch is disabled."
    categories = ["configuration"]
    commands = [AntaCommand(command="show zerotouch")]

    @AntaTest.anta_test
    def test(self) -> None:
        """Run VerifyZeroTouch validation"""

        command_output = self.instance_commands[0].output

        assert isinstance(command_output, dict)
        if command_output["mode"] == "disabled":
            self.result.is_success()
        else:
            self.result.is_failure("ZTP is NOT disabled")
"},{"location":"api/tests.configuration/#anta.tests.configuration.VerifyZeroTouch.test","title":"test","text":"
test() -> None\n

Run VerifyZeroTouch validation

Source code in anta/tests/configuration.py
@AntaTest.anta_test
def test(self) -> None:
    """Run VerifyZeroTouch validation"""

    command_output = self.instance_commands[0].output

    assert isinstance(command_output, dict)
    if command_output["mode"] == "disabled":
        self.result.is_success()
    else:
        self.result.is_failure("ZTP is NOT disabled")
"},{"location":"api/tests.connectivity/","title":"Connectivity","text":""},{"location":"api/tests.connectivity/#anta-catalog-for-connectivity-tests","title":"ANTA catalog for connectivity tests","text":"

Test functions related to various connectivity checks

"},{"location":"api/tests.connectivity/#anta.tests.connectivity.VerifyReachability","title":"VerifyReachability","text":"

Bases: AntaTest

Test network reachability to one or many destination IP(s).

Expected Results
  • success: The test will pass if all destination IP(s) are reachable.
  • failure: The test will fail if one or many destination IP(s) are unreachable.
  • error: The test will give an error if the destination IP(s) or the source interface/IP(s) are not provided as template_params.
Source code in anta/tests/connectivity.py
class VerifyReachability(AntaTest):
    """
    Test network reachability to one or many destination IP(s).

    Expected Results:
        * success: The test will pass if all destination IP(s) are reachable.
        * failure: The test will fail if one or many destination IP(s) are unreachable.
        * error: The test will give an error if the destination IP(s) or the source interface/IP(s) are not provided as template_params.
    """

    name = "VerifyReachability"
    description = "Test the network reachability to one or many destination IP(s)."
    categories = ["connectivity"]
    template = AntaTemplate(template="ping {dst} source {src} repeat 2")

    @AntaTest.anta_test
    def test(self) -> None:
        """
        Run VerifyReachability validation.
        """

        failures = []

        for command in self.instance_commands:
            # Both template parameters must have been rendered into the command
            if command.params and "src" in command.params and "dst" in command.params:
                src, dst = command.params["src"], command.params["dst"]
            else:
                self.result.is_error("The destination IP(s) or the source interface/IP(s) are not provided as template_params")
                return

            if "2 received" not in command.json_output["messages"][0]:
                failures.append((src, dst))

        if not failures:
            self.result.is_success()

        else:
            self.result.is_failure(f"Connectivity test failed for the following source-destination pairs: {failures}")
"},{"location":"api/tests.connectivity/#anta.tests.connectivity.VerifyReachability.test","title":"test","text":"
test() -> None\n

Run VerifyReachability validation.

Source code in anta/tests/connectivity.py
@AntaTest.anta_test
def test(self) -> None:
    """
    Run VerifyReachability validation.
    """

    failures = []

    for command in self.instance_commands:
        # Both template parameters must have been rendered into the command
        if command.params and "src" in command.params and "dst" in command.params:
            src, dst = command.params["src"], command.params["dst"]
        else:
            self.result.is_error("The destination IP(s) or the source interface/IP(s) are not provided as template_params")
            return

        if "2 received" not in command.json_output["messages"][0]:
            failures.append((src, dst))

    if not failures:
        self.result.is_success()

    else:
        self.result.is_failure(f"Connectivity test failed for the following source-destination pairs: {failures}")
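Since this test renders its command from an AntaTemplate, the src/dst pairs are supplied per test instance. A hedged sketch (assuming the AntaTest constructor accepts template_params for template-based tests in this release; the interface and IP values are hypothetical):

import asyncio

from anta.tests.connectivity import VerifyReachability


async def run_check(device) -> None:
    # One "ping {dst} source {src} repeat 2" command is rendered per template_params entry
    test = VerifyReachability(device, template_params=[{"src": "Loopback0", "dst": "10.0.0.2"}])
    await test.test()
    print(test.result.result, test.result.message)

# asyncio.run(run_check(device))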
"},{"location":"api/tests.field_notices/","title":"Field Notices","text":""},{"location":"api/tests.field_notices/#anta-catalog-for-field-notices-tests","title":"ANTA catalog for Field Notices tests","text":"

Test functions to flag field notices

"},{"location":"api/tests.field_notices/#anta.tests.field_notices.VerifyFieldNotice44Resolution","title":"VerifyFieldNotice44Resolution","text":"

Bases: AntaTest

Verifies the device is using an Aboot version that fixes the bug discussed in Field Notice 44 (Aboot manages system settings prior to EOS initialization).

https://www.arista.com/en/support/advisories-notices/field-notice/8756-field-notice-44

Source code in anta/tests/field_notices.py
class VerifyFieldNotice44Resolution(AntaTest):
    """
    Verifies the device is using an Aboot version that fix the bug discussed
    in the field notice 44 (Aboot manages system settings prior to EOS initialization).

    https://www.arista.com/en/support/advisories-notices/field-notice/8756-field-notice-44
    """

    name = "VerifyFieldNotice44Resolution"
    description = (
        "Verifies the device is using an Aboot version that fix the bug discussed in the field notice 44 (Aboot manages system settings prior to EOS initialization)"
    )
    categories = ["field notices", "software"]
    commands = [AntaCommand(command="show version detail")]

    # TODO maybe implement ONLY ON PLATFORMS instead
    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
    @AntaTest.anta_test
    def test(self) -> None:  # type: ignore[override]
        """Run VerifyFieldNotice44Resolution validation"""

        command_output = self.instance_commands[0].json_output

        devices = [
            "DCS-7010T-48",
            "DCS-7010T-48-DC",
            "DCS-7050TX-48",
            "DCS-7050TX-64",
            "DCS-7050TX-72",
            "DCS-7050TX-72Q",
            "DCS-7050TX-96",
            "DCS-7050TX2-128",
            "DCS-7050SX-64",
            "DCS-7050SX-72",
            "DCS-7050SX-72Q",
            "DCS-7050SX2-72Q",
            "DCS-7050SX-96",
            "DCS-7050SX2-128",
            "DCS-7050QX-32S",
            "DCS-7050QX2-32S",
            "DCS-7050SX3-48YC12",
            "DCS-7050CX3-32S",
            "DCS-7060CX-32S",
            "DCS-7060CX2-32S",
            "DCS-7060SX2-48YC6",
            "DCS-7160-48YC6",
            "DCS-7160-48TC6",
            "DCS-7160-32CQ",
            "DCS-7280SE-64",
            "DCS-7280SE-68",
            "DCS-7280SE-72",
            "DCS-7150SC-24-CLD",
            "DCS-7150SC-64-CLD",
            "DCS-7020TR-48",
            "DCS-7020TRA-48",
            "DCS-7020SR-24C2",
            "DCS-7020SRG-24C2",
            "DCS-7280TR-48C6",
            "DCS-7280TRA-48C6",
            "DCS-7280SR-48C6",
            "DCS-7280SRA-48C6",
            "DCS-7280SRAM-48C6",
            "DCS-7280SR2K-48C6-M",
            "DCS-7280SR2-48YC6",
            "DCS-7280SR2A-48YC6",
            "DCS-7280SRM-40CX2",
            "DCS-7280QR-C36",
            "DCS-7280QRA-C36S",
        ]
        variants = ["-SSD-F", "-SSD-R", "-M-F", "-M-R", "-F", "-R"]

        model = command_output["modelName"]
        # TODO this list could be a regex
        for variant in variants:
            model = model.replace(variant, "")
        if model not in devices:
            self.result.is_skipped("device is not impacted by FN044")
            return

        for component in command_output["details"]["components"]:
            if component["name"] == "Aboot":
                aboot_version = component["version"].split("-")[2]
        self.result.is_success()
        if aboot_version.startswith("4.0.") and int(aboot_version.split(".")[2]) < 7:
            self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
        elif aboot_version.startswith("4.1.") and int(aboot_version.split(".")[2]) < 1:
            self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
        elif aboot_version.startswith("6.0.") and int(aboot_version.split(".")[2]) < 9:
            self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
        elif aboot_version.startswith("6.1.") and int(aboot_version.split(".")[2]) < 7:
            self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
"},{"location":"api/tests.field_notices/#anta.tests.field_notices.VerifyFieldNotice44Resolution.test","title":"test","text":"
test() -> None\n

Run VerifyFieldNotice44Resolution validation

Source code in anta/tests/field_notices.py
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:  # type: ignore[override]
    """Run VerifyFieldNotice44Resolution validation"""

    command_output = self.instance_commands[0].json_output

    devices = [
        "DCS-7010T-48",
        "DCS-7010T-48-DC",
        "DCS-7050TX-48",
        "DCS-7050TX-64",
        "DCS-7050TX-72",
        "DCS-7050TX-72Q",
        "DCS-7050TX-96",
        "DCS-7050TX2-128",
        "DCS-7050SX-64",
        "DCS-7050SX-72",
        "DCS-7050SX-72Q",
        "DCS-7050SX2-72Q",
        "DCS-7050SX-96",
        "DCS-7050SX2-128",
        "DCS-7050QX-32S",
        "DCS-7050QX2-32S",
        "DCS-7050SX3-48YC12",
        "DCS-7050CX3-32S",
        "DCS-7060CX-32S",
        "DCS-7060CX2-32S",
        "DCS-7060SX2-48YC6",
        "DCS-7160-48YC6",
        "DCS-7160-48TC6",
        "DCS-7160-32CQ",
        "DCS-7280SE-64",
        "DCS-7280SE-68",
        "DCS-7280SE-72",
        "DCS-7150SC-24-CLD",
        "DCS-7150SC-64-CLD",
        "DCS-7020TR-48",
        "DCS-7020TRA-48",
        "DCS-7020SR-24C2",
        "DCS-7020SRG-24C2",
        "DCS-7280TR-48C6",
        "DCS-7280TRA-48C6",
        "DCS-7280SR-48C6",
        "DCS-7280SRA-48C6",
        "DCS-7280SRAM-48C6",
        "DCS-7280SR2K-48C6-M",
        "DCS-7280SR2-48YC6",
        "DCS-7280SR2A-48YC6",
        "DCS-7280SRM-40CX2",
        "DCS-7280QR-C36",
        "DCS-7280QRA-C36S",
    ]
    variants = ["-SSD-F", "-SSD-R", "-M-F", "-M-R", "-F", "-R"]

    model = command_output["modelName"]
    # TODO this list could be a regex
    for variant in variants:
        model = model.replace(variant, "")
    if model not in devices:
        self.result.is_skipped("device is not impacted by FN044")
        return

    for component in command_output["details"]["components"]:
        if component["name"] == "Aboot":
            aboot_version = component["version"].split("-")[2]
    self.result.is_success()
    if aboot_version.startswith("4.0.") and int(aboot_version.split(".")[2]) < 7:
        self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
    elif aboot_version.startswith("4.1.") and int(aboot_version.split(".")[2]) < 1:
        self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
    elif aboot_version.startswith("6.0.") and int(aboot_version.split(".")[2]) < 9:
        self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
    elif aboot_version.startswith("6.1.") and int(aboot_version.split(".")[2]) < 7:
        self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
"},{"location":"api/tests.field_notices/#anta.tests.field_notices.VerifyFieldNotice72Resolution","title":"VerifyFieldNotice72Resolution","text":"

Bases: AntaTest

Checks if the device is potentially exposed to Field Notice 72, and if the issue has been mitigated.

https://www.arista.com/en/support/advisories-notices/field-notice/17410-field-notice-0072

Source code in anta/tests/field_notices.py
class VerifyFieldNotice72Resolution(AntaTest):
    """
    Checks if the device is potentially exposed to Field Notice 72, and if the issue has been mitigated.

    https://www.arista.com/en/support/advisories-notices/field-notice/17410-field-notice-0072
    """

    name = "VerifyFieldNotice72Resolution"
    description = "Verifies if the device has exposure to FN72, and if the issue has been mitigated"
    categories = ["field notices", "software"]
    commands = [AntaCommand(command="show version detail")]

    # TODO maybe implement ONLY ON PLATFORMS instead
    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
    @AntaTest.anta_test
    def test(self) -> None:  # type: ignore[override]
        """Run VerifyFieldNotice72Resolution validation"""

        command_output = self.instance_commands[0].json_output

        devices = ["DCS-7280SR3-48YC8", "DCS-7280SR3K-48YC8"]
        variants = ["-SSD-F", "-SSD-R", "-M-F", "-M-R", "-F", "-R"]
        model = command_output["modelName"]

        for variant in variants:
            model = model.replace(variant, "")
        if model not in devices:
            self.result.is_skipped("Platform is not impacted by FN072")
            return

        serial = command_output["serialNumber"]
        number = int(serial[3:7])

        if "JPE" not in serial and "JAS" not in serial:
            self.result.is_skipped("Device not exposed")
            return

        if model == "DCS-7280SR3-48YC8" and "JPE" in serial and number >= 2131:
            self.result.is_skipped("Device not exposed")
            return

        if model == "DCS-7280SR3-48YC8" and "JAS" in serial and number >= 2041:
            self.result.is_skipped("Device not exposed")
            return

        if model == "DCS-7280SR3K-48YC8" and "JPE" in serial and number >= 2134:
            self.result.is_skipped("Device not exposed")
            return

        if model == "DCS-7280SR3K-48YC8" and "JAS" in serial and number >= 2041:
            self.result.is_skipped("Device not exposed")
            return

        # Because each of the if checks above will return if taken, we only run the long
        # check if we get this far
        for entry in command_output["details"]["components"]:
            if entry["name"] == "FixedSystemvrm1":
                if int(entry["version"]) < 7:
                    self.result.is_failure("Device is exposed to FN72")
                else:
                    self.result.is_success("FN72 is mitigated")
                return
        # We should never hit this point
        self.result.is_error("Error in running test - FixedSystemvrm1 not found")
        return
"},{"location":"api/tests.field_notices/#anta.tests.field_notices.VerifyFieldNotice72Resolution.test","title":"test","text":"
test() -> None\n

Run VerifyFieldNotice72Resolution validation

Source code in anta/tests/field_notices.py
@skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n@AntaTest.anta_test\ndef test(self) -> None:  # type: ignore[override]\n\"\"\"Run VerifyFieldNotice72Resolution validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    devices = [\"DCS-7280SR3-48YC8\", \"DCS-7280SR3K-48YC8\"]\n    variants = [\"-SSD-F\", \"-SSD-R\", \"-M-F\", \"-M-R\", \"-F\", \"-R\"]\n    model = command_output[\"modelName\"]\n\n    for variant in variants:\n        model = model.replace(variant, \"\")\n    if model not in devices:\n        self.result.is_skipped(\"Platform is not impacted by FN072\")\n        return\n\n    serial = command_output[\"serialNumber\"]\n    number = int(serial[3:7])\n\n    if \"JPE\" not in serial and \"JAS\" not in serial:\n        self.result.is_skipped(\"Device not exposed\")\n        return\n\n    if model == \"DCS-7280SR3-48YC8\" and \"JPE\" in serial and number >= 2131:\n        self.result.is_skipped(\"Device not exposed\")\n        return\n\n    if model == \"DCS-7280SR3-48YC8\" and \"JAS\" in serial and number >= 2041:\n        self.result.is_skipped(\"Device not exposed\")\n        return\n\n    if model == \"DCS-7280SR3K-48YC8\" and \"JPE\" in serial and number >= 2134:\n        self.result.is_skipped(\"Device not exposed\")\n        return\n\n    if model == \"DCS-7280SR3K-48YC8\" and \"JAS\" in serial and number >= 2041:\n        self.result.is_skipped(\"Device not exposed\")\n        return\n\n    # Because each of the if checks above will return if taken, we only run the long\n    # check if we get this far\n    for entry in command_output[\"details\"][\"components\"]:\n        if entry[\"name\"] == \"FixedSystemvrm1\":\n            if int(entry[\"version\"]) < 7:\n                self.result.is_failure(\"Device is exposed to FN72\")\n            else:\n                self.result.is_success(\"FN72 is mitigated\")\n            return\n    # We should never hit this point\n    self.result.is_error(\"Error in running test - FixedSystemvrm1 not found\")\n    return\n
"},{"location":"api/tests.hardware/","title":"Hardware","text":""},{"location":"api/tests.hardware/#anta-catalog-for-hardware-tests","title":"ANTA catalog for hardware tests","text":"

Test functions related to the hardware or environment

"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyAdverseDrops","title":"VerifyAdverseDrops","text":"

Bases: AntaTest

Verifies there are no adverse drops on DCS7280E and DCS7500E.

Source code in anta/tests/hardware.py
class VerifyAdverseDrops(AntaTest):\n\"\"\"\n    Verifies there is no adverse drops on DCS7280E and DCS7500E.\n    \"\"\"\n\n    name = \"VerifyAdverseDrops\"\n    description = \"Verifies there is no adverse drops on DCS7280E and DCS7500E\"\n    categories = [\"hardware\"]\n    commands = [AntaCommand(command=\"show hardware counter drop\", ofmt=\"json\")]\n\n    @skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyAdverseDrops validation\"\"\"\n        command_output = self.instance_commands[0].json_output\n        total_adverse_drop = command_output[\"totalAdverseDrops\"] if \"totalAdverseDrops\" in command_output.keys() else \"\"\n        if total_adverse_drop == 0:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Device TotalAdverseDrops counter is {total_adverse_drop}\")\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyAdverseDrops.test","title":"test","text":"
test() -> None\n

Run VerifyAdverseDrops validation

Source code in anta/tests/hardware.py
@skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyAdverseDrops validation\"\"\"\n    command_output = self.instance_commands[0].json_output\n    total_adverse_drop = command_output[\"totalAdverseDrops\"] if \"totalAdverseDrops\" in command_output.keys() else \"\"\n    if total_adverse_drop == 0:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"Device TotalAdverseDrops counter is {total_adverse_drop}\")\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyEnvironmentCooling","title":"VerifyEnvironmentCooling","text":"

Bases: AntaTest

Verifies the fans status is in the accepted states list.

Default accepted states list is ['ok']
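As a sketch of how a custom accepted states list could be supplied when driving this test from Python, assuming a connected AntaDevice instance named device and the AntaTest calling convention of this release (the "powerLoss" state is a hypothetical example):

from anta.tests.hardware import VerifyEnvironmentCooling


async def check_cooling(device) -> None:
    """Run the cooling test with a custom accepted states list."""
    test = VerifyEnvironmentCooling(device)
    # "powerLoss" is a hypothetical extra state accepted here for illustration.
    await test.test(accepted_states=["ok", "powerLoss"])
    print(test.result)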

Source code in anta/tests/hardware.py
class VerifyEnvironmentCooling(AntaTest):\n\"\"\"\n    Verifies the fans status is in the accepted states list.\n\n    Default accepted states list is ['ok']\n    \"\"\"\n\n    name = \"VerifyEnvironmentCooling\"\n    description = \"Verifies the fans status is OK for fans\"\n    categories = [\"hardware\"]\n    commands = [AntaCommand(command=\"show system environment cooling\", ofmt=\"json\")]\n\n    @skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n    @AntaTest.anta_test\n    def test(self, accepted_states: Optional[List[str]] = None) -> None:\n\"\"\"\n        Run VerifyEnvironmentCooling validation\n\n        Args:\n            accepted_states: Accepted states list for fan status\n        \"\"\"\n        if accepted_states is None:\n            accepted_states = [\"ok\"]\n\n        command_output = self.instance_commands[0].json_output\n        self.result.is_success()\n        # First go through power supplies fans\n        for power_supply in command_output.get(\"powerSupplySlots\", []):\n            for fan in power_supply.get(\"fans\", []):\n                if (state := fan[\"status\"]) not in accepted_states:\n                    if self.result.result == \"success\":\n                        self.result.is_failure(f\"Some fans state are not in the accepted list: {accepted_states}.\")\n                    self.result.is_failure(f\"Fan {fan['label']} on PowerSupply {power_supply['label']} has state '{state}'.\")\n        # Then go through Fan Trays\n        for fan_tray in command_output.get(\"fanTraySlots\", []):\n            for fan in fan_tray.get(\"fans\", []):\n                if (state := fan[\"status\"]) not in accepted_states:\n                    if self.result.result == \"success\":\n                        self.result.is_failure(f\"Some fans state are not in the accepted list: {accepted_states}.\")\n                    self.result.is_failure(f\"Fan {fan['label']} on Fan Tray {fan_tray['label']} has state '{state}'.\")\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyEnvironmentCooling.test","title":"test","text":"
test(accepted_states: Optional[List[str]] = None) -> None\n

Run VerifyEnvironmentCooling validation

Parameters:

accepted_states (Optional[List[str]], default None): Accepted states list for fan status

Source code in anta/tests/hardware.py
@skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n@AntaTest.anta_test\ndef test(self, accepted_states: Optional[List[str]] = None) -> None:\n\"\"\"\n    Run VerifyEnvironmentCooling validation\n\n    Args:\n        accepted_states: Accepted states list for fan status\n    \"\"\"\n    if accepted_states is None:\n        accepted_states = [\"ok\"]\n\n    command_output = self.instance_commands[0].json_output\n    self.result.is_success()\n    # First go through power supplies fans\n    for power_supply in command_output.get(\"powerSupplySlots\", []):\n        for fan in power_supply.get(\"fans\", []):\n            if (state := fan[\"status\"]) not in accepted_states:\n                if self.result.result == \"success\":\n                    self.result.is_failure(f\"Some fans state are not in the accepted list: {accepted_states}.\")\n                self.result.is_failure(f\"Fan {fan['label']} on PowerSupply {power_supply['label']} has state '{state}'.\")\n    # Then go through Fan Trays\n    for fan_tray in command_output.get(\"fanTraySlots\", []):\n        for fan in fan_tray.get(\"fans\", []):\n            if (state := fan[\"status\"]) not in accepted_states:\n                if self.result.result == \"success\":\n                    self.result.is_failure(f\"Some fans state are not in the accepted list: {accepted_states}.\")\n                self.result.is_failure(f\"Fan {fan['label']} on Fan Tray {fan_tray['label']} has state '{state}'.\")\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyEnvironmentPower","title":"VerifyEnvironmentPower","text":"

Bases: AntaTest

Verifies the power supplies status is in the accepted states list

The default accepted states list is ['ok']
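The core of the check is a dictionary comprehension that retains only power supplies whose state falls outside the accepted list. A standalone sketch against a hypothetical eAPI payload (field names follow the source below):

# Hypothetical "show system environment power" payload.
command_output = {
    "powerSupplies": {
        "1": {"state": "ok"},
        "2": {"state": "powerLoss"},
    }
}
accepted_states = ["ok"]

power_supplies = command_output.get("powerSupplies", {})
wrong_power_supplies = {
    name: {"state": value["state"]}
    for name, value in power_supplies.items()
    if value["state"] not in accepted_states
}
print(wrong_power_supplies)  # {'2': {'state': 'powerLoss'}}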

Source code in anta/tests/hardware.py
class VerifyEnvironmentPower(AntaTest):\n\"\"\"\n    Verifies the power supplies status is in the accepted states list\n\n    The default accepted states list is ['ok']\n    \"\"\"\n\n    name = \"VerifyEnvironmentPower\"\n    description = \"Verifies the power supplies status is OK\"\n    categories = [\"hardware\"]\n    commands = [AntaCommand(command=\"show system environment power\", ofmt=\"json\")]\n\n    @skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n    @AntaTest.anta_test\n    def test(self, accepted_states: Optional[List[str]] = None) -> None:\n\"\"\"\n        Run VerifyEnvironmentPower validation\n\n        Args:\n            accepted_states: Accepted states list for power supplies\n        \"\"\"\n        if accepted_states is None:\n            accepted_states = [\"ok\"]\n        command_output = self.instance_commands[0].json_output\n        power_supplies = command_output[\"powerSupplies\"] if \"powerSupplies\" in command_output.keys() else \"{}\"\n        wrong_power_supplies = {\n            powersupply: {\"state\": value[\"state\"]} for powersupply, value in dict(power_supplies).items() if value[\"state\"] not in accepted_states\n        }\n        if not wrong_power_supplies:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"The following power supplies states are not in the accepted_states list {accepted_states}\")\n            self.result.messages.append(str(wrong_power_supplies))\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyEnvironmentPower.test","title":"test","text":"
test(accepted_states: Optional[List[str]] = None) -> None\n

Run VerifyEnvironmentPower validation

Parameters:

accepted_states (Optional[List[str]], default None): Accepted states list for power supplies

Source code in anta/tests/hardware.py
@skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n@AntaTest.anta_test\ndef test(self, accepted_states: Optional[List[str]] = None) -> None:\n\"\"\"\n    Run VerifyEnvironmentPower validation\n\n    Args:\n        accepted_states: Accepted states list for power supplies\n    \"\"\"\n    if accepted_states is None:\n        accepted_states = [\"ok\"]\n    command_output = self.instance_commands[0].json_output\n    power_supplies = command_output[\"powerSupplies\"] if \"powerSupplies\" in command_output.keys() else \"{}\"\n    wrong_power_supplies = {\n        powersupply: {\"state\": value[\"state\"]} for powersupply, value in dict(power_supplies).items() if value[\"state\"] not in accepted_states\n    }\n    if not wrong_power_supplies:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"The following power supplies states are not in the accepted_states list {accepted_states}\")\n        self.result.messages.append(str(wrong_power_supplies))\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyEnvironmentSystemCooling","title":"VerifyEnvironmentSystemCooling","text":"

Bases: AntaTest

Verifies the System Cooling is ok.

Source code in anta/tests/hardware.py
class VerifyEnvironmentSystemCooling(AntaTest):\n\"\"\"\n    Verifies the System Cooling is ok.\n    \"\"\"\n\n    name = \"VerifyEnvironmentSystemCooling\"\n    description = \"Verifies the fans status is OK for fans\"\n    categories = [\"hardware\"]\n    commands = [AntaCommand(command=\"show system environment cooling\", ofmt=\"json\")]\n\n    @skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyEnvironmentCooling validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n        sys_status = command_output[\"systemStatus\"] if \"systemStatus\" in command_output.keys() else \"\"\n\n        self.result.is_success()\n        if sys_status != \"coolingOk\":\n            self.result.is_failure(f\"Device System cooling is not OK: {sys_status}\")\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyEnvironmentSystemCooling.test","title":"test","text":"
test() -> None\n

Run VerifyEnvironmentSystemCooling validation

Source code in anta/tests/hardware.py
@skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyEnvironmentCooling validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n    sys_status = command_output[\"systemStatus\"] if \"systemStatus\" in command_output.keys() else \"\"\n\n    self.result.is_success()\n    if sys_status != \"coolingOk\":\n        self.result.is_failure(f\"Device System cooling is not OK: {sys_status}\")\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyTemperature","title":"VerifyTemperature","text":"

Bases: AntaTest

Verifies device temperature is currently OK (temperatureOk).

Source code in anta/tests/hardware.py
class VerifyTemperature(AntaTest):\n\"\"\"\n    Verifies device temparture is currently OK (temperatureOK).\n    \"\"\"\n\n    name = \"VerifyTemperature\"\n    description = \"Verifies device temparture is currently OK (temperatureOK)\"\n    categories = [\"hardware\"]\n    commands = [AntaCommand(command=\"show system environment temperature\", ofmt=\"json\")]\n\n    @skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyTemperature validation\"\"\"\n        command_output = self.instance_commands[0].json_output\n        temperature_status = command_output[\"systemStatus\"] if \"systemStatus\" in command_output.keys() else \"\"\n        if temperature_status == \"temperatureOk\":\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Device temperature is not OK, systemStatus: {temperature_status }\")\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyTemperature.test","title":"test","text":"
test() -> None\n

Run VerifyTemperature validation

Source code in anta/tests/hardware.py
@skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyTemperature validation\"\"\"\n    command_output = self.instance_commands[0].json_output\n    temperature_status = command_output[\"systemStatus\"] if \"systemStatus\" in command_output.keys() else \"\"\n    if temperature_status == \"temperatureOk\":\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"Device temperature is not OK, systemStatus: {temperature_status }\")\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyTransceiversManufacturers","title":"VerifyTransceiversManufacturers","text":"

Bases: AntaTest

Verifies Manufacturers of all Transceivers.

Source code in anta/tests/hardware.py
class VerifyTransceiversManufacturers(AntaTest):\n\"\"\"\n    Verifies Manufacturers of all Transceivers.\n    \"\"\"\n\n    name = \"VerifyTransceiversManufacturers\"\n    description = \"\"\n    categories = [\"hardware\"]\n    commands = [AntaCommand(command=\"show inventory\", ofmt=\"json\")]\n\n    @skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n    @AntaTest.anta_test\n    def test(self, manufacturers: Optional[List[str]] = None) -> None:\n\"\"\"\n        Run VerifyTransceiversManufacturers validation\n\n        Args:\n            manufacturers: List of allowed transceivers manufacturers.\n        \"\"\"\n        if not manufacturers:\n            self.result.is_skipped(f\"{self.__class__.name} was not run as no manufacturers were given\")\n        else:\n            command_output = self.instance_commands[0].json_output\n            wrong_manufacturers = {interface: value[\"mfgName\"] for interface, value in command_output[\"xcvrSlots\"].items() if value[\"mfgName\"] not in manufacturers}\n            if not wrong_manufacturers:\n                self.result.is_success()\n            else:\n                self.result.is_failure(\"The following interfaces have transceivers from unauthorized manufacturers\")\n                self.result.messages.append(str(wrong_manufacturers))\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyTransceiversManufacturers.test","title":"test","text":"
test(manufacturers: Optional[List[str]] = None) -> None\n

Run VerifyTransceiversManufacturers validation

Parameters:

manufacturers (Optional[List[str]], default None): List of allowed transceivers manufacturers.

Source code in anta/tests/hardware.py
@skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n@AntaTest.anta_test\ndef test(self, manufacturers: Optional[List[str]] = None) -> None:\n\"\"\"\n    Run VerifyTransceiversManufacturers validation\n\n    Args:\n        manufacturers: List of allowed transceivers manufacturers.\n    \"\"\"\n    if not manufacturers:\n        self.result.is_skipped(f\"{self.__class__.name} was not run as no manufacturers were given\")\n    else:\n        command_output = self.instance_commands[0].json_output\n        wrong_manufacturers = {interface: value[\"mfgName\"] for interface, value in command_output[\"xcvrSlots\"].items() if value[\"mfgName\"] not in manufacturers}\n        if not wrong_manufacturers:\n            self.result.is_success()\n        else:\n            self.result.is_failure(\"The following interfaces have transceivers from unauthorized manufacturers\")\n            self.result.messages.append(str(wrong_manufacturers))\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyTransceiversTemperature","title":"VerifyTransceiversTemperature","text":"

Bases: AntaTest

Verifies Transceivers temperature is currently OK.

Source code in anta/tests/hardware.py
class VerifyTransceiversTemperature(AntaTest):\n\"\"\"\n    Verifies Transceivers temperature is currently OK.\n    \"\"\"\n\n    name = \"VerifyTransceiversTemperature\"\n    description = \"Verifies Transceivers temperature is currently OK\"\n    categories = [\"hardware\"]\n    commands = [AntaCommand(command=\"show system environment temperature transceiver\", ofmt=\"json\")]\n\n    @skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyTransceiversTemperature validation\"\"\"\n        command_output = self.instance_commands[0].json_output\n        sensors = command_output[\"tempSensors\"] if \"tempSensors\" in command_output.keys() else \"\"\n        wrong_sensors = {\n            sensor[\"name\"]: {\n                \"hwStatus\": sensor[\"hwStatus\"],\n                \"alertCount\": sensor[\"alertCount\"],\n            }\n            for sensor in sensors\n            if sensor[\"hwStatus\"] != \"ok\" or sensor[\"alertCount\"] != 0\n        }\n        if not wrong_sensors:\n            self.result.is_success()\n        else:\n            self.result.is_failure(\"The following sensors do not have the correct temperature or had alarms in the past:\")\n            self.result.messages.append(str(wrong_sensors))\n
"},{"location":"api/tests.hardware/#anta.tests.hardware.VerifyTransceiversTemperature.test","title":"test","text":"
test() -> None\n

Run VerifyTransceiversTemperature validation

Source code in anta/tests/hardware.py
@skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyTransceiversTemperature validation\"\"\"\n    command_output = self.instance_commands[0].json_output\n    sensors = command_output[\"tempSensors\"] if \"tempSensors\" in command_output.keys() else \"\"\n    wrong_sensors = {\n        sensor[\"name\"]: {\n            \"hwStatus\": sensor[\"hwStatus\"],\n            \"alertCount\": sensor[\"alertCount\"],\n        }\n        for sensor in sensors\n        if sensor[\"hwStatus\"] != \"ok\" or sensor[\"alertCount\"] != 0\n    }\n    if not wrong_sensors:\n        self.result.is_success()\n    else:\n        self.result.is_failure(\"The following sensors do not have the correct temperature or had alarms in the past:\")\n        self.result.messages.append(str(wrong_sensors))\n
"},{"location":"api/tests.interfaces/","title":"Interfaces","text":""},{"location":"api/tests.interfaces/#anta-catalog-for-interfaces-tests","title":"ANTA catalog for interfaces tests","text":"

Test functions related to the device interfaces

"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyIPProxyARP","title":"VerifyIPProxyARP","text":"

Bases: AntaTest

Verifies if Proxy-ARP is enabled for the provided list of interface(s).

Expected Results
  • success: The test will pass if Proxy-ARP is enabled on the specified interface(s).
  • failure: The test will fail if Proxy-ARP is disabled on the specified interface(s).
  • error: The test will give an error if a list of interface(s) is not provided as template_params.
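Unlike the command-based tests above, this test renders one "show ip interface {intf}" command per entry in template_params. A minimal sketch of driving it from Python, assuming a connected AntaDevice named device and that template_params is passed at instantiation (interface names are hypothetical):

from anta.tests.interfaces import VerifyIPProxyARP


async def check_proxy_arp(device) -> None:
    # Each dict fills the {intf} placeholder of the command template.
    test = VerifyIPProxyARP(device, template_params=[{"intf": "Ethernet1"}, {"intf": "Ethernet2"}])
    await test.test()
    print(test.result.result, test.result.messages)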
Source code in anta/tests/interfaces.py
class VerifyIPProxyARP(AntaTest):\n\"\"\"\n    Verifies if Proxy-ARP is enabled for the provided list of interface(s).\n\n    Expected Results:\n        * success: The test will pass if Proxy-ARP is enabled on the specified interface(s).\n        * failure: The test will fail if Proxy-ARP is disabled on the specified interface(s).\n        * error: The test will give an error if a list of interface(s) is not provided as template_params.\n\n    \"\"\"\n\n    name = \"VerifyIPProxyARP\"\n    description = \"Verifies if Proxy-ARP is enabled for the provided list of interface(s).\"\n    categories = [\"interfaces\"]\n    template = AntaTemplate(template=\"show ip interface {intf}\")\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyIPProxyARP validation.\n        \"\"\"\n\n        disabled_intf = []\n        for command in self.instance_commands:\n            if command.params and \"intf\" in command.params:\n                intf = command.params[\"intf\"]\n            if not command.json_output[\"interfaces\"][intf][\"proxyArp\"]:\n                disabled_intf.append(intf)\n\n        if disabled_intf:\n            self.result.is_failure(f\"The following interface(s) have Proxy-ARP disabled: {disabled_intf}\")\n\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyIPProxyARP.test","title":"test","text":"
test() -> None\n

Run VerifyIPProxyARP validation.

Source code in anta/tests/interfaces.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyIPProxyARP validation.\n    \"\"\"\n\n    disabled_intf = []\n    for command in self.instance_commands:\n        if command.params and \"intf\" in command.params:\n            intf = command.params[\"intf\"]\n        if not command.json_output[\"interfaces\"][intf][\"proxyArp\"]:\n            disabled_intf.append(intf)\n\n    if disabled_intf:\n        self.result.is_failure(f\"The following interface(s) have Proxy-ARP disabled: {disabled_intf}\")\n\n    else:\n        self.result.is_success()\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyIllegalLACP","title":"VerifyIllegalLACP","text":"

Bases: AntaTest

Verifies there are no illegal LACP packets received.

Source code in anta/tests/interfaces.py
class VerifyIllegalLACP(AntaTest):\n\"\"\"\n    Verifies there is no illegal LACP packets received.\n    \"\"\"\n\n    name = \"VerifyIllegalLACP\"\n    description = \"Verifies there is no illegal LACP packets received.\"\n    categories = [\"interfaces\"]\n    commands = [AntaCommand(command=\"show lacp counters all-ports\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyIllegalLACP validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        po_with_illegal_lacp: List[Dict[str, Dict[str, int]]] = []\n        for portchannel, portchannel_dict in command_output[\"portChannels\"].items():\n            po_with_illegal_lacp.extend(\n                {portchannel: interface} for interface, interface_dict in portchannel_dict[\"interfaces\"].items() if interface_dict[\"illegalRxCount\"] != 0\n            )\n\n        if not po_with_illegal_lacp:\n            self.result.is_success()\n        else:\n            self.result.is_failure(\"The following port-channels have recieved illegal lacp packets on the \" f\"following ports: {po_with_illegal_lacp}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyIllegalLACP.test","title":"test","text":"
test() -> None\n

Run VerifyIllegalLACP validation

Source code in anta/tests/interfaces.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyIllegalLACP validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    po_with_illegal_lacp: List[Dict[str, Dict[str, int]]] = []\n    for portchannel, portchannel_dict in command_output[\"portChannels\"].items():\n        po_with_illegal_lacp.extend(\n            {portchannel: interface} for interface, interface_dict in portchannel_dict[\"interfaces\"].items() if interface_dict[\"illegalRxCount\"] != 0\n        )\n\n    if not po_with_illegal_lacp:\n        self.result.is_success()\n    else:\n        self.result.is_failure(\"The following port-channels have recieved illegal lacp packets on the \" f\"following ports: {po_with_illegal_lacp}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyInterfaceDiscards","title":"VerifyInterfaceDiscards","text":"

Bases: AntaTest

Verifies interfaces packet discard counters are equal to zero.

Source code in anta/tests/interfaces.py
class VerifyInterfaceDiscards(AntaTest):\n\"\"\"\n    Verifies interfaces packet discard counters are equal to zero.\n    \"\"\"\n\n    name = \"VerifyInterfaceDiscards\"\n    description = \"Verifies interfaces packet discard counters are equal to zero.\"\n    categories = [\"interfaces\"]\n    commands = [AntaCommand(command=\"show interfaces counters discards\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyInterfaceDiscards validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        wrong_interfaces: List[Dict[str, Dict[str, int]]] = []\n\n        for interface, outer_v in command_output[\"interfaces\"].items():\n            wrong_interfaces.extend({interface: outer_v} for counter, value in outer_v.items() if value > 0)\n        if not wrong_interfaces:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"The following interfaces have non 0 discard counter(s): {wrong_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyInterfaceDiscards.test","title":"test","text":"
test() -> None\n

Run VerifyInterfaceDiscards validation

Source code in anta/tests/interfaces.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyInterfaceDiscards validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    wrong_interfaces: List[Dict[str, Dict[str, int]]] = []\n\n    for interface, outer_v in command_output[\"interfaces\"].items():\n        wrong_interfaces.extend({interface: outer_v} for counter, value in outer_v.items() if value > 0)\n    if not wrong_interfaces:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"The following interfaces have non 0 discard counter(s): {wrong_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyInterfaceErrDisabled","title":"VerifyInterfaceErrDisabled","text":"

Bases: AntaTest

Verifies there is no interface in error disable state.

Source code in anta/tests/interfaces.py
class VerifyInterfaceErrDisabled(AntaTest):\n\"\"\"\n    Verifies there is no interface in error disable state.\n    \"\"\"\n\n    name = \"VerifyInterfaceErrDisabled\"\n    description = \"Verifies there is no interface in error disable state.\"\n    categories = [\"interfaces\"]\n    commands = [AntaCommand(command=\"show interfaces status\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyInterfaceErrDisabled validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        errdisabled_interfaces = [interface for interface, value in command_output[\"interfaceStatuses\"].items() if value[\"linkStatus\"] == \"errdisabled\"]\n\n        if errdisabled_interfaces:\n            self.result.is_failure(f\"The following interfaces are in error disabled state: {errdisabled_interfaces}\")\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyInterfaceErrDisabled.test","title":"test","text":"
test() -> None\n

Run VerifyInterfaceErrDisabled validation

Source code in anta/tests/interfaces.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyInterfaceErrDisabled validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    errdisabled_interfaces = [interface for interface, value in command_output[\"interfaceStatuses\"].items() if value[\"linkStatus\"] == \"errdisabled\"]\n\n    if errdisabled_interfaces:\n        self.result.is_failure(f\"The following interfaces are in error disabled state: {errdisabled_interfaces}\")\n    else:\n        self.result.is_success()\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyInterfaceErrors","title":"VerifyInterfaceErrors","text":"

Bases: AntaTest

Verifies interfaces error counters are equal to zero.

Source code in anta/tests/interfaces.py
class VerifyInterfaceErrors(AntaTest):\n\"\"\"\n    Verifies interfaces error counters are equal to zero.\n    \"\"\"\n\n    name = \"VerifyInterfaceErrors\"\n    description = \"Verifies interfaces error counters are equal to zero.\"\n    categories = [\"interfaces\"]\n    commands = [AntaCommand(command=\"show interfaces counters errors\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyInterfaceUtilization validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        wrong_interfaces: List[Dict[str, Dict[str, int]]] = []\n        for interface, outer_v in command_output[\"interfaceErrorCounters\"].items():\n            wrong_interfaces.extend({interface: outer_v} for counter, value in outer_v.items() if value > 0)\n        if not wrong_interfaces:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"The following interfaces have non 0 error counter(s): {wrong_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyInterfaceErrors.test","title":"test","text":"
test() -> None\n

Run VerifyInterfaceErrors validation

Source code in anta/tests/interfaces.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyInterfaceUtilization validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    wrong_interfaces: List[Dict[str, Dict[str, int]]] = []\n    for interface, outer_v in command_output[\"interfaceErrorCounters\"].items():\n        wrong_interfaces.extend({interface: outer_v} for counter, value in outer_v.items() if value > 0)\n    if not wrong_interfaces:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"The following interfaces have non 0 error counter(s): {wrong_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyInterfaceUtilization","title":"VerifyInterfaceUtilization","text":"

Bases: AntaTest

Verifies interfaces utilization is below 75%.
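Since this command is only consumed as text output here, the test slices whitespace-separated columns from each line, with the input and output rate percentages at fixed offsets from the end. A sketch of that slicing on a hypothetical output line (the column layout is illustrative):

# Hypothetical data line from "show interfaces counters rates" text output.
line = "Et1    5:00   850.0   85.0%   120   10.0   1.0%   15"
fields = line.split()

in_pct, out_pct = fields[-5], fields[-2]  # offsets used by the test below
for pct in (in_pct, out_pct):
    if pct != "-" and float(pct.replace("%", "")) > 75.0:
        print(f"{fields[0]} exceeds 75%: {pct}")  # Et1 exceeds 75%: 85.0%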

Source code in anta/tests/interfaces.py
class VerifyInterfaceUtilization(AntaTest):\n\"\"\"\n    Verifies interfaces utilization is below 75%.\n    \"\"\"\n\n    name = \"VerifyInterfaceUtilization\"\n    description = \"Verifies interfaces utilization is below 75%.\"\n    categories = [\"interfaces\"]\n    # TODO - move from text to json if possible\n    commands = [AntaCommand(command=\"show interfaces counters rates\", ofmt=\"text\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyInterfaceUtilization validation\"\"\"\n\n        command_output = self.instance_commands[0].text_output\n\n        wrong_interfaces = {}\n        for line in command_output.split(\"\\n\")[1:]:\n            if len(line) > 0:\n                if line.split()[-5] == \"-\" or line.split()[-2] == \"-\":\n                    pass\n                elif float(line.split()[-5].replace(\"%\", \"\")) > 75.0:\n                    wrong_interfaces[line.split()[0]] = line.split()[-5]\n                elif float(line.split()[-2].replace(\"%\", \"\")) > 75.0:\n                    wrong_interfaces[line.split()[0]] = line.split()[-2]\n\n        if not wrong_interfaces:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"The following interfaces have a usage > 75%: {wrong_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyInterfaceUtilization.test","title":"test","text":"
test() -> None\n

Run VerifyInterfaceUtilization validation

Source code in anta/tests/interfaces.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyInterfaceUtilization validation\"\"\"\n\n    command_output = self.instance_commands[0].text_output\n\n    wrong_interfaces = {}\n    for line in command_output.split(\"\\n\")[1:]:\n        if len(line) > 0:\n            if line.split()[-5] == \"-\" or line.split()[-2] == \"-\":\n                pass\n            elif float(line.split()[-5].replace(\"%\", \"\")) > 75.0:\n                wrong_interfaces[line.split()[0]] = line.split()[-5]\n            elif float(line.split()[-2].replace(\"%\", \"\")) > 75.0:\n                wrong_interfaces[line.split()[0]] = line.split()[-2]\n\n    if not wrong_interfaces:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"The following interfaces have a usage > 75%: {wrong_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyInterfacesStatus","title":"VerifyInterfacesStatus","text":"

Bases: AntaTest

Verifies the number of Ethernet interfaces up/up on the device is higher than or equal to a given value.

Source code in anta/tests/interfaces.py
class VerifyInterfacesStatus(AntaTest):\n\"\"\"\n    Verifies the number of Ethernet interfaces up/up on the device is higher or equal than a value.\n    \"\"\"\n\n    name = \"VerifyInterfacesStatus\"\n    description = \"Verifies the number of Ethernet interfaces up/up on the device is higher or equal than a value.\"\n    categories = [\"interfaces\"]\n    commands = [AntaCommand(command=\"show interfaces description\")]\n\n    @AntaTest.anta_test\n    def test(self, minimum: Optional[int] = None) -> None:\n\"\"\"\n        Run VerifyInterfacesStatus validation\n\n        Args:\n            minimum: Expected minimum number of Ethernet interfaces up/up.\n        \"\"\"\n\n        if minimum is None or minimum < 0:\n            self.result.is_skipped(f\"VerifyInterfacesStatus was not run as an invalid minimum value was given {minimum}.\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        count_up_up = 0\n        other_ethernet_interfaces = []\n\n        for interface in command_output[\"interfaceDescriptions\"]:\n            interface_dict = command_output[\"interfaceDescriptions\"][interface]\n            if \"Ethernet\" in interface:\n                if re.match(r\"connected|up\", interface_dict[\"lineProtocolStatus\"]) and re.match(r\"connected|up\", interface_dict[\"interfaceStatus\"]):\n                    count_up_up += 1\n                else:\n                    other_ethernet_interfaces.append(interface)\n\n        if count_up_up >= minimum:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Only {count_up_up}, less than {minimum} Ethernet interfaces are UP/UP\")\n            self.result.messages.append(f\"The following Ethernet interfaces are not UP/UP: {other_ethernet_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyInterfacesStatus.test","title":"test","text":"
test(minimum: Optional[int] = None) -> None\n

Run VerifyInterfacesStatus validation

Parameters:

minimum (Optional[int], default None): Expected minimum number of Ethernet interfaces up/up.

Source code in anta/tests/interfaces.py
@AntaTest.anta_test\ndef test(self, minimum: Optional[int] = None) -> None:\n\"\"\"\n    Run VerifyInterfacesStatus validation\n\n    Args:\n        minimum: Expected minimum number of Ethernet interfaces up/up.\n    \"\"\"\n\n    if minimum is None or minimum < 0:\n        self.result.is_skipped(f\"VerifyInterfacesStatus was not run as an invalid minimum value was given {minimum}.\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    count_up_up = 0\n    other_ethernet_interfaces = []\n\n    for interface in command_output[\"interfaceDescriptions\"]:\n        interface_dict = command_output[\"interfaceDescriptions\"][interface]\n        if \"Ethernet\" in interface:\n            if re.match(r\"connected|up\", interface_dict[\"lineProtocolStatus\"]) and re.match(r\"connected|up\", interface_dict[\"interfaceStatus\"]):\n                count_up_up += 1\n            else:\n                other_ethernet_interfaces.append(interface)\n\n    if count_up_up >= minimum:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"Only {count_up_up}, less than {minimum} Ethernet interfaces are UP/UP\")\n        self.result.messages.append(f\"The following Ethernet interfaces are not UP/UP: {other_ethernet_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyL3MTU","title":"VerifyL3MTU","text":"

Bases: AntaTest

Verifies the global layer 3 Maximum Transfer Unit (MTU) for all layer 3 interfaces.

Expected Results
  • success: The test will pass if all layer 3 interfaces have the proper MTU configured.
  • failure: The test will fail if one or many layer 3 interfaces have the wrong MTU configured.
  • skipped: The test will be skipped if the MTU value is not provided.
Limitations
  • Only Ethernet, Port-Channel, Vlan interfaces are supported.
  • Other interface types, like Management, Loopback, Vxlan, Tunnel are currently not supported.

https://www.arista.com/en/support/toi/eos-4-23-1f/14388-global-knob-to-set-mtu-for-all-layer-3-interfaces
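Interface types outside the supported list are excluded by stripping trailing digits from the interface name and checking the base name against a deny list; a standalone sketch of that filter:

import re

NOT_SUPPORTED_INTERFACES = ["Management", "Loopback", "Vxlan", "Tunnel"]

for interface in ("Ethernet1", "Loopback0", "Vxlan1", "Port-Channel10"):
    base = re.sub(r"\d+$", "", interface)  # "Ethernet1" -> "Ethernet"
    action = "skipped" if base in NOT_SUPPORTED_INTERFACES else "MTU checked"
    print(interface, action)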

Source code in anta/tests/interfaces.py
class VerifyL3MTU(AntaTest):\n\"\"\"\n    Verifies the global layer 3 Maximum Transfer Unit (MTU) for all layer 3 interfaces.\n\n    Expected Results:\n        * success: The test will pass if all layer 3 interfaces have the proper MTU configured.\n        * failure: The test will fail if one or many layer 3 interfaces have the wrong MTU configured.\n        * skipped: The test will be skipped if the MTU value is not provided.\n\n    Limitations:\n        * Only Ethernet, Port-Channel, Vlan interfaces are supported.\n        * Other interface types, like Management, Loopback, Vxlan, Tunnel are currently not supported.\n\n    https://www.arista.com/en/support/toi/eos-4-23-1f/14388-global-knob-to-set-mtu-for-all-layer-3-interfaces\n\n    \"\"\"\n\n    name = \"VerifyL3MTU\"\n    description = \"Verifies the global layer 3 Maximum Transfer Unit (MTU) for all layer 3 interfaces.\"\n    categories = [\"interfaces\"]\n    commands = [AntaCommand(command=\"show interfaces\")]\n\n    NOT_SUPPORTED_INTERFACES: List[str] = [\"Management\", \"Loopback\", \"Vxlan\", \"Tunnel\"]\n\n    @AntaTest.anta_test\n    def test(self, mtu: int = 1500) -> None:\n\"\"\"\n        Run VerifyL3MTU validation\n\n        Args:\n          mtu: Layer 3 MTU to verify. Defaults to 1500.\n\n        \"\"\"\n\n        if not mtu:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because mtu was not supplied\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        wrong_l3mtu_intf = []\n\n        for interface, values in command_output[\"interfaces\"].items():\n            if re.sub(r\"\\d+$\", \"\", interface) not in self.NOT_SUPPORTED_INTERFACES:\n                if values[\"forwardingModel\"] == \"routed\" and values[\"mtu\"] != mtu:\n                    wrong_l3mtu_intf.append(interface)\n\n        if not wrong_l3mtu_intf:\n            self.result.is_success()\n\n        else:\n            self.result.is_failure(f\"The following interface(s) have the wrong MTU configured: {wrong_l3mtu_intf}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyL3MTU.test","title":"test","text":"
test(mtu: int = 1500) -> None\n

Run VerifyL3MTU validation

Parameters:

mtu (int, default 1500): Layer 3 MTU to verify.

Source code in anta/tests/interfaces.py
@AntaTest.anta_test\ndef test(self, mtu: int = 1500) -> None:\n\"\"\"\n    Run VerifyL3MTU validation\n\n    Args:\n      mtu: Layer 3 MTU to verify. Defaults to 1500.\n\n    \"\"\"\n\n    if not mtu:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because mtu was not supplied\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    wrong_l3mtu_intf = []\n\n    for interface, values in command_output[\"interfaces\"].items():\n        if re.sub(r\"\\d+$\", \"\", interface) not in self.NOT_SUPPORTED_INTERFACES:\n            if values[\"forwardingModel\"] == \"routed\" and values[\"mtu\"] != mtu:\n                wrong_l3mtu_intf.append(interface)\n\n    if not wrong_l3mtu_intf:\n        self.result.is_success()\n\n    else:\n        self.result.is_failure(f\"The following interface(s) have the wrong MTU configured: {wrong_l3mtu_intf}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyLoopbackCount","title":"VerifyLoopbackCount","text":"

Bases: AntaTest

Verifies that the number of loopback interfaces on the device matches the expected number and that none of the loopbacks are down.

Source code in anta/tests/interfaces.py
class VerifyLoopbackCount(AntaTest):\n\"\"\"\n    Verifies the number of loopback interfaces on the device is the one we expect and if none of the loopback is down.\n    \"\"\"\n\n    name = \"VerifyLoopbackCount\"\n    description = \"Verifies the number of loopback interfaces on the device is the one we expect and if none of the loopback is down.\"\n    categories = [\"interfaces\"]\n    commands = [AntaCommand(command=\"show ip interface brief\")]\n\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None) -> None:\n\"\"\"\n        Run VerifyLoopbackCount validation\n\n        Args:\n            number: Number of loopback interfaces expected to be present.\n        \"\"\"\n\n        if number is None:\n            self.result.is_skipped(\"VerifyLoopbackCount was not run as no number value was given.\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        loopback_count = 0\n        down_loopback_interfaces = []\n\n        for interface in command_output[\"interfaces\"]:\n            interface_dict = command_output[\"interfaces\"][interface]\n            if \"Loopback\" in interface:\n                loopback_count += 1\n                if not (interface_dict[\"lineProtocolStatus\"] == \"up\" and interface_dict[\"interfaceStatus\"] == \"connected\"):\n                    down_loopback_interfaces.append(interface)\n\n        if loopback_count == number and len(down_loopback_interfaces) == 0:\n            self.result.is_success()\n        else:\n            self.result.is_failure()\n            if loopback_count != number:\n                self.result.is_failure(f\"Found {loopback_count} Loopbacks when expecting {number}\")\n            elif len(down_loopback_interfaces) != 0:\n                self.result.is_failure(f\"The following Loopbacks are not up: {down_loopback_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyLoopbackCount.test","title":"test","text":"
test(number: Optional[int] = None) -> None\n

Run VerifyLoopbackCount validation

Parameters:

number (Optional[int], default None): Number of loopback interfaces expected to be present.

Source code in anta/tests/interfaces.py
@AntaTest.anta_test\ndef test(self, number: Optional[int] = None) -> None:\n\"\"\"\n    Run VerifyLoopbackCount validation\n\n    Args:\n        number: Number of loopback interfaces expected to be present.\n    \"\"\"\n\n    if number is None:\n        self.result.is_skipped(\"VerifyLoopbackCount was not run as no number value was given.\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    loopback_count = 0\n    down_loopback_interfaces = []\n\n    for interface in command_output[\"interfaces\"]:\n        interface_dict = command_output[\"interfaces\"][interface]\n        if \"Loopback\" in interface:\n            loopback_count += 1\n            if not (interface_dict[\"lineProtocolStatus\"] == \"up\" and interface_dict[\"interfaceStatus\"] == \"connected\"):\n                down_loopback_interfaces.append(interface)\n\n    if loopback_count == number and len(down_loopback_interfaces) == 0:\n        self.result.is_success()\n    else:\n        self.result.is_failure()\n        if loopback_count != number:\n            self.result.is_failure(f\"Found {loopback_count} Loopbacks when expecting {number}\")\n        elif len(down_loopback_interfaces) != 0:\n            self.result.is_failure(f\"The following Loopbacks are not up: {down_loopback_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyPortChannels","title":"VerifyPortChannels","text":"

Bases: AntaTest

Verifies there is no inactive port in port channels.

Source code in anta/tests/interfaces.py
class VerifyPortChannels(AntaTest):\n\"\"\"\n    Verifies there is no inactive port in port channels.\n    \"\"\"\n\n    name = \"VerifyPortChannels\"\n    description = \"Verifies there is no inactive port in port channels.\"\n    categories = [\"interfaces\"]\n    commands = [AntaCommand(command=\"show port-channel\")]\n\n    @skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyPortChannels validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        po_with_invactive_ports: List[Dict[str, str]] = []\n        for portchannel, portchannel_dict in command_output[\"portChannels\"].items():\n            if len(portchannel_dict[\"inactivePorts\"]) != 0:\n                po_with_invactive_ports.extend({portchannel: portchannel_dict[\"inactivePorts\"]})\n\n        if not po_with_invactive_ports:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"The following port-channels have inactive port(s): {po_with_invactive_ports}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyPortChannels.test","title":"test","text":"
test() -> None\n

Run VerifyPortChannels validation

Source code in anta/tests/interfaces.py
@skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyPortChannels validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    po_with_invactive_ports: List[Dict[str, str]] = []\n    for portchannel, portchannel_dict in command_output[\"portChannels\"].items():\n        if len(portchannel_dict[\"inactivePorts\"]) != 0:\n            po_with_invactive_ports.extend({portchannel: portchannel_dict[\"inactivePorts\"]})\n\n    if not po_with_invactive_ports:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"The following port-channels have inactive port(s): {po_with_invactive_ports}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifySVI","title":"VerifySVI","text":"

Bases: AntaTest

Verifies there are no VLAN interfaces (SVIs) down.

Source code in anta/tests/interfaces.py
class VerifySVI(AntaTest):\n\"\"\"\n    Verifies there is no interface vlan down.\n    \"\"\"\n\n    name = \"VerifySVI\"\n    description = \"Verifies there is no interface vlan down.\"\n    categories = [\"interfaces\"]\n    commands = [AntaCommand(command=\"show ip interface brief\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifySVI validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        down_svis = []\n\n        for interface in command_output[\"interfaces\"]:\n            interface_dict = command_output[\"interfaces\"][interface]\n            if \"Vlan\" in interface:\n                if not (interface_dict[\"lineProtocolStatus\"] == \"up\" and interface_dict[\"interfaceStatus\"] == \"connected\"):\n                    down_svis.append(interface)\n\n        if len(down_svis) == 0:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"The following SVIs are not up: {down_svis}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifySVI.test","title":"test","text":"
test() -> None\n

Run VerifySVI validation

Source code in anta/tests/interfaces.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifySVI validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    down_svis = []\n\n    for interface in command_output[\"interfaces\"]:\n        interface_dict = command_output[\"interfaces\"][interface]\n        if \"Vlan\" in interface:\n            if not (interface_dict[\"lineProtocolStatus\"] == \"up\" and interface_dict[\"interfaceStatus\"] == \"connected\"):\n                down_svis.append(interface)\n\n    if len(down_svis) == 0:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"The following SVIs are not up: {down_svis}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyStormControlDrops","title":"VerifyStormControlDrops","text":"

Bases: AntaTest

Verifies the device did not drop packets due to its storm-control configuration.

Source code in anta/tests/interfaces.py
class VerifyStormControlDrops(AntaTest):\n\"\"\"\n    Verifies the device did not drop packets due its to storm-control configuration.\n    \"\"\"\n\n    name = \"VerifyStormControlDrops\"\n    description = \"Verifies the device did not drop packets due its to storm-control configuration.\"\n    categories = [\"interfaces\"]\n    commands = [AntaCommand(command=\"show storm-control\")]\n\n    @skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyStormControlDrops validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        storm_controlled_interfaces: Dict[str, Dict[str, Any]] = {}\n        for interface, interface_dict in command_output[\"interfaces\"].items():\n            for traffic_type, traffic_type_dict in interface_dict[\"trafficTypes\"].items():\n                if \"drop\" in traffic_type_dict and traffic_type_dict[\"drop\"] != 0:\n                    storm_controlled_interface_dict = storm_controlled_interfaces.setdefault(interface, {})\n                    storm_controlled_interface_dict.update({traffic_type: traffic_type_dict[\"drop\"]})\n\n        if not storm_controlled_interfaces:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"The following interfaces have none 0 storm-control drop counters {storm_controlled_interfaces}\")\n
"},{"location":"api/tests.interfaces/#anta.tests.interfaces.VerifyStormControlDrops.test","title":"test","text":"
test() -> None\n

Run VerifyStormControlDrops validation

Source code in anta/tests/interfaces.py
@skip_on_platforms([\"cEOSLab\", \"vEOS-lab\"])\n@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyStormControlDrops validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    storm_controlled_interfaces: Dict[str, Dict[str, Any]] = {}\n    for interface, interface_dict in command_output[\"interfaces\"].items():\n        for traffic_type, traffic_type_dict in interface_dict[\"trafficTypes\"].items():\n            if \"drop\" in traffic_type_dict and traffic_type_dict[\"drop\"] != 0:\n                storm_controlled_interface_dict = storm_controlled_interfaces.setdefault(interface, {})\n                storm_controlled_interface_dict.update({traffic_type: traffic_type_dict[\"drop\"]})\n\n    if not storm_controlled_interfaces:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"The following interfaces have none 0 storm-control drop counters {storm_controlled_interfaces}\")\n
"},{"location":"api/tests.logging/","title":"Logging","text":""},{"location":"api/tests.logging/#anta-catalog-for-logging-tests","title":"ANTA catalog for logging tests","text":"

Test functions related to the various EOS logging settings

NOTE: 'show logging' does not support JSON output yet

"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingAccounting","title":"VerifyLoggingAccounting","text":"

Bases: AntaTest

Verifies if AAA accounting logs are generated.

Expected Results
  • success: The test will pass if AAA accounting logs are generated.
  • failure: The test will fail if AAA accounting logs are NOT generated.
Source code in anta/tests/logging.py
class VerifyLoggingAccounting(AntaTest):\n\"\"\"\n    Verifies if AAA accounting logs are generated.\n\n    Expected Results:\n        * success: The test will pass if AAA accounting logs are generated.\n        * failure: The test will fail if AAA accounting logs are NOT generated.\n    \"\"\"\n\n    name = \"VerifyLoggingAccounting\"\n    description = \"Verifies if AAA accounting logs are generated.\"\n    categories = [\"logging\"]\n    commands = [AntaCommand(command=\"show aaa accounting logs | tail\", ofmt=\"text\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyLoggingAccountingvalidation.\n        \"\"\"\n        pattern = r\"cmd=show aaa accounting logs\"\n        output = self.instance_commands[0].text_output\n\n        if re.search(pattern, output):\n            self.result.is_success()\n        else:\n            self.result.is_failure(\"AAA accounting logs are not generated\")\n
"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingAccounting.test","title":"test","text":"
test() -> None\n

Run VerifyLoggingAccounting validation.

Source code in anta/tests/logging.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyLoggingAccountingvalidation.\n    \"\"\"\n    pattern = r\"cmd=show aaa accounting logs\"\n    output = self.instance_commands[0].text_output\n\n    if re.search(pattern, output):\n        self.result.is_success()\n    else:\n        self.result.is_failure(\"AAA accounting logs are not generated\")\n
"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingHostname","title":"VerifyLoggingHostname","text":"

Bases: AntaTest

Verifies if logs are generated with the device FQDN.

Expected Results
  • success: The test will pass if logs are generated with the device FQDN.
  • failure: The test will fail if logs are NOT generated with the device FQDN.
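The test works by sending a marker log message and then scanning the recent logging output from newest to oldest for the last line containing that marker, which must include the device FQDN. A standalone sketch with hypothetical log lines:

# Hypothetical FQDN and "show logging ... | grep ANTA" text output.
fqdn = "leaf1.example.com"
output_logging = (
    "Mar  1 12:00:01 leaf1.example.com SYS-7-CLI: ANTA VerifyLoggingHostname validation\n"
    "Mar  1 12:00:02 leaf1 SYS-7-CLI: unrelated message"
)

last_match = ""
for line in reversed(output_logging.strip().split("\n")):  # newest first
    if "ANTA VerifyLoggingHostname validation" in line:
        last_match = line
        break

print(fqdn in last_match)  # True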
Source code in anta/tests/logging.py
class VerifyLoggingHostname(AntaTest):\n\"\"\"\n    Verifies if logs are generated with the device FQDN.\n\n    Expected Results:\n        * success: The test will pass if logs are generated with the device FQDN.\n        * failure: The test will fail if logs are NOT generated with the device FQDN.\n    \"\"\"\n\n    name = \"VerifyLoggingHostname\"\n    description = \"Verifies if logs are generated with the device FQDN.\"\n    categories = [\"logging\"]\n    commands = [\n        AntaCommand(command=\"show hostname\"),\n        AntaCommand(command=\"send log level informational message ANTA VerifyLoggingHostname validation\"),\n        AntaCommand(command=\"show logging informational last 30 seconds | grep ANTA\", ofmt=\"text\"),\n    ]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyLoggingHostname validation.\n        \"\"\"\n        output_hostname = self.instance_commands[0].json_output\n        output_logging = self.instance_commands[2].text_output\n        fqdn = output_hostname[\"fqdn\"]\n        lines = output_logging.strip().split(\"\\n\")[::-1]\n\n        log_pattern = r\"ANTA VerifyLoggingHostname validation\"\n\n        last_line_with_pattern = \"\"\n        for line in lines:\n            if re.search(log_pattern, line):\n                last_line_with_pattern = line\n                break\n\n        if fqdn in last_line_with_pattern:\n            self.result.is_success()\n        else:\n            self.result.is_failure(\"Logs are not generated with the device FQDN\")\n
"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingHostname.test","title":"test","text":"
test() -> None\n

Run VerifyLoggingHostname validation.

"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingHosts","title":"VerifyLoggingHosts","text":"

Bases: AntaTest

Verifies logging hosts (syslog servers) for a specified VRF.

Expected Results
  • success: The test will pass if the provided syslog servers are configured in the specified VRF.
  • failure: The test will fail if the provided syslog servers are NOT configured in the specified VRF.
  • skipped: The test will be skipped if syslog servers or VRF are not provided.
Source code in anta/tests/logging.py
class VerifyLoggingHosts(AntaTest):
    """
    Verifies logging hosts (syslog servers) for a specified VRF.

    Expected Results:
        * success: The test will pass if the provided syslog servers are configured in the specified VRF.
        * failure: The test will fail if the provided syslog servers are NOT configured in the specified VRF.
        * skipped: The test will be skipped if syslog servers or VRF are not provided.
    """

    name = "VerifyLoggingHosts"
    description = "Verifies logging hosts (syslog servers) for a specified VRF."
    categories = ["logging"]
    commands = [AntaCommand(command="show logging", ofmt="text")]

    @AntaTest.anta_test
    def test(self, hosts: Optional[List[str]] = None, vrf: str = "default") -> None:
        """
        Run VerifyLoggingHosts validation.

        Args:
            hosts: List of hosts (syslog servers) IP addresses.
            vrf: The name of the VRF to transport log messages. Defaults to 'default'.
        """
        if not hosts or not vrf:
            self.result.is_skipped(f"{self.__class__.name} did not run because hosts or vrf were not supplied")
            return

        output = self.instance_commands[0].text_output

        not_configured = []

        for host in hosts:
            pattern = rf"Logging to '{host}'.*VRF {vrf}"
            if not re.search(pattern, _get_logging_states(self.logger, output)):
                not_configured.append(host)

        if not not_configured:
            self.result.is_success()
        else:
            self.result.is_failure(f"Syslog servers {not_configured} are not configured in VRF {vrf}")
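The per-host check is a single regular expression over the text of show logging. A minimal sketch of how the pattern is built and matched; the state line below is an assumption based on the pattern, not verified device output:

import re

host, vrf = "10.1.1.1", "MGMT"
pattern = rf"Logging to '{host}'.*VRF {vrf}"

# Hypothetical 'show logging' state line, for illustration only.
state = "Logging to '10.1.1.1' port 514 in VRF MGMT via udp"
assert re.search(pattern, state) is not None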
"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingHosts.test","title":"test","text":"
test(\n    hosts: Optional[List[str]] = None, vrf: str = \"default\"\n) -> None\n

Run VerifyLoggingHosts validation.

Parameters:

  • hosts (Optional[List[str]]): List of hosts (syslog servers) IP addresses. Default: None
  • vrf (str): The name of the VRF to transport log messages. Default: 'default'
"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingLogsGeneration","title":"VerifyLoggingLogsGeneration","text":"

Bases: AntaTest

Verifies if logs are generated.

Expected Results
  • success: The test will pass if logs are generated.
  • failure: The test will fail if logs are NOT generated.
Source code in anta/tests/logging.py
class VerifyLoggingLogsGeneration(AntaTest):
    """
    Verifies if logs are generated.

    Expected Results:
        * success: The test will pass if logs are generated.
        * failure: The test will fail if logs are NOT generated.
    """

    name = "VerifyLoggingLogsGeneration"
    description = "Verifies if logs are generated."
    categories = ["logging"]
    commands = [
        AntaCommand(command="send log level informational message ANTA VerifyLoggingLogsGeneration validation"),
        AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text"),
    ]

    @AntaTest.anta_test
    def test(self) -> None:
        """
        Run VerifyLoggingLogsGeneration validation.
        """
        log_pattern = r"ANTA VerifyLoggingLogsGeneration validation"

        output = self.instance_commands[1].text_output
        lines = output.strip().split("\n")[::-1]

        for line in lines:
            if re.search(log_pattern, line):
                self.result.is_success()
                return

        self.result.is_failure("Logs are not generated")
"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingLogsGeneration.test","title":"test","text":"
test() -> None\n

Run VerifyLoggingLogsGeneration validation.

"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingPersistent","title":"VerifyLoggingPersistent","text":"

Bases: AntaTest

Verifies if logging persistent is enabled and logs are saved in flash.

Expected Results
  • success: The test will pass if logging persistent is enabled and logs are in flash.
  • failure: The test will fail if logging persistent is disabled or no logs are saved in flash.
Source code in anta/tests/logging.py
class VerifyLoggingPersistent(AntaTest):
    """
    Verifies if logging persistent is enabled and logs are saved in flash.

    Expected Results:
        * success: The test will pass if logging persistent is enabled and logs are in flash.
        * failure: The test will fail if logging persistent is disabled or no logs are saved in flash.
    """

    name = "VerifyLoggingPersistent"
    description = "Verifies if logging persistent is enabled and logs are saved in flash."
    categories = ["logging"]
    commands = [
        AntaCommand(command="show logging", ofmt="text"),
        AntaCommand(command="dir flash:/persist/messages", ofmt="text"),
    ]

    @AntaTest.anta_test
    def test(self) -> None:
        """
        Run VerifyLoggingPersistent validation.
        """
        self.result.is_success()

        log_output = self.instance_commands[0].text_output
        dir_flash_output = self.instance_commands[1].text_output

        if "Persistent logging: disabled" in _get_logging_states(self.logger, log_output):
            self.result.is_failure("Persistent logging is disabled")
            return

        pattern = r"-rw-\s+(\d+)"
        persist_logs = re.search(pattern, dir_flash_output)

        if not persist_logs or int(persist_logs.group(1)) == 0:
            self.result.is_failure("No persistent logs are saved in flash")
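The flash check relies on a capture group to read the file size out of the dir output. A minimal sketch with a made-up directory listing line (the exact dir format is an assumption):

import re

# Hypothetical 'dir flash:/persist/messages' line; the captured group is the file size.
listing = "       -rw-        6345           Apr 12 10:00  messages"
match = re.search(r"-rw-\s+(\d+)", listing)
assert match is not None and int(match.group(1)) > 0  # non-empty persistent log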
"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingPersistent.test","title":"test","text":"
test() -> None\n

Run VerifyLoggingPersistent validation.

"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingSourceIntf","title":"VerifyLoggingSourceIntf","text":"

Bases: AntaTest

Verifies logging source-interface for a specified VRF.

Expected Results
  • success: The test will pass if the provided logging source-interface is configured in the specified VRF.
  • failure: The test will fail if the provided logging source-interface is NOT configured in the specified VRF.
  • skipped: The test will be skipped if source-interface or VRF is not provided.
Source code in anta/tests/logging.py
class VerifyLoggingSourceIntf(AntaTest):
    """
    Verifies logging source-interface for a specified VRF.

    Expected Results:
        * success: The test will pass if the provided logging source-interface is configured in the specified VRF.
        * failure: The test will fail if the provided logging source-interface is NOT configured in the specified VRF.
        * skipped: The test will be skipped if source-interface or VRF is not provided.
    """

    name = "VerifyLoggingSourceIntf"
    description = "Verifies logging source-interface for a specified VRF."
    categories = ["logging"]
    commands = [AntaCommand(command="show logging", ofmt="text")]

    @AntaTest.anta_test
    def test(self, intf: Optional[str] = None, vrf: str = "default") -> None:
        """
        Run VerifyLoggingSourceIntf validation.

        Args:
            intf: Source-interface to use as source IP of log messages.
            vrf: The name of the VRF to transport log messages. Defaults to 'default'.
        """
        if not intf or not vrf:
            self.result.is_skipped(f"{self.__class__.name} did not run because intf or vrf was not supplied")
            return

        output = self.instance_commands[0].text_output

        pattern = rf"Logging source-interface '{intf}'.*VRF {vrf}"

        if re.search(pattern, _get_logging_states(self.logger, output)):
            self.result.is_success()
        else:
            self.result.is_failure(f"Source-interface '{intf}' is not configured in VRF {vrf}")
"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingSourceIntf.test","title":"test","text":"
test(\n    intf: Optional[str] = None, vrf: str = \"default\"\n) -> None\n

Run VerifyLoggingSourceIntf validation.

Parameters:

  • intf (Optional[str]): Source-interface to use as source IP of log messages. Default: None
  • vrf (str): The name of the VRF to transport log messages. Default: 'default'
"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingTimestamp","title":"VerifyLoggingTimestamp","text":"

Bases: AntaTest

Verifies if logs are generated with the appropriate timestamp.

Expected Results
  • success: The test will pass if logs are generated with the appropriate timestamp.
  • failure: The test will fail if logs are NOT generated with the appropriate timestamp.
Source code in anta/tests/logging.py
class VerifyLoggingTimestamp(AntaTest):
    """
    Verifies if logs are generated with the appropriate timestamp.

    Expected Results:
        * success: The test will pass if logs are generated with the appropriate timestamp.
        * failure: The test will fail if logs are NOT generated with the appropriate timestamp.
    """

    name = "VerifyLoggingTimestamp"
    description = "Verifies if logs are generated with the appropriate timestamp."
    categories = ["logging"]
    commands = [
        AntaCommand(command="send log level informational message ANTA VerifyLoggingTimestamp validation"),
        AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text"),
    ]

    @AntaTest.anta_test
    def test(self) -> None:
        """
        Run VerifyLoggingTimestamp validation.
        """
        log_pattern = r"ANTA VerifyLoggingTimestamp validation"
        timestamp_pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}-\d{2}:\d{2}"

        output = self.instance_commands[1].text_output

        lines = output.strip().split("\n")[::-1]

        last_line_with_pattern = ""
        for line in lines:
            if re.search(log_pattern, line):
                last_line_with_pattern = line
                break

        if re.search(timestamp_pattern, last_line_with_pattern):
            self.result.is_success()
        else:
            self.result.is_failure("Logs are not generated with the appropriate timestamp format")
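The timestamp check expects a high-resolution timestamp with microseconds and a UTC offset. A minimal sketch of the pattern against an illustrative, made-up log line:

import re

timestamp_pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}-\d{2}:\d{2}"

# Hypothetical log line in the format the pattern expects.
line = "2023-05-10T15:41:44.701810-05:00 leaf1 SYS: ANTA VerifyLoggingTimestamp validation"
assert re.search(timestamp_pattern, line) is not None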
"},{"location":"api/tests.logging/#anta.tests.logging.VerifyLoggingTimestamp.test","title":"test","text":"
test() -> None\n

Run VerifyLoggingTimestamp validation.

"},{"location":"api/tests/","title":"Overview","text":""},{"location":"api/tests/#anta-tests-landing-page","title":"ANTA Tests landing page","text":"

This section describes all the available tests provided by the ANTA package.

  • AAA
  • Configuration
  • Connectivity
  • Field Notice
  • Hardware
  • Interfaces
  • Logging
  • MLAG
  • Multicast
  • Profiles
  • Routing Generic
  • Routing BGP
  • Routing OSPF
  • Security
  • SNMP
  • Software
  • STP
  • System
  • VxLAN

All these tests can be imported into a test catalog and used by the anta CLI, or run from your own framework, as sketched below.
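A minimal sketch of running one of these tests directly from Python, assuming a reachable EOS device. The AsyncEOSDevice constructor arguments, import paths, and direct test instantiation shown here are assumptions for illustration, not a documented contract:

"""
Sketch: run a single ANTA test against one device.
"""
import asyncio

from anta.device import AsyncEOSDevice
from anta.tests.mlag import VerifyMlagStatus


async def main() -> None:
    # Placeholder host and credentials.
    device = AsyncEOSDevice(name="leaf1", host="10.0.0.1", username="admin", password="admin")

    test = VerifyMlagStatus(device)
    # The anta_test decorator is expected to collect the required command
    # ('show mlag') before the validation logic runs.
    await test.test()
    print(test.result)


asyncio.run(main())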

"},{"location":"api/tests.mlag/","title":"MLAG","text":""},{"location":"api/tests.mlag/#anta-catalog-for-mlag-tests","title":"ANTA catalog for mlag tests","text":"

Test functions related to Multi-chassis Link Aggregation (MLAG)

"},{"location":"api/tests.mlag/#anta.tests.mlag.VerifyMlagConfigSanity","title":"VerifyMlagConfigSanity","text":"

Bases: AntaTest

This test verifies there are no MLAG config-sanity inconsistencies.

Expected Results
  • success: The test will pass if there are NO MLAG config-sanity inconsistencies.
  • failure: The test will fail if there are MLAG config-sanity inconsistencies.
  • skipped: The test will be skipped if MLAG is 'disabled'.
  • error: The test will give an error if 'mlagActive' is not found in the JSON response.
Source code in anta/tests/mlag.py
class VerifyMlagConfigSanity(AntaTest):
    """
    This test verifies there are no MLAG config-sanity inconsistencies.

    Expected Results:
        * success: The test will pass if there are NO MLAG config-sanity inconsistencies.
        * failure: The test will fail if there are MLAG config-sanity inconsistencies.
        * skipped: The test will be skipped if MLAG is 'disabled'.
        * error: The test will give an error if 'mlagActive' is not found in the JSON response.
    """

    name = "VerifyMlagConfigSanity"
    description = "This test verifies there are no MLAG config-sanity inconsistencies."
    categories = ["mlag"]
    commands = [AntaCommand(command="show mlag config-sanity", ofmt="json")]

    @AntaTest.anta_test
    def test(self) -> None:
        """
        Run VerifyMlagConfigSanity validation
        """

        command_output = self.instance_commands[0].json_output

        if (mlag_status := get_value(command_output, "mlagActive")) is None:
            self.result.is_error("Incorrect JSON response - 'mlagActive' state was not found")
            return

        if mlag_status is False:
            self.result.is_skipped("MLAG is disabled")
            return

        keys_to_verify = ["globalConfiguration", "interfaceConfiguration"]
        verified_output = {key: get_value(command_output, key) for key in keys_to_verify}

        if not any(verified_output.values()):
            self.result.is_success()
        else:
            self.result.is_failure(f"MLAG config-sanity returned inconsistencies: {verified_output}")
"},{"location":"api/tests.mlag/#anta.tests.mlag.VerifyMlagConfigSanity.test","title":"test","text":"
test() -> None\n

Run VerifyMlagConfigSanity validation

"},{"location":"api/tests.mlag/#anta.tests.mlag.VerifyMlagDualPrimary","title":"VerifyMlagDualPrimary","text":"

Bases: AntaTest

This test verifies the dual-primary detection and its parameters of the MLAG configuration.

Expected Results
  • success: The test will pass if the dual-primary detection is enabled and its parameters are configured properly.
  • failure: The test will fail if the dual-primary detection is NOT enabled or its parameters are NOT configured properly.
  • skipped: The test will be skipped if the dual-primary parameters are NOT provided or if MLAG is 'disabled'.
Source code in anta/tests/mlag.py
class VerifyMlagDualPrimary(AntaTest):
    """
    This test verifies the dual-primary detection and its parameters of the MLAG configuration.

    Expected Results:
        * success: The test will pass if the dual-primary detection is enabled and its parameters are configured properly.
        * failure: The test will fail if the dual-primary detection is NOT enabled or its parameters are NOT configured properly.
        * skipped: The test will be skipped if the dual-primary parameters are NOT provided or if MLAG is 'disabled'.
    """

    name = "VerifyMlagDualPrimary"
    description = "This test verifies the dual-primary detection and its parameters of the MLAG configuration."
    categories = ["mlag"]
    commands = [AntaCommand(command="show mlag detail", ofmt="json")]

    @AntaTest.anta_test
    def test(
        self, detection_delay: Optional[int] = None, errdisabled: bool = False, recovery_delay: Optional[int] = None, recovery_delay_non_mlag: Optional[int] = None
    ) -> None:
        """
        Run VerifyMlagDualPrimary validation

        Args:
            detection_delay: Delay detection for <N> seconds.
            errdisabled: Errdisable all interfaces when dual-primary is detected. Defaults to False.
            recovery_delay: Delay (seconds) after dual-primary detection resolves until non peer-link ports that are part of an MLAG are enabled.
            recovery_delay_non_mlag: Delay (seconds) after dual-primary detection resolves until ports that are not part of an MLAG are enabled.
        """

        if detection_delay is None or errdisabled is None or recovery_delay is None or recovery_delay_non_mlag is None:
            self.result.is_skipped(
                f"{self.__class__.name} did not run because detection_delay, errdisabled, recovery_delay or recovery_delay_non_mlag were not supplied"
            )
            return

        errdisabled_action = "errdisableAllInterfaces" if errdisabled else "none"

        command_output = self.instance_commands[0].json_output

        if command_output["state"] == "disabled":
            self.result.is_skipped("MLAG is disabled")
            return

        if command_output["dualPrimaryDetectionState"] == "disabled":
            self.result.is_failure("Dual-primary detection is disabled")
            return

        keys_to_verify = ["detail.dualPrimaryDetectionDelay", "detail.dualPrimaryAction", "dualPrimaryMlagRecoveryDelay", "dualPrimaryNonMlagRecoveryDelay"]
        verified_output = {key: get_value(command_output, key) for key in keys_to_verify}

        if (
            verified_output["detail.dualPrimaryDetectionDelay"] == detection_delay
            and verified_output["detail.dualPrimaryAction"] == errdisabled_action
            and verified_output["dualPrimaryMlagRecoveryDelay"] == recovery_delay
            and verified_output["dualPrimaryNonMlagRecoveryDelay"] == recovery_delay_non_mlag
        ):
            self.result.is_success()

        else:
            self.result.is_failure(f"The dual-primary parameters are not configured properly: {verified_output}")
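Keys such as "detail.dualPrimaryDetectionDelay" are resolved against the nested JSON reply with the get_value helper. A simplified stand-in (not ANTA's actual implementation) that shows the dotted-key walk:

from typing import Any, Optional


def get_value_sketch(data: Any, dotted_key: str) -> Optional[Any]:
    """Simplified stand-in for the get_value helper: walk nested dicts via a dotted key."""
    current = data
    for part in dotted_key.split("."):
        if not isinstance(current, dict) or part not in current:
            return None
        current = current[part]
    return current


output = {"detail": {"dualPrimaryDetectionDelay": 200}}
assert get_value_sketch(output, "detail.dualPrimaryDetectionDelay") == 200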
"},{"location":"api/tests.mlag/#anta.tests.mlag.VerifyMlagDualPrimary.test","title":"test","text":"
test(\n    detection_delay: Optional[int] = None,\n    errdisabled: bool = False,\n    recovery_delay: Optional[int] = None,\n    recovery_delay_non_mlag: Optional[int] = None,\n) -> None\n

Run VerifyMlagDualPrimary validation

Parameters:

  • detection_delay (Optional[int]): Delay detection for <N> seconds. Default: None
  • errdisabled (bool): Errdisable all interfaces when dual-primary is detected. Default: False
  • recovery_delay (Optional[int]): Delay (seconds) after dual-primary detection resolves until non peer-link ports that are part of an MLAG are enabled. Default: None
  • recovery_delay_non_mlag (Optional[int]): Delay (seconds) after dual-primary detection resolves until ports that are not part of an MLAG are enabled. Default: None
"},{"location":"api/tests.mlag/#anta.tests.mlag.VerifyMlagInterfaces","title":"VerifyMlagInterfaces","text":"

Bases: AntaTest

This test verifies there are no inactive or active-partial MLAG ports.

Expected Results
  • success: The test will pass if there are NO inactive or active-partial MLAG ports.
  • failure: The test will fail if there are inactive or active-partial MLAG ports.
  • skipped: The test will be skipped if MLAG is 'disabled'.
Source code in anta/tests/mlag.py
class VerifyMlagInterfaces(AntaTest):
    """
    This test verifies there are no inactive or active-partial MLAG ports.

    Expected Results:
        * success: The test will pass if there are NO inactive or active-partial MLAG ports.
        * failure: The test will fail if there are inactive or active-partial MLAG ports.
        * skipped: The test will be skipped if MLAG is 'disabled'.
    """

    name = "VerifyMlagInterfaces"
    description = "This test verifies there are no inactive or active-partial MLAG ports."
    categories = ["mlag"]
    commands = [AntaCommand(command="show mlag", ofmt="json")]

    @AntaTest.anta_test
    def test(self) -> None:
        """
        Run VerifyMlagInterfaces validation
        """

        command_output = self.instance_commands[0].json_output

        if command_output["state"] == "disabled":
            self.result.is_skipped("MLAG is disabled")
            return

        if command_output["mlagPorts"]["Inactive"] == 0 and command_output["mlagPorts"]["Active-partial"] == 0:
            self.result.is_success()
        else:
            self.result.is_failure(f"MLAG status is not OK: {command_output['mlagPorts']}")
"},{"location":"api/tests.mlag/#anta.tests.mlag.VerifyMlagInterfaces.test","title":"test","text":"
test() -> None\n

Run VerifyMlagInterfaces validation

"},{"location":"api/tests.mlag/#anta.tests.mlag.VerifyMlagReloadDelay","title":"VerifyMlagReloadDelay","text":"

Bases: AntaTest

This test verifies the reload-delay parameters of the MLAG configuration.

Expected Results
  • success: The test will pass if the reload-delay parameters are configured properly.
  • failure: The test will fail if the reload-delay parameters are NOT configured properly.
  • skipped: The test will be skipped if the reload-delay parameters are NOT provided or if MLAG is 'disabled'.
Source code in anta/tests/mlag.py
class VerifyMlagReloadDelay(AntaTest):
    """
    This test verifies the reload-delay parameters of the MLAG configuration.

    Expected Results:
        * success: The test will pass if the reload-delay parameters are configured properly.
        * failure: The test will fail if the reload-delay parameters are NOT configured properly.
        * skipped: The test will be skipped if the reload-delay parameters are NOT provided or if MLAG is 'disabled'.
    """

    name = "VerifyMlagReloadDelay"
    description = "This test verifies the reload-delay parameters of the MLAG configuration."
    categories = ["mlag"]
    commands = [AntaCommand(command="show mlag", ofmt="json")]

    @AntaTest.anta_test
    def test(self, reload_delay: Optional[int] = None, reload_delay_non_mlag: Optional[int] = None) -> None:
        """
        Run VerifyMlagReloadDelay validation

        Args:
            reload_delay: Delay (seconds) after reboot until non peer-link ports that are part of an MLAG are enabled.
            reload_delay_non_mlag: Delay (seconds) after reboot until ports that are not part of an MLAG are enabled.
        """

        if not reload_delay or not reload_delay_non_mlag:
            self.result.is_skipped(f"{self.__class__.name} did not run because reload_delay or reload_delay_non_mlag were not supplied")
            return

        command_output = self.instance_commands[0].json_output

        if command_output["state"] == "disabled":
            self.result.is_skipped("MLAG is disabled")
            return

        keys_to_verify = ["reloadDelay", "reloadDelayNonMlag"]
        verified_output = {key: get_value(command_output, key) for key in keys_to_verify}

        if verified_output["reloadDelay"] == reload_delay and verified_output["reloadDelayNonMlag"] == reload_delay_non_mlag:
            self.result.is_success()

        else:
            self.result.is_failure(f"The reload-delay parameters are not configured properly: {verified_output}")
"},{"location":"api/tests.mlag/#anta.tests.mlag.VerifyMlagReloadDelay.test","title":"test","text":"
test(\n    reload_delay: Optional[int] = None,\n    reload_delay_non_mlag: Optional[int] = None,\n) -> None\n

Run VerifyMlagReloadDelay validation

Parameters:

  • reload_delay (Optional[int]): Delay (seconds) after reboot until non peer-link ports that are part of an MLAG are enabled. Default: None
  • reload_delay_non_mlag (Optional[int]): Delay (seconds) after reboot until ports that are not part of an MLAG are enabled. Default: None
"},{"location":"api/tests.mlag/#anta.tests.mlag.VerifyMlagStatus","title":"VerifyMlagStatus","text":"

Bases: AntaTest

This test verifies the health status of the MLAG configuration.

Expected Results
  • success: The test will pass if the MLAG state is 'active', negotiation status is 'connected', peer-link status and local interface status are 'up'.
  • failure: The test will fail if the MLAG state is not 'active', negotiation status is not 'connected', peer-link status or local interface status are not 'up'.
  • skipped: The test will be skipped if MLAG is 'disabled'.
Source code in anta/tests/mlag.py
class VerifyMlagStatus(AntaTest):
    """
    This test verifies the health status of the MLAG configuration.

    Expected Results:
        * success: The test will pass if the MLAG state is 'active', negotiation status is 'connected',
                   peer-link status and local interface status are 'up'.
        * failure: The test will fail if the MLAG state is not 'active', negotiation status is not 'connected',
                   peer-link status or local interface status are not 'up'.
        * skipped: The test will be skipped if MLAG is 'disabled'.
    """

    name = "VerifyMlagStatus"
    description = "This test verifies the health status of the MLAG configuration."
    categories = ["mlag"]
    commands = [AntaCommand(command="show mlag", ofmt="json")]

    @AntaTest.anta_test
    def test(self) -> None:
        """
        Run VerifyMlagStatus validation
        """

        command_output = self.instance_commands[0].json_output

        if command_output["state"] == "disabled":
            self.result.is_skipped("MLAG is disabled")
            return

        keys_to_verify = ["state", "negStatus", "localIntfStatus", "peerLinkStatus"]
        verified_output = {key: get_value(command_output, key) for key in keys_to_verify}

        if (
            verified_output["state"] == "active"
            and verified_output["negStatus"] == "connected"
            and verified_output["localIntfStatus"] == "up"
            and verified_output["peerLinkStatus"] == "up"
        ):
            self.result.is_success()
        else:
            self.result.is_failure(f"MLAG status is not OK: {verified_output}")
"},{"location":"api/tests.mlag/#anta.tests.mlag.VerifyMlagStatus.test","title":"test","text":"
test() -> None\n

Run VerifyMlagStatus validation

"},{"location":"api/tests.multicast/","title":"Multicast","text":""},{"location":"api/tests.multicast/#anta-catalog-for-multicast-tests","title":"ANTA catalog for multicast tests","text":"

Test functions related to multicast

"},{"location":"api/tests.multicast/#anta.tests.multicast.VerifyIGMPSnoopingGlobal","title":"VerifyIGMPSnoopingGlobal","text":"

Bases: AntaTest

Verifies the IGMP snooping global configuration.

Parameters:

  • configuration (str): Expected global IGMP snooping configuration (enabled or disabled). Required.

Source code in anta/tests/multicast.py
class VerifyIGMPSnoopingGlobal(AntaTest):
    """
    Verifies the IGMP snooping global configuration.

    Args:
        configuration (str): Expected global IGMP snooping configuration (enabled or disabled).
    """

    name = "VerifyIGMPSnoopingGlobal"
    description = "Verifies the IGMP snooping global configuration."
    categories = ["multicast", "igmp"]
    commands = [AntaCommand(command="show ip igmp snooping")]

    @AntaTest.anta_test
    def test(self, configuration: Optional[str] = None) -> None:
        """
        Run VerifyIGMPSnoopingGlobal validation

        Args:
            configuration: Expected global IGMP configuration (enabled or disabled).
        """

        if not configuration:
            self.result.is_skipped("VerifyIGMPSnoopingGlobal was not run as no configuration was given")
            return

        if configuration not in ["enabled", "disabled"]:
            self.result.is_error(f"VerifyIGMPSnoopingGlobal was not run as 'configuration': {configuration} is not in the allowed values: ['enabled', 'disabled']")
            return

        command_output = self.instance_commands[0].json_output

        self.result.is_success()
        if (igmp_state := command_output["igmpSnoopingState"]) != configuration:
            self.result.is_failure(f"IGMP state is not valid: {igmp_state}")
"},{"location":"api/tests.multicast/#anta.tests.multicast.VerifyIGMPSnoopingGlobal.test","title":"test","text":"
test(configuration: Optional[str] = None) -> None\n

Run VerifyIGMPSnoopingGlobal validation

Parameters:

  • configuration (Optional[str]): Expected global IGMP configuration (enabled or disabled). Default: None
"},{"location":"api/tests.multicast/#anta.tests.multicast.VerifyIGMPSnoopingVlans","title":"VerifyIGMPSnoopingVlans","text":"

Bases: AntaTest

Verifies the IGMP snooping configuration for some VLANs.

Parameters:

  • vlans (List[str]): A list of VLANs. Required.
  • configuration (str): Expected IGMP snooping configuration (enabled or disabled) for these VLANs. Required.

Source code in anta/tests/multicast.py
class VerifyIGMPSnoopingVlans(AntaTest):
    """
    Verifies the IGMP snooping configuration for some VLANs.

    Args:
        vlans (List[str]): A list of VLANs
        configuration (str): Expected IGMP snooping configuration (enabled or disabled) for these VLANs.
    """

    name = "VerifyIGMPSnoopingVlans"
    description = "Verifies the IGMP snooping configuration for some VLANs."
    categories = ["multicast", "igmp"]
    commands = [AntaCommand(command="show ip igmp snooping")]

    @AntaTest.anta_test
    def test(self, vlans: Optional[List[str]] = None, configuration: Optional[str] = None) -> None:
        """
        Run VerifyIGMPSnoopingVlans validation

        Args:
            vlans: List of VLANs.
            configuration: Expected IGMP configuration (enabled or disabled) for these VLANs.
        """

        if not vlans or not configuration:
            self.result.is_skipped("VerifyIGMPSnoopingVlans was not run as no vlans or configuration was given")
            return
        if configuration not in ["enabled", "disabled"]:
            self.result.is_error(f"VerifyIGMPSnoopingVlans was not run as 'configuration': {configuration} is not in the allowed values: ['enabled', 'disabled']")
            return

        command_output = self.instance_commands[0].json_output

        self.result.is_success()
        for vlan in vlans:
            if vlan not in command_output["vlans"]:
                self.result.is_failure(f"Supplied vlan {vlan} is not present on the device.")
                continue

            igmp_state = command_output["vlans"][str(vlan)]["igmpSnoopingState"]
            if igmp_state != configuration:
                self.result.is_failure(f"IGMP state for vlan {vlan} is {igmp_state}")
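Parameters such as vlans and configuration normally come from the test catalog; when driving the test from Python they are passed straight to test(). A minimal sketch under the same assumptions as the earlier library example (placeholder device, assumed constructor arguments and import paths):

import asyncio

from anta.device import AsyncEOSDevice
from anta.tests.multicast import VerifyIGMPSnoopingVlans


async def main() -> None:
    # Placeholder host and credentials.
    device = AsyncEOSDevice(name="leaf1", host="10.0.0.1", username="admin", password="admin")

    test = VerifyIGMPSnoopingVlans(device)
    # Omitting vlans/configuration would mark the result "skipped".
    await test.test(vlans=["10", "20"], configuration="enabled")
    print(test.result)


asyncio.run(main())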
"},{"location":"api/tests.multicast/#anta.tests.multicast.VerifyIGMPSnoopingVlans.test","title":"test","text":"
test(\n    vlans: Optional[List[str]] = None,\n    configuration: Optional[str] = None,\n) -> None\n

Run VerifyIGMPSnoopingVlans validation

Parameters:

  • vlans (Optional[List[str]]): List of VLANs. Default: None
  • configuration (Optional[str]): Expected IGMP configuration (enabled or disabled) for these VLANs. Default: None
"},{"location":"api/tests.profiles/","title":"Profiles","text":""},{"location":"api/tests.profiles/#anta-catalog-for-profiles-tests","title":"ANTA catalog for profiles tests","text":"

Test functions related to ASIC profiles

"},{"location":"api/tests.profiles/#anta.tests.profiles.VerifyTcamProfile","title":"VerifyTcamProfile","text":"

Bases: AntaTest

Verifies the device is using the configured TCAM profile.

Source code in anta/tests/profiles.py
class VerifyTcamProfile(AntaTest):
    """
    Verifies the device is using the configured TCAM profile.
    """

    name = "VerifyTcamProfile"
    description = "Verify that the assigned TCAM profile is actually running on the device"
    categories = ["profiles"]
    commands = [AntaCommand(command="show hardware tcam profile", ofmt="json")]

    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
    @AntaTest.anta_test
    def test(self, profile: Optional[str] = None) -> None:
        """
        Run VerifyTcamProfile validation

        Args:
            profile: Expected TCAM profile.
        """
        if not profile:
            self.result.is_skipped("VerifyTcamProfile was not run as no profile was given")
            return

        command_output = self.instance_commands[0].json_output
        if command_output["pmfProfiles"]["FixedSystem"]["status"] == command_output["pmfProfiles"]["FixedSystem"]["config"] == profile:
            self.result.is_success()
        else:
            self.result.is_failure(f"Incorrect profile running on device: {command_output['pmfProfiles']['FixedSystem']['status']}")
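The success condition is a chained comparison: the running status and the configured profile must both equal the expected profile. A one-liner illustrating the semantics, with a made-up profile name:

# Chained comparison: True only if status == config AND config == profile.
status, config, profile = "vxlan-routing", "vxlan-routing", "vxlan-routing"  # placeholder values
assert status == config == profile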
"},{"location":"api/tests.profiles/#anta.tests.profiles.VerifyTcamProfile.test","title":"test","text":"
test(profile: Optional[str] = None) -> None\n

Run VerifyTcamProfile validation

Parameters:

  • profile (Optional[str]): Expected TCAM profile. Default: None
"},{"location":"api/tests.profiles/#anta.tests.profiles.VerifyUnifiedForwardingTableMode","title":"VerifyUnifiedForwardingTableMode","text":"

Bases: AntaTest

Verifies the device is using the expected Unified Forwarding Table mode.

Source code in anta/tests/profiles.py
class VerifyUnifiedForwardingTableMode(AntaTest):
    """
    Verifies the device is using the expected Unified Forwarding Table mode.
    """

    name = "VerifyUnifiedForwardingTableMode"
    description = "Verifies the device is using the expected Unified Forwarding Table mode."
    categories = ["profiles"]
    commands = [AntaCommand(command="show platform trident forwarding-table partition", ofmt="json")]

    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
    @AntaTest.anta_test
    def test(self, mode: Optional[str] = None) -> None:
        """
        Run VerifyUnifiedForwardingTableMode validation

        Args:
            mode: Expected UFT mode.
        """
        if not mode:
            self.result.is_skipped("VerifyUnifiedForwardingTableMode was not run as no mode was given")
            return

        command_output = self.instance_commands[0].json_output
        if command_output["uftMode"] == mode:
            self.result.is_success()
        else:
            self.result.is_failure(f"Device is not running correct UFT mode (expected: {mode} / running: {command_output['uftMode']})")
"},{"location":"api/tests.profiles/#anta.tests.profiles.VerifyUnifiedForwardingTableMode.test","title":"test","text":"
test(mode: Optional[str] = None) -> None\n

Run VerifyUnifiedForwardingTableMode validation

Parameters:

  • mode (Optional[str]): Expected UFT mode. Default: None
"},{"location":"api/tests.routing.bgp/","title":"BGP","text":""},{"location":"api/tests.routing.bgp/#anta-catalog-for-routing-bgp-tests","title":"ANTA catalog for routing-bgp tests","text":"

BGP test functions

"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPEVPNCount","title":"VerifyBGPEVPNCount","text":"

Bases: AntaTest

Verifies that all EVPN BGP sessions are established and that the actual number of BGP EVPN neighbors matches the expected value (default VRF).

  • self.result = "skipped" if the number parameter is missing
  • self.result = "success" if all EVPN BGP sessions are Established and the actual number of BGP EVPN neighbors matches the expected value.
  • self.result = "failure" otherwise.
Source code in anta/tests/routing/bgp.py
class VerifyBGPEVPNCount(AntaTest):
    """
    Verifies all EVPN BGP sessions are established (default VRF)
    and the actual number of BGP EVPN neighbors is the one we expect (default VRF).

    * self.result = "skipped" if the `number` parameter is missing
    * self.result = "success" if all EVPN BGP sessions are Established and if the actual
                         number of BGP EVPN neighbors is the one we expect.
    * self.result = "failure" otherwise.
    """

    name = "VerifyBGPEVPNCount"
    description = "Verifies all EVPN BGP sessions are established (default VRF) and the actual number of BGP EVPN neighbors is the one we expect (default VRF)."
    categories = ["routing", "bgp"]
    commands = [AntaCommand(command="show bgp evpn summary")]

    @check_bgp_family_enable("evpn")
    @AntaTest.anta_test
    def test(self, number: Optional[int] = None) -> None:
        """
        Run VerifyBGPEVPNCount validation

        Args:
            number: The expected number of BGP EVPN neighbors in the default VRF.
        """
        if not number:
            self.result.is_skipped("VerifyBGPEVPNCount could not run because number was not supplied.")
            return

        command_output = self.instance_commands[0].json_output

        peers = command_output["vrfs"]["default"]["peers"]
        non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict["peerState"] != "Established"]

        if not non_established_peers and len(peers) == number:
            self.result.is_success()
        else:
            self.result.is_failure()
            if len(peers) != number:
                self.result.is_failure(f"Expecting {number} BGP EVPN peers and got {len(peers)}")
            if non_established_peers:
                self.result.is_failure(f"The following EVPN peers are not established: {non_established_peers}")
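The pass/fail logic reduces to one list comprehension over the peers dictionary plus a count check. A sketch on made-up data:

peers = {
    "10.1.0.1": {"peerState": "Established"},
    "10.1.0.2": {"peerState": "Active"},
}
non_established = [peer for peer, data in peers.items() if data["peerState"] != "Established"]
# non_established == ["10.1.0.2"]; the test fails if this list is non-empty
# or if len(peers) differs from the expected number.
assert non_established == ["10.1.0.2"] and len(peers) == 2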
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPEVPNCount.test","title":"test","text":"
test(number: Optional[int] = None) -> None\n

Run VerifyBGPEVPNCount validation

Parameters:

  • number (Optional[int]): The expected number of BGP EVPN neighbors in the default VRF. Default: None
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPEVPNState","title":"VerifyBGPEVPNState","text":"

Bases: AntaTest

Verifies all EVPN BGP sessions are established (default VRF).

  • self.result = "skipped" if no BGP EVPN peers are returned by the device
  • self.result = "success" if all EVPN BGP sessions are established.
  • self.result = "failure" otherwise.
Source code in anta/tests/routing/bgp.py
class VerifyBGPEVPNState(AntaTest):
    """
    Verifies all EVPN BGP sessions are established (default VRF).

    * self.result = "skipped" if no BGP EVPN peers are returned by the device
    * self.result = "success" if all EVPN BGP sessions are established.
    * self.result = "failure" otherwise.
    """

    name = "VerifyBGPEVPNState"
    description = "Verifies all EVPN BGP sessions are established (default VRF)."
    categories = ["routing", "bgp"]
    commands = [AntaCommand(command="show bgp evpn summary")]

    @check_bgp_family_enable("evpn")
    @AntaTest.anta_test
    def test(self) -> None:
        """Run VerifyBGPEVPNState validation"""

        command_output = self.instance_commands[0].json_output

        bgp_vrfs = command_output["vrfs"]

        peers = bgp_vrfs["default"]["peers"]
        non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict["peerState"] != "Established"]

        if not non_established_peers:
            self.result.is_success()
        else:
            self.result.is_failure(f"The following EVPN peers are not established: {non_established_peers}")
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPEVPNState.test","title":"test","text":"
test() -> None\n

Run VerifyBGPEVPNState validation

"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPIPv4UnicastCount","title":"VerifyBGPIPv4UnicastCount","text":"

Bases: AntaTest

Verifies that all IPv4 unicast BGP sessions are established, that all BGP message queues for these sessions are empty, and that the actual number of BGP IPv4 unicast neighbors matches the expected value.

  • self.result = "skipped" if the number or vrf parameter is missing
  • self.result = "success" if all IPv4 unicast BGP sessions are established, all BGP message queues for these sessions are empty, and the actual number of BGP IPv4 unicast neighbors matches the expected number.
  • self.result = "failure" otherwise.
Source code in anta/tests/routing/bgp.py
class VerifyBGPIPv4UnicastCount(AntaTest):\n\"\"\"\n    Verifies all IPv4 unicast BGP sessions are established\n    and all BGP messages queues for these sessions are empty\n    and the actual number of BGP IPv4 unicast neighbors is the one we expect.\n\n    * self.result = \"skipped\" if the `number` or `vrf` parameter is missing\n    * self.result = \"success\" if all IPv4 unicast BGP sessions are established\n                         and if all BGP messages queues for these sessions are empty\n                         and if the actual number of BGP IPv4 unicast neighbors is equal to `number.\n    * self.result = \"failure\" otherwise.\n    \"\"\"\n\n    name = \"VerifyBGPIPv4UnicastCount\"\n    description = (\n        \"Verifies all IPv4 unicast BGP sessions are established and all their BGP messages queues are empty and \"\n        \" the actual number of BGP IPv4 unicast neighbors is the one we expect.\"\n    )\n    categories = [\"routing\", \"bgp\"]\n    template = AntaTemplate(template=\"show bgp ipv4 unicast summary vrf {vrf}\")\n\n    @check_bgp_family_enable(\"ipv4\")\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None) -> None:\n\"\"\"\n        Run VerifyBGPIPv4UnicastCount validation\n\n        Args:\n            number: The expected number of BGP IPv4 unicast neighbors.\n            vrf: VRF to verify (template parameter)\n        \"\"\"\n\n        if not number:\n            self.result.is_skipped(\"VerifyBGPIPv4UnicastCount could not run because number was not supplied\")\n            return\n\n        self.result.is_success()\n\n        for command in self.instance_commands:\n            if command.params and \"vrf\" in command.params:\n                vrf = command.params[\"vrf\"]\n\n            peers = command.json_output[\"vrfs\"][vrf][\"peers\"]\n            state_issue = _check_bgp_vrfs(command.json_output[\"vrfs\"])\n\n            if len(peers) != number:\n                self.result.is_failure(f\"Expecting {number} BGP peer in vrf {vrf} and got {len(peers)}\")\n            if state_issue:\n                self.result.is_failure(f\"The following IPv4 peers are not established: {state_issue}\")\n
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPIPv4UnicastCount.test","title":"test","text":"
test(number: Optional[int] = None) -> None\n

Run VerifyBGPIPv4UnicastCount validation

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
number | Optional[int] | The expected number of BGP IPv4 unicast neighbors. | None
vrf | | VRF to verify (template parameter) | required

Source code in anta/tests/routing/bgp.py
@check_bgp_family_enable(\"ipv4\")\n@AntaTest.anta_test\ndef test(self, number: Optional[int] = None) -> None:\n\"\"\"\n    Run VerifyBGPIPv4UnicastCount validation\n\n    Args:\n        number: The expected number of BGP IPv4 unicast neighbors.\n        vrf: VRF to verify (template parameter)\n    \"\"\"\n\n    if not number:\n        self.result.is_skipped(\"VerifyBGPIPv4UnicastCount could not run because number was not supplied\")\n        return\n\n    self.result.is_success()\n\n    for command in self.instance_commands:\n        if command.params and \"vrf\" in command.params:\n            vrf = command.params[\"vrf\"]\n\n        peers = command.json_output[\"vrfs\"][vrf][\"peers\"]\n        state_issue = _check_bgp_vrfs(command.json_output[\"vrfs\"])\n\n        if len(peers) != number:\n            self.result.is_failure(f\"Expecting {number} BGP peer in vrf {vrf} and got {len(peers)}\")\n        if state_issue:\n            self.result.is_failure(f\"The following IPv4 peers are not established: {state_issue}\")\n
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPIPv4UnicastState","title":"VerifyBGPIPv4UnicastState","text":"

Bases: AntaTest

Verifies all IPv4 unicast BGP sessions are established (for all VRF) and all BGP message queues for these sessions are empty (for all VRF).

  • self.result = "skipped" if no BGP VRF is returned by the device
  • self.result = "success" if all IPv4 unicast BGP sessions are established (for all VRF) and all BGP message queues for these sessions are empty (for all VRF).
  • self.result = "failure" otherwise.
Source code in anta/tests/routing/bgp.py
class VerifyBGPIPv4UnicastState(AntaTest):\n\"\"\"\n    Verifies all IPv4 unicast BGP sessions are established (for all VRF)\n    and all BGP message queues for these sessions are empty (for all VRF).\n\n    * self.result = \"skipped\" if no BGP VRF is returned by the device\n    * self.result = \"success\" if all IPv4 unicast BGP sessions are established (for all VRF)\n                         and all BGP message queues for these sessions are empty (for all VRF).\n    * self.result = \"failure\" otherwise.\n    \"\"\"\n\n    name = \"VerifyBGPIPv4UnicastState\"\n    description = \"Verifies all IPv4 unicast BGP sessions are established (for all VRF) and all BGP message queues for these sessions are empty (for all VRF).\"\n    categories = [\"routing\", \"bgp\"]\n    commands = [AntaCommand(command=\"show bgp ipv4 unicast summary vrf all\")]\n\n    @check_bgp_family_enable(\"ipv4\")\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyBGPIPv4UnicastState validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n        state_issue = _check_bgp_vrfs(command_output[\"vrfs\"])\n\n        if not state_issue:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Some IPv4 Unicast BGP Peers are not up: {state_issue}\")\n
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPIPv4UnicastState.test","title":"test","text":"
test() -> None\n

Run VerifyBGPIPv4UnicastState validation

Source code in anta/tests/routing/bgp.py
@check_bgp_family_enable(\"ipv4\")\n@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyBGPIPv4UnicastState validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n    state_issue = _check_bgp_vrfs(command_output[\"vrfs\"])\n\n    if not state_issue:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"Some IPv4 Unicast BGP Peer are not up: {state_issue}\")\n
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPIPv6UnicastState","title":"VerifyBGPIPv6UnicastState","text":"

Bases: AntaTest

Verifies all IPv6 unicast BGP sessions are established (for all VRF) and all BGP message queues for these sessions are empty (for all VRF).

  • self.result = "skipped" if no BGP VRF is returned by the device
  • self.result = "success" if all IPv6 unicast BGP sessions are established (for all VRF) and all BGP message queues for these sessions are empty (for all VRF).
  • self.result = "failure" otherwise.
Source code in anta/tests/routing/bgp.py
class VerifyBGPIPv6UnicastState(AntaTest):\n\"\"\"\n    Verifies all IPv6 unicast BGP sessions are established (for all VRF)\n    and all BGP message queues for these sessions are empty (for all VRF).\n\n    * self.result = \"skipped\" if no BGP VRF is returned by the device\n    * self.result = \"success\" if all IPv6 unicast BGP sessions are established (for all VRF)\n                         and all BGP message queues for these sessions are empty (for all VRF).\n    * self.result = \"failure\" otherwise.\n    \"\"\"\n\n    name = \"VerifyBGPIPv6UnicastState\"\n    description = \"Verifies all IPv6 unicast BGP sessions are established (for all VRF) and all BGP message queues for these sessions are empty (for all VRF).\"\n    categories = [\"routing\", \"bgp\"]\n    commands = [AntaCommand(command=\"show bgp ipv6 unicast summary vrf all\")]\n\n    @check_bgp_family_enable(\"ipv6\")\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyBGPIPv6UnicastState validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        state_issue = _check_bgp_vrfs(command_output[\"vrfs\"])\n\n        if not state_issue:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Some IPv6 Unicast BGP Peers are not up: {state_issue}\")\n
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPIPv6UnicastState.test","title":"test","text":"
test() -> None\n

Run VerifyBGPIPv6UnicastState validation

Source code in anta/tests/routing/bgp.py
@check_bgp_family_enable(\"ipv6\")\n@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyBGPIPv6UnicastState validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    state_issue = _check_bgp_vrfs(command_output[\"vrfs\"])\n\n    if not state_issue:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"Some IPv4 Unicast BGP Peer are not up: {state_issue}\")\n
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPRTCCount","title":"VerifyBGPRTCCount","text":"

Bases: AntaTest

Verifies all RTC BGP sessions are established (default VRF) and the actual number of BGP RTC neighbors is the one we expect (default VRF).

  • self.result = "skipped" if the `number` parameter is missing
  • self.result = "success" if all RTC BGP sessions are Established and if the actual number of BGP RTC neighbors is the one we expect.
  • self.result = "failure" otherwise.
Source code in anta/tests/routing/bgp.py
class VerifyBGPRTCCount(AntaTest):\n\"\"\"\n    Verifies all RTC BGP sessions are established (default VRF)\n    and the actual number of BGP RTC neighbors is the one we expect (default VRF).\n\n    * self.result = \"skipped\" if the `number` parameter is missing\n    * self.result = \"success\" if all RTC BGP sessions are Established and if the actual\n                         number of BGP RTC neighbors is the one we expect.\n    * self.result = \"failure\" otherwise.\n    \"\"\"\n\n    name = \"VerifyBGPRTCCount\"\n    description = \"Verifies all RTC BGP sessions are established (default VRF) and the actual number of BGP RTC neighbors is the one we expect (default VRF).\"\n    categories = [\"routing\", \"bgp\"]\n    commands = [AntaCommand(command=\"show bgp rt-membership summary\")]\n\n    @check_bgp_family_enable(\"rtc\")\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None) -> None:\n\"\"\"\n        Run VerifyBGPRTCCount validation\n\n        Args:\n            number: The expected number of BGP RTC neighbors (default VRF).\n        \"\"\"\n        if not number:\n            self.result.is_skipped(\"VerifyBGPRTCCount could not run because number was not supplied\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        peers = command_output[\"vrfs\"][\"default\"][\"peers\"]\n        non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict[\"peerState\"] != \"Established\"]\n\n        if not non_established_peers and len(peers) == number:\n            self.result.is_success()\n        else:\n            self.result.is_failure()\n            if len(peers) != number:\n                self.result.is_failure(f\"Expecting {number} BGP RTC peers and got {len(peers)}\")\n            if non_established_peers:\n                self.result.is_failure(f\"The following RTC peers are not established: {non_established_peers}\")\n
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPRTCCount.test","title":"test","text":"
test(number: Optional[int] = None) -> None\n

Run VerifyBGPRTCCount validation

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
number | Optional[int] | The expected number of BGP RTC neighbors (default VRF). | None

Source code in anta/tests/routing/bgp.py
@check_bgp_family_enable(\"rtc\")\n@AntaTest.anta_test\ndef test(self, number: Optional[int] = None) -> None:\n\"\"\"\n    Run VerifyBGPRTCCount validation\n\n    Args:\n        number: The expected number of BGP RTC neighbors (default VRF).\n    \"\"\"\n    if not number:\n        self.result.is_skipped(\"VerifyBGPRTCCount could not run because number was not supplied\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    peers = command_output[\"vrfs\"][\"default\"][\"peers\"]\n    non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict[\"peerState\"] != \"Established\"]\n\n    if not non_established_peers and len(peers) == number:\n        self.result.is_success()\n    else:\n        self.result.is_failure()\n        if len(peers) != number:\n            self.result.is_failure(f\"Expecting {number} BGP RTC peers and got {len(peers)}\")\n        if non_established_peers:\n            self.result.is_failure(f\"The following RTC peers are not established: {non_established_peers}\")\n
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPRTCState","title":"VerifyBGPRTCState","text":"

Bases: AntaTest

Verifies all RTC BGP sessions are established (default VRF).

  • self.result = "skipped" if no BGP RTC peers are returned by the device
  • self.result = "success" if all RTC BGP sessions are established.
  • self.result = "failure" otherwise.
Source code in anta/tests/routing/bgp.py
class VerifyBGPRTCState(AntaTest):\n\"\"\"\n    Verifies all RTC BGP sessions are established (default VRF).\n\n    * self.result = \"skipped\" if no BGP RTC peers are returned by the device\n    * self.result = \"success\" if all RTC BGP sessions are established.\n    * self.result = \"failure\" otherwise.\n    \"\"\"\n\n    name = \"VerifyBGPRTCState\"\n    description = \"Verifies all RTC BGP sessions are established (default VRF).\"\n    categories = [\"routing\", \"bgp\"]\n    commands = [AntaCommand(command=\"show bgp rt-membership summary\")]\n\n    @check_bgp_family_enable(\"rtc\")\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyBGPRTCState validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        bgp_vrfs = command_output[\"vrfs\"]\n\n        peers = bgp_vrfs[\"default\"][\"peers\"]\n        non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict[\"peerState\"] != \"Established\"]\n\n        if not non_established_peers:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"The following RTC peers are not established: {non_established_peers}\")\n
"},{"location":"api/tests.routing.bgp/#anta.tests.routing.bgp.VerifyBGPRTCState.test","title":"test","text":"
test() -> None\n

Run VerifyBGPRTCState validation

Source code in anta/tests/routing/bgp.py
@check_bgp_family_enable(\"rtc\")\n@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyBGPRTCState validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    bgp_vrfs = command_output[\"vrfs\"]\n\n    peers = bgp_vrfs[\"default\"][\"peers\"]\n    non_established_peers = [peer for peer, peer_dict in peers.items() if peer_dict[\"peerState\"] != \"Established\"]\n\n    if not non_established_peers:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"The following RTC peers are not established: {non_established_peers}\")\n
"},{"location":"api/tests.routing.generic/","title":"Generic","text":""},{"location":"api/tests.routing.generic/#anta-catalog-for-routing-generic-tests","title":"ANTA catalog for routing-generic tests","text":"

Generic routing test functions

"},{"location":"api/tests.routing.generic/#anta.tests.routing.generic.VerifyBFD","title":"VerifyBFD","text":"

Bases: AntaTest

Verifies there is no BFD peer in down state (all VRF, IPv4 neighbors).

Source code in anta/tests/routing/generic.py
class VerifyBFD(AntaTest):\n\"\"\"\n    Verifies there is no BFD peer in down state (all VRF, IPv4 neighbors).\n    \"\"\"\n\n    name = \"VerifyBFD\"\n    description = \"Verifies there is no BFD peer in down state (all VRF, IPv4 neighbors).\"\n    categories = [\"routing\", \"generic\"]\n    # revision 1 as later revision introduce additional nesting for type\n    commands = [AntaCommand(command=\"show bfd peers\", revision=1)]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyBFD validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        self.result.is_success()\n\n        for _, vrf_data in command_output[\"vrfs\"].items():\n            for _, neighbor_data in vrf_data[\"ipv4Neighbors\"].items():\n                for peer, peer_data in neighbor_data[\"peerStats\"].items():\n                    if (peer_status := peer_data[\"status\"]) != \"up\":\n                        failure_message = f\"bfd state for peer '{peer}' is {peer_status} (expected up).\"\n                        if (peer_l3intf := peer_data.get(\"l3intf\")) is not None and peer_l3intf != \"\":\n                            failure_message += f\" Interface: {peer_l3intf}.\"\n                        self.result.is_failure(failure_message)\n
"},{"location":"api/tests.routing.generic/#anta.tests.routing.generic.VerifyBFD.test","title":"test","text":"
test() -> None\n

Run VerifyBFD validation

Source code in anta/tests/routing/generic.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyBFD validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    self.result.is_success()\n\n    for _, vrf_data in command_output[\"vrfs\"].items():\n        for _, neighbor_data in vrf_data[\"ipv4Neighbors\"].items():\n            for peer, peer_data in neighbor_data[\"peerStats\"].items():\n                if (peer_status := peer_data[\"status\"]) != \"up\":\n                    failure_message = f\"bfd state for peer '{peer}' is {peer_status} (expected up).\"\n                    if (peer_l3intf := peer_data.get(\"l3intf\")) is not None and peer_l3intf != \"\":\n                        failure_message += f\" Interface: {peer_l3intf}.\"\n                    self.result.is_failure(failure_message)\n
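The nesting walked by VerifyBFD can be hard to visualize; below is a hypothetical revision-1 show bfd peers payload and the same traversal applied to it (the peer address, interface, and status values are invented):

sample_output = {
    "vrfs": {
        "default": {
            "ipv4Neighbors": {
                "10.0.0.1": {
                    "peerStats": {
                        "10.0.0.1": {"status": "down", "l3intf": "Ethernet1"},
                    }
                }
            }
        }
    }
}

# Same three-level walk as VerifyBFD: vrfs -> ipv4Neighbors -> peerStats.
for vrf_data in sample_output["vrfs"].values():
    for neighbor_data in vrf_data["ipv4Neighbors"].values():
        for peer, peer_data in neighbor_data["peerStats"].items():
            if (peer_status := peer_data["status"]) != "up":
                print(f"bfd state for peer '{peer}' is {peer_status} (expected up).")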
"},{"location":"api/tests.routing.generic/#anta.tests.routing.generic.VerifyRoutingProtocolModel","title":"VerifyRoutingProtocolModel","text":"

Bases: AntaTest

Verifies that the configured routing protocol model is the one we expect and that there is no mismatch between the configured and operating routing protocol model.

model(str): Expected routing protocol model (multi-agent or ribd). Default is multi-agent\n
Source code in anta/tests/routing/generic.py
class VerifyRoutingProtocolModel(AntaTest):\n\"\"\"\n    Verifies the configured routing protocol model is the one we expect.\n    And if there is no mismatch between the configured and operating routing protocol model.\n\n        model(str): Expected routing protocol model (multi-agent or ribd). Default is multi-agent\n    \"\"\"\n\n    name = \"VerifyRoutingProtocolModel\"\n    description = (\n        \"Verifies the configured routing protocol model is the expected one and if there is no mismatch between the configured and operating routing protocol model.\"\n    )\n    categories = [\"routing\", \"generic\"]\n    # \"revision\": 3\n    commands = [AntaCommand(command=\"show ip route summary\")]\n\n    @AntaTest.anta_test\n    def test(self, model: Optional[str] = \"multi-agent\") -> None:\n\"\"\"Run VerifyRoutingProtocolModel validation\"\"\"\n\n        if not model:\n            self.result.is_skipped(\"VerifyRoutingProtocolModel was not run as no model was given\")\n            return\n        command_output = self.instance_commands[0].json_output\n\n        configured_model = command_output[\"protoModelStatus\"][\"configuredProtoModel\"]\n        operating_model = command_output[\"protoModelStatus\"][\"operatingProtoModel\"]\n        if configured_model == operating_model == model:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"routing model is misconfigured: configured: {configured_model} - operating: {operating_model} - expected: {model}\")\n
"},{"location":"api/tests.routing.generic/#anta.tests.routing.generic.VerifyRoutingProtocolModel.test","title":"test","text":"
test(model: Optional[str] = 'multi-agent') -> None\n

Run VerifyRoutingProtocolModel validation

Source code in anta/tests/routing/generic.py
@AntaTest.anta_test\ndef test(self, model: Optional[str] = \"multi-agent\") -> None:\n\"\"\"Run VerifyRoutingProtocolModel validation\"\"\"\n\n    if not model:\n        self.result.is_skipped(\"VerifyRoutingProtocolModel was not run as no model was given\")\n        return\n    command_output = self.instance_commands[0].json_output\n\n    configured_model = command_output[\"protoModelStatus\"][\"configuredProtoModel\"]\n    operating_model = command_output[\"protoModelStatus\"][\"operatingProtoModel\"]\n    if configured_model == operating_model == model:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"routing model is misconfigured: configured: {configured_model} - operating: {operating_model} - expected: {model}\")\n
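The chained comparison at the heart of this test requires the configured model, the operating model, and the expected model to all agree. A minimal sketch against an invented show ip route summary fragment:

# Invented payload fragment; only protoModelStatus matters for this check.
proto_model_status = {"configuredProtoModel": "multi-agent", "operatingProtoModel": "ribd"}
model = "multi-agent"

# Equivalent to: configured_model == operating_model == model
matches = proto_model_status["configuredProtoModel"] == proto_model_status["operatingProtoModel"] == model
# matches is False here: the operating model differs, so the test would fail.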
"},{"location":"api/tests.routing.generic/#anta.tests.routing.generic.VerifyRoutingTableSize","title":"VerifyRoutingTableSize","text":"

Bases: AntaTest

Verifies that the size of the IP routing table (default VRF) is between the two provided thresholds.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
minimum | int | Expected minimum routing table (default VRF) size. | required
maximum | int | Expected maximum routing table (default VRF) size. | required

Source code in anta/tests/routing/generic.py
class VerifyRoutingTableSize(AntaTest):\n\"\"\"\n    Verifies the size of the IP routing table (default VRF).\n    Should be between the two provided thresholds.\n\n    Args:\n        minimum(int): Expected minimum routing table (default VRF) size.\n        maximum(int): Expected maximum routing table (default VRF) size.\n    \"\"\"\n\n    name = \"VerifyRoutingTableSize\"\n    description = \"Verifies the size of the IP routing table (default VRF). Should be between the two provided thresholds.\"\n    categories = [\"routing\", \"generic\"]\n    # \"revision\": 3\n    commands = [AntaCommand(command=\"show ip route summary\")]\n\n    @AntaTest.anta_test\n    def test(self, minimum: Optional[int] = None, maximum: Optional[int] = None) -> None:\n\"\"\"Run VerifyRoutingTableSize validation\"\"\"\n\n        if not minimum or not maximum:\n            self.result.is_skipped(f\"VerifyRoutingTableSize was not run as either minimum {minimum} or maximum {maximum} was not provided\")\n            return\n        if not isinstance(minimum, int) or not isinstance(maximum, int):\n            self.result.is_error(f\"VerifyRoutingTableSize was not run as either minimum {minimum} or maximum {maximum} is not a valid value (integer)\")\n            return\n        if maximum < minimum:\n            self.result.is_error(f\"VerifyRoutingTableSize was not run as minimum {minimum} is greater than maximum {maximum}.\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n        total_routes = int(command_output[\"vrfs\"][\"default\"][\"totalRoutes\"])\n        if minimum <= total_routes <= maximum:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"routing-table has {total_routes} routes and is not between min ({minimum}) and maximum ({maximum})\")\n
"},{"location":"api/tests.routing.generic/#anta.tests.routing.generic.VerifyRoutingTableSize.test","title":"test","text":"
test(\n    minimum: Optional[int] = None,\n    maximum: Optional[int] = None,\n) -> None\n

Run VerifyRoutingTableSize validation

Source code in anta/tests/routing/generic.py
@AntaTest.anta_test\ndef test(self, minimum: Optional[int] = None, maximum: Optional[int] = None) -> None:\n\"\"\"Run VerifyRoutingTableSize validation\"\"\"\n\n    if not minimum or not maximum:\n        self.result.is_skipped(f\"VerifyRoutingTableSize was not run as either minimum {minimum} or maximum {maximum} was not provided\")\n        return\n    if not isinstance(minimum, int) or not isinstance(maximum, int):\n        self.result.is_error(f\"VerifyRoutingTableSize was not run as either minimum {minimum} or maximum {maximum} is not a valid value (integer)\")\n        return\n    if maximum < minimum:\n        self.result.is_error(f\"VerifyRoutingTableSize was not run as minimum {minimum} is greater than maximum {maximum}.\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n    total_routes = int(command_output[\"vrfs\"][\"default\"][\"totalRoutes\"])\n    if minimum <= total_routes <= maximum:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"routing-table has {total_routes} routes and is not between min ({minimum}) and maximum ({maximum})\")\n
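The parameter validation above is ordered: missing values skip the test, non-integer values or an inverted range produce an error result, and only then is the route count compared. A short sketch with invented values:

minimum, maximum = 10, 100  # invented thresholds
total_routes = 42           # invented route count

# In the test, these conditions set a skipped or error result instead of raising.
if not isinstance(minimum, int) or not isinstance(maximum, int) or maximum < minimum:
    raise ValueError("invalid thresholds")

in_range = minimum <= total_routes <= maximum  # True: the test would pass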
"},{"location":"api/tests.routing.ospf/","title":"OSPF","text":""},{"location":"api/tests.routing.ospf/#anta-catalog-for-routing-ospf-tests","title":"ANTA catalog for routing-ospf tests","text":"

OSPF test functions

"},{"location":"api/tests.routing.ospf/#anta.tests.routing.ospf.VerifyOSPFNeighborCount","title":"VerifyOSPFNeighborCount","text":"

Bases: AntaTest

Verifies the number of OSPF neighbors in FULL state is the one we expect.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
number | int | The expected number of OSPF neighbors in FULL state. | required

Source code in anta/tests/routing/ospf.py
class VerifyOSPFNeighborCount(AntaTest):\n\"\"\"\n    Verifies the number of OSPF neighbors in FULL state is the one we expect.\n\n    Args:\n        number (int): The expected number of OSPF neighbors in FULL state.\n    \"\"\"\n\n    name = \"VerifyOSPFNeighborCount\"\n    description = \"Verifies the number of OSPF neighbors in FULL state is the one we expect.\"\n    categories = [\"routing\", \"ospf\"]\n    commands = [AntaCommand(command=\"show ip ospf neighbor\")]\n\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None) -> None:\n\"\"\"Run VerifyOSPFNeighborCount validation\"\"\"\n        if not (isinstance(number, int) and number >= 0):\n            self.result.is_skipped(f\"VerifyOSPFNeighborCount was not run as the number given '{number}' is not a valid value.\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        if (neighbor_count := _count_ospf_neighbor(command_output)) == 0:\n            self.result.is_skipped(\"no OSPF neighbor found\")\n            return\n\n        self.result.is_success()\n\n        if neighbor_count != number:\n            self.result.is_failure(f\"device has {neighbor_count} neighbors (expected {number})\")\n\n        not_full_neighbors = _get_not_full_ospf_neighbors(command_output)\n        if not_full_neighbors:\n            self.result.is_failure(f\"Some neighbors are not correctly configured: {not_full_neighbors}.\")\n
"},{"location":"api/tests.routing.ospf/#anta.tests.routing.ospf.VerifyOSPFNeighborCount.test","title":"test","text":"
test(number: Optional[int] = None) -> None\n

Run VerifyOSPFNeighborCount validation

Source code in anta/tests/routing/ospf.py
@AntaTest.anta_test\ndef test(self, number: Optional[int] = None) -> None:\n\"\"\"Run VerifyOSPFNeighborCount validation\"\"\"\n    if not (isinstance(number, int) and number >= 0):\n        self.result.is_skipped(f\"VerifyOSPFNeighborCount was not run as the number given '{number}' is not a valid value.\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    if (neighbor_count := _count_ospf_neighbor(command_output)) == 0:\n        self.result.is_skipped(\"no OSPF neighbor found\")\n        return\n\n    self.result.is_success()\n\n    if neighbor_count != number:\n        self.result.is_failure(f\"device has {neighbor_count} neighbors (expected {number})\")\n\n    not_full_neighbors = _get_not_full_ospf_neighbors(command_output)\n    if not_full_neighbors:\n        self.result.is_failure(f\"Some neighbors are not correctly configured: {not_full_neighbors}.\")\n
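_count_ospf_neighbor and _get_not_full_ospf_neighbors are module-private helpers used by both OSPF tests, and their code is not shown in this reference. Assuming the usual show ip ospf neighbor JSON layout (vrfs, then instList, then ospfNeighborEntries, each entry carrying an adjacencyState), plausible sketches look like this; treat the structure and field names as assumptions:

def count_ospf_neighbor_sketch(command_output: dict) -> int:
    """Count OSPF neighbor entries across all VRFs and instances (assumed layout)."""
    return sum(
        len(instance_data.get("ospfNeighborEntries", []))
        for vrf_data in command_output.get("vrfs", {}).values()
        for instance_data in vrf_data.get("instList", {}).values()
    )


def get_not_full_ospf_neighbors_sketch(command_output: dict) -> list:
    """Collect neighbors whose adjacency is not 'full' (assumed layout)."""
    return [
        neighbor
        for vrf_data in command_output.get("vrfs", {}).values()
        for instance_data in vrf_data.get("instList", {}).values()
        for neighbor in instance_data.get("ospfNeighborEntries", [])
        if neighbor.get("adjacencyState") != "full"
    ]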
"},{"location":"api/tests.routing.ospf/#anta.tests.routing.ospf.VerifyOSPFNeighborState","title":"VerifyOSPFNeighborState","text":"

Bases: AntaTest

Verifies all OSPF neighbors are in FULL state.

Source code in anta/tests/routing/ospf.py
class VerifyOSPFNeighborState(AntaTest):\n\"\"\"\n    Verifies all OSPF neighbors are in FULL state.\n    \"\"\"\n\n    name = \"VerifyOSPFNeighborState\"\n    description = \"Verifies all OSPF neighbors are in FULL state.\"\n    categories = [\"routing\", \"ospf\"]\n    commands = [AntaCommand(command=\"show ip ospf neighbor\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyOSPFNeighborState validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        if _count_ospf_neighbor(command_output) == 0:\n            self.result.is_skipped(\"no OSPF neighbor found\")\n            return\n\n        self.result.is_success()\n\n        not_full_neighbors = _get_not_full_ospf_neighbors(command_output)\n        if not_full_neighbors:\n            self.result.is_failure(f\"Some neighbors are not correctly configured: {not_full_neighbors}.\")\n
"},{"location":"api/tests.routing.ospf/#anta.tests.routing.ospf.VerifyOSPFNeighborState.test","title":"test","text":"
test() -> None\n

Run VerifyOSPFNeighborState validation

Source code in anta/tests/routing/ospf.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyOSPFNeighborState validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    if _count_ospf_neighbor(command_output) == 0:\n        self.result.is_skipped(\"no OSPF neighbor found\")\n        return\n\n    self.result.is_success()\n\n    not_full_neighbors = _get_not_full_ospf_neighbors(command_output)\n    if not_full_neighbors:\n        self.result.is_failure(f\"Some neighbors are not correctly configured: {not_full_neighbors}.\")\n
"},{"location":"api/tests.security/","title":"Security","text":""},{"location":"api/tests.security/#anta-catalog-for-security-tests","title":"ANTA catalog for security tests","text":"

Test functions related to various EOS security settings

"},{"location":"api/tests.security/#anta.tests.security.VerifyAPIHttpStatus","title":"VerifyAPIHttpStatus","text":"

Bases: AntaTest

Verifies if eAPI HTTP server is disabled globally.

Expected Results
  • success: The test will pass if eAPI HTTP server is disabled globally.
  • failure: The test will fail if eAPI HTTP server is NOT disabled globally.
Source code in anta/tests/security.py
class VerifyAPIHttpStatus(AntaTest):\n\"\"\"\n    Verifies if eAPI HTTP server is disabled globally.\n\n    Expected Results:\n        * success: The test will pass if eAPI HTTP server is disabled globally.\n        * failure: The test will fail if eAPI HTTP server is NOT disabled globally.\n    \"\"\"\n\n    name = \"VerifyAPIHttpStatus\"\n    description = \"Verifies if eAPI HTTP server is disabled globally.\"\n    categories = [\"security\"]\n    commands = [AntaCommand(command=\"show management api http-commands\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyAPIHTTPStatus validation.\n        \"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        if command_output[\"enabled\"] and not command_output[\"httpServer\"][\"running\"]:\n            self.result.is_success()\n        else:\n            self.result.is_failure(\"eAPI HTTP server is enabled globally\")\n
"},{"location":"api/tests.security/#anta.tests.security.VerifyAPIHttpStatus.test","title":"test","text":"
test() -> None\n

Run VerifyAPIHTTPStatus validation.

Source code in anta/tests/security.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyAPIHTTPStatus validation.\n    \"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    if command_output[\"enabled\"] and not command_output[\"httpServer\"][\"running\"]:\n        self.result.is_success()\n    else:\n        self.result.is_failure(\"eAPI HTTP server is enabled globally\")\n
"},{"location":"api/tests.security/#anta.tests.security.VerifyAPIHttpsSSL","title":"VerifyAPIHttpsSSL","text":"

Bases: AntaTest

Verifies if eAPI HTTPS server SSL profile is configured and valid.

Expected results
  • success: The test will pass if the eAPI HTTPS server SSL profile is configured and valid.
  • failure: The test will fail if the eAPI HTTPS server SSL profile is NOT configured, misconfigured or invalid.
  • skipped: The test will be skipped if the SSL profile is not provided.
Source code in anta/tests/security.py
class VerifyAPIHttpsSSL(AntaTest):\n\"\"\"\n    Verifies if eAPI HTTPS server SSL profile is configured and valid.\n\n    Expected results:\n        * success: The test will pass if the eAPI HTTPS server SSL profile is configured and valid.\n        * failure: The test will fail if the eAPI HTTPS server SSL profile is NOT configured, misconfigured or invalid.\n        * skipped: The test will be skipped if the SSL profile is not provided.\n    \"\"\"\n\n    name = \"VerifyAPIHttpsSSL\"\n    description = \"Verifies if eAPI HTTPS server SSL profile is configured and valid.\"\n    categories = [\"security\"]\n    commands = [AntaCommand(command=\"show management api http-commands\")]\n\n    @AntaTest.anta_test\n    def test(self, profile: Optional[str] = None) -> None:\n\"\"\"\n        Run VerifyAPIHttpsSSL validation.\n\n        Args:\n            profile: SSL profile to verify.\n        \"\"\"\n        if not profile:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because profile was not supplied\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        try:\n            if command_output[\"sslProfile\"][\"name\"] == profile and command_output[\"sslProfile\"][\"state\"] == \"valid\":\n                self.result.is_success()\n            else:\n                self.result.is_failure(f\"eAPI HTTPS server SSL profile ({profile}) is misconfigured or invalid\")\n\n        except KeyError:\n            self.result.is_failure(f\"eAPI HTTPS server SSL profile ({profile}) is not configured\")\n
"},{"location":"api/tests.security/#anta.tests.security.VerifyAPIHttpsSSL.test","title":"test","text":"
test(profile: Optional[str] = None) -> None\n

Run VerifyAPIHttpsSSL validation.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
profile | Optional[str] | SSL profile to verify. | None

Source code in anta/tests/security.py
@AntaTest.anta_test\ndef test(self, profile: Optional[str] = None) -> None:\n\"\"\"\n    Run VerifyAPIHttpsSSL validation.\n\n    Args:\n        profile: SSL profile to verify.\n    \"\"\"\n    if not profile:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because profile was not supplied\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    try:\n        if command_output[\"sslProfile\"][\"name\"] == profile and command_output[\"sslProfile\"][\"state\"] == \"valid\":\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"eAPI HTTPS server SSL profile ({profile}) is misconfigured or invalid\")\n\n    except KeyError:\n        self.result.is_failure(f\"eAPI HTTPS server SSL profile ({profile}) is not configured\")\n
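The try/except KeyError in this test separates two failure modes: a missing sslProfile key means the profile is not configured at all, while a present but mismatched or invalid profile is reported as misconfigured. A sketch on an invented payload:

sample_output = {"sslProfile": {"name": "API_SSL_Profile", "state": "valid"}}  # invented
profile = "API_SSL_Profile"

try:
    ok = sample_output["sslProfile"]["name"] == profile and sample_output["sslProfile"]["state"] == "valid"
except KeyError:
    ok = False  # sslProfile key absent entirely: reported as "not configured"
# ok is True here, so the test would pass.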
"},{"location":"api/tests.security/#anta.tests.security.VerifyAPIIPv4Acl","title":"VerifyAPIIPv4Acl","text":"

Bases: AntaTest

Verifies if eAPI has the right number of IPv4 ACL(s) configured for a specified VRF.

Expected results
  • success: The test will pass if eAPI has the provided number of IPv4 ACL(s) in the specified VRF.
  • failure: The test will fail if eAPI does not have the right number of IPv4 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.
Source code in anta/tests/security.py
class VerifyAPIIPv4Acl(AntaTest):\n\"\"\"\n    Verifies if eAPI has the right number IPv4 ACL(s) configured for a specified VRF.\n\n    Expected results:\n        * success: The test will pass if eAPI has the provided number of IPv4 ACL(s) in the specified VRF.\n        * failure: The test will fail if eAPI has not the right number of IPv4 ACL(s) in the specified VRF.\n        * skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.\n    \"\"\"\n\n    name = \"VerifyAPIIPv4Acl\"\n    description = \"Verifies if eAPI has the right number IPv4 ACL(s) configured for a specified VRF.\"\n    categories = [\"security\"]\n    commands = [AntaCommand(command=\"show management api http-commands ip access-list summary\")]\n\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n        Run VerifyAPIIPv4Acl validation.\n\n        Args:\n            number: The number of expected IPv4 ACL(s).\n            vrf: The name of the VRF in which to check for eAPI. Defaults to 'default'.\n        \"\"\"\n        if not number or not vrf:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        ipv4_acl_list = command_output[\"ipAclList\"][\"aclList\"]\n        ipv4_acl_number = len(ipv4_acl_list)\n        not_configured_acl_list = []\n\n        if ipv4_acl_number != number:\n            self.result.is_failure(f\"Expected {number} eAPI IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}\")\n            return\n\n        for ipv4_acl in ipv4_acl_list:\n            if vrf not in ipv4_acl[\"configuredVrfs\"] or vrf not in ipv4_acl[\"activeVrfs\"]:\n                not_configured_acl_list.append(ipv4_acl[\"name\"])\n\n        if not_configured_acl_list:\n            self.result.is_failure(f\"eAPI IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.security/#anta.tests.security.VerifyAPIIPv4Acl.test","title":"test","text":"
test(\n    number: Optional[int] = None, vrf: str = \"default\"\n) -> None\n

Run VerifyAPIIPv4Acl validation.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
number | Optional[int] | The number of expected IPv4 ACL(s). | None
vrf | str | The name of the VRF in which to check for eAPI. Defaults to 'default'. | 'default'

Source code in anta/tests/security.py
@AntaTest.anta_test\ndef test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n    Run VerifyAPIIPv4Acl validation.\n\n    Args:\n        number: The number of expected IPv4 ACL(s).\n        vrf: The name of the VRF in which to check for eAPI. Defaults to 'default'.\n    \"\"\"\n    if not number or not vrf:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    ipv4_acl_list = command_output[\"ipAclList\"][\"aclList\"]\n    ipv4_acl_number = len(ipv4_acl_list)\n    not_configured_acl_list = []\n\n    if ipv4_acl_number != number:\n        self.result.is_failure(f\"Expected {number} eAPI IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}\")\n        return\n\n    for ipv4_acl in ipv4_acl_list:\n        if vrf not in ipv4_acl[\"configuredVrfs\"] or vrf not in ipv4_acl[\"activeVrfs\"]:\n            not_configured_acl_list.append(ipv4_acl[\"name\"])\n\n    if not_configured_acl_list:\n        self.result.is_failure(f\"eAPI IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n    else:\n        self.result.is_success()\n
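The same two-step ACL check recurs in the eAPI, SSH, and SNMP ACL tests below: first the ACL count is compared against number, then every ACL must appear in both configuredVrfs and activeVrfs for the requested VRF. A sketch on an invented payload:

sample_output = {
    "ipAclList": {
        "aclList": [
            {"name": "ACL_API", "configuredVrfs": ["default"], "activeVrfs": ["default"]},
            {"name": "ACL_MGMT", "configuredVrfs": ["MGMT"], "activeVrfs": []},
        ]
    }
}  # invented ACL names and VRFs

number, vrf = 2, "default"
acl_list = sample_output["ipAclList"]["aclList"]
assert len(acl_list) == number  # the count check passes

not_configured = [acl["name"] for acl in acl_list if vrf not in acl["configuredVrfs"] or vrf not in acl["activeVrfs"]]
# not_configured == ["ACL_MGMT"]: the test would report a failure for this ACL.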
"},{"location":"api/tests.security/#anta.tests.security.VerifyAPIIPv6Acl","title":"VerifyAPIIPv6Acl","text":"

Bases: AntaTest

Verifies if eAPI has the right number of IPv6 ACL(s) configured for a specified VRF.

Expected results
  • success: The test will pass if eAPI has the provided number of IPv6 ACL(s) in the specified VRF.
  • failure: The test will fail if eAPI does not have the right number of IPv6 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.
Source code in anta/tests/security.py
class VerifyAPIIPv6Acl(AntaTest):\n\"\"\"\n    Verifies if eAPI has the right number IPv6 ACL(s) configured for a specified VRF.\n\n    Expected results:\n        * success: The test will pass if eAPI has the provided number of IPv6 ACL(s) in the specified VRF.\n        * failure: The test will fail if eAPI has not the right number of IPv6 ACL(s) in the specified VRF.\n        * skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.\n    \"\"\"\n\n    name = \"VerifyAPIIPv6Acl\"\n    description = \"Verifies if eAPI has the right number IPv6 ACL(s) configured for a specified VRF.\"\n    categories = [\"security\"]\n    commands = [AntaCommand(command=\"show management api http-commands ipv6 access-list summary\")]\n\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n        Run VerifyAPIIPv6Acl validation.\n\n        Args:\n            number: The number of expected IPv6 ACL(s).\n            vrf: The name of the VRF in which to check for eAPI. Defaults to 'default'.\n        \"\"\"\n        if not number or not vrf:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        ipv6_acl_list = command_output[\"ipv6AclList\"][\"aclList\"]\n        ipv6_acl_number = len(ipv6_acl_list)\n        not_configured_acl_list = []\n\n        if ipv6_acl_number != number:\n            self.result.is_failure(f\"Expected {number} eAPI IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}\")\n            return\n\n        for ipv6_acl in ipv6_acl_list:\n            if vrf not in ipv6_acl[\"configuredVrfs\"] or vrf not in ipv6_acl[\"activeVrfs\"]:\n                not_configured_acl_list.append(ipv6_acl[\"name\"])\n\n        if not_configured_acl_list:\n            self.result.is_failure(f\"eAPI IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.security/#anta.tests.security.VerifyAPIIPv6Acl.test","title":"test","text":"
test(\n    number: Optional[int] = None, vrf: str = \"default\"\n) -> None\n

Run VerifyAPIIPv6Acl validation.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
number | Optional[int] | The number of expected IPv6 ACL(s). | None
vrf | str | The name of the VRF in which to check for eAPI. Defaults to 'default'. | 'default'

Source code in anta/tests/security.py
@AntaTest.anta_test\ndef test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n    Run VerifyAPIIPv6Acl validation.\n\n    Args:\n        number: The number of expected IPv6 ACL(s).\n        vrf: The name of the VRF in which to check for eAPI. Defaults to 'default'.\n    \"\"\"\n    if not number or not vrf:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    ipv6_acl_list = command_output[\"ipv6AclList\"][\"aclList\"]\n    ipv6_acl_number = len(ipv6_acl_list)\n    not_configured_acl_list = []\n\n    if ipv6_acl_number != number:\n        self.result.is_failure(f\"Expected {number} eAPI IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}\")\n        return\n\n    for ipv6_acl in ipv6_acl_list:\n        if vrf not in ipv6_acl[\"configuredVrfs\"] or vrf not in ipv6_acl[\"activeVrfs\"]:\n            not_configured_acl_list.append(ipv6_acl[\"name\"])\n\n    if not_configured_acl_list:\n        self.result.is_failure(f\"eAPI IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n    else:\n        self.result.is_success()\n
"},{"location":"api/tests.security/#anta.tests.security.VerifySSHIPv4Acl","title":"VerifySSHIPv4Acl","text":"

Bases: AntaTest

Verifies if the SSHD agent has the right number of IPv4 ACL(s) configured for a specified VRF.

Expected results
  • success: The test will pass if the SSHD agent has the provided number of IPv4 ACL(s) in the specified VRF.
  • failure: The test will fail if the SSHD agent does not have the right number of IPv4 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.
Source code in anta/tests/security.py
class VerifySSHIPv4Acl(AntaTest):\n\"\"\"\n    Verifies if the SSHD agent has the right number IPv4 ACL(s) configured for a specified VRF.\n\n    Expected results:\n        * success: The test will pass if the SSHD agent has the provided number of IPv4 ACL(s) in the specified VRF.\n        * failure: The test will fail if the SSHD agent has not the right number of IPv4 ACL(s) in the specified VRF.\n        * skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.\n    \"\"\"\n\n    name = \"VerifySSHIPv4Acl\"\n    description = \"Verifies if the SSHD agent has IPv4 ACL(s) configured.\"\n    categories = [\"security\"]\n    commands = [AntaCommand(command=\"show management ssh ip access-list summary\")]\n\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n        Run VerifySSHIPv4Acl validation.\n\n        Args:\n            number: The number of expected IPv4 ACL(s).\n            vrf: The name of the VRF in which to check for the SSHD agent. Defaults to 'default'.\n        \"\"\"\n        if not number or not vrf:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        ipv4_acl_list = command_output[\"ipAclList\"][\"aclList\"]\n        ipv4_acl_number = len(ipv4_acl_list)\n        not_configured_acl_list = []\n\n        if ipv4_acl_number != number:\n            self.result.is_failure(f\"Expected {number} SSH IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}\")\n            return\n\n        for ipv4_acl in ipv4_acl_list:\n            if vrf not in ipv4_acl[\"configuredVrfs\"] or vrf not in ipv4_acl[\"activeVrfs\"]:\n                not_configured_acl_list.append(ipv4_acl[\"name\"])\n\n        if not_configured_acl_list:\n            self.result.is_failure(f\"SSH IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.security/#anta.tests.security.VerifySSHIPv4Acl.test","title":"test","text":"
test(\n    number: Optional[int] = None, vrf: str = \"default\"\n) -> None\n

Run VerifySSHIPv4Acl validation.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
number | Optional[int] | The number of expected IPv4 ACL(s). | None
vrf | str | The name of the VRF in which to check for the SSHD agent. Defaults to 'default'. | 'default'

Source code in anta/tests/security.py
@AntaTest.anta_test\ndef test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n    Run VerifySSHIPv4Acl validation.\n\n    Args:\n        number: The number of expected IPv4 ACL(s).\n        vrf: The name of the VRF in which to check for the SSHD agent. Defaults to 'default'.\n    \"\"\"\n    if not number or not vrf:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    ipv4_acl_list = command_output[\"ipAclList\"][\"aclList\"]\n    ipv4_acl_number = len(ipv4_acl_list)\n    not_configured_acl_list = []\n\n    if ipv4_acl_number != number:\n        self.result.is_failure(f\"Expected {number} SSH IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}\")\n        return\n\n    for ipv4_acl in ipv4_acl_list:\n        if vrf not in ipv4_acl[\"configuredVrfs\"] or vrf not in ipv4_acl[\"activeVrfs\"]:\n            not_configured_acl_list.append(ipv4_acl[\"name\"])\n\n    if not_configured_acl_list:\n        self.result.is_failure(f\"SSH IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n    else:\n        self.result.is_success()\n
"},{"location":"api/tests.security/#anta.tests.security.VerifySSHIPv6Acl","title":"VerifySSHIPv6Acl","text":"

Bases: AntaTest

Verifies if the SSHD agent has the right number of IPv6 ACL(s) configured for a specified VRF.

Expected results
  • success: The test will pass if the SSHD agent has the provided number of IPv6 ACL(s) in the specified VRF.
  • failure: The test will fail if the SSHD agent does not have the right number of IPv6 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.
Source code in anta/tests/security.py
class VerifySSHIPv6Acl(AntaTest):\n\"\"\"\n    Verifies if the SSHD agent has the right number IPv6 ACL(s) configured for a specified VRF.\n\n    Expected results:\n        * success: The test will pass if the SSHD agent has the provided number of IPv6 ACL(s) in the specified VRF.\n        * failure: The test will fail if the SSHD agent has not the right number of IPv6 ACL(s) in the specified VRF.\n        * skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.\n    \"\"\"\n\n    name = \"VerifySSHIPv6Acl\"\n    description = \"Verifies if the SSHD agent has IPv6 ACL(s) configured.\"\n    categories = [\"security\"]\n    commands = [AntaCommand(command=\"show management ssh ipv6 access-list summary\")]\n\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n        Run VerifySSHIPv6Acl validation.\n\n        Args:\n            number: The number of expected IPv6 ACL(s).\n            vrf: The name of the VRF in which to check for the SSHD agent. Defaults to 'default'.\n        \"\"\"\n        if not number or not vrf:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        ipv6_acl_list = command_output[\"ipv6AclList\"][\"aclList\"]\n        ipv6_acl_number = len(ipv6_acl_list)\n        not_configured_acl_list = []\n\n        if ipv6_acl_number != number:\n            self.result.is_failure(f\"Expected {number} SSH IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}\")\n            return\n\n        for ipv6_acl in ipv6_acl_list:\n            if vrf not in ipv6_acl[\"configuredVrfs\"] or vrf not in ipv6_acl[\"activeVrfs\"]:\n                not_configured_acl_list.append(ipv6_acl[\"name\"])\n\n        if not_configured_acl_list:\n            self.result.is_failure(f\"SSH IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.security/#anta.tests.security.VerifySSHIPv6Acl.test","title":"test","text":"
test(\n    number: Optional[int] = None, vrf: str = \"default\"\n) -> None\n

Run VerifySSHIPv6Acl validation.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
number | Optional[int] | The number of expected IPv6 ACL(s). | None
vrf | str | The name of the VRF in which to check for the SSHD agent. Defaults to 'default'. | 'default'

Source code in anta/tests/security.py
@AntaTest.anta_test\ndef test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n    Run VerifySSHIPv6Acl validation.\n\n    Args:\n        number: The number of expected IPv6 ACL(s).\n        vrf: The name of the VRF in which to check for the SSHD agent. Defaults to 'default'.\n    \"\"\"\n    if not number or not vrf:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    ipv6_acl_list = command_output[\"ipv6AclList\"][\"aclList\"]\n    ipv6_acl_number = len(ipv6_acl_list)\n    not_configured_acl_list = []\n\n    if ipv6_acl_number != number:\n        self.result.is_failure(f\"Expected {number} SSH IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}\")\n        return\n\n    for ipv6_acl in ipv6_acl_list:\n        if vrf not in ipv6_acl[\"configuredVrfs\"] or vrf not in ipv6_acl[\"activeVrfs\"]:\n            not_configured_acl_list.append(ipv6_acl[\"name\"])\n\n    if not_configured_acl_list:\n        self.result.is_failure(f\"SSH IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n    else:\n        self.result.is_success()\n
"},{"location":"api/tests.security/#anta.tests.security.VerifySSHStatus","title":"VerifySSHStatus","text":"

Bases: AntaTest

Verifies if the SSHD agent is disabled in the default VRF.

Expected Results
  • success: The test will pass if the SSHD agent is disabled in the default VRF.
  • failure: The test will fail if the SSHD agent is NOT disabled in the default VRF.
Source code in anta/tests/security.py
class VerifySSHStatus(AntaTest):\n\"\"\"\n    Verifies if the SSHD agent is disabled in the default VRF.\n\n    Expected Results:\n        * success: The test will pass if the SSHD agent is disabled in the default VRF.\n        * failure: The test will fail if the SSHD agent is NOT disabled in the default VRF.\n    \"\"\"\n\n    name = \"VerifySSHStatus\"\n    description = \"Verifies if the SSHD agent is disabled in the default VRF.\"\n    categories = [\"security\"]\n    commands = [AntaCommand(command=\"show management ssh\", ofmt=\"text\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifySSHStatus validation.\n        \"\"\"\n\n        command_output = self.instance_commands[0].text_output\n\n        line = [line for line in command_output.split(\"\\n\") if line.startswith(\"SSHD status\")][0]\n        status = line.split(\"is \")[1]\n\n        if status == \"disabled\":\n            self.result.is_success()\n        else:\n            self.result.is_failure(line)\n
"},{"location":"api/tests.security/#anta.tests.security.VerifySSHStatus.test","title":"test","text":"
test() -> None\n

Run VerifySSHStatus validation.

Source code in anta/tests/security.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifySSHStatus validation.\n    \"\"\"\n\n    command_output = self.instance_commands[0].text_output\n\n    line = [line for line in command_output.split(\"\\n\") if line.startswith(\"SSHD status\")][0]\n    status = line.split(\"is \")[1]\n\n    if status == \"disabled\":\n        self.result.is_success()\n    else:\n        self.result.is_failure(line)\n
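Unlike most tests in this module, VerifySSHStatus parses text output (ofmt="text") rather than JSON. A sketch of the same parsing on an invented text payload:

text_output = "SSHD status for Default VRF is disabled\nSSH connection limit is 50\n"  # invented

# Keep the line that reports the SSHD status, then take everything after "is ".
line = [line for line in text_output.split("\n") if line.startswith("SSHD status")][0]
status = line.split("is ")[1]
# status == "disabled", so the test would pass.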
"},{"location":"api/tests.security/#anta.tests.security.VerifyTelnetStatus","title":"VerifyTelnetStatus","text":"

Bases: AntaTest

Verifies if Telnet is disabled in the default VRF.

Expected Results
  • success: The test will pass if Telnet is disabled in the default VRF.
  • failure: The test will fail if Telnet is NOT disabled in the default VRF.
Source code in anta/tests/security.py
class VerifyTelnetStatus(AntaTest):\n\"\"\"\n    Verifies if Telnet is disabled in the default VRF.\n\n    Expected Results:\n        * success: The test will pass if Telnet is disabled in the default VRF.\n        * failure: The test will fail if Telnet is NOT disabled in the default VRF.\n    \"\"\"\n\n    name = \"VerifyTelnetStatus\"\n    description = \"Verifies if Telnet is disabled in the default VRF.\"\n    categories = [\"security\"]\n    commands = [AntaCommand(command=\"show management telnet\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyTelnetStatus validation.\n        \"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        if command_output[\"serverState\"] == \"disabled\":\n            self.result.is_success()\n        else:\n            self.result.is_failure(\"Telnet status for Default VRF is enabled\")\n
"},{"location":"api/tests.security/#anta.tests.security.VerifyTelnetStatus.test","title":"test","text":"
test() -> None\n

Run VerifyTelnetStatus validation.

Source code in anta/tests/security.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyTelnetStatus validation.\n    \"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    if command_output[\"serverState\"] == \"disabled\":\n        self.result.is_success()\n    else:\n        self.result.is_failure(\"Telnet status for Default VRF is enabled\")\n
"},{"location":"api/tests.snmp/","title":"SNMP","text":""},{"location":"api/tests.snmp/#anta-catalog-for-snmp-tests","title":"ANTA catalog for SNMP tests","text":"

Test functions related to various EOS SNMP settings

"},{"location":"api/tests.snmp/#anta.tests.snmp.VerifySnmpIPv4Acl","title":"VerifySnmpIPv4Acl","text":"

Bases: AntaTest

Verifies if the SNMP agent has the right number of IPv4 ACL(s) configured for a specified VRF.

Expected results
  • success: The test will pass if the SNMP agent has the provided number of IPv4 ACL(s) in the specified VRF.
  • failure: The test will fail if the SNMP agent does not have the right number of IPv4 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.
Source code in anta/tests/snmp.py
class VerifySnmpIPv4Acl(AntaTest):\n\"\"\"\n    Verifies if the SNMP agent has the right number IPv4 ACL(s) configured for a specified VRF.\n\n    Expected results:\n        * success: The test will pass if the SNMP agent has the provided number of IPv4 ACL(s) in the specified VRF.\n        * failure: The test will fail if the SNMP agent has not the right number of IPv4 ACL(s) in the specified VRF.\n        * skipped: The test will be skipped if the number of IPv4 ACL(s) or VRF parameter is not provided.\n    \"\"\"\n\n    name = \"VerifySnmpIPv4Acl\"\n    description = \"Verifies if the SNMP agent has IPv4 ACL(s) configured.\"\n    categories = [\"snmp\"]\n    commands = [AntaCommand(command=\"show snmp ipv4 access-list summary\")]\n\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n        Run VerifySnmpIPv4Acl validation.\n\n        Args:\n            number: The number of expected IPv4 ACL(s).\n            vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.\n        \"\"\"\n        if not number or not vrf:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        ipv4_acl_list = command_output[\"ipAclList\"][\"aclList\"]\n        ipv4_acl_number = len(ipv4_acl_list)\n        not_configured_acl_list = []\n\n        if ipv4_acl_number != number:\n            self.result.is_failure(f\"Expected {number} SNMP IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}\")\n            return\n\n        for ipv4_acl in ipv4_acl_list:\n            if vrf not in ipv4_acl[\"configuredVrfs\"] or vrf not in ipv4_acl[\"activeVrfs\"]:\n                not_configured_acl_list.append(ipv4_acl[\"name\"])\n\n        if not_configured_acl_list:\n            self.result.is_failure(f\"SNMP IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.snmp/#anta.tests.snmp.VerifySnmpIPv4Acl.test","title":"test","text":"
test(\n    number: Optional[int] = None, vrf: str = \"default\"\n) -> None\n

Run VerifySnmpIPv4Acl validation.

Parameters:

  • number (Optional[int]): The number of expected IPv4 ACL(s). Default: None
  • vrf (str): The name of the VRF in which to check for the SNMP agent. Default: 'default'

Source code in anta/tests/snmp.py
@AntaTest.anta_test\ndef test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n    Run VerifySnmpIPv4Acl validation.\n\n    Args:\n        number: The number of expected IPv4 ACL(s).\n        vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.\n    \"\"\"\n    if not number or not vrf:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    ipv4_acl_list = command_output[\"ipAclList\"][\"aclList\"]\n    ipv4_acl_number = len(ipv4_acl_list)\n    not_configured_acl_list = []\n\n    if ipv4_acl_number != number:\n        self.result.is_failure(f\"Expected {number} SNMP IPv4 ACL(s) in vrf {vrf} but got {ipv4_acl_number}\")\n        return\n\n    for ipv4_acl in ipv4_acl_list:\n        if vrf not in ipv4_acl[\"configuredVrfs\"] or vrf not in ipv4_acl[\"activeVrfs\"]:\n            not_configured_acl_list.append(ipv4_acl[\"name\"])\n\n    if not_configured_acl_list:\n        self.result.is_failure(f\"SNMP IPv4 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n    else:\n        self.result.is_success()\n
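To see the two checks in isolation, here is a hedged sketch with an assumed JSON shape for "show snmp ipv4 access-list summary" (all field values are illustrative):

# Assumed eAPI JSON shape, for illustration only.
command_output = {
    "ipAclList": {
        "aclList": [
            {"name": "ACL_SNMP", "configuredVrfs": ["MGMT"], "activeVrfs": ["MGMT"]},
        ]
    }
}
number, vrf = 1, "MGMT"

ipv4_acl_list = command_output["ipAclList"]["aclList"]
assert len(ipv4_acl_list) == number  # first check: expected ACL count
not_configured = [acl["name"] for acl in ipv4_acl_list
                  if vrf not in acl["configuredVrfs"] or vrf not in acl["activeVrfs"]]
assert not not_configured  # second check: every ACL configured and active in the VRF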
"},{"location":"api/tests.snmp/#anta.tests.snmp.VerifySnmpIPv6Acl","title":"VerifySnmpIPv6Acl","text":"

Bases: AntaTest

Verifies if the SNMP agent has the right number of IPv6 ACL(s) configured for a specified VRF.

Expected results
  • success: The test will pass if the SNMP agent has the provided number of IPv6 ACL(s) in the specified VRF.
  • failure: The test will fail if the SNMP agent does not have the right number of IPv6 ACL(s) in the specified VRF.
  • skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.
Source code in anta/tests/snmp.py
class VerifySnmpIPv6Acl(AntaTest):\n\"\"\"\n    Verifies if the SNMP agent has the right number IPv6 ACL(s) configured for a specified VRF.\n\n    Expected results:\n        * success: The test will pass if the SNMP agent has the provided number of IPv6 ACL(s) in the specified VRF.\n        * failure: The test will fail if the SNMP agent has not the right number of IPv6 ACL(s) in the specified VRF.\n        * skipped: The test will be skipped if the number of IPv6 ACL(s) or VRF parameter is not provided.\n    \"\"\"\n\n    name = \"VerifySnmpIPv6Acl\"\n    description = \"Verifies if the SNMP agent has IPv6 ACL(s) configured.\"\n    categories = [\"snmp\"]\n    commands = [AntaCommand(command=\"show snmp ipv6 access-list summary\")]\n\n    @AntaTest.anta_test\n    def test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n        Run VerifySnmpIPv6Acl validation.\n\n        Args:\n            number: The number of expected IPv6 ACL(s).\n            vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.\n        \"\"\"\n        if not number or not vrf:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        ipv6_acl_list = command_output[\"ipv6AclList\"][\"aclList\"]\n        ipv6_acl_number = len(ipv6_acl_list)\n        not_configured_acl_list = []\n\n        if ipv6_acl_number != number:\n            self.result.is_failure(f\"Expected {number} SNMP IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}\")\n            return\n\n        for ipv6_acl in ipv6_acl_list:\n            if vrf not in ipv6_acl[\"configuredVrfs\"] or vrf not in ipv6_acl[\"activeVrfs\"]:\n                not_configured_acl_list.append(ipv6_acl[\"name\"])\n\n        if not_configured_acl_list:\n            self.result.is_failure(f\"SNMP IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.snmp/#anta.tests.snmp.VerifySnmpIPv6Acl.test","title":"test","text":"
test(\n    number: Optional[int] = None, vrf: str = \"default\"\n) -> None\n

Run VerifySnmpIPv6Acl validation.

Parameters:

  • number (Optional[int]): The number of expected IPv6 ACL(s). Default: None
  • vrf (str): The name of the VRF in which to check for the SNMP agent. Default: 'default'

Source code in anta/tests/snmp.py
@AntaTest.anta_test\ndef test(self, number: Optional[int] = None, vrf: str = \"default\") -> None:\n\"\"\"\n    Run VerifySnmpIPv6Acl validation.\n\n    Args:\n        number: The number of expected IPv6 ACL(s).\n        vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.\n    \"\"\"\n    if not number or not vrf:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because number or vrf was not supplied\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    ipv6_acl_list = command_output[\"ipv6AclList\"][\"aclList\"]\n    ipv6_acl_number = len(ipv6_acl_list)\n    not_configured_acl_list = []\n\n    if ipv6_acl_number != number:\n        self.result.is_failure(f\"Expected {number} SNMP IPv6 ACL(s) in vrf {vrf} but got {ipv6_acl_number}\")\n        return\n\n    for ipv6_acl in ipv6_acl_list:\n        if vrf not in ipv6_acl[\"configuredVrfs\"] or vrf not in ipv6_acl[\"activeVrfs\"]:\n            not_configured_acl_list.append(ipv6_acl[\"name\"])\n\n    if not_configured_acl_list:\n        self.result.is_failure(f\"SNMP IPv6 ACL(s) not configured or active in vrf {vrf}: {not_configured_acl_list}\")\n    else:\n        self.result.is_success()\n
"},{"location":"api/tests.snmp/#anta.tests.snmp.VerifySnmpStatus","title":"VerifySnmpStatus","text":"

Bases: AntaTest

Verifies whether the SNMP agent is enabled in a specified VRF.

Expected Results
  • success: The test will pass if the SNMP agent is enabled in the specified VRF.
  • failure: The test will fail if the SNMP agent is disabled in the specified VRF.
  • skipped: The test will be skipped if the VRF parameter is not provided.
Source code in anta/tests/snmp.py
class VerifySnmpStatus(AntaTest):\n\"\"\"\n    Verifies whether the SNMP agent is enabled in a specified VRF.\n\n    Expected Results:\n        * success: The test will pass if the SNMP agent is enabled in the specified VRF.\n        * failure: The test will fail if the SNMP agent is disabled in the specified VRF.\n        * skipped: The test will be skipped if the VRF parameter is not provided.\n    \"\"\"\n\n    name = \"VerifySnmpStatus\"\n    description = \"Verifies if the SNMP agent is enabled.\"\n    categories = [\"snmp\"]\n    commands = [AntaCommand(command=\"show snmp\")]\n\n    @AntaTest.anta_test\n    def test(self, vrf: str = \"default\") -> None:\n\"\"\"\n        Run VerifySnmpStatus validation.\n\n        Args:\n            vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.\n        \"\"\"\n        if not vrf:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because vrf was not supplied\")\n        else:\n            command_output = self.instance_commands[0].json_output\n\n            if command_output[\"enabled\"] and vrf in command_output[\"vrfs\"][\"snmpVrfs\"]:\n                self.result.is_success()\n            else:\n                self.result.is_failure(f\"SNMP agent disabled in vrf {vrf}\")\n
"},{"location":"api/tests.snmp/#anta.tests.snmp.VerifySnmpStatus.test","title":"test","text":"
test(vrf: str = 'default') -> None\n

Run VerifySnmpStatus validation.

Parameters:

  • vrf (str): The name of the VRF in which to check for the SNMP agent. Default: 'default'

Source code in anta/tests/snmp.py
@AntaTest.anta_test\ndef test(self, vrf: str = \"default\") -> None:\n\"\"\"\n    Run VerifySnmpStatus validation.\n\n    Args:\n        vrf: The name of the VRF in which to check for the SNMP agent. Defaults to 'default'.\n    \"\"\"\n    if not vrf:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because vrf was not supplied\")\n    else:\n        command_output = self.instance_commands[0].json_output\n\n        if command_output[\"enabled\"] and vrf in command_output[\"vrfs\"][\"snmpVrfs\"]:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"SNMP agent disabled in vrf {vrf}\")\n
"},{"location":"api/tests.software/","title":"Software","text":""},{"location":"api/tests.software/#anta-catalog-for-software-tests","title":"ANTA catalog for software tests","text":"

Test functions related to the EOS software

"},{"location":"api/tests.software/#anta.tests.software.VerifyEOSExtensions","title":"VerifyEOSExtensions","text":"

Bases: AntaTest

Verifies all EOS extensions installed on the device are enabled for boot persistence.

Source code in anta/tests/software.py
class VerifyEOSExtensions(AntaTest):\n\"\"\"\n    Verifies all EOS extensions installed on the device are enabled for boot persistence.\n    \"\"\"\n\n    name = \"VerifyEOSExtensions\"\n    description = \"Verifies all EOS extensions installed on the device are enabled for boot persistence.\"\n    categories = [\"software\"]\n    commands = [AntaCommand(command=\"show extensions\"), AntaCommand(command=\"show boot-extensions\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyEOSExtensions validation\"\"\"\n\n        boot_extensions = []\n\n        show_extensions_command_output = self.instance_commands[0].json_output\n        show_boot_extensions_command_output = self.instance_commands[1].json_output\n\n        installed_extensions = [\n            extension for extension, extension_data in show_extensions_command_output[\"extensions\"].items() if extension_data[\"status\"] == \"installed\"\n        ]\n\n        for extension in show_boot_extensions_command_output[\"extensions\"]:\n            extension = extension.strip(\"\\n\")\n            if extension != \"\":\n                boot_extensions.append(extension)\n\n        installed_extensions.sort()\n        boot_extensions.sort()\n        if installed_extensions == boot_extensions:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Missing EOS extensions: installed {installed_extensions} / configured: {boot_extensions}\")\n
"},{"location":"api/tests.software/#anta.tests.software.VerifyEOSExtensions.test","title":"test","text":"
test() -> None\n

Run VerifyEOSExtensions validation

Source code in anta/tests/software.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyEOSExtensions validation\"\"\"\n\n    boot_extensions = []\n\n    show_extensions_command_output = self.instance_commands[0].json_output\n    show_boot_extensions_command_output = self.instance_commands[1].json_output\n\n    installed_extensions = [\n        extension for extension, extension_data in show_extensions_command_output[\"extensions\"].items() if extension_data[\"status\"] == \"installed\"\n    ]\n\n    for extension in show_boot_extensions_command_output[\"extensions\"]:\n        extension = extension.strip(\"\\n\")\n        if extension != \"\":\n            boot_extensions.append(extension)\n\n    installed_extensions.sort()\n    boot_extensions.sort()\n    if installed_extensions == boot_extensions:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"Missing EOS extensions: installed {installed_extensions} / configured: {boot_extensions}\")\n
"},{"location":"api/tests.software/#anta.tests.software.VerifyEOSVersion","title":"VerifyEOSVersion","text":"

Bases: AntaTest

Verifies the device is running one of the allowed EOS versions.

Source code in anta/tests/software.py
class VerifyEOSVersion(AntaTest):\n\"\"\"\n    Verifies the device is running one of the allowed EOS version.\n    \"\"\"\n\n    name = \"VerifyEOSVersion\"\n    description = \"Verifies the device is running one of the allowed EOS version.\"\n    categories = [\"software\"]\n    commands = [AntaCommand(command=\"show version\")]\n\n    @AntaTest.anta_test\n    def test(self, versions: Optional[List[str]] = None) -> None:\n\"\"\"\n        Run VerifyEOSVersion validation\n\n        Args:\n            versions: List of allowed EOS versions.\n        \"\"\"\n        if not versions:\n            self.result.is_skipped(\"VerifyEOSVersion was not run as no versions were given\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        if command_output[\"version\"] in versions:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f'device is running version {command_output[\"version\"]} not in expected versions: {versions}')\n
"},{"location":"api/tests.software/#anta.tests.software.VerifyEOSVersion.test","title":"test","text":"
test(versions: Optional[List[str]] = None) -> None\n

Run VerifyEOSVersion validation

Parameters:

  • versions (Optional[List[str]]): List of allowed EOS versions. Default: None

Source code in anta/tests/software.py
@AntaTest.anta_test\ndef test(self, versions: Optional[List[str]] = None) -> None:\n\"\"\"\n    Run VerifyEOSVersion validation\n\n    Args:\n        versions: List of allowed EOS versions.\n    \"\"\"\n    if not versions:\n        self.result.is_skipped(\"VerifyEOSVersion was not run as no versions were given\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    if command_output[\"version\"] in versions:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f'device is running version {command_output[\"version\"]} not in expected versions: {versions}')\n
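Like any AntaTest subclass, this test can be driven directly from Python. A hedged sketch, assuming the 0.6.0 convention of instantiating a test with an AntaDevice and awaiting its decorated test() coroutine (the version strings are illustrative):

from anta.tests.software import VerifyEOSVersion

async def check_version(device) -> None:
    # "device" is assumed to be a connected AntaDevice instance.
    test = VerifyEOSVersion(device)  # assumed constructor: AntaTest(device)
    await test.test(versions=["4.27.5M", "4.28.3M"])
    print(test.result)  # success if the running version is in the list, else failure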
"},{"location":"api/tests.software/#anta.tests.software.VerifyTerminAttrVersion","title":"VerifyTerminAttrVersion","text":"

Bases: AntaTest

Verifies the device is running one of the allowed TerminAttr versions.

Source code in anta/tests/software.py
class VerifyTerminAttrVersion(AntaTest):\n\"\"\"\n    Verifies the device is running one of the allowed TerminAttr version.\n    \"\"\"\n\n    name = \"VerifyTerminAttrVersion\"\n    description = \"Verifies the device is running one of the allowed TerminAttr version.\"\n    categories = [\"software\"]\n    commands = [AntaCommand(command=\"show version detail\")]\n\n    @AntaTest.anta_test\n    def test(self, versions: Optional[List[str]] = None) -> None:\n\"\"\"\n        Run VerifyTerminAttrVersion validation\n\n        Args:\n            versions: List of allowed TerminAttr versions.\n        \"\"\"\n\n        if not versions:\n            self.result.is_skipped(\"VerifyTerminAttrVersion was not run as no versions were given\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        command_output_data = command_output[\"details\"][\"packages\"][\"TerminAttr-core\"][\"version\"]\n        if command_output_data in versions:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"device is running TerminAttr version {command_output_data} and is not in the allowed list: {versions}\")\n
"},{"location":"api/tests.software/#anta.tests.software.VerifyTerminAttrVersion.test","title":"test","text":"
test(versions: Optional[List[str]] = None) -> None\n

Run VerifyTerminAttrVersion validation

Parameters:

  • versions (Optional[List[str]]): List of allowed TerminAttr versions. Default: None

Source code in anta/tests/software.py
@AntaTest.anta_test\ndef test(self, versions: Optional[List[str]] = None) -> None:\n\"\"\"\n    Run VerifyTerminAttrVersion validation\n\n    Args:\n        versions: List of allowed TerminAttr versions.\n    \"\"\"\n\n    if not versions:\n        self.result.is_skipped(\"VerifyTerminAttrVersion was not run as no versions were given\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    command_output_data = command_output[\"details\"][\"packages\"][\"TerminAttr-core\"][\"version\"]\n    if command_output_data in versions:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"device is running TerminAttr version {command_output_data} and is not in the allowed list: {versions}\")\n
"},{"location":"api/tests.stp/","title":"STP","text":""},{"location":"api/tests.stp/#anta-catalog-for-stp-tests","title":"ANTA catalog for STP tests","text":"

Test functions related to various Spanning Tree Protocol (STP) settings

"},{"location":"api/tests.stp/#anta.tests.stp.VerifySTPBlockedPorts","title":"VerifySTPBlockedPorts","text":"

Bases: AntaTest

Verifies there are no STP blocked ports.

Expected Results
  • success: The test will pass if there are NO ports blocked by STP.
  • failure: The test will fail if there are ports blocked by STP.
Source code in anta/tests/stp.py
class VerifySTPBlockedPorts(AntaTest):\n\"\"\"\n    Verifies there is no STP blocked ports.\n\n    Expected Results:\n        * success: The test will pass if there are NO ports blocked by STP.\n        * failure: The test will fail if there are ports blocked by STP.\n    \"\"\"\n\n    name = \"VerifySTPBlockedPorts\"\n    description = \"Verifies there is no STP blocked ports.\"\n    categories = [\"stp\"]\n    commands = [AntaCommand(command=\"show spanning-tree blockedports\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifySTPBlockedPorts validation\n        \"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        if not (stp_instances := command_output[\"spanningTreeInstances\"]):\n            self.result.is_success()\n        else:\n            for key, value in stp_instances.items():\n                stp_instances[key] = value.pop(\"spanningTreeBlockedPorts\")\n            self.result.is_failure(f\"The following ports are blocked by STP: {stp_instances}\")\n
"},{"location":"api/tests.stp/#anta.tests.stp.VerifySTPBlockedPorts.test","title":"test","text":"
test() -> None\n

Run VerifySTPBlockedPorts validation

Source code in anta/tests/stp.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifySTPBlockedPorts validation\n    \"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    if not (stp_instances := command_output[\"spanningTreeInstances\"]):\n        self.result.is_success()\n    else:\n        for key, value in stp_instances.items():\n            stp_instances[key] = value.pop(\"spanningTreeBlockedPorts\")\n        self.result.is_failure(f\"The following ports are blocked by STP: {stp_instances}\")\n
"},{"location":"api/tests.stp/#anta.tests.stp.VerifySTPCounters","title":"VerifySTPCounters","text":"

Bases: AntaTest

Verifies there are no errors in STP BPDU packets.

Expected Results
  • success: The test will pass if there are NO STP BPDU packet errors under all interfaces participating in STP.
  • failure: The test will fail if there are STP BPDU packet errors on one or many interface(s).
Source code in anta/tests/stp.py
class VerifySTPCounters(AntaTest):\n\"\"\"\n    Verifies there are no errors in STP BPDU packets.\n\n    Expected Results:\n        * success: The test will pass if there are NO STP BPDU packet errors under all interfaces participating in STP.\n        * failure: The test will fail if there are STP BPDU packet errors on one or many interface(s).\n    \"\"\"\n\n    name = \"VerifySTPCounters\"\n    description = \"Verifies there are no errors in STP BPDU packets.\"\n    categories = [\"stp\"]\n    commands = [AntaCommand(command=\"show spanning-tree counters\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifySTPCounters validation\n        \"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        interfaces_with_errors = [\n            interface for interface, counters in command_output[\"interfaces\"].items() if counters[\"bpduTaggedError\"] or counters[\"bpduOtherError\"] != 0\n        ]\n\n        if interfaces_with_errors:\n            self.result.is_failure(f\"The following interfaces have STP BPDU packet errors: {interfaces_with_errors}\")\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.stp/#anta.tests.stp.VerifySTPCounters.test","title":"test","text":"
test() -> None\n

Run VerifySTPCounters validation

Source code in anta/tests/stp.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifySTPCounters validation\n    \"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    interfaces_with_errors = [\n        interface for interface, counters in command_output[\"interfaces\"].items() if counters[\"bpduTaggedError\"] or counters[\"bpduOtherError\"] != 0\n    ]\n\n    if interfaces_with_errors:\n        self.result.is_failure(f\"The following interfaces have STP BPDU packet errors: {interfaces_with_errors}\")\n    else:\n        self.result.is_success()\n
"},{"location":"api/tests.stp/#anta.tests.stp.VerifySTPForwardingPorts","title":"VerifySTPForwardingPorts","text":"

Bases: AntaTest

Verifies that all interfaces are in a forwarding state for a provided list of VLAN(s).

Expected Results
  • success: The test will pass if all interfaces are in a forwarding state for the specified VLAN(s).
  • failure: The test will fail if one or many interfaces are NOT in a forwarding state in the specified VLAN(s).
  • error: The test will give an error if a list of VLAN(s) is not provided as template_params.
Source code in anta/tests/stp.py
class VerifySTPForwardingPorts(AntaTest):\n\"\"\"\n    Verifies that all interfaces are in a forwarding state for a provided list of VLAN(s).\n\n    Expected Results:\n        * success: The test will pass if all interfaces are in a forwarding state for the specified VLAN(s).\n        * failure: The test will fail if one or many interfaces are NOT in a forwarding state in the specified VLAN(s).\n        * error: The test will give an error if a list of VLAN(s) is not provided as template_params.\n    \"\"\"\n\n    name = \"VerifySTPForwardingPorts\"\n    description = \"Verifies that all interfaces are forwarding for a provided list of VLAN(s).\"\n    categories = [\"stp\"]\n    template = AntaTemplate(template=\"show spanning-tree topology vlan {vlan} status\")\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifySTPForwardingPorts validation.\n        \"\"\"\n\n        self.result.is_success()\n\n        for command in self.instance_commands:\n            if command.params and \"vlan\" in command.params:\n                vlan_id = command.params[\"vlan\"]\n\n            if not (topologies := get_value(command.json_output, \"topologies\")):\n                self.result.is_failure(f\"STP instance for VLAN {vlan_id} is not configured\")\n\n            else:\n                for value in topologies.values():\n                    if int(vlan_id) in value[\"vlans\"]:\n                        interfaces_not_forwarding = [interface for interface, state in value[\"interfaces\"].items() if state[\"state\"] != \"forwarding\"]\n\n                if interfaces_not_forwarding:\n                    self.result.is_failure(f\"The following interface(s) are not in a forwarding state for VLAN {vlan_id}: {interfaces_not_forwarding}\")\n
"},{"location":"api/tests.stp/#anta.tests.stp.VerifySTPForwardingPorts.test","title":"test","text":"
test() -> None\n

Run VerifySTPForwardingPorts validation.

Source code in anta/tests/stp.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifySTPForwardingPorts validation.\n    \"\"\"\n\n    self.result.is_success()\n\n    for command in self.instance_commands:\n        if command.params and \"vlan\" in command.params:\n            vlan_id = command.params[\"vlan\"]\n\n        if not (topologies := get_value(command.json_output, \"topologies\")):\n            self.result.is_failure(f\"STP instance for VLAN {vlan_id} is not configured\")\n\n        else:\n            for value in topologies.values():\n                if int(vlan_id) in value[\"vlans\"]:\n                    interfaces_not_forwarding = [interface for interface, state in value[\"interfaces\"].items() if state[\"state\"] != \"forwarding\"]\n\n            if interfaces_not_forwarding:\n                self.result.is_failure(f\"The following interface(s) are not in a forwarding state for VLAN {vlan_id}: {interfaces_not_forwarding}\")\n
"},{"location":"api/tests.stp/#anta.tests.stp.VerifySTPMode","title":"VerifySTPMode","text":"

Bases: AntaTest

Verifies the configured STP mode for a provided list of VLAN(s).

Expected Results
  • success: The test will pass if the STP mode is configured properly in the specified VLAN(s).
  • failure: The test will fail if the STP mode is NOT configured properly for one or more specified VLAN(s).
  • skipped: The test will be skipped if the STP mode is not provided.
  • error: The test will give an error if a list of VLAN(s) is not provided as template_params.
Source code in anta/tests/stp.py
class VerifySTPMode(AntaTest):\n\"\"\"\n    Verifies the configured STP mode for a provided list of VLAN(s).\n\n    Expected Results:\n        * success: The test will pass if the STP mode is configured properly in the specified VLAN(s).\n        * failure: The test will fail if the STP mode is NOT configured properly for one or more specified VLAN(s).\n        * skipped: The test will be skipped if the STP mode is not provided.\n        * error: The test will give an error if a list of VLAN(s) is not provided as template_params.\n    \"\"\"\n\n    name = \"VerifySTPMode\"\n    description = \"Verifies the configured STP mode for a provided list of VLAN(s).\"\n    categories = [\"stp\"]\n    template = AntaTemplate(template=\"show spanning-tree vlan {vlan}\")\n\n    @staticmethod\n    def _check_stp_mode(mode: str) -> None:\n\"\"\"\n        Verifies if the provided STP mode is compatible with Arista EOS devices.\n\n        Args:\n            mode: The STP mode to verify.\n        \"\"\"\n        stp_modes = [\"mstp\", \"rstp\", \"rapidPvst\"]\n\n        if mode not in stp_modes:\n            raise ValueError(f\"Wrong STP mode provided. Valid modes are: {stp_modes}\")\n\n    @AntaTest.anta_test\n    def test(self, mode: str = \"mstp\") -> None:\n\"\"\"\n        Run VerifySTPMode validation.\n\n        Args:\n            mode: STP mode to verify. Defaults to 'mstp'.\n        \"\"\"\n        if not mode:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because mode was not supplied\")\n            return\n\n        self._check_stp_mode(mode)\n\n        self.result.is_success()\n\n        for command in self.instance_commands:\n            if command.params and \"vlan\" in command.params:\n                vlan_id = command.params[\"vlan\"]\n            if not (stp_mode := get_value(command.json_output, f\"spanningTreeVlanInstances.{vlan_id}.spanningTreeVlanInstance.protocol\")):\n                self.result.is_failure(f\"STP mode '{mode}' not configured for VLAN {vlan_id}\")\n\n            elif stp_mode != mode:\n                self.result.is_failure(f\"Wrong STP mode configured for VLAN {vlan_id}\")\n
"},{"location":"api/tests.stp/#anta.tests.stp.VerifySTPMode.test","title":"test","text":"
test(mode: str = 'mstp') -> None\n

Run VerifySTPMode validation.

Parameters:

  • mode (str): STP mode to verify. Default: 'mstp'

Source code in anta/tests/stp.py
@AntaTest.anta_test\ndef test(self, mode: str = \"mstp\") -> None:\n\"\"\"\n    Run VerifySTPVersion validation.\n\n    Args:\n        mode: STP mode to verify. Defaults to 'mstp'.\n    \"\"\"\n    if not mode:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because mode was not supplied\")\n        return\n\n    self._check_stp_mode(mode)\n\n    self.result.is_success()\n\n    for command in self.instance_commands:\n        if command.params and \"vlan\" in command.params:\n            vlan_id = command.params[\"vlan\"]\n        if not (stp_mode := get_value(command.json_output, f\"spanningTreeVlanInstances.{vlan_id}.spanningTreeVlanInstance.protocol\")):\n            self.result.is_failure(f\"STP mode '{mode}' not configured for VLAN {vlan_id}\")\n\n        elif stp_mode != mode:\n            self.result.is_failure(f\"Wrong STP mode configured for VLAN {vlan_id}\")\n
"},{"location":"api/tests.stp/#anta.tests.stp.VerifySTPRootPriority","title":"VerifySTPRootPriority","text":"

Bases: AntaTest

Verifies the STP root priority for a provided list of VLAN or MST instance ID(s).

Expected Results
  • success: The test will pass if the STP root priority is configured properly for the specified VLAN or MST instance ID(s).
  • failure: The test will fail if the STP root priority is NOT configured properly for the specified VLAN or MST instance ID(s).
  • skipped: The test will be skipped if the STP root priority is not provided.
Source code in anta/tests/stp.py
class VerifySTPRootPriority(AntaTest):\n\"\"\"\n    Verifies the STP root priority for a provided list of VLAN or MST instance ID(s).\n\n    Expected Results:\n        * success: The test will pass if the STP root priority is configured properly for the specified VLAN or MST instance ID(s).\n        * failure: The test will fail if the STP root priority is NOT configured properly for the specified VLAN or MST instance ID(s).\n        * skipped: The test will be skipped if the STP root priority is not provided.\n    \"\"\"\n\n    name = \"VerifySTPRootPriority\"\n    description = \"Verifies the STP root priority for a provided list of VLAN or MST instance ID(s).\"\n    categories = [\"stp\"]\n    commands = [AntaCommand(command=\"show spanning-tree root detail\")]\n\n    @AntaTest.anta_test\n    def test(self, priority: Optional[int] = None, instances: Optional[List[int]] = None) -> None:\n\"\"\"\n        Run VerifySTPRootPriority validation.\n\n        Args:\n            priority: STP root priority to verify.\n            instances: List of VLAN or MST instance ID(s). By default, ALL VLAN or MST instance ID(s) will be verified.\n        \"\"\"\n        if not priority:\n            self.result.is_skipped(f\"{self.__class__.name} did not run because priority was not supplied\")\n            return\n\n        command_output = self.instance_commands[0].json_output\n\n        if not (stp_instances := command_output[\"instances\"]):\n            self.result.is_failure(\"No STP instances configured\")\n            return\n\n        for instance in stp_instances:\n            if instance.startswith(\"MST\"):\n                prefix = \"MST\"\n                break\n            if instance.startswith(\"VL\"):\n                prefix = \"VL\"\n                break\n\n        check_instances = [f\"{prefix}{instance_id}\" for instance_id in instances] if instances else command_output[\"instances\"].keys()\n\n        wrong_priority_instances = [instance for instance in check_instances if get_value(command_output, f\"instances.{instance}.rootBridge.priority\") != priority]\n\n        if wrong_priority_instances:\n            self.result.is_failure(f\"The following instance(s) have the wrong STP root priority configured: {wrong_priority_instances}\")\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.stp/#anta.tests.stp.VerifySTPRootPriority.test","title":"test","text":"
test(\n    priority: Optional[int] = None,\n    instances: Optional[List[int]] = None,\n) -> None\n

Run VerifySTPRootPriority validation.

Parameters:

  • priority (Optional[int]): STP root priority to verify. Default: None
  • instances (Optional[List[int]]): List of VLAN or MST instance ID(s). By default, ALL VLAN or MST instance ID(s) will be verified. Default: None

Source code in anta/tests/stp.py
@AntaTest.anta_test\ndef test(self, priority: Optional[int] = None, instances: Optional[List[int]] = None) -> None:\n\"\"\"\n    Run VerifySTPRootPriority validation.\n\n    Args:\n        priority: STP root priority to verify.\n        instances: List of VLAN or MST instance ID(s). By default, ALL VLAN or MST instance ID(s) will be verified.\n    \"\"\"\n    if not priority:\n        self.result.is_skipped(f\"{self.__class__.name} did not run because priority was not supplied\")\n        return\n\n    command_output = self.instance_commands[0].json_output\n\n    if not (stp_instances := command_output[\"instances\"]):\n        self.result.is_failure(\"No STP instances configured\")\n        return\n\n    for instance in stp_instances:\n        if instance.startswith(\"MST\"):\n            prefix = \"MST\"\n            break\n        if instance.startswith(\"VL\"):\n            prefix = \"VL\"\n            break\n\n    check_instances = [f\"{prefix}{instance_id}\" for instance_id in instances] if instances else command_output[\"instances\"].keys()\n\n    wrong_priority_instances = [instance for instance in check_instances if get_value(command_output, f\"instances.{instance}.rootBridge.priority\") != priority]\n\n    if wrong_priority_instances:\n        self.result.is_failure(f\"The following instance(s) have the wrong STP root priority configured: {wrong_priority_instances}\")\n    else:\n        self.result.is_success()\n
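The instance-name handling is the subtle part: keys in "show spanning-tree root detail" are prefixed (for example VL10 or MST0), so the supplied instance IDs are re-prefixed before lookup. A simplified sketch with an assumed output shape (the source inspects each key, this sketch only looks at the first):

# Assumed JSON shape for "show spanning-tree root detail" (illustrative).
command_output = {
    "instances": {
        "VL10": {"rootBridge": {"priority": 8192}},
        "VL20": {"rootBridge": {"priority": 8192}},
    }
}
instances, priority = [10, 20], 8192

prefix = "MST" if next(iter(command_output["instances"])).startswith("MST") else "VL"
check_instances = [f"{prefix}{i}" for i in instances]  # -> ["VL10", "VL20"]
wrong = [i for i in check_instances
         if command_output["instances"][i]["rootBridge"]["priority"] != priority]
assert not wrong  # test reports success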
"},{"location":"api/tests.system/","title":"System","text":""},{"location":"api/tests.system/#anta-catalog-for-system-tests","title":"ANTA catalog for system tests","text":"

Test functions related to system-level features and protocols

"},{"location":"api/tests.system/#anta.tests.system.VerifyAgentLogs","title":"VerifyAgentLogs","text":"

Bases: AntaTest

Verifies there is no agent crash reported on the device.

Source code in anta/tests/system.py
class VerifyAgentLogs(AntaTest):\n\"\"\"\n    Verifies there is no agent crash reported on the device.\n    \"\"\"\n\n    name = \"VerifyAgentLogs\"\n    description = \"Verifies there is no agent crash reported on the device.\"\n    categories = [\"system\"]\n    commands = [AntaCommand(command=\"show agent logs crash\", ofmt=\"text\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyAgentLogs validation\n        \"\"\"\n        command_output = self.instance_commands[0].text_output\n\n        if len(command_output) == 0:\n            self.result.is_success()\n        else:\n            pattern = re.compile(r\"^===> (.*?) <===$\", re.MULTILINE)\n            agents = \"\\n * \".join(pattern.findall(command_output))\n            self.result.is_failure(f\"device reported some agent logs:\\n * {agents}\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyAgentLogs.test","title":"test","text":"
test() -> None\n

Run VerifyAgentLogs validation

Source code in anta/tests/system.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyAgentLogs validation\n    \"\"\"\n    command_output = self.instance_commands[0].text_output\n\n    if len(command_output) == 0:\n        self.result.is_success()\n    else:\n        pattern = re.compile(r\"^===> (.*?) <===$\", re.MULTILINE)\n        agents = \"\\n * \".join(pattern.findall(command_output))\n        self.result.is_failure(f\"device reported some agent logs:\\n * {agents}\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyCPUUtilization","title":"VerifyCPUUtilization","text":"

Bases: AntaTest

Verifies the CPU utilization is less than 75%.

Source code in anta/tests/system.py
class VerifyCPUUtilization(AntaTest):\n\"\"\"\n    Verifies the CPU utilization is less than 75%.\n    \"\"\"\n\n    name = \"VerifyCPUUtilization\"\n    description = \"Verifies the CPU utilization is less than 75%.\"\n    categories = [\"system\"]\n    commands = [AntaCommand(command=\"show processes top once\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyCPUUtilization validation\n        \"\"\"\n        command_output = self.instance_commands[0].json_output\n        command_output_data = command_output[\"cpuInfo\"][\"%Cpu(s)\"][\"idle\"]\n\n        if command_output_data > 25:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"device reported a high CPU utilization ({100 - command_output_data}%)\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyCPUUtilization.test","title":"test","text":"
test() -> None\n

Run VerifyCPUUtilization validation

Source code in anta/tests/system.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyCPUUtilization validation\n    \"\"\"\n    command_output = self.instance_commands[0].json_output\n    command_output_data = command_output[\"cpuInfo\"][\"%Cpu(s)\"][\"idle\"]\n\n    if command_output_data > 25:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"device reported a high CPU utilization ({100 - command_output_data}%)\")\n
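Note that the pass condition is expressed on the idle counter: an idle value above 25 is equivalent to a utilization below 75%. A worked example with an assumed output shape:

# Assumed shape of "show processes top once" JSON output (illustrative).
command_output = {"cpuInfo": {"%Cpu(s)": {"idle": 80.0}}}

idle = command_output["cpuInfo"]["%Cpu(s)"]["idle"]
utilization = 100 - idle  # 20.0% -> passes, since idle (80.0) > 25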
"},{"location":"api/tests.system/#anta.tests.system.VerifyCoredump","title":"VerifyCoredump","text":"

Bases: AntaTest

Verifies there is no core file.

Source code in anta/tests/system.py
class VerifyCoredump(AntaTest):\n\"\"\"\n    Verifies there is no core file.\n    \"\"\"\n\n    name = \"VerifyCoredump\"\n    description = \"Verifies there is no core file.\"\n    categories = [\"system\"]\n    commands = [AntaCommand(command=\"bash timeout 10 ls /var/core\", ofmt=\"text\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyCoredump validation\n        \"\"\"\n        command_output = self.instance_commands[0].text_output\n\n        if len(command_output) == 0:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Core-dump(s) have been found: {command_output}\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyCoredump.test","title":"test","text":"
test() -> None\n

Run VerifyCoredump validation

Source code in anta/tests/system.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyCoredump validation\n    \"\"\"\n    command_output = self.instance_commands[0].text_output\n\n    if len(command_output) == 0:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"Core-dump(s) have been found: {command_output}\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyFileSystemUtilization","title":"VerifyFileSystemUtilization","text":"

Bases: AntaTest

Verifies each partition on the disk is used less than 75%.

Source code in anta/tests/system.py
class VerifyFileSystemUtilization(AntaTest):\n\"\"\"\n    Verifies each partition on the disk is used less than 75%.\n    \"\"\"\n\n    name = \"VerifyFileSystemUtilization\"\n    description = \"Verifies each partition on the disk is used less than 75%.\"\n    categories = [\"system\"]\n    commands = [AntaCommand(command=\"bash timeout 10 df -h\", ofmt=\"text\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyFileSystemUtilization validation\n        \"\"\"\n        command_output = self.instance_commands[0].text_output\n\n        self.result.is_success()\n\n        for line in command_output.split(\"\\n\")[1:]:\n            if \"loop\" not in line and len(line) > 0 and (percentage := int(line.split()[4].replace(\"%\", \"\"))) > 75:\n                self.result.is_failure(f\"mount point {line} is higher than 75% (reported {percentage})\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyFileSystemUtilization.test","title":"test","text":"
test() -> None\n

Run VerifyFileSystemUtilization validation

Source code in anta/tests/system.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyFileSystemUtilization validation\n    \"\"\"\n    command_output = self.instance_commands[0].text_output\n\n    self.result.is_success()\n\n    for line in command_output.split(\"\\n\")[1:]:\n        if \"loop\" not in line and len(line) > 0 and (percentage := int(line.split()[4].replace(\"%\", \"\"))) > 75:\n            self.result.is_failure(f\"mount point {line} is higher than 75% (reported {percentage})\")\n
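The test parses the fifth column (Use%) of each df -h line, skipping loop devices. A standalone sketch with assumed sample output:

# Assumed "df -h" text output (illustrative).
sample_output = (
    "Filesystem      Size  Used Avail Use% Mounted on\n"
    "/dev/sda2       3.6G  2.1G  1.4G  61% /mnt/flash\n"
    "/dev/loop0      500M  500M     0 100% /rootfs-i386\n"
)

for line in sample_output.split("\n")[1:]:
    # "61%" -> 61; loop devices and empty lines are ignored
    if "loop" not in line and len(line) > 0 and (percentage := int(line.split()[4].replace("%", ""))) > 75:
        print(f"mount point {line} is higher than 75% (reported {percentage})")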
"},{"location":"api/tests.system/#anta.tests.system.VerifyMemoryUtilization","title":"VerifyMemoryUtilization","text":"

Bases: AntaTest

Verifies the Memory utilization is less than 75%.

Source code in anta/tests/system.py
class VerifyMemoryUtilization(AntaTest):\n\"\"\"\n    Verifies the Memory utilization is less than 75%.\n    \"\"\"\n\n    name = \"VerifyMemoryUtilization\"\n    description = \"Verifies the Memory utilization is less than 75%.\"\n    categories = [\"system\"]\n    commands = [AntaCommand(command=\"show version\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyMemoryUtilization validation\n        \"\"\"\n        command_output = self.instance_commands[0].json_output\n\n        memory_usage = command_output[\"memFree\"] / command_output[\"memTotal\"]\n        if memory_usage > 0.25:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"device report a high memory usage: {(1 - memory_usage)*100:.2f}%\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyMemoryUtilization.test","title":"test","text":"
test() -> None\n

Run VerifyMemoryUtilization validation

Source code in anta/tests/system.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyMemoryUtilization validation\n    \"\"\"\n    command_output = self.instance_commands[0].json_output\n\n    memory_usage = command_output[\"memFree\"] / command_output[\"memTotal\"]\n    if memory_usage > 0.25:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"device report a high memory usage: {(1 - memory_usage)*100:.2f}%\")\n
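Here memory_usage is in fact the free-memory ratio taken from show version; the test passes when more than 25% of memory is free. A worked example with assumed values:

# Assumed "show version" memory fields (illustrative values, in kB).
command_output = {"memTotal": 8099732, "memFree": 4989568}

free_ratio = command_output["memFree"] / command_output["memTotal"]  # ~0.616
used_pct = (1 - free_ratio) * 100                                    # ~38.4%
assert free_ratio > 0.25  # test reports success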
"},{"location":"api/tests.system/#anta.tests.system.VerifyNTP","title":"VerifyNTP","text":"

Bases: AntaTest

Verifies NTP is synchronised.

Source code in anta/tests/system.py
class VerifyNTP(AntaTest):\n\"\"\"\n    Verifies NTP is synchronised.\n    \"\"\"\n\n    name = \"VerifyNTP\"\n    description = \"Verifies NTP is synchronised.\"\n    categories = [\"system\"]\n    commands = [AntaCommand(command=\"show ntp status\", ofmt=\"text\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyNTP validation\n        \"\"\"\n        command_output = self.instance_commands[0].text_output\n\n        if command_output.split(\"\\n\")[0].split(\" \")[0] == \"synchronised\":\n            self.result.is_success()\n        else:\n            data = command_output.split(\"\\n\")[0]\n            self.result.is_failure(f\"not sync with NTP server ({data})\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyNTP.test","title":"test","text":"
test() -> None\n

Run VerifyNTP validation

Source code in anta/tests/system.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyNTP validation\n    \"\"\"\n    command_output = self.instance_commands[0].text_output\n\n    if command_output.split(\"\\n\")[0].split(\" \")[0] == \"synchronised\":\n        self.result.is_success()\n    else:\n        data = command_output.split(\"\\n\")[0]\n        self.result.is_failure(f\"not sync with NTP server ({data})\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyReloadCause","title":"VerifyReloadCause","text":"

Bases: AntaTest

Verifies the last reload of the device was requested by a user.

The test considers the following messages as normal and returns success; any other message is a failure:
  • Reload requested by the user.
  • Reload requested after FPGA upgrade

Source code in anta/tests/system.py
class VerifyReloadCause(AntaTest):\n\"\"\"\n    Verifies the last reload of the device was requested by a user.\n\n    Test considers the following messages as normal and will return success. Failure is for other messages\n    * Reload requested by the user.\n    * Reload requested after FPGA upgrade\n    \"\"\"\n\n    name = \"VerifyReloadCause\"\n    description = \"Verifies the last reload of the device was requested by a user.\"\n    categories = [\"system\"]\n    commands = [AntaCommand(command=\"show reload cause\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifyReloadCause validation\n        \"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        if \"resetCauses\" not in command_output.keys():\n            self.result.is_error(\"no reload cause available\")\n            return\n\n        if len(command_output[\"resetCauses\"]) == 0:\n            # No reload causes\n            self.result.is_success()\n            return\n\n        reset_causes = command_output[\"resetCauses\"]\n        command_output_data = reset_causes[0].get(\"description\")\n        if command_output_data in [\n            \"Reload requested by the user.\",\n            \"Reload requested after FPGA upgrade\",\n        ]:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Reload cause is {command_output_data}\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyReloadCause.test","title":"test","text":"
test() -> None\n

Run VerifyReloadCause validation

Source code in anta/tests/system.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifyReloadCause validation\n    \"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    if \"resetCauses\" not in command_output.keys():\n        self.result.is_error(\"no reload cause available\")\n        return\n\n    if len(command_output[\"resetCauses\"]) == 0:\n        # No reload causes\n        self.result.is_success()\n        return\n\n    reset_causes = command_output[\"resetCauses\"]\n    command_output_data = reset_causes[0].get(\"description\")\n    if command_output_data in [\n        \"Reload requested by the user.\",\n        \"Reload requested after FPGA upgrade\",\n    ]:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"Reload cause is {command_output_data}\")\n
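Only the most recent reset cause (the first entry in resetCauses) is evaluated against the allow-list. A sketch with an assumed output shape:

# Assumed shape of "show reload cause" JSON output (illustrative).
command_output = {
    "resetCauses": [
        {"description": "Reload requested by the user.", "timestamp": 1683000000.0},
    ]
}

latest = command_output["resetCauses"][0].get("description")
allowed = ["Reload requested by the user.", "Reload requested after FPGA upgrade"]
assert latest in allowed  # test reports success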
"},{"location":"api/tests.system/#anta.tests.system.VerifySyslog","title":"VerifySyslog","text":"

Bases: AntaTest

Verifies the device had no syslog message with a severity of warning (or a more severe message) during the last 7 days.

Source code in anta/tests/system.py
class VerifySyslog(AntaTest):\n\"\"\"\n    Verifies the device had no syslog message with a severity of warning (or a more severe message) during the last 7 days.\n    \"\"\"\n\n    name = \"VerifySyslog\"\n    description = \"Verifies the device had no syslog message with a severity of warning (or a more severe message) during the last 7 days.\"\n    categories = [\"system\"]\n    commands = [AntaCommand(command=\"show logging last 7 days threshold warnings\", ofmt=\"text\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"\n        Run VerifySyslog validation\n        \"\"\"\n        command_output = self.instance_commands[0].text_output\n\n        if len(command_output) == 0:\n            self.result.is_success()\n        else:\n            self.result.is_failure(\"Device has some log messages with a severity WARNING or higher\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifySyslog.test","title":"test","text":"
test() -> None\n

Run VerifySyslog validation

Source code in anta/tests/system.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"\n    Run VerifySyslog validation\n    \"\"\"\n    command_output = self.instance_commands[0].text_output\n\n    if len(command_output) == 0:\n        self.result.is_success()\n    else:\n        self.result.is_failure(\"Device has some log messages with a severity WARNING or higher\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyUptime","title":"VerifyUptime","text":"

Bases: AntaTest

Verifies the device uptime is higher than a value.

Source code in anta/tests/system.py
class VerifyUptime(AntaTest):\n\"\"\"\n    Verifies the device uptime is higher than a value.\n    \"\"\"\n\n    name = \"VerifyUptime\"\n    description = \"Verifies the device uptime is higher than a value.\"\n    categories = [\"system\"]\n    commands = [AntaCommand(command=\"show uptime\")]\n\n    @AntaTest.anta_test\n    def test(self, minimum: Optional[int] = None) -> None:\n\"\"\"\n        Run VerifyUptime validation\n\n        Args:\n            minimum: Minimum uptime in seconds.\n        \"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        if not (isinstance(minimum, (int, float))) or minimum < 0:\n            self.result.is_skipped(\"VerifyUptime was not run as incorrect minimum uptime was given\")\n            return\n\n        if command_output[\"upTime\"] > minimum:\n            self.result.is_success()\n        else:\n            self.result.is_failure(f\"Uptime is {command_output['upTime']}\")\n
"},{"location":"api/tests.system/#anta.tests.system.VerifyUptime.test","title":"test","text":"
test(minimum: Optional[int] = None) -> None\n

Run VerifyUptime validation

Parameters:

  • minimum (Optional[int]): Minimum uptime in seconds. Default: None

Source code in anta/tests/system.py
@AntaTest.anta_test\ndef test(self, minimum: Optional[int] = None) -> None:\n\"\"\"\n    Run VerifyUptime validation\n\n    Args:\n        minimum: Minimum uptime in seconds.\n    \"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    if not (isinstance(minimum, (int, float))) or minimum < 0:\n        self.result.is_skipped(\"VerifyUptime was not run as incorrect minimum uptime was given\")\n        return\n\n    if command_output[\"upTime\"] > minimum:\n        self.result.is_success()\n    else:\n        self.result.is_failure(f\"Uptime is {command_output['upTime']}\")\n
"},{"location":"api/tests.vxlan/","title":"VxLAN","text":""},{"location":"api/tests.vxlan/#anta-catalog-for-vxlan-tests","title":"ANTA catalog for VxLAN tests","text":"

Test functions related to VXLAN

"},{"location":"api/tests.vxlan/#anta.tests.vxlan.VerifyVxlan","title":"VerifyVxlan","text":"

Bases: AntaTest

Verifies that the Vxlan1 interface is configured and is up/up

Source code in anta/tests/vxlan.py
class VerifyVxlan(AntaTest):\n\"\"\"\n    Verifies if Vxlan1 interface is configured, and is up/up\n    \"\"\"\n\n    name = \"VerifyVxlan\"\n    description = \"Verifies Vxlan1 status\"\n    categories = [\"vxlan\"]\n    commands = [AntaCommand(command=\"show interfaces description\", ofmt=\"json\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyVxlan validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        if \"Vxlan1\" not in command_output[\"interfaceDescriptions\"]:\n            self.result.is_skipped(\"Vxlan1 interface is not configured\")\n        elif (\n            command_output[\"interfaceDescriptions\"][\"Vxlan1\"][\"lineProtocolStatus\"] == \"up\"\n            and command_output[\"interfaceDescriptions\"][\"Vxlan1\"][\"interfaceStatus\"] == \"up\"\n        ):\n            self.result.is_success()\n        else:\n            self.result.is_failure(\n                f\"Vxlan1 interface is {command_output['interfaceDescriptions']['Vxlan1']['lineProtocolStatus']}\"\n                f\"/{command_output['interfaceDescriptions']['Vxlan1']['interfaceStatus']}\"\n            )\n
"},{"location":"api/tests.vxlan/#anta.tests.vxlan.VerifyVxlan.test","title":"test","text":"
test() -> None\n

Run VerifyVxlan validation

Source code in anta/tests/vxlan.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyVxlan validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    if \"Vxlan1\" not in command_output[\"interfaceDescriptions\"]:\n        self.result.is_skipped(\"Vxlan1 interface is not configured\")\n    elif (\n        command_output[\"interfaceDescriptions\"][\"Vxlan1\"][\"lineProtocolStatus\"] == \"up\"\n        and command_output[\"interfaceDescriptions\"][\"Vxlan1\"][\"interfaceStatus\"] == \"up\"\n    ):\n        self.result.is_success()\n    else:\n        self.result.is_failure(\n            f\"Vxlan1 interface is {command_output['interfaceDescriptions']['Vxlan1']['lineProtocolStatus']}\"\n            f\"/{command_output['interfaceDescriptions']['Vxlan1']['interfaceStatus']}\"\n        )\n
"},{"location":"api/tests.vxlan/#anta.tests.vxlan.VerifyVxlanConfigSanity","title":"VerifyVxlanConfigSanity","text":"

Bases: AntaTest

Verifies that there are no VXLAN config-sanity issues flagged

Source code in anta/tests/vxlan.py
class VerifyVxlanConfigSanity(AntaTest):\n\"\"\"\n    Verifies that there are no VXLAN config-sanity issues flagged\n    \"\"\"\n\n    name = \"VerifyVxlanConfigSanity\"\n    description = \"Verifies VXLAN config-sanity\"\n    categories = [\"vxlan\"]\n    commands = [AntaCommand(command=\"show vxlan config-sanity\", ofmt=\"json\")]\n\n    @AntaTest.anta_test\n    def test(self) -> None:\n\"\"\"Run VerifyVxlanConfigSanity validation\"\"\"\n\n        command_output = self.instance_commands[0].json_output\n\n        if \"categories\" not in command_output or len(command_output[\"categories\"]) == 0:\n            self.result.is_skipped(\"VXLAN is not configured on this device\")\n            return\n\n        failed_categories = {\n            category: content\n            for category, content in command_output[\"categories\"].items()\n            if category in [\"localVtep\", \"mlag\", \"pd\"] and content[\"allCheckPass\"] is not True\n        }\n\n        if len(failed_categories) > 0:\n            self.result.is_failure(f\"Vxlan config sanity check is not passing: {failed_categories}\")\n        else:\n            self.result.is_success()\n
"},{"location":"api/tests.vxlan/#anta.tests.vxlan.VerifyVxlanConfigSanity.test","title":"test","text":"
test() -> None\n

Run VerifyVxlanConfigSanity validation

Source code in anta/tests/vxlan.py
@AntaTest.anta_test\ndef test(self) -> None:\n\"\"\"Run VerifyVxlanConfigSanity validation\"\"\"\n\n    command_output = self.instance_commands[0].json_output\n\n    if \"categories\" not in command_output or len(command_output[\"categories\"]) == 0:\n        self.result.is_skipped(\"VXLAN is not configured on this device\")\n        return\n\n    failed_categories = {\n        category: content\n        for category, content in command_output[\"categories\"].items()\n        if category in [\"localVtep\", \"mlag\", \"pd\"] and content[\"allCheckPass\"] is not True\n    }\n\n    if len(failed_categories) > 0:\n        self.result.is_failure(f\"Vxlan config sanity check is not passing: {failed_categories}\")\n    else:\n        self.result.is_success()\n
"},{"location":"cli/debug/","title":"Helpers","text":""},{"location":"cli/debug/#anta-debug-commands","title":"ANTA debug commands","text":"

The ANTA CLI includes a set of debugging tools, making it easier to build and test ANTA content. This functionality is accessed via the debug subcommand and offers the following options:

  • Executing a command on a device from your inventory and retrieving the result.
  • Running a templated command on a device from your inventory and retrieving the result.

These tools are especially helpful when building tests, as they give direct visibility into the output received from the eAPI. They also make it easy to extract command output for use in unit tests, as described in our contribution guide.

Warning

The debug tools require a device from your inventory. Thus, you MUST use a valid ANTA Inventory.
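
For example, pointing the CLI at an inventory file explicitly before invoking a debug command (inventory file name assumed; credentials assumed to be set as environment variables):

anta -i inventory.yml debug run-cmd --command \"show version\" --device DC1-SPINE1\n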

"},{"location":"cli/debug/#executing-an-eos-command","title":"Executing an EOS command","text":"

You can use the run-cmd entrypoint to run a single command on a device; it supports the following options:

"},{"location":"cli/debug/#command-overview","title":"Command overview","text":"
$ anta debug run-cmd --help\nUsage: anta debug run-cmd [OPTIONS]\n\nRun arbitrary command to an ANTA device\n\nOptions:\n  -c, --command TEXT        Command to run  [required]\n--ofmt [json|text]        EOS eAPI format to use. can be text or json\n  -v, --version [1|latest]  EOS eAPI version\n  -r, --revision INTEGER    eAPI command revision\n  -d, --device TEXT         Device from inventory to use  [required]\n--help                    Show this message and exit.\n
"},{"location":"cli/debug/#example","title":"Example","text":"

This example illustrates how to run the show interfaces description command with a JSON format (default):

anta debug run-cmd --command \"show interfaces description\" --device DC1-SPINE1\nRun command show interfaces description on DC1-SPINE1\n{\n'interfaceDescriptions': {\n'Ethernet1': {'lineProtocolStatus': 'up', 'description': 'P2P_LINK_TO_DC1-LEAF1A_Ethernet1', 'interfaceStatus': 'up'},\n        'Ethernet2': {'lineProtocolStatus': 'up', 'description': 'P2P_LINK_TO_DC1-LEAF1B_Ethernet1', 'interfaceStatus': 'up'},\n        'Ethernet3': {'lineProtocolStatus': 'up', 'description': 'P2P_LINK_TO_DC1-BL1_Ethernet1', 'interfaceStatus': 'up'},\n        'Ethernet4': {'lineProtocolStatus': 'up', 'description': 'P2P_LINK_TO_DC1-BL2_Ethernet1', 'interfaceStatus': 'up'},\n        'Loopback0': {'lineProtocolStatus': 'up', 'description': 'EVPN_Overlay_Peering', 'interfaceStatus': 'up'},\n        'Management0': {'lineProtocolStatus': 'up', 'description': 'oob_management', 'interfaceStatus': 'up'}\n}\n}\n
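
The same command can also be collected as raw text via the --ofmt option shown in the help above (output omitted here):

anta debug run-cmd --command \"show interfaces description\" --ofmt text --device DC1-SPINE1\n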
"},{"location":"cli/debug/#executing-an-eos-command-using-templates","title":"Executing an EOS command using templates","text":"

The run-template entrypoint allows the user to provide an f-string templated command. It is followed by a list of arguments (key-value pairs) that build a dictionary used as template parameters.

"},{"location":"cli/debug/#command-overview_1","title":"Command overview","text":"
$ anta debug run-template --help\nUsage: anta debug run-template [OPTIONS] PARAMS...\n\n  Run arbitrary templated command to an ANTA device.\n\n  Takes a list of arguments (keys followed by a value) to build a dictionary\n  used as template parameters. Example:\n\n  anta debug run-template -d leaf1a -t 'show vlan {vlan_id}' vlan_id 1\n\nOptions:\n  -t, --template TEXT       Command template to run. E.g. 'show vlan\n                            {vlan_id}'  [required]\n--ofmt [json|text]        EOS eAPI format to use. can be text or json\n  -v, --version [1|latest]  EOS eAPI version\n  -r, --revision INTEGER    eAPI command revision\n  -d, --device TEXT         Device from inventory to use  [required]\n--help                    Show this message and exit.\n
"},{"location":"cli/debug/#example_1","title":"Example","text":"

This example uses the show vlan {vlan_id} command in a JSON format:

anta debug run-template --template \"show vlan {vlan_id}\" vlan_id 10 --device DC1-LEAF1A\nRun templated command 'show vlan {vlan_id}' with {'vlan_id': '10'} on DC1-LEAF1A\n{\n'vlans': {\n'10': {\n'name': 'VRFPROD_VLAN10',\n            'dynamic': False,\n            'status': 'active',\n            'interfaces': {\n'Cpu': {'privatePromoted': False, 'blocked': None},\n                'Port-Channel11': {'privatePromoted': False, 'blocked': None},\n                'Vxlan1': {'privatePromoted': False, 'blocked': None}\n}\n}\n},\n    'sourceDetail': ''\n}\n
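
The --ofmt option from the help above applies to templated commands as well; for example, to render the same command as raw text (output omitted here):

anta debug run-template --template \"show vlan {vlan_id}\" vlan_id 10 --device DC1-LEAF1A --ofmt text\n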

Warning

If multiple arguments of the same key are provided, only the last argument value will be kept in the template parameters.

"},{"location":"cli/debug/#example-of-multiple-arguments","title":"Example of multiple arguments","text":"
anta --log DEBUG debug run-template --template \"ping {dst} source {src}\" dst \"8.8.8.8\" src Loopback0 --device DC1-SPINE1 \u00a0 \u00a0\n> {'dst': '8.8.8.8', 'src': 'Loopback0'}\n\nanta --log DEBUG debug run-template --template \"ping {dst} source {src}\" dst \"8.8.8.8\" src Loopback0 dst \"1.1.1.1\" src Loopback1 --device DC1-SPINE1 \u00a0 \u00a0 \u00a0 \u00a0 \u00a0 \n> {'dst': '1.1.1.1', 'src': 'Loopback1'}\n# Notice how `src` and `dst` keep only the latest value\n
"},{"location":"cli/exec/","title":"Execute commands","text":""},{"location":"cli/exec/#executing-commands-on-devices","title":"Executing Commands on Devices","text":"

The ANTA CLI provides a set of entrypoints to facilitate remote command execution on EOS devices.

"},{"location":"cli/exec/#exec-command-overview","title":"EXEC Command overview","text":"
anta exec --help\nUsage: anta exec [OPTIONS] COMMAND [ARGS]...\n\n  Execute commands to inventory devices\n\nOptions:\n  --help  Show this message and exit.\n\nCommands:\n  clear-counters        Clear counter statistics on EOS devices\n  collect-tech-support  Collect scheduled tech-support from EOS devices\n  snapshot              Collect commands output from devices in inventory\n
"},{"location":"cli/exec/#clear-interfaces-counters","title":"Clear interfaces counters","text":"

This command clears interface counters on EOS devices specified in your inventory.

"},{"location":"cli/exec/#command-overview","title":"Command overview","text":"
anta exec clear-counters --help\nUsage: anta exec clear-counters [OPTIONS]\n\nClear counter statistics on EOS devices\n\nOptions:\n  -t, --tags TEXT  List of tags using comma as separator: tag1,tag2,tag3\n  --help           Show this message and exit.\n
"},{"location":"cli/exec/#example","title":"Example","text":"
anta exec clear-counters --tags SPINE\n[20:19:13] INFO     Connecting to devices...                                                                                                                         utils.py:43\n           INFO     Clearing counters on remote devices...                                                                                                           utils.py:46\n           INFO     Cleared counters on DC1-SPINE2 (cEOSLab)                                                                                                         utils.py:41\n           INFO     Cleared counters on DC2-SPINE1 (cEOSLab)                                                                                                         utils.py:41\n           INFO     Cleared counters on DC1-SPINE1 (cEOSLab)                                                                                                         utils.py:41\n           INFO     Cleared counters on DC2-SPINE2 (cEOSLab)\n
"},{"location":"cli/exec/#collect-a-set-of-commands","title":"Collect a set of commands","text":"

This command collects the output of all the commands specified in a commands-list file; each command output can be collected in either JSON or text format.

"},{"location":"cli/exec/#command-overview_1","title":"Command overview","text":"
anta exec snapshot --help\nUsage: anta exec snapshot [OPTIONS]\n\nCollect commands output from devices in inventory\n\nOptions:\n  -t, --tags TEXT           List of tags using comma as separator:\n                            tag1,tag2,tag3\n  -c, --commands-list FILE  File with list of commands to collect  [env var:\n                            ANTA_EXEC_SNAPSHOT_COMMANDS_LIST; required]\n-o, --output DIRECTORY    Directory to save commands output. Will have a\n                            suffix with the format _YEAR-MONTH-DAY_HOUR-\n                            MINUTES-SECONDS'  [env var:\n                            ANTA_EXEC_SNAPSHOT_OUTPUT; default: anta_snapshot]\n--help                    Show this message and exit.\n

The commands-list file should follow this structure:

---\njson_format:\n- show version\ntext_format:\n- show bfd peers\n
"},{"location":"cli/exec/#example_1","title":"Example","text":"
anta exec snapshot --tags SPINE --commands-list ./commands.yaml --output ./\n[20:25:15] INFO     Connecting to devices...                                                                                                                         utils.py:78\n           INFO     Collecting commands from remote devices                                                                                                          utils.py:81\n           INFO     Collected command 'show version' from device DC2-SPINE1 (cEOSLab)                                                                                utils.py:76\n           INFO     Collected command 'show version' from device DC2-SPINE2 (cEOSLab)                                                                                utils.py:76\n           INFO     Collected command 'show version' from device DC1-SPINE1 (cEOSLab)                                                                                utils.py:76\n           INFO     Collected command 'show version' from device DC1-SPINE2 (cEOSLab)                                                                                utils.py:76\n[20:25:16] INFO     Collected command 'show bfd peers' from device DC2-SPINE2 (cEOSLab)                                                                              utils.py:76\n           INFO     Collected command 'show bfd peers' from device DC2-SPINE1 (cEOSLab)                                                                              utils.py:76\n           INFO     Collected command 'show bfd peers' from device DC1-SPINE1 (cEOSLab)                                                                              utils.py:76\n           INFO     Collected command 'show bfd peers' from device DC1-SPINE2 (cEOSLab)\n

The results of the executed commands will be stored in the output directory specified during command execution:

tree _2023-07-14_20_25_15\n_2023-07-14_20_25_15\n\u251c\u2500\u2500 DC1-SPINE1\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 json\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 show version.json\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 text\n\u2502\u00a0\u00a0     \u2514\u2500\u2500 show bfd peers.log\n\u251c\u2500\u2500 DC1-SPINE2\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 json\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 show version.json\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 text\n\u2502\u00a0\u00a0     \u2514\u2500\u2500 show bfd peers.log\n\u251c\u2500\u2500 DC2-SPINE1\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 json\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 show version.json\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 text\n\u2502\u00a0\u00a0     \u2514\u2500\u2500 show bfd peers.log\n\u2514\u2500\u2500 DC2-SPINE2\n    \u251c\u2500\u2500 json\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 show version.json\n    \u2514\u2500\u2500 text\n        \u2514\u2500\u2500 show bfd peers.log\n\n12 directories, 8 files\n
"},{"location":"cli/exec/#get-scheduled-tech-support","title":"Get Scheduled tech-support","text":"

EOS offers a feature that automatically creates a tech-support archive every hour by default. These archives are stored under /mnt/flash/schedule/tech-support.

leaf1#show schedule summary\nMaximum concurrent jobs  1\nPrepend host name to logfile: Yes\nName                 At Time       Last        Interval       Timeout        Max        Max     Logfile Location                  Status\n                                   Time         (mins)        (mins)         Log        Logs\n                                                                            Files       Size\n----------------- ------------- ----------- -------------- ------------- ----------- ---------- --------------------------------- ------\ntech-support           now         08:37          60            30           100         -      flash:schedule/tech-support/      Success\n\n\nleaf1#bash ls /mnt/flash/schedule/tech-support\nleaf1_tech-support_2023-03-09.1337.log.gz  leaf1_tech-support_2023-03-10.0837.log.gz  leaf1_tech-support_2023-03-11.0337.log.gz\n

For Network Readiness for Use (NRFU) tests and to keep a comprehensive report of the system state before going live, ANTA provides a command-line interface that efficiently retrieves these files.

"},{"location":"cli/exec/#command-overview_2","title":"Command overview","text":"
anta exec collect-tech-support --help\nUsage: anta exec collect-tech-support [OPTIONS]\n\nCollect scheduled tech-support from EOS devices\n\nOptions:\n  -o, --output PATH              Path for tests catalog  [default: ./tech-\n                                 support]\n--latest INTEGER               Number of scheduled show-tech to retrieve\n  --configure / --not-configure  Ensure device has 'aaa authorization exec\n                                 default local' configured (required for SCP)\n[default: not-configure]\n-t, --tags TEXT                List of tags using comma as separator:\n                                 tag1,tag2,tag3\n  --help                         Show this message and exit.\n

When executed, this command fetches tech-support files and downloads them locally into a device-specific subfolder within the designated folder. You can specify the output folder with the --output option.

ANTA uses SCP to download files from devices; ensure that all SSH Host Keys are trusted before running the command. Use the anta --insecure option if they are not.

The configuration aaa authorization exec default local must be enabled on devices for SCP to function. ANTA will not automatically configure this unless --configure is specified.

The --latest option allows retrieval of a specific number of the most recent tech-support files.
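
Putting these options together, a possible invocation that fetches only the two most recent archives and lets ANTA enable the required authorization configuration (untrusted host keys assumed, as in a lab):

anta --insecure exec collect-tech-support --configure --latest 2\n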

Warning

By default, all the tech-support files present on the devices are retrieved.

"},{"location":"cli/exec/#example_2","title":"Example","text":"
anta --insecure exec collect-tech-support\n[15:27:19] INFO     Connecting to devices...\nINFO     Copying '/mnt/flash/schedule/tech-support/spine1_tech-support_2023-06-09.1315.log.gz' from device spine1 to 'tech-support/spine1' locally\nINFO     Copying '/mnt/flash/schedule/tech-support/leaf3_tech-support_2023-06-09.1315.log.gz' from device leaf3 to 'tech-support/leaf3' locally\nINFO     Copying '/mnt/flash/schedule/tech-support/leaf1_tech-support_2023-06-09.1315.log.gz' from device leaf1 to 'tech-support/leaf1' locally\nINFO     Copying '/mnt/flash/schedule/tech-support/leaf2_tech-support_2023-06-09.1315.log.gz' from device leaf2 to 'tech-support/leaf2' locally\nINFO     Copying '/mnt/flash/schedule/tech-support/spine2_tech-support_2023-06-09.1315.log.gz' from device spine2 to 'tech-support/spine2' locally\nINFO     Copying '/mnt/flash/schedule/tech-support/leaf4_tech-support_2023-06-09.1315.log.gz' from device leaf4 to 'tech-support/leaf4' locally\nINFO     Collected 1 scheduled tech-support from leaf2\nINFO     Collected 1 scheduled tech-support from spine2\nINFO     Collected 1 scheduled tech-support from leaf3\nINFO     Collected 1 scheduled tech-support from spine1\nINFO     Collected 1 scheduled tech-support from leaf1\nINFO     Collected 1 scheduled tech-support from leaf4\n

The output folder structure is as follows:

tree tech-support/\ntech-support/\n\u251c\u2500\u2500 leaf1\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 leaf1_tech-support_2023-06-09.1315.log.gz\n\u251c\u2500\u2500 leaf2\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 leaf2_tech-support_2023-06-09.1315.log.gz\n\u251c\u2500\u2500 leaf3\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 leaf3_tech-support_2023-06-09.1315.log.gz\n\u251c\u2500\u2500 leaf4\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 leaf4_tech-support_2023-06-09.1315.log.gz\n\u251c\u2500\u2500 spine1\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 spine1_tech-support_2023-06-09.1315.log.gz\n\u2514\u2500\u2500 spine2\n    \u2514\u2500\u2500 spine2_tech-support_2023-06-09.1315.log.gz\n\n6 directories, 6 files\n

Each device has its own subdirectory containing the collected tech-support files.

"},{"location":"cli/get-inventory-information/","title":"Get Inventory Information","text":""},{"location":"cli/get-inventory-information/#retrieving-inventory-information","title":"Retrieving Inventory Information","text":"

The ANTA CLI offers multiple entrypoints to access data from your local inventory.

"},{"location":"cli/get-inventory-information/#inventory-used-of-examples","title":"Inventory used of examples","text":"

Let\u2019s consider the following inventory:

---\nanta_inventory:\nhosts:\n- host: 172.20.20.101\nname: DC1-SPINE1\ntags: [\"SPINE\", \"DC1\"]\n\n- host: 172.20.20.102\nname: DC1-SPINE2\ntags: [\"SPINE\", \"DC1\"]\n\n- host: 172.20.20.111\nname: DC1-LEAF1A\ntags: [\"LEAF\", \"DC1\"]\n\n- host: 172.20.20.112\nname: DC1-LEAF1B\ntags: [\"LEAF\", \"DC1\"]\n\n- host: 172.20.20.121\nname: DC1-BL1\ntags: [\"BL\", \"DC1\"]\n\n- host: 172.20.20.122\nname: DC1-BL2\ntags: [\"BL\", \"DC1\"]\n\n- host: 172.20.20.201\nname: DC2-SPINE1\ntags: [\"SPINE\", \"DC2\"]\n\n- host: 172.20.20.202\nname: DC2-SPINE2\ntags: [\"SPINE\", \"DC2\"]\n\n- host: 172.20.20.211\nname: DC2-LEAF1A\ntags: [\"LEAF\", \"DC2\"]\n\n- host: 172.20.20.212\nname: DC2-LEAF1B\ntags: [\"LEAF\", \"DC2\"]\n\n- host: 172.20.20.221\nname: DC2-BL1\ntags: [\"BL\", \"DC2\"]\n\n- host: 172.20.20.222\nname: DC2-BL2\ntags: [\"BL\", \"DC2\"]\n
"},{"location":"cli/get-inventory-information/#obtaining-all-configured-tags","title":"Obtaining all configured tags","text":"

As most of ANTA\u2019s commands support tag filtering, this command is useful for enumerating every tag configured in the inventory: running anta get tags returns the full list.

"},{"location":"cli/get-inventory-information/#command-overview","title":"Command overview","text":"
anta get tags --help\nUsage: anta get tags [OPTIONS]\n\nGet list of configured tags in user inventory.\n\nOptions:\n  --help  Show this message and exit.\n
"},{"location":"cli/get-inventory-information/#example","title":"Example","text":"

To get the list of all configured tags in the inventory, run the following command:

anta get tags\nTags found:\n[\n\"BL\",\n  \"DC1\",\n  \"DC2\",\n  \"LEAF\",\n  \"SPINE\",\n  \"all\"\n]\n\n* note that tag all has been added by anta\n

Note

Even if you haven\u2019t explicitly configured the all tag in the inventory, it is automatically added. This default tag allows commands to be executed on all devices in the inventory when no tag is specified.

"},{"location":"cli/get-inventory-information/#list-devices-in-inventory","title":"List devices in inventory","text":"

This command lists all devices available in the inventory. Using the --tags option, you can filter this list to only include devices with specific tags. The --connected option allows you to display only the devices to which a connection has been established.

"},{"location":"cli/get-inventory-information/#command-overview_1","title":"Command overview","text":"
anta get inventory --help\nUsage: anta get inventory [OPTIONS]\n\nShow inventory loaded in ANTA.\n\nOptions:\n  -t, --tags TEXT                List of tags using comma as separator:\n                                 tag1,tag2,tag3\n  --connected / --not-connected  Display inventory after connection has been\n                                 created\n  --help                         Show this message and exit.\n

Tip

In its default mode, anta get inventory provides only information that doesn\u2019t rely on a device connection. If you are interested in obtaining connection-dependent details, like the hardware model, please use the --connected option.

"},{"location":"cli/get-inventory-information/#example_1","title":"Example","text":"

To retrieve a comprehensive list of all devices along with their details, execute the following command. It will provide all the data loaded into the ANTA inventory from your inventory file.

anta get inventory --tags SPINE\nCurrent inventory content is:\n{\n'DC1-SPINE1': AsyncEOSDevice(\nname='DC1-SPINE1',\n        tags=['SPINE', 'DC1', 'all'],\n        hw_model=None,\n        is_online=False,\n        established=False,\n        host='172.20.20.101',\n        eapi_port=443,\n        username='arista',\n        password='arista',\n        enable_password='arista',\n        insecure=False\n    ),\n    'DC1-SPINE2': AsyncEOSDevice(\nname='DC1-SPINE2',\n        tags=['SPINE', 'DC1', 'all'],\n        hw_model=None,\n        is_online=False,\n        established=False,\n        host='172.20.20.102',\n        eapi_port=443,\n        username='arista',\n        password='arista',\n        enable_password='arista',\n        insecure=False\n    ),\n    'DC2-SPINE1': AsyncEOSDevice(\nname='DC2-SPINE1',\n        tags=['SPINE', 'DC2', 'all'],\n        hw_model=None,\n        is_online=False,\n        established=False,\n        host='172.20.20.201',\n        eapi_port=443,\n        username='arista',\n        password='arista',\n        enable_password='arista',\n        insecure=False\n    ),\n    'DC2-SPINE2': AsyncEOSDevice(\nname='DC2-SPINE2',\n        tags=['SPINE', 'DC2', 'all'],\n        hw_model=None,\n        is_online=False,\n        established=False,\n        host='172.20.20.202',\n        eapi_port=443,\n        username='arista',\n        password='arista',\n        enable_password='arista',\n        insecure=False\n    )\n}\n
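
To display the same view with the connection-dependent fields populated (hw_model, is_online, established), re-run the command with the --connected option:

anta get inventory --tags SPINE --connected\n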
"},{"location":"cli/inv-from-cvp/","title":"Inventory from CVP","text":""},{"location":"cli/inv-from-cvp/#create-an-inventory-from-cloudvision","title":"Create an Inventory from CloudVision","text":"

In large setups, it might be beneficial to construct your inventory based on CloudVision. The from-cvp entrypoint of the get command enables the user to create an ANTA inventory from CloudVision.

"},{"location":"cli/inv-from-cvp/#command-overview","title":"Command overview","text":"
anta get from-cvp --help\nUsage: anta get from-cvp [OPTIONS]\n\nBuild ANTA inventory from Cloudvision\n\nOptions:\n  -ip, --cvp-ip TEXT              CVP IP Address  [required]\n-u, --cvp-username TEXT         CVP Username  [required]\n-p, --cvp-password TEXT         CVP Password / token  [required]\n-c, --cvp-container TEXT        Container where devices are configured\n  -d, --inventory-directory PATH  Path to save inventory file\n  --help                          Show this message and exit.\n

The output is an inventory where the name of the container is added as a tag for each host:

anta_inventory:\nhosts:\n- host: 192.168.0.13\nname: leaf2\ntags:\n- pod1\n- host: 192.168.0.15\nname: leaf4\ntags:\n- pod2\n

Warning

The current implementation only considers devices directly attached to a specific container when using the --cvp-container option.

"},{"location":"cli/inv-from-cvp/#creating-an-inventory-from-multiple-containers","title":"Creating an inventory from multiple containers","text":"

If you need to create an inventory from multiple containers, you can use a bash loop and then merge the generated files into a single inventory file:

$ for container in pod01 pod02 spines; do anta get from-cvp -ip <cvp-ip> -u cvpadmin -p cvpadmin -c $container -d test-inventory; done\n\n[12:25:35] INFO     Getting auth token from cvp.as73.inetsix.net for user tom\n[12:25:36] INFO     Creating inventory folder /home/tom/Projects/arista/network-test-automation/test-inventory\n           WARNING  Using the new api_token parameter. This will override usage of the cvaas_token parameter if both are provided. This is because api_token and cvaas_token parameters\n                    are for the same use case and api_token is more generic\n           INFO     Connected to CVP cvp.as73.inetsix.net\n\n\n[12:25:37] INFO     Getting auth token from cvp.as73.inetsix.net for user tom\n[12:25:38] WARNING  Using the new api_token parameter. This will override usage of the cvaas_token parameter if both are provided. This is because api_token and cvaas_token parameters\n                    are for the same use case and api_token is more generic\n           INFO     Connected to CVP cvp.as73.inetsix.net\n\n\n[12:25:38] INFO     Getting auth token from cvp.as73.inetsix.net for user tom\n[12:25:39] WARNING  Using the new api_token parameter. This will override usage of the cvaas_token parameter if both are provided. This is because api_token and cvaas_token parameters\n                    are for the same use case and api_token is more generic\n           INFO     Connected to CVP cvp.as73.inetsix.net\n\n           INFO     Inventory file has been created in /home/tom/Projects/arista/network-test-automation/test-inventory/inventory-spines.yml\n
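
Since each generated file has its own anta_inventory root key, simply concatenating the files with cat does not produce valid YAML. One possible merge approach uses the third-party yq tool (not part of ANTA; mikefarah yq v4 syntax assumed):

yq eval-all '. as $item ireduce ({}; . *+ $item)' test-inventory/inventory-*.yml > inventory.yml\n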
"},{"location":"cli/nrfu/","title":"NRFU","text":""},{"location":"cli/nrfu/#execute-network-readiness-for-use-nrfu-testing","title":"Execute Network Readiness For Use (NRFU) Testing","text":"

ANTA provides a set of commands for performing NRFU tests on devices. These commands are under the anta nrfu namespace and offer multiple output format options:

  • Text view
  • Table view
  • JSON view
  • Custom template view
"},{"location":"cli/nrfu/#nrfu-command-overview","title":"NRFU Command overview","text":"
anta nrfu --help\nUsage: anta nrfu [OPTIONS] COMMAND [ARGS]...\n\n  Run NRFU against inventory devices\n\nOptions:\n  -c, --catalog FILE  Path to the tests catalog YAML file  [env var:\n                      ANTA_NRFU_CATALOG; required]\n--help              Show this message and exit.\n\nCommands:\n  json        ANTA command to check network state with JSON result\n  table       ANTA command to check network states with table result\n  text        ANTA command to check network states with text result\n  tpl-report  ANTA command to check network state with templated report\n

All commands under the anta nrfu namespace require a YAML catalog file, specified with the --catalog option.

"},{"location":"cli/nrfu/#performing-nrfu-with-text-rendering","title":"Performing NRFU with text rendering","text":"

The text subcommand provides a straightforward text report for each test executed on all devices in your inventory.

"},{"location":"cli/nrfu/#command-overview","title":"Command overview","text":"
anta nrfu text --help\nUsage: anta nrfu text [OPTIONS]\n\nANTA command to check network states with text result\n\nOptions:\n  -t, --tags TEXT                 List of tags using comma as separator:\n                                  tag1,tag2,tag3\n  -s, --search TEXT               Regular expression to search in both name\n                                  and test\n--skip-error / --no-skip-error  Hide tests in errors due to connectivity\n                                  issue  [default: no-skip-error]\n--help                          Show this message and exit.\n

The --tags option allows you to target specific devices in your inventory, while the --search option filters the results with a regular expression matched against both the hostname and the test name.

The --skip-error option can be used to exclude tests that failed due to connectivity issues or unsupported commands.

"},{"location":"cli/nrfu/#example","title":"Example","text":"

anta nrfu text --tags LEAF --search DC1-LEAF1A\n

"},{"location":"cli/nrfu/#performing-nrfu-with-table-rendering","title":"Performing NRFU with table rendering","text":"

The table command under the anta nrfu namespace offers a clear and organized table view of the test results, suitable for filtering. It also has its own set of options for better control over the output.

"},{"location":"cli/nrfu/#command-overview_1","title":"Command overview","text":"
anta nrfu table --help\nUsage: anta nrfu table [OPTIONS]\n\nANTA command to check network states with table result\n\nOptions:\n  -t, --tags TEXT    List of tags using comma as separator: tag1,tag2,tag3\n  -d, --device TEXT  Show a summary for this device\n  -t, --test TEXT    Show a summary for this test\n--help             Show this message and exit.\n

The --tags option can be used to target specific devices in your inventory.

The --device and --test options show a summarized view of the test results for a specific host or test case, respectively.

"},{"location":"cli/nrfu/#example_1","title":"Example","text":"

anta nrfu table --tags LEAF\n

For larger setups, you can also group the results by host or test to get a summarized view:

anta nrfu table --tags LEAF --device DC1-LEAF1A\n
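
Similarly, the --test option summarizes a single test case across all devices (the test name below is assumed to be present in your catalog):

anta nrfu table --tags LEAF --test VerifyMlagStatus\n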

"},{"location":"cli/nrfu/#performing-nrfu-with-json-rendering","title":"Performing NRFU with JSON rendering","text":"

The json command generates a JSON output that can subsequently be passed to another tool for reporting purposes.

"},{"location":"cli/nrfu/#command-overview_2","title":"Command overview","text":"
anta nrfu json --help\nUsage: anta nrfu json [OPTIONS]\n\nANTA command to check network state with JSON result\n\nOptions:\n  -t, --tags TEXT    List of tags using comma as separator: tag1,tag2,tag3\n  -o, --output FILE  Path to save report as a file  [env var:\n                     ANTA_NRFU_JSON_OUTPUT]\n--help             Show this message and exit.\n

The --tags option can be used to target specific devices in your inventory.

The --output option allows you to save the JSON report as a file.

"},{"location":"cli/nrfu/#example_2","title":"Example","text":"

anta nrfu json --tags LEAF\n
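
To save the report to a file instead of printing it to the terminal, add the --output option (file name assumed):

anta nrfu json --tags LEAF --output report.json\n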

"},{"location":"cli/nrfu/#performing-nrfu-with-custom-reports","title":"Performing NRFU with custom reports","text":"

ANTA offers a CLI option for creating custom reports. This leverages the Jinja2 template system, allowing you to tailor reports to your specific needs.

"},{"location":"cli/nrfu/#command-overview_3","title":"Command overview","text":"

anta nrfu tpl-report --help\nUsage: anta nrfu tpl-report [OPTIONS]\n\nANTA command to check network state with templated report\n\nOptions:\n  -tpl, --template FILE  Path to the template to use for the report  [env var:\n                         ANTA_NRFU_TPL_REPORT_TEMPLATE; required]\n-o, --output FILE      Path to save report as a file  [env var:\n                         ANTA_NRFU_TPL_REPORT_OUTPUT]\n-t, --tags TEXT        List of tags using comma as separator: tag1,tag2,tag3\n  --help                 Show this message and exit.\n
The --template option is used to specify the Jinja2 template file for generating the custom report.

The --output option allows you to choose the path where the final report will be saved.

The --tags option can be used to target specific devices in your inventory.

"},{"location":"cli/nrfu/#example_3","title":"Example","text":"

anta nrfu tpl-report --tags LEAF --template ./custom_template.j2\n

The template ./custom_template.j2 is a simple Jinja2 template:

{% for d in data %}\n* {{ d.test }} is [green]{{ d.result | upper}}[/green] for {{ d.name }}\n{% endfor %}\n

The Jinja2 template has access to all TestResult elements and their values, as described in this documentation.

You can also save the report result to a file using the --output option:

anta nrfu tpl-report --tags LEAF --template ./custom_template.j2 --output nrfu-tpl-report.txt\n

The resulting output might look like this:

cat nrfu-tpl-report.txt\n* VerifyMlagStatus is [green]SUCCESS[/green] for DC1-LEAF1A\n* VerifyMlagInterfaces is [green]SUCCESS[/green] for DC1-LEAF1A\n* VerifyMlagConfigSanity is [green]SUCCESS[/green] for DC1-LEAF1A\n* VerifyMlagReloadDelay is [green]SUCCESS[/green] for DC1-LEAF1A\n
"},{"location":"cli/overview/","title":"Overview","text":""},{"location":"cli/overview/#overview-of-antas-command-line-interface-cli","title":"Overview of ANTA\u2019s Command-Line Interface (CLI)","text":"

ANTA provides a powerful Command-Line Interface (CLI) to perform a wide range of operations. This document provides a comprehensive overview of ANTA CLI usage and its commands.

ANTA can also be used as a Python library, allowing you to build your own tools based on it. Visit this page for more details.

To start using the ANTA CLI, open your terminal and type anta.

"},{"location":"cli/overview/#invoking-anta-cli","title":"Invoking ANTA CLI","text":"
anta --help\nUsage: anta [OPTIONS] COMMAND [ARGS]...\n\n  Arista Network Test Automation (ANTA) CLI\n\nOptions:\n  --version                       Show the version and exit.\n  --username TEXT                 Username to connect to EOS  [env var:\n                                  ANTA_USERNAME; required]\n--password TEXT                 Password to connect to EOS  [env var:\n                                  ANTA_PASSWORD; required]\n--timeout INTEGER               Global connection timeout  [env var:\n                                  ANTA_TIMEOUT; default: 5]\n--insecure                      Disable SSH Host Key validation  [env var:\n                                  ANTA_INSECURE]\n--enable-password TEXT          Enable password if required to connect  [env\n                                  var: ANTA_ENABLE_PASSWORD]\n-i, --inventory FILE            Path to the inventory YAML file  [env var:\n                                  ANTA_INVENTORY; required]\n--log-level, --log [CRITICAL|ERROR|WARNING|INFO|DEBUG]\nANTA logging level  [env var:\n                                  ANTA_LOG_LEVEL; default: INFO]\n--ignore-status                 Always exit with success  [env var:\n                                  ANTA_IGNORE_STATUS]\n--ignore-error                  Only report failures and not errors  [env\n                                  var: ANTA_IGNORE_ERROR]\n--help                          Show this message and exit.\n\nCommands:\n  debug  Debug commands for building ANTA\n  exec   Execute commands to inventory devices\n  get    Get data from/to ANTA\n  nrfu   Run NRFU against inventory devices\n
"},{"location":"cli/overview/#anta-global-parameters","title":"ANTA Global Parameters","text":"

Certain parameters are globally required and can be either passed to the ANTA CLI or set as an environment variable (ENV VAR).

To pass the parameters via the CLI:

anta --username tom --password arista123 --inventory inventory.yml <anta cli>\n

To set them as ENV VAR:

export ANTA_USERNAME=tom\nexport ANTA_PASSWORD=arista123\nexport ANTA_INVENTORY=inventory.yml\n

Then, run the CLI:

anta <anta cli>\n
"},{"location":"cli/overview/#anta-exit-codes","title":"ANTA Exit Codes","text":"

ANTA utilizes different exit codes to indicate the status of the test runs.

For all subcommands except nrfu, ANTA returns exit code 0, indicating a successful operation.

For the nrfu command, ANTA uses the following exit codes:

  • Exit code 0 - All tests passed successfully.
  • Exit code 1 - Tests were run, but at least one test returned a failure.
  • Exit code 2 - Tests were run, but at least one test returned an error.
  • Exit code 3 - An internal error occurred while executing tests.

To ignore the test status, use anta --ignore-status nrfu, and the exit code will always be 0.

To ignore errors, use anta --ignore-error nrfu, and the exit code will be 0 if all tests succeeded or 1 if any test failed.
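
A minimal sketch of acting on these exit codes from a shell script (catalog file name assumed; inventory and credentials assumed to be set as environment variables):

anta nrfu --catalog nrfu-catalog.yml table\nrc=$?\nif [ \"$rc\" -ne 0 ]; then\n  echo \"NRFU finished with exit code $rc\"\nfi\n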

"},{"location":"cli/overview/#shell-completion","title":"Shell Completion","text":"

You can enable shell completion for the ANTA CLI:


If you use ZSH shell, add the following line in your ~/.zshrc:

eval \"$(_ANTA_COMPLETE=zsh_source anta)\" > /dev/null\n

With bash, add the following line in your ~/.bashrc:

eval \"$(_ANTA_COMPLETE=bash_source anta)\" > /dev/null\n
"}]} \ No newline at end of file diff --git a/0.6.0/sitemap.xml b/0.6.0/sitemap.xml new file mode 100644 index 000000000..0f8724efd --- /dev/null +++ b/0.6.0/sitemap.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/0.6.0/sitemap.xml.gz b/0.6.0/sitemap.xml.gz new file mode 100644 index 000000000..7c3aebf97 Binary files /dev/null and b/0.6.0/sitemap.xml.gz differ diff --git a/0.6.0/stylesheets/extra.material.css b/0.6.0/stylesheets/extra.material.css new file mode 100644 index 000000000..b401c9ad6 --- /dev/null +++ b/0.6.0/stylesheets/extra.material.css @@ -0,0 +1,207 @@ +[data-md-color-scheme="slate"] { + --md-hue: 210; +} + +:root { + /* Color schema based on Arista Color Schema */ + /* Default color shades */ + --md-default-fg-color: #000000; + --md-default-fg-color--light: #a1a0a0; + --md-default-fg-color--lighter: #FFFFFF; + --md-default-fg-color--lightest: #FFFFFF; + --md-default-bg-color: #FFFFFF; + --md-default-bg-color--light: #FFFFFF; + --md-default-bg-color--lighter: #FFFFFF; + --md-default-bg-color--lightest: #FFFFFF; + + /* Primary color shades */ + --md-primary-fg-color: #27569B; + --md-primary-fg-color--light: #FFFFFF; + --md-primary-fg-color--dark: #27569B; + --md-primary-bg-color: #FFFFFF; + --md-primary-bg-color--light: #FFFFFF; + + /* Accent color shades */ + --md-accent-fg-color: #27569B; + --md-accent-bg-color: #27569B; + --md-accent-bg-color--light: #27569B; + + /* Link color */ + --md-typeset-a-color: #27569B; + --md-typeset-a-color-fg: #FFFFFF; + --md-typeset-a-color-bg: #27569B; + + /* Code block color shades */ + --md-code-bg-color: #E6E6E6; + --md-code-border-color: #0000004f; + --block-code-bg-color: #e4e4e4; + /* --md-code-fg-color: ...; */ + + font-size: 1.1rem; + /* min-height: 100%; + position: relative; + width: 100%; */ + font-feature-settings: "kern","liga"; + font-family: var(--md-text-font-family,_),-apple-system,BlinkMacSystemFont,Helvetica,Arial,sans-serif; + -webkit-font-smoothing: antialiased; + +} + +[data-md-color-scheme="slate"] { + + /* Link color */ + --md-typeset-a-color: #75aaf8; + --md-typeset-a-color-fg: #FFFFFF; + --md-typeset-a-color-bg: #27569B; + + /* Code block color shades */ + /* --md-code-bg-color: #E6E6E6; */ + --md-code-border-color: #aec6db4f; + /* --block-code-bg-color: #e4e4e4; */ +} + +@media only screen and (min-width: 76.25em) { + .md-main__inner, .md-header__inner { + max-width: 85%; + } + .md-sidebar--primary { + left: 5%; + } + .md-sidebar--secondary { + right: 5%; + margin-left: 0; + -webkit-transform: none; + transform: none; + } +} + +@media only screen { + .md-typeset a:hover { + background-color: var(--md-typeset-a-color-bg); + color: var(--md-typeset-a-color-fg); + } + .md-footer-nav { + background-color: var(--md-default-bg-color--light); + color: var(--md-accent-fg-color--transparent) + } + .md-footer { + height: 2%; + } + .md-footer-nav__direction { + position: absolute; + right: 0; + left: 0; + margin-top: -1rem; + padding: 0 1rem; + color: var(--md-default-fg-color--light); + font-size: .64rem; + } + .md-footer-nav__title { + font-size: 1.2rem; + line-height: 10rem; + color: var(--md-default-fg-color--light); + } + + .md-typeset h4 h5 h6 { + font-size: 1.5rem; + margin: 1em 0; + /* font-weight: 700; */ + letter-spacing: -.01em; + line-height: 3em; + } + + .md-typeset table:not([class]) th { + min-width: 5rem; + padding: .6rem .8rem; + color: var(--md-default-fg-color); + vertical-align: top; + /* background-color: var(--md-accent-bg-color); */ + text-align: left; + /* min-width: 100%; */ + /* 
display: table; */ + } + .md-typeset table:not([class]) td { + /* padding: .9375em 1.25em; */ + border-collapse: collapse; + vertical-align: center; + text-align: left; + /* border-bottom: 1px solid var(--md-default-fg-color--light); */ + } + .md-typeset code { + padding: 0 .2941176471em; + font-size: 100%; + word-break: break-word; + background-color: var(--md-code-bg-color); + border-radius: .1rem; + -webkit-box-decoration-break: clone; + box-decoration-break: clone; + } + .highlight code { + background-color: var(--md-code-bg-color); + font-size: 90%; + border-radius: 2%; + } + .md-typeset .admonition, .md-typeset details { + margin: 1.5625em 0; + padding: 0 .6rem; + overflow: hidden; + font-size: 90%; + page-break-inside: avoid; + border-left: .2rem solid var(--md-accent-bg-color); + border-left-color: var(--md-accent-bg-color); + border-radius: .1rem; + box-shadow: 0 .2rem .5rem rgba(0,0,0,.05),0 0 .05rem rgba(0,0,0,.1); + } + /* .md-typeset .note > .admonition-title, .md-typeset .note > summary { + background-color: var(--md-accent-bg-color); + color: var(--md-default-fg-color--lighter) + } */ + .md-typeset__table { + min-width: 80%; + } + .md-typeset table:not([class]) { + display: table; + } + + .mdx-content__footer { + margin-top: 20px; + text-align: center; + } + .mdx-content__footer a { + display: inline-block; + transition: transform 250ms cubic-bezier(0.1, 0.7, 0.1, 1), color 125ms; + } + .mdx-content__footer a:focus, .mdx-content__footer a:hover { + transform: scale(1.2); + } + + .md-typeset table:not([class]) th { + min-width: 5rem; + padding: .6rem .8rem; + /* color: var(--md-primary-fg-color--light); */ + bg: var(--md-footer-fg-color--lighter); + } + + .md-footer-copyright { + color: var(--md-footer-fg-color--lighter); + font-size: .64rem; + margin: auto 0.6rem; + padding: 0.4rem; + width: 100%; + text-align: center; + } + .img_center { + display: block; + margin-left: auto; + margin-right: auto; + border-radius: 1%; + /* width: 50%; */ + } +} + +/* mkdocstrings css from official repo to indent sub-elements nicely */ +/* Indentation. */ +div.doc-contents { + padding-left: 25px; + border-left: .05rem solid var(--md-typeset-table-color); +} diff --git a/0.6.0/stylesheets/highlight.js b/0.6.0/stylesheets/highlight.js new file mode 100644 index 000000000..86e50b933 --- /dev/null +++ b/0.6.0/stylesheets/highlight.js @@ -0,0 +1,3 @@ +document$.subscribe(() => { + hljs.highlightAll() +}) diff --git a/0.6.0/stylesheets/tables.js b/0.6.0/stylesheets/tables.js new file mode 100644 index 000000000..e848f07dd --- /dev/null +++ b/0.6.0/stylesheets/tables.js @@ -0,0 +1,6 @@ +document$.subscribe(function() { + var tables = document.querySelectorAll("article table") + tables.forEach(function(table) { + new Tablesort(table) + }) +}) diff --git a/0.6.0/usage-inventory-catalog/index.html b/0.6.0/usage-inventory-catalog/index.html new file mode 100644 index 000000000..dd282d64f --- /dev/null +++ b/0.6.0/usage-inventory-catalog/index.html @@ -0,0 +1,1682 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Inventory & Tests catalog - Arista Network Test Automation - ANTA + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Inventory & Tests catalog

+ +

Inventory and Catalog definition

+

This page describes how to create an inventory and a tests catalog.

+

Create an inventory file

+

The anta CLI needs an inventory file listing all the devices to test. This inventory is a YAML file with the following keys:

+
anta_inventory:
+  hosts:
+    - host: < ip address value >
+      port: < TCP port for eAPI. Default is 443 (Optional)>
+      name: < name to display in report. Default is host:port (Optional) >
+      tags: < list of tags to use to filter inventory during tests. Default is ['all']. (Optional) >
+  networks:
+    - network: < network using CIDR notation >
+      tags: < list of tags to use to filter inventory during tests. Default is ['all']. (Optional) >
+  ranges:
+    - start: < first ip address value of the range >
+      end: < last ip address value of the range >
+      tags: < list of tags to use to filter inventory during tests. Default is ['all']. (Optional) >
+
+

Your inventory file can be based on any of these 3 keys and MUST start with the anta_inventory key. A full description of the inventory model is available in the API documentation

+

An inventory example:

+
---
+anta_inventory:
+  hosts:
+  - host: 192.168.0.10
+    name: spine01
+    tags: ['fabric', 'spine']
+  - host: 192.168.0.11
+    name: spine02
+    tags: ['fabric', 'spine']
+  networks:
+  - network: '192.168.110.0/24'
+    tags: ['fabric', 'leaf']
+  ranges:
+  - start: 10.0.0.9
+    end: 10.0.0.11
+    tags: ['fabric', 'l2leaf']
+
+
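
To check that ANTA parses this inventory as expected, you can load it with the anta get inventory command described in the CLI documentation (credentials assumed to be set as environment variables):

anta -i inventory.yml get inventory
+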

Test Catalog

+

In addition to your inventory file, you also have to define a catalog of tests to execute against all your devices. This catalog lists all your tests and their parameters. +Its format is a YAML file whose keys are the Python module paths from which the tests are loaded.

+
Default tests catalog
+

All tests are located under the anta.tests module and are categorized per family (one submodule per family). For example, to run the software version test, you can do:

+
anta.tests.software:
+  - VerifyEosVersion:
+
+

This loads the VerifyEosVersion test located in anta.tests.software. Since this test takes parameters, the catalog entry needs the following structure:

+
anta.tests.software:
+  - VerifyEosVersion:
+      # List of allowed EOS versions.
+      versions:
+        - 4.25.4M
+        - 4.26.1F
+
+

To get a list of all available tests and their respective parameters, you can read the tests section of this website.

+

The following example gives a minimal tests catalog you can use in almost any situation:

+
---
+# Load anta.tests.software
+anta.tests.software:
+  # Verifies the device is running one of the allowed EOS version.
+  - VerifyEosVersion:
+      # List of allowed EOS versions.
+      versions:
+        - 4.25.4M
+        - 4.26.1F
+
+# Load anta.tests.system
+anta.tests.system:
+  # Verifies the device uptime is higher than a value.
+  - VerifyUptime:
+      minimum: 1
+
+# Load anta.tests.configuration
+anta.tests.configuration:
+  # Verifies ZeroTouch is disabled.
+  - VerifyZeroTouch:
+  - VerifyRunningConfigDiffs:
+
+
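
Such a catalog can then be handed to the nrfu entrypoint described in the CLI documentation; a possible invocation (inventory and catalog file names assumed):

anta -i inventory.yml nrfu --catalog catalog.yml table
+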

If your test is based on AntaTemplate, you have to provide inputs for the EOS CLI template by using the template_params list:

+
anta.tests.routing.bgp:
+  - VerifyBGPIPv4UnicastCount:
+      number: 3
+      template_params:
+        - vrf: default
+        - vrf: customer-01
+
+

Which is required for the following test definition:

+
class VerifyBGPIPv4UnicastCount(AntaTest):
+    """
+    ...
+    """
+
+    name = "VerifyBGPIPv4UnicastCount"
+    description = "..."
+    categories = ["routing", "bgp"]
+    template = AntaTemplate(template="show bgp ipv4 unicast summary vrf {vrf}")
+
+    @check_bgp_family_enable("ipv4")
+    @AntaTest.anta_test
+    def test(self, number: Optional[int] = None) -> None:
+        pass
+
+

If you need to run the same test with different parameters (for instance, a different number of neighbors per VRF), add a second entry under the same module key, as follows:

+
anta.tests.routing.bgp:
+  - VerifyBGPIPv4UnicastCount:
+      number: 2
+      template_params:
+        - vrf: default
+  - VerifyBGPIPv4UnicastCount:
+      number: 3
+      template_params:
+        - vrf: customer-01
+
+
Custom tests catalog
+

If you want to leverage your own collection of tests, you can use the following syntax:

+
<your package name>:
+  - <your test in your package name>:
+
+

So for instance, it could be:

+
titom73.tests.system:
+  - VerifyPlatform:
+      type: ['cEOS-LAB']
+
+
+

How to create custom tests

+

To create your custom tests, refer to the following documentation.

+
+ +
+
+ + + Last update: + July 19, 2023 + + + +
+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versions.json b/versions.json index 4a1815e4b..ff29ff983 100644 --- a/versions.json +++ b/versions.json @@ -1 +1 @@ -[{"version": "v0.5.0", "title": "v0.5.0", "aliases": []}, {"version": "devel", "title": "devel", "aliases": []}] \ No newline at end of file +[{"version": "0.6.0", "title": "0.6.0", "aliases": []}, {"version": "v0.5.0", "title": "v0.5.0", "aliases": []}, {"version": "devel", "title": "devel", "aliases": []}] \ No newline at end of file