feat(anta): Add evidence to TestResult by carl-baillargeon · Pull Request #1242 · aristanetworks/anta · GitHub

feat(anta): Add evidence to TestResult #1242


Status: Draft · carl-baillargeon wants to merge 1 commit into main
11 changes: 10 additions & 1 deletion anta/_runner.py
@@ -79,6 +79,10 @@ class AntaRunContext:
Manager with the final test results.
filters: AntaRunFilters
Provided filters to the run.
dry_run
Dry-run mode flag. If `True`, run all setup steps but do not execute tests.
save_evidence
Save each test's inputs and command outputs to its respective result in the ResultManager.
selected_inventory: AntaInventory
The final inventory of devices selected for testing.
selected_tests: defaultdict[AntaDevice, set[AntaTestDefinition]]
@@ -100,6 +104,7 @@ class AntaRunContext:
manager: ResultManager
filters: AntaRunFilters
dry_run: bool = False
save_evidence: bool = False

# State populated during the run
selected_inventory: AntaInventory = field(default_factory=AntaInventory)
@@ -205,6 +210,7 @@ async def run(
filters: AntaRunFilters | None = None,
*,
dry_run: bool = False,
save_evidence: bool = False,
) -> AntaRunContext:
"""Run ANTA.

@@ -230,6 +236,8 @@
Filters for the ANTA run. If `None`, run all tests on all devices.
dry_run
Dry-run mode flag. If `True`, run all setup steps but do not execute tests.
save_evidence
Save each test's inputs and command outputs to its respective result in the ResultManager.

Returns
-------
@@ -245,6 +253,7 @@
manager=result_manager if result_manager is not None else ResultManager(),
filters=filters if filters is not None else AntaRunFilters(),
dry_run=dry_run,
save_evidence=save_evidence,
start_time=start_time,
)

@@ -396,7 +405,7 @@ def _get_test_coroutines(self, ctx: AntaRunContext) -> list[Coroutine[Any, Any,
for device, test_definitions in ctx.selected_tests.items():
for test_def in test_definitions:
try:
coros.append(test_def.test(device=device, inputs=test_def.inputs).test())
coros.append(test_def.test(device=device, inputs=test_def.inputs, save_evidence=ctx.save_evidence).test())
except Exception as exc: # noqa: BLE001, PERF203
# An AntaTest instance is potentially user-defined code.
# We need to catch everything and exit gracefully with an error message.
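The runner change above threads the new flag from `AntaRunner.run()` into every `AntaTest` instantiation. A minimal sketch of driving this programmatically, assuming `inventory` and `catalog` are already-built `AntaInventory` and `AntaCatalog` objects and that `AntaRunner` constructs with defaults (illustrative, not part of this diff):

import asyncio

from anta._runner import AntaRunner

# Sketch: propagate evidence capture through a full run.
# `inventory` (AntaInventory) and `catalog` (AntaCatalog) are assumed to exist.
runner = AntaRunner()
ctx = asyncio.run(runner.run(inventory=inventory, catalog=catalog, save_evidence=True))

# Each TestResult in the manager should now carry its inputs and commands.
for result in ctx.manager.results:
    print(result.test, result.evidence)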
24 changes: 21 additions & 3 deletions anta/cli/nrfu/commands.py
@@ -44,13 +44,31 @@ def table(ctx: click.Context, group_by: Literal["device", "test"] | None) -> Non
required=False,
help="Path to save report as a JSON file",
)
def json(ctx: click.Context, output: pathlib.Path | None) -> None:
@click.option(
"--evidence",
help="Include each test inputs and command outputs in their respective test result.",
show_envvar=True,
is_flag=True,
default=False,
)
@click.option(
"--run-metadata",
help="Include additional run metadata in the JSON output.",
show_envvar=True,
is_flag=True,
default=False,
)
def json(ctx: click.Context, output: pathlib.Path | None, *, evidence: bool, run_metadata: bool) -> None:
"""ANTA command to check network state with JSON results.

If no `--output` is specified, the output is printed to stdout.
"""
run_tests(ctx)
print_json(ctx, output=output)
ctx.obj["save_evidence"] = evidence
run_context = run_tests(ctx)
if run_metadata:
print_json(ctx, output, run_context)
else:
print_json(ctx, output)
exit_with_code(ctx)


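Note the wiring here: `--evidence` only stashes the flag in `ctx.obj["save_evidence"]`; it is `run_tests()` in `utils.py` (next file) that reads it back and forwards it to `AntaRunner.run()`. `--run-metadata` does not alter the run itself, it merely selects the `print_json` variant that nests the results under `test_results` alongside a `run_metadata` object.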
74 changes: 44 additions & 30 deletions anta/cli/nrfu/utils.py
@@ -8,7 +8,7 @@
import asyncio
import json
import logging
from typing import TYPE_CHECKING, Literal
from typing import TYPE_CHECKING, Any, Literal

import rich
from rich.panel import Panel
@@ -48,6 +48,8 @@ def run_tests(ctx: click.Context) -> AntaRunContext:

catalog = ctx.obj["catalog"]
inventory = ctx.obj["inventory"]
result_manager = ctx.obj["result_manager"]
save_evidence = ctx.obj.get("save_evidence", False)

print_settings(inventory, catalog)
with anta_progress_bar() as AntaTest.progress:
@@ -57,7 +59,9 @@
tests=set(test) if test else None,
tags=tags,
)
run_ctx = asyncio.run(runner.run(inventory=inventory, catalog=catalog, result_manager=ctx.obj["result_manager"], filters=filters, dry_run=dry_run))
run_ctx = asyncio.run(
runner.run(inventory=inventory, catalog=catalog, result_manager=result_manager, filters=filters, dry_run=dry_run, save_evidence=save_evidence)
)

if dry_run:
ctx.exit()
@@ -72,6 +76,34 @@ def _get_result_manager(ctx: click.Context, *, apply_hide_filter: bool = True) -
return ctx.obj["result_manager"]


# TODO: Update function docstring
def _get_run_metadata(run_context: AntaRunContext, *, json_serializable: bool = False) -> dict[str, Any]:
"""Get a dictionary with run metadata built from an AntaRunContext."""
active_filters_dict = {}
if run_context.filters.tags:
active_filters_dict["tags"] = sorted(run_context.filters.tags)
if run_context.filters.tests:
active_filters_dict["tests"] = sorted(run_context.filters.tests)
if run_context.filters.devices:
active_filters_dict["devices"] = sorted(run_context.filters.devices)

metadata = {
"anta_version": anta_version,
"test_execution_start_time": run_context.start_time.isoformat() if (json_serializable and run_context.start_time) else run_context.start_time,
"test_execution_end_time": run_context.end_time.isoformat() if (json_serializable and run_context.end_time) else run_context.end_time,
"total_duration": run_context.duration.total_seconds() if (json_serializable and run_context.duration) else run_context.duration,
"total_devices_in_inventory": run_context.total_devices_in_inventory,
"devices_unreachable_at_setup": run_context.devices_unreachable_at_setup,
"devices_filtered_at_setup": run_context.devices_filtered_at_setup,
"filters_applied": active_filters_dict if active_filters_dict else None,
}

if run_context.warnings_at_setup:
metadata["warnings_at_setup"] = run_context.warnings_at_setup

return metadata


def print_settings(
inventory: AntaInventory,
catalog: AntaCatalog,
@@ -96,18 +128,23 @@ def print_table(ctx: click.Context, group_by: Literal["device", "test"] | None =
console.print(reporter.report_all(results))


def print_json(ctx: click.Context, output: pathlib.Path | None = None) -> None:
# TODO: Update function docstring
def print_json(ctx: click.Context, output: pathlib.Path | None = None, run_context: AntaRunContext | None = None) -> None:
"""Print results as JSON. If output is provided, save to file instead."""
results = _get_result_manager(ctx)
result_manager = _get_result_manager(ctx)
results = (
result_manager.dump if run_context is None else {"run_metadata": _get_run_metadata(run_context, json_serializable=True), "test_results": result_manager.dump}
)
json_results = json.dumps(results, indent=4)

if output is None:
console.print()
console.print(Panel("JSON results", style="cyan"))
rich.print_json(results.json)
rich.print_json(json_results)
else:
try:
with output.open(mode="w", encoding="utf-8") as file:
file.write(results.json)
file.write(json_results)
console.print(f"JSON results saved to {output} ✅", style="cyan")
except OSError:
console.print(f"Failed to save JSON results to {output} ❌", style="cyan")
@@ -161,34 +198,11 @@ def save_markdown_report(ctx: click.Context, md_output: pathlib.Path, run_contex
Optional `AntaRunContext` instance returned from `AntaRunner.run()`.
If provided, a `Run Overview` section will be generated in the report including the run context information.
"""
extra_data = None
if run_context is not None:
active_filters_dict = {}
if run_context.filters.tags:
active_filters_dict["tags"] = sorted(run_context.filters.tags)
if run_context.filters.tests:
active_filters_dict["tests"] = sorted(run_context.filters.tests)
if run_context.filters.devices:
active_filters_dict["devices"] = sorted(run_context.filters.devices)

extra_data = {
"anta_version": anta_version,
"test_execution_start_time": run_context.start_time,
"test_execution_end_time": run_context.end_time,
"total_duration": run_context.duration,
"total_devices_in_inventory": run_context.total_devices_in_inventory,
"devices_unreachable_at_setup": run_context.devices_unreachable_at_setup,
"devices_filtered_at_setup": run_context.devices_filtered_at_setup,
"filters_applied": active_filters_dict if active_filters_dict else None,
}

if run_context.warnings_at_setup:
extra_data["warnings_at_setup"] = run_context.warnings_at_setup

try:
manager = _get_result_manager(ctx, apply_hide_filter=False).sort(["name", "categories", "test"])
filtered_manager = _get_result_manager(ctx, apply_hide_filter=True).sort(["name", "categories", "test"])
sections = [(section, filtered_manager) if section.__name__ == "TestResults" else (section, manager) for section in MDReportGenerator.DEFAULT_SECTIONS]
extra_data = None if run_context is None else _get_run_metadata(run_context)
MDReportGenerator.generate_sections(md_filename=md_output, sections=sections, extra_data=extra_data)
console.print(f"Markdown report saved to {md_output} ✅", style="cyan")
except OSError:
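For reference, with `--run-metadata` the document written by `print_json` is shaped roughly as follows. The keys come from `_get_run_metadata`; all values below are illustrative, and the exact shape of the device fields is not visible in this diff:

{
    "run_metadata": {
        "anta_version": "1.4.0",
        "test_execution_start_time": "2025-01-01T12:00:00+00:00",
        "test_execution_end_time": "2025-01-01T12:00:42+00:00",
        "total_duration": 42.0,
        "total_devices_in_inventory": 3,
        "devices_unreachable_at_setup": [],
        "devices_filtered_at_setup": [],
        "filters_applied": {"tags": ["leaf"]}
    },
    "test_results": [...]
}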
7 changes: 7 additions & 0 deletions anta/models.py
@@ -431,6 +431,8 @@ def __init__(
device: AntaDevice,
inputs: dict[str, Any] | AntaTest.Input | None = None,
eos_data: list[dict[Any, Any] | str] | None = None,
*,
save_evidence: bool = False,
) -> None:
"""Initialize an AntaTest instance.

@@ -443,6 +445,8 @@
eos_data
Populate outputs of the test commands instead of collecting from devices.
This list must have the same length and order as the `instance_commands` instance attribute.
save_evidence
Save the test inputs and commands used to run the test in the TestResult object.
"""
self.logger: logging.Logger = logging.getLogger(f"{self.module}.{self.__class__.__name__}")
self.device: AntaDevice = device
@@ -458,6 +462,9 @@
if self.result.result == AntaTestStatus.UNSET:
self._init_commands(eos_data)

if save_evidence:
self.result.evidence = {"inputs": self.inputs, "commands": self.instance_commands}

def _init_inputs(self, inputs: dict[str, Any] | AntaTest.Input | None) -> None:
"""Instantiate the `inputs` instance attribute with an `AntaTest.Input` instance to validate test inputs using the model.

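Because `save_evidence` is keyword-only on `AntaTest.__init__`, evidence capture also works when instantiating a test directly. A hedged sketch, with `VerifyUptime` and its `minimum` input used illustratively and `device` assumed to be an existing `AntaDevice`:

from anta.tests.system import VerifyUptime

# Sketch: capture evidence on a directly instantiated test.
test = VerifyUptime(device=device, inputs={"minimum": 86400}, save_evidence=True)

# Evidence references the live objects; JSON conversion happens later via
# TestResult's field serializer.
assert test.result.evidence == {"inputs": test.inputs, "commands": test.instance_commands}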
50 changes: 49 additions & 1 deletion anta/result_manager/models.py
@@ -7,8 +7,9 @@

from dataclasses import dataclass, field
from enum import Enum
from typing import Any

from pydantic import BaseModel
from pydantic import BaseModel, FieldSerializationInfo, field_serializer, field_validator


class AntaTestStatus(str, Enum):
@@ -47,6 +48,9 @@ class TestResult(BaseModel):
Messages to report after the test, if any.
custom_field : str | None
Custom field to store a string for flexibility in integrating with ANTA.
evidence : dict[str, Any] | None
Optional evidence attached to the result.
If provided, the dictionary must follow this structure: `{"inputs": AntaTest.Input, "commands": list[AntaCommand]}`.

"""

@@ -58,6 +62,50 @@
messages: list[str] = []
custom_field: str | None = None

# Using Any to prevent a circular import from anta.models (AntaCommand, AntaTest.Input)
# TODO: Replace with a stricter type (TypedDict or dataclass) once module refactoring in ANTA v2.0.0 resolves the circular import from anta.models
evidence: dict[str, Any] | None = None

@field_serializer("evidence")
def serialize_evidence(self, evidence: dict[str, Any] | None, _info: FieldSerializationInfo) -> dict[str, Any] | None:
"""Serialize the evidence field if present."""
if evidence is None:
return None

inputs = evidence["inputs"].model_dump(mode="json", exclude_unset=True)
commands = [command.model_dump(mode="json", exclude={"template", "params", "use_cache"}) for command in evidence["commands"]]
return {"inputs": inputs, "commands": commands}

# TODO: Remove this validator once a stricter type is associated with evidence
@field_validator("evidence", mode="after")
@classmethod
def validate_evidence(cls, evidence: dict[str, Any] | None) -> dict[str, Any] | None:
"""Validate the evidence field if present."""
if evidence is None:
return None

errors: list[str] = []
expected_keys = {"inputs", "commands"}
actual_keys = set(evidence.keys())

# Check for missing required keys
if missing_keys := expected_keys - actual_keys:
errors.append(f"evidence is missing required key(s): {sorted(missing_keys)}")

# Check for unexpected extra keys
if extra_keys := actual_keys - expected_keys:
errors.append(f"evidence has unexpected key(s): {sorted(extra_keys)}")

# Check if 'commands' value is a list (only if the key is present)
if "commands" in actual_keys and not isinstance(evidence["commands"], list):
errors.append(f"'commands' must be a list, but got '{type(evidence['commands']).__name__}'")

# If any errors were collected, raise a single exception
if errors:
raise ValueError("\n".join(errors))

return evidence

def is_success(self, message: str | None = None) -> None:
"""Set status to success.

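Since the validator raises `ValueError`, pydantic surfaces a malformed evidence dictionary as a `ValidationError` at construction time. A hedged sketch (the other `TestResult` field values are illustrative):

from pydantic import ValidationError

from anta.result_manager.models import TestResult

try:
    TestResult(
        name="leaf1",
        test="VerifyUptime",
        categories=["system"],
        description="Verifies device uptime",
        evidence={"inputs": None},  # missing the required "commands" key
    )
except ValidationError as exc:
    print(exc)  # reports: evidence is missing required key(s): ['commands']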
5 changes: 5 additions & 0 deletions docs/snippets/anta_nrfu_json_help.txt
@@ -8,4 +8,9 @@ Usage: anta nrfu json [OPTIONS]
Options:
-o, --output FILE Path to save report as a JSON file [env var:
ANTA_NRFU_JSON_OUTPUT]
--evidence Include each test's inputs and command outputs in its
respective test result. [env var:
ANTA_NRFU_JSON_EVIDENCE]
--run-metadata Include additional run metadata in the JSON output. [env
var: ANTA_NRFU_JSON_RUN_METADATA]
--help Show this message and exit.
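Putting the pieces together, a run that captures evidence and run metadata to a file could look like `anta nrfu json --evidence --run-metadata -o results.json` (inventory, catalog, and connection options omitted; flag names as shown in the help text above).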