From 40a78b343cd584eb155a628c03dc74806b89f051 Mon Sep 17 00:00:00 2001 From: Carl Baillargeon Date: Tue, 31 Dec 2024 16:25:42 -0500 Subject: [PATCH 1/3] fix(anta): Fix various issues in CSV and Markdown reporters --- anta/cli/nrfu/utils.py | 2 +- anta/constants.py | 21 +++++- anta/reporter/__init__.py | 4 +- anta/reporter/csv_reporter.py | 1 + anta/reporter/md_reporter.py | 13 ++-- anta/result_manager/__init__.py | 45 ++++++++++--- tests/data/test_md_report.md | 30 ++++----- tests/units/result_manager/test__init__.py | 77 ++++++++++++++++++++-- 8 files changed, 154 insertions(+), 39 deletions(-) diff --git a/anta/cli/nrfu/utils.py b/anta/cli/nrfu/utils.py index 375e6e1ed..3dd3068f4 100644 --- a/anta/cli/nrfu/utils.py +++ b/anta/cli/nrfu/utils.py @@ -157,7 +157,7 @@ def save_markdown_report(ctx: click.Context, md_output: pathlib.Path) -> None: Path to save the markdown report. """ try: - MDReportGenerator.generate(results=_get_result_manager(ctx), md_filename=md_output) + MDReportGenerator.generate(results=_get_result_manager(ctx).sort(["name", "categories", "test"]), md_filename=md_output) console.print(f"Markdown report saved to {md_output} ✅", style="cyan") except OSError: console.print(f"Failed to save Markdown report to {md_output} ❌", style="cyan") diff --git a/anta/constants.py b/anta/constants.py index 4dcef3050..4ea61e189 100644 --- a/anta/constants.py +++ b/anta/constants.py @@ -5,7 +5,26 @@ from __future__ import annotations -ACRONYM_CATEGORIES: set[str] = {"aaa", "mlag", "snmp", "bgp", "ospf", "vxlan", "stp", "igmp", "ip", "lldp", "ntp", "bfd", "ptp", "lanz", "stun", "vlan"} +ACRONYM_CATEGORIES: set[str] = { + "aaa", + "avt", + "bfd", + "bgp", + "igmp", + "ip", + "isis", + "lanz", + "lldp", + "mlag", + "ntp", + "ospf", + "ptp", + "snmp", + "stp", + "stun", + "vlan", + "vxlan", +} """A set of network protocol or feature acronyms that should be represented in uppercase.""" MD_REPORT_TOC = """**Table of Contents:** diff --git a/anta/reporter/__init__.py b/anta/reporter/__init__.py index 9e5fa1b30..821c3d6f9 100644 --- a/anta/reporter/__init__.py +++ b/anta/reporter/__init__.py @@ -168,7 +168,7 @@ def report_summary_tests( self.Headers.list_of_error_nodes, ] table = self._build_headers(headers=headers, table=table) - for test, stats in sorted(manager.test_stats.items()): + for test, stats in manager.test_stats.items(): if tests is None or test in tests: table.add_row( test, @@ -214,7 +214,7 @@ def report_summary_devices( self.Headers.list_of_error_tests, ] table = self._build_headers(headers=headers, table=table) - for device, stats in sorted(manager.device_stats.items()): + for device, stats in manager.device_stats.items(): if devices is None or device in devices: table.add_row( device, diff --git a/anta/reporter/csv_reporter.py b/anta/reporter/csv_reporter.py index 3f5592388..a8d3df5f6 100644 --- a/anta/reporter/csv_reporter.py +++ b/anta/reporter/csv_reporter.py @@ -111,6 +111,7 @@ def generate(cls, results: ResultManager, csv_filename: pathlib.Path) -> None: csvwriter = csv.writer( csvfile, delimiter=",", + lineterminator="\n", ) csvwriter.writerow(headers) for entry in results.results: diff --git a/anta/reporter/md_reporter.py b/anta/reporter/md_reporter.py index 94c4a8668..35232324e 100644 --- a/anta/reporter/md_reporter.py +++ b/anta/reporter/md_reporter.py @@ -237,7 +237,7 @@ class SummaryTotalsDeviceUnderTest(MDReportBase): def generate_rows(self) -> Generator[str, None, None]: """Generate the rows of the summary totals device under test table.""" for device, stat in 
self.results.device_stats.items(): - total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count + total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count + stat.tests_unset_count categories_skipped = ", ".join(sorted(convert_categories(list(stat.categories_skipped)))) categories_failed = ", ".join(sorted(convert_categories(list(stat.categories_failed)))) yield ( @@ -261,10 +261,11 @@ class SummaryTotalsPerCategory(MDReportBase): def generate_rows(self) -> Generator[str, None, None]: """Generate the rows of the summary totals per category table.""" - for category, stat in self.results.sorted_category_stats.items(): - total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count + for category, stat in self.results.category_stats.items(): + converted_category = convert_categories([category])[0] + total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count + stat.tests_unset_count yield ( - f"| {category} | {total_tests} | {stat.tests_success_count} | {stat.tests_skipped_count} | {stat.tests_failure_count} " + f"| {converted_category} | {total_tests} | {stat.tests_success_count} | {stat.tests_skipped_count} | {stat.tests_failure_count} " f"| {stat.tests_error_count} |\n" ) @@ -284,9 +285,9 @@ class TestResults(MDReportBase): def generate_rows(self) -> Generator[str, None, None]: """Generate the rows of the all test results table.""" - for result in self.results.get_results(sort_by=["name", "test"]): + for result in self.results.results: messages = self.safe_markdown(", ".join(result.messages)) - categories = ", ".join(convert_categories(result.categories)) + categories = ", ".join(sorted(convert_categories(result.categories))) yield ( f"| {result.name or '-'} | {categories or '-'} | {result.test or '-'} " f"| {result.description or '-'} | {self.safe_markdown(result.custom_field) or '-'} | {result.result or '-'} | {messages or '-'} |\n" diff --git a/anta/result_manager/__init__.py b/anta/result_manager/__init__.py index b5b0f393f..20a16a4c4 100644 --- a/anta/result_manager/__init__.py +++ b/anta/result_manager/__init__.py @@ -7,6 +7,7 @@ import json import logging +import warnings from collections import defaultdict from functools import cached_property from itertools import chain @@ -143,28 +144,41 @@ def json(self) -> str: return json.dumps(self.dump, indent=4) @property - def device_stats(self) -> defaultdict[str, DeviceStats]: + def device_stats(self) -> dict[str, DeviceStats]: """Get the device statistics.""" self._ensure_stats_in_sync() - return self._device_stats + return dict(sorted(self._device_stats.items())) @property - def category_stats(self) -> defaultdict[str, CategoryStats]: + def category_stats(self) -> dict[str, CategoryStats]: """Get the category statistics.""" self._ensure_stats_in_sync() - return self._category_stats + return dict(sorted(self._category_stats.items())) @property - def test_stats(self) -> defaultdict[str, TestStats]: + def test_stats(self) -> dict[str, TestStats]: """Get the test statistics.""" self._ensure_stats_in_sync() - return self._test_stats + return dict(sorted(self._test_stats.items())) @property def sorted_category_stats(self) -> dict[str, CategoryStats]: - """A property that returns the category_stats dictionary sorted by key name.""" + """A property that returns the category_stats dictionary sorted by key name. 
+ + Deprecated + ---------- + This property is deprecated and will be removed in ANTA v2.0.0. + Use `category_stats` instead as it is now sorted by default. + + TODO: Remove this property in ANTA v2.0.0. + """ + warnings.warn( + "sorted_category_stats is deprecated and will be removed in ANTA v2.0.0. Use category_stats instead as it is now sorted by default.", + DeprecationWarning, + stacklevel=2, + ) self._ensure_stats_in_sync() - return dict(sorted(self.category_stats.items())) + return self.category_stats @cached_property def results_by_status(self) -> dict[AntaTestStatus, list[TestResult]]: @@ -316,6 +330,21 @@ def get_status(self, *, ignore_error: bool = False) -> str: """Return the current status including error_status if ignore_error is False.""" return "error" if self.error_status and not ignore_error else self.status + def sort(self, sort_by: list[str]) -> ResultManager: + """Sort the ResultManager results based on TestResult fields. + + Parameters + ---------- + sort_by + List of TestResult fields to sort the results. + """ + accepted_fields = TestResult.model_fields.keys() + if not set(sort_by).issubset(set(accepted_fields)): + msg = f"Invalid sort_by fields: {sort_by}. Accepted fields are: {list(accepted_fields)}" + raise ValueError(msg) + self._result_entries.sort(key=lambda result: [getattr(result, field) for field in sort_by]) + return self + def filter(self, hide: set[AntaTestStatus]) -> ResultManager: """Get a filtered ResultManager based on test status. diff --git a/tests/data/test_md_report.md b/tests/data/test_md_report.md index 9360dbc74..db8d47f9a 100644 --- a/tests/data/test_md_report.md +++ b/tests/data/test_md_report.md @@ -21,8 +21,8 @@ | Device Under Test | Total Tests | Tests Success | Tests Skipped | Tests Failure | Tests Error | Categories Skipped | Categories Failed | | ------------------| ----------- | ------------- | ------------- | ------------- | ----------- | -------------------| ------------------| -| DC1-SPINE1 | 15 | 2 | 2 | 10 | 1 | MLAG, VXLAN | AAA, BFD, BGP, Connectivity, Routing, SNMP, STP, Services, Software, System | | DC1-LEAF1A | 15 | 5 | 0 | 9 | 1 | - | AAA, BFD, BGP, Connectivity, SNMP, STP, Services, Software, System | +| DC1-SPINE1 | 15 | 2 | 2 | 10 | 1 | MLAG, VXLAN | AAA, BFD, BGP, Connectivity, Routing, SNMP, STP, Services, Software, System | ### Summary Totals Per Category @@ -47,33 +47,33 @@ | Device Under Test | Categories | Test | Description | Custom Field | Result | Messages | | ----------------- | ---------- | ---- | ----------- | ------------ | ------ | -------- | +| DC1-LEAF1A | AAA | VerifyTacacsSourceIntf | Verifies TACACS source-interface for a specified VRF. | - | failure | Source-interface Management0 is not configured in VRF default | | DC1-LEAF1A | BFD | VerifyBFDSpecificPeers | Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF. | - | failure | Following BFD peers are not configured, status is not up or remote disc is zero: {'192.0.255.8': {'default': 'Not Configured'}, '192.0.255.7': {'default': 'Not Configured'}} | | DC1-LEAF1A | BGP | VerifyBGPPeerCount | Verifies the count of BGP peers. | - | failure | Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Expected: 2, Actual: 1'}}, {'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'DEV': 'Expected: 3, Actual: 0'}}] | -| DC1-LEAF1A | Software | VerifyEOSVersion | Verifies the EOS version of the device. 
| - | failure | device is running version "4.31.1F-34554157.4311F (engineering build)" not in expected versions: ['4.25.4M', '4.26.1F'] | -| DC1-LEAF1A | Services | VerifyHostname | Verifies the hostname of a device. | - | failure | Expected 's1-spine1' as the hostname, but found 'DC1-LEAF1A' instead. | -| DC1-LEAF1A | Interfaces | VerifyInterfaceUtilization | Verifies that the utilization of interfaces is below a certain threshold. | - | success | - | | DC1-LEAF1A | Connectivity | VerifyLLDPNeighbors | Verifies that the provided LLDP neighbors are connected properly. | - | failure | Wrong LLDP neighbor(s) on port(s): Ethernet1 DC1-SPINE1_Ethernet1 Ethernet2 DC1-SPINE2_Ethernet1 Port(s) not configured: Ethernet7 | -| DC1-LEAF1A | MLAG | VerifyMlagStatus | Verifies the health status of the MLAG configuration. | - | success | - | -| DC1-LEAF1A | System | VerifyNTP | Verifies if NTP is synchronised. | - | failure | The device is not synchronized with the configured NTP server(s): 'NTP is disabled.' | | DC1-LEAF1A | Connectivity | VerifyReachability | Test the network reachability to one or many destination IP(s). | - | error | ping vrf MGMT 1.1.1.1 source Management1 repeat 2 has failed: No source interface Management1 | +| DC1-LEAF1A | Interfaces | VerifyInterfaceUtilization | Verifies that the utilization of interfaces is below a certain threshold. | - | success | - | +| DC1-LEAF1A | MLAG | VerifyMlagStatus | Verifies the health status of the MLAG configuration. | - | success | - | | DC1-LEAF1A | Routing | VerifyRoutingTableEntry | Verifies that the provided routes are present in the routing table of a specified VRF. | - | success | - | -| DC1-LEAF1A | STP | VerifySTPMode | Verifies the configured STP mode for a provided list of VLAN(s). | - | failure | Wrong STP mode configured for the following VLAN(s): [10, 20] | | DC1-LEAF1A | SNMP | VerifySnmpStatus | Verifies if the SNMP agent is enabled. | - | failure | SNMP agent disabled in vrf default | -| DC1-LEAF1A | AAA | VerifyTacacsSourceIntf | Verifies TACACS source-interface for a specified VRF. | - | failure | Source-interface Management0 is not configured in VRF default | +| DC1-LEAF1A | STP | VerifySTPMode | Verifies the configured STP mode for a provided list of VLAN(s). | - | failure | Wrong STP mode configured for the following VLAN(s): [10, 20] | | DC1-LEAF1A | Security | VerifyTelnetStatus | Verifies if Telnet is disabled in the default VRF. | - | success | - | +| DC1-LEAF1A | Services | VerifyHostname | Verifies the hostname of a device. | - | failure | Expected 's1-spine1' as the hostname, but found 'DC1-LEAF1A' instead. | +| DC1-LEAF1A | Software | VerifyEOSVersion | Verifies the EOS version of the device. | - | failure | device is running version "4.31.1F-34554157.4311F (engineering build)" not in expected versions: ['4.25.4M', '4.26.1F'] | +| DC1-LEAF1A | System | VerifyNTP | Verifies if NTP is synchronised. | - | failure | The device is not synchronized with the configured NTP server(s): 'NTP is disabled.' | | DC1-LEAF1A | VXLAN | VerifyVxlan1Interface | Verifies the Vxlan1 interface status. | - | success | - | +| DC1-SPINE1 | AAA | VerifyTacacsSourceIntf | Verifies TACACS source-interface for a specified VRF. | - | failure | Source-interface Management0 is not configured in VRF default | | DC1-SPINE1 | BFD | VerifyBFDSpecificPeers | Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF. 
| - | failure | Following BFD peers are not configured, status is not up or remote disc is zero: {'192.0.255.8': {'default': 'Not Configured'}, '192.0.255.7': {'default': 'Not Configured'}} | | DC1-SPINE1 | BGP | VerifyBGPPeerCount | Verifies the count of BGP peers. | - | failure | Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Not Configured', 'default': 'Expected: 3, Actual: 4'}}, {'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'DEV': 'Not Configured'}}, {'afi': 'evpn', 'vrfs': {'default': 'Expected: 2, Actual: 4'}}] | -| DC1-SPINE1 | Software | VerifyEOSVersion | Verifies the EOS version of the device. | - | failure | device is running version "4.31.1F-34554157.4311F (engineering build)" not in expected versions: ['4.25.4M', '4.26.1F'] | -| DC1-SPINE1 | Services | VerifyHostname | Verifies the hostname of a device. | - | failure | Expected 's1-spine1' as the hostname, but found 'DC1-SPINE1' instead. | -| DC1-SPINE1 | Interfaces | VerifyInterfaceUtilization | Verifies that the utilization of interfaces is below a certain threshold. | - | success | - | | DC1-SPINE1 | Connectivity | VerifyLLDPNeighbors | Verifies that the provided LLDP neighbors are connected properly. | - | failure | Wrong LLDP neighbor(s) on port(s): Ethernet1 DC1-LEAF1A_Ethernet1 Ethernet2 DC1-LEAF1B_Ethernet1 Port(s) not configured: Ethernet7 | -| DC1-SPINE1 | MLAG | VerifyMlagStatus | Verifies the health status of the MLAG configuration. | - | skipped | MLAG is disabled | -| DC1-SPINE1 | System | VerifyNTP | Verifies if NTP is synchronised. | - | failure | The device is not synchronized with the configured NTP server(s): 'NTP is disabled.' | | DC1-SPINE1 | Connectivity | VerifyReachability | Test the network reachability to one or many destination IP(s). | - | error | ping vrf MGMT 1.1.1.1 source Management1 repeat 2 has failed: No source interface Management1 | +| DC1-SPINE1 | Interfaces | VerifyInterfaceUtilization | Verifies that the utilization of interfaces is below a certain threshold. | - | success | - | +| DC1-SPINE1 | MLAG | VerifyMlagStatus | Verifies the health status of the MLAG configuration. | - | skipped | MLAG is disabled | | DC1-SPINE1 | Routing | VerifyRoutingTableEntry | Verifies that the provided routes are present in the routing table of a specified VRF. | - | failure | The following route(s) are missing from the routing table of VRF default: ['10.1.0.2'] | -| DC1-SPINE1 | STP | VerifySTPMode | Verifies the configured STP mode for a provided list of VLAN(s). | - | failure | STP mode 'rapidPvst' not configured for the following VLAN(s): [10, 20] | | DC1-SPINE1 | SNMP | VerifySnmpStatus | Verifies if the SNMP agent is enabled. | - | failure | SNMP agent disabled in vrf default | -| DC1-SPINE1 | AAA | VerifyTacacsSourceIntf | Verifies TACACS source-interface for a specified VRF. | - | failure | Source-interface Management0 is not configured in VRF default | +| DC1-SPINE1 | STP | VerifySTPMode | Verifies the configured STP mode for a provided list of VLAN(s). | - | failure | STP mode 'rapidPvst' not configured for the following VLAN(s): [10, 20] | | DC1-SPINE1 | Security | VerifyTelnetStatus | Verifies if Telnet is disabled in the default VRF. | - | success | - | +| DC1-SPINE1 | Services | VerifyHostname | Verifies the hostname of a device. | - | failure | Expected 's1-spine1' as the hostname, but found 'DC1-SPINE1' instead. | +| DC1-SPINE1 | Software | VerifyEOSVersion | Verifies the EOS version of the device. 
| - | failure | device is running version "4.31.1F-34554157.4311F (engineering build)" not in expected versions: ['4.25.4M', '4.26.1F'] | +| DC1-SPINE1 | System | VerifyNTP | Verifies if NTP is synchronised. | - | failure | The device is not synchronized with the configured NTP server(s): 'NTP is disabled.' | | DC1-SPINE1 | VXLAN | VerifyVxlan1Interface | Verifies the Vxlan1 interface status. | - | skipped | Vxlan1 interface is not configured | diff --git a/tests/units/result_manager/test__init__.py b/tests/units/result_manager/test__init__.py index e41a436f6..fe8fba888 100644 --- a/tests/units/result_manager/test__init__.py +++ b/tests/units/result_manager/test__init__.py @@ -86,13 +86,14 @@ def test_sorted_category_stats(self, list_result_factory: Callable[[int], list[T result_manager.results = results - # Check the current categories order - expected_order = ["ospf", "bgp", "vxlan", "system"] + # Check that category_stats returns sorted order by default + expected_order = ["bgp", "ospf", "system", "vxlan"] assert list(result_manager.category_stats.keys()) == expected_order - # Check the sorted categories order - expected_order = ["bgp", "ospf", "system", "vxlan"] - assert list(result_manager.sorted_category_stats.keys()) == expected_order + # Verify deprecation warning for sorted_category_stats + with pytest.warns(DeprecationWarning, match="sorted_category_stats is deprecated and will be removed in ANTA v2.0.0"): + deprecated_stats = result_manager.sorted_category_stats + assert list(deprecated_stats.keys()) == expected_order @pytest.mark.parametrize( ("starting_status", "test_status", "expected_status", "expected_raise"), @@ -465,7 +466,6 @@ def test_stats_property_computation(self, test_result_factory: Callable[[], Test with caplog.at_level(logging.INFO): _ = result_manager.category_stats _ = result_manager.test_stats - _ = result_manager.sorted_category_stats assert "Computing statistics" not in caplog.text # Add another result - should mark stats as unsynced @@ -480,3 +480,68 @@ def test_stats_property_computation(self, test_result_factory: Callable[[], Test _ = result_manager.device_stats assert "Computing statistics for all results" in caplog.text assert result_manager._stats_in_sync is True + + def test_sort(self, test_result_factory: Callable[[], TestResult]) -> None: + """Test ResultManager.sort method.""" + result_manager = ResultManager() + + # Add specific test results for predictable sorting + test1 = test_result_factory() + test1.name = "Device3" + test1.result = AntaTestStatus.SUCCESS + test1.test = "Test1" + test1.categories = ["VXLAN", "networking"] + + test2 = test_result_factory() + test2.name = "Device1" + test2.result = AntaTestStatus.FAILURE + test2.test = "Test2" + test2.categories = ["BGP", "routing"] + + test3 = test_result_factory() + test3.name = "Device2" + test3.result = AntaTestStatus.ERROR + test3.test = "Test3" + test3.categories = ["system", "hardware"] + + result_manager.results = [test1, test2, test3] + + # Sort by result and check order + sorted_manager = result_manager.sort(["result"]) + assert [r.result for r in sorted_manager.results] == ["error", "failure", "success"] + + # Sort by device name + sorted_manager = result_manager.sort(["name"]) + assert [r.name for r in sorted_manager.results] == ["Device1", "Device2", "Device3"] + + # Sort by categories (which are lists) + sorted_manager = result_manager.sort(["categories"]) + results = sorted_manager.results + + # Python sorts lists by first element, so order should be: + # BGP < VXLAN < system + 
assert results[0].categories == ["BGP", "routing"] + assert results[1].categories == ["VXLAN", "networking"] + assert results[2].categories == ["system", "hardware"] + + # Test multiple sort fields + sorted_manager = result_manager.sort(["result", "test"]) + results = sorted_manager.results + assert results[0].result == "error" + assert results[0].test == "Test3" + assert results[1].result == "failure" + assert results[1].test == "Test2" + assert results[2].result == "success" + assert results[2].test == "Test1" + + # Test invalid sort field + with pytest.raises( + ValueError, + match=re.escape( + "Invalid sort_by fields: ['bad_field']. Accepted fields are: ['name', 'test', 'categories', 'description', 'result', 'messages', 'custom_field']", + ), + ): + result_manager.sort(["bad_field"]) + + # Verify the method is chainable + assert isinstance(result_manager.sort(["name"]), ResultManager) From 365f978918be1689a63b046daec4c719f53f4b11 Mon Sep 17 00:00:00 2001 From: Carl Baillargeon Date: Wed, 1 Jan 2025 10:20:19 -0500 Subject: [PATCH 2/3] Add sorting in unit test --- tests/units/reporter/test_md_reporter.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/units/reporter/test_md_reporter.py b/tests/units/reporter/test_md_reporter.py index c0676bb62..f5d2423ec 100644 --- a/tests/units/reporter/test_md_reporter.py +++ b/tests/units/reporter/test_md_reporter.py @@ -1,11 +1,11 @@ -# Copyright (c) 2023-2024 Arista Networks, Inc. +# Copyright (c) 2023-2025 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Test anta.reporter.md_reporter.py.""" from __future__ import annotations -from io import BytesIO, TextIOWrapper +from io import StringIO from pathlib import Path import pytest @@ -22,7 +22,7 @@ def test_md_report_generate(tmp_path: Path, result_manager: ResultManager) -> No expected_report = "test_md_report.md" # Generate the Markdown report - MDReportGenerator.generate(result_manager, md_filename) + MDReportGenerator.generate(result_manager.sort(sort_by=["name", "categories", "test"]), md_filename) assert md_filename.exists() # Load the existing Markdown report to compare with the generated one @@ -46,7 +46,7 @@ def generate_section(self) -> None: results = ResultManager() - with TextIOWrapper(BytesIO(b"1 2 3")) as mock_file: + with StringIO() as mock_file: report = FakeMDReportBase(mock_file, results) assert report.generate_heading_name() == "Fake MD Report Base" From 50da83ac3f9f7c106b19e0ce2fcf8c39114702d0 Mon Sep 17 00:00:00 2001 From: Carl Baillargeon Date: Thu, 2 Jan 2025 10:59:37 -0500 Subject: [PATCH 3/3] Fix per review comments --- anta/reporter/csv_reporter.py | 3 +- tests/units/result_manager/test__init__.py | 82 ++++++++++++++-------- 2 files changed, 54 insertions(+), 31 deletions(-) diff --git a/anta/reporter/csv_reporter.py b/anta/reporter/csv_reporter.py index 51b3bbb7d..2a0a4de2c 100644 --- a/anta/reporter/csv_reporter.py +++ b/anta/reporter/csv_reporter.py @@ -8,6 +8,7 @@ import csv import logging +import os from dataclasses import dataclass from typing import TYPE_CHECKING @@ -111,7 +112,7 @@ def generate(cls, results: ResultManager, csv_filename: pathlib.Path) -> None: csvwriter = csv.writer( csvfile, delimiter=",", - lineterminator="\n", + lineterminator=os.linesep, ) csvwriter.writerow(headers) for entry in results.results: diff --git a/tests/units/result_manager/test__init__.py b/tests/units/result_manager/test__init__.py index eb4036f9e..c84e39b9a 100644 --- 
a/tests/units/result_manager/test__init__.py +++ b/tests/units/result_manager/test__init__.py @@ -20,6 +20,7 @@ from anta.result_manager.models import TestResult +# pylint: disable=too-many-public-methods class TestResultManager: """Test ResultManager class.""" @@ -481,60 +482,79 @@ def test_stats_property_computation(self, test_result_factory: Callable[[], Test assert "Computing statistics for all results" in caplog.text assert result_manager._stats_in_sync is True - def test_sort(self, test_result_factory: Callable[[], TestResult]) -> None: - """Test ResultManager.sort method.""" + def test_sort_by_result(self, test_result_factory: Callable[[], TestResult]) -> None: + """Test sorting by result.""" result_manager = ResultManager() - - # Add specific test results for predictable sorting test1 = test_result_factory() - test1.name = "Device3" test1.result = AntaTestStatus.SUCCESS - test1.test = "Test1" - test1.categories = ["VXLAN", "networking"] - test2 = test_result_factory() - test2.name = "Device1" test2.result = AntaTestStatus.FAILURE - test2.test = "Test2" - test2.categories = ["BGP", "routing"] - test3 = test_result_factory() - test3.name = "Device2" test3.result = AntaTestStatus.ERROR - test3.test = "Test3" - test3.categories = ["system", "hardware"] result_manager.results = [test1, test2, test3] - - # Sort by result and check order sorted_manager = result_manager.sort(["result"]) assert [r.result for r in sorted_manager.results] == ["error", "failure", "success"] - # Sort by device name + def test_sort_by_name(self, test_result_factory: Callable[[], TestResult]) -> None: + """Test sorting by name.""" + result_manager = ResultManager() + test1 = test_result_factory() + test1.name = "Device3" + test2 = test_result_factory() + test2.name = "Device1" + test3 = test_result_factory() + test3.name = "Device2" + + result_manager.results = [test1, test2, test3] sorted_manager = result_manager.sort(["name"]) assert [r.name for r in sorted_manager.results] == ["Device1", "Device2", "Device3"] - # Sort by categories (which are lists) + def test_sort_by_categories(self, test_result_factory: Callable[[], TestResult]) -> None: + """Test sorting by categories.""" + result_manager = ResultManager() + test1 = test_result_factory() + test1.categories = ["VXLAN", "networking"] + test2 = test_result_factory() + test2.categories = ["BGP", "routing"] + test3 = test_result_factory() + test3.categories = ["system", "hardware"] + + result_manager.results = [test1, test2, test3] sorted_manager = result_manager.sort(["categories"]) results = sorted_manager.results - # Python sorts lists by first element, so order should be: - # BGP < VXLAN < system assert results[0].categories == ["BGP", "routing"] assert results[1].categories == ["VXLAN", "networking"] assert results[2].categories == ["system", "hardware"] - # Test multiple sort fields + def test_sort_multiple_fields(self, test_result_factory: Callable[[], TestResult]) -> None: + """Test sorting by multiple fields.""" + result_manager = ResultManager() + test1 = test_result_factory() + test1.result = AntaTestStatus.ERROR + test1.test = "Test3" + test2 = test_result_factory() + test2.result = AntaTestStatus.ERROR + test2.test = "Test1" + test3 = test_result_factory() + test3.result = AntaTestStatus.FAILURE + test3.test = "Test2" + + result_manager.results = [test1, test2, test3] sorted_manager = result_manager.sort(["result", "test"]) results = sorted_manager.results - assert results[0].result == "error" - assert results[0].test == "Test3" - assert 
results[1].result == "failure" - assert results[1].test == "Test2" - assert results[2].result == "success" - assert results[2].test == "Test1" - # Test invalid sort field + assert results[0].result == "error" + assert results[0].test == "Test1" + assert results[1].result == "error" + assert results[1].test == "Test3" + assert results[2].result == "failure" + assert results[2].test == "Test2" + + def test_sort_invalid_field(self) -> None: + """Test that sort method raises ValueError for invalid sort_by fields.""" + result_manager = ResultManager() with pytest.raises( ValueError, match=re.escape( @@ -543,5 +563,7 @@ def test_sort(self, test_result_factory: Callable[[], TestResult]) -> None: ): result_manager.sort(["bad_field"]) - # Verify the method is chainable + def test_sort_is_chainable(self) -> None: + """Test that the sort method is chainable.""" + result_manager = ResultManager() assert isinstance(result_manager.sort(["name"]), ResultManager)