From be1968e88be6ecd37a6d4315d664c27b3dab798e Mon Sep 17 00:00:00 2001 From: Adrian Montagu <168636369+AdrianMontaguSmartDCSIT@users.noreply.github.com> Date: Tue, 28 May 2024 13:48:54 +0100 Subject: [PATCH] move reporting related pv code within test harness to pv folder (#44) * move reporting related pv code to pv folder * update fixtures and fix linting * Delete test_harness/log_file_store/error_log_d8dce7ed-5684-4937-90b8-9558e8647422.txt * remove commented out code from test harness conftest * fix linting * add debug print to check GitHub actions workflow * update mock_start in test_send_events * add mock_del --- .../protocol_verifier/pvperformanceresults.py | 4 +- .../protocol_verifier/reporting/__init__.py | 67 ++ .../reporting/log_analyser.py | 0 .../reporting/report_results.py | 581 ++++++++++++++++ test_harness/protocol_verifier/test_utils.py | 4 +- .../protocol_verifier/tests/conftest.py | 4 + .../tests/reporting/conftest.py | 628 +++++++++++++++++ .../tests}/reporting/test_log_analyser.py | 2 +- .../tests}/reporting/test_report_results.py | 2 +- .../tests/test_send_events.py | 2 + test_harness/reporting/__init__.py | 68 -- test_harness/reporting/report_results.py | 638 +----------------- tests/test_harness/conftest.py | 16 - tests/test_harness/reporting/conftest.py | 292 +------- 14 files changed, 1315 insertions(+), 993 deletions(-) create mode 100644 test_harness/protocol_verifier/reporting/__init__.py rename test_harness/{ => protocol_verifier}/reporting/log_analyser.py (100%) create mode 100644 test_harness/protocol_verifier/reporting/report_results.py create mode 100644 test_harness/protocol_verifier/tests/reporting/conftest.py rename {tests/test_harness => test_harness/protocol_verifier/tests}/reporting/test_log_analyser.py (99%) rename {tests/test_harness => test_harness/protocol_verifier/tests}/reporting/test_report_results.py (98%) diff --git a/test_harness/protocol_verifier/pvperformanceresults.py b/test_harness/protocol_verifier/pvperformanceresults.py index 0168b06..310cd9e 100644 --- a/test_harness/protocol_verifier/pvperformanceresults.py +++ b/test_harness/protocol_verifier/pvperformanceresults.py @@ -18,7 +18,9 @@ from prometheus_client.parser import text_fd_to_metric_families from pygrok import Grok -from test_harness.reporting.log_analyser import yield_grok_metrics_from_files +from test_harness.protocol_verifier.reporting.log_analyser import ( + yield_grok_metrics_from_files +) from test_harness.results.aggregation import ( AggregationBin, AggregationCount, diff --git a/test_harness/protocol_verifier/reporting/__init__.py b/test_harness/protocol_verifier/reporting/__init__.py new file mode 100644 index 0000000..c376756 --- /dev/null +++ b/test_harness/protocol_verifier/reporting/__init__.py @@ -0,0 +1,67 @@ +"""__init__ file. 
Contains methods to create and save report files""" + +from pandas import DataFrame + +from test_harness.protocol_verifier.reporting.log_analyser import ( + logs_validity_df_to_results +) +from test_harness.protocol_verifier.reporting.report_results import ( + get_report_files_mapping_from_dataframe_report, +) +from test_harness.reporting.report_delivery import deliver_test_report_files + + +def create_and_save_report_files( + log_string: str, + validity_df: DataFrame, + test_name: str, + output_directory_path: str, +) -> None: + """Method to create report files from logs and validity dataframe and save + report files with a prefix in an output directory + + :param log_string: String representing the log files + :type log_string: `str` + :param validity_df: :class:`DataFrame` holding the information on the test + files + :type validity_df: :class:`DataFrame` + :param test_name: The test name (or prefix) to give to the report files + :type test_name: `str` + :param output_directory_path: The path of the output directory to store + the results + :type output_directory_path: `str` + """ + report_files_mapping = create_report_files( + log_string=log_string, validity_df=validity_df, test_name=test_name + ) + deliver_test_report_files( + report_files_mapping=report_files_mapping, + output_directory=output_directory_path, + ) + + +def create_report_files( + log_string: str, + validity_df: DataFrame, + test_name: str, + event_id_job_id_map: dict[str, str] | None = None, +) -> dict[str, str | DataFrame]: + """Method to create report files from logs and validity dataframe + + :param log_string: String representing the log files + :type log_string: `str` + :param validity_df: :class:`DataFrame` holding the information on the test + files + :type validity_df: :class:`DataFrame` + :param test_name: The test name (or prefix) to give to the report files + :type test_name: `str` + """ + results_df = logs_validity_df_to_results( + log_string=log_string, + validity_df=validity_df, + event_id_job_id_map=event_id_job_id_map, + ) + report_files_mapping = get_report_files_mapping_from_dataframe_report( + results_df=results_df, results_prefix=test_name + ) + return report_files_mapping diff --git a/test_harness/reporting/log_analyser.py b/test_harness/protocol_verifier/reporting/log_analyser.py similarity index 100% rename from test_harness/reporting/log_analyser.py rename to test_harness/protocol_verifier/reporting/log_analyser.py diff --git a/test_harness/protocol_verifier/reporting/report_results.py b/test_harness/protocol_verifier/reporting/report_results.py new file mode 100644 index 0000000..c744db5 --- /dev/null +++ b/test_harness/protocol_verifier/reporting/report_results.py @@ -0,0 +1,581 @@ +"""Methods to create junit report into html""" + +from __future__ import annotations +import pandas as pd +from typing import Any, Literal +from junit2htmlreport.parser import Junit + +from test_harness.reporting.report_results import TestPrint + + +class PerformanceTestCase(TestPrint): + """Sub class of :class:`TestPrint` to create xml + Test cases for a peformance test + + :param name: The name of the test + :type name: `str` + :param results: The given results: + * 'num_tests' + * 'num_failures' + * 'num_errors' + :type results: `dict`[`str`, `int`] + """ + + def __init__(self, name: str, results: dict[str, int]) -> None: + """Constructor method""" + super().__init__(name=name) + self.results = results + self.result = self.calc_result() + + def count_tests(self) -> tuple[int, int, int]: + """Method to count the 
number of tests failures and errors + + :return: Returns the tests failures and errors + :rtype: tuple[`int`, `int`, `int`] + """ + return ( + self.results["num_tests"], + self.results["num_failures"], + self.results["num_errors"], + ) + + def print_case(self, indent: int = 4, level: int = 0) -> str: + """Method to print the test case + + :param indent: Indent used in the output, defaults to `4` + :type indent: `int`, optional + :param level: The level of indent the test case is on, defaults to `0` + :type level: `int`, optional + :return: Returns the string representation of the test case + :rtype: `str` + """ + print_string = "" + indent_string = self.create_indent_string(indent * level) + print_string += indent_string + print_string += self.create_tag_start() + if self.result == "Pass": + print_string += " />" + return print_string + # anything other than pass + next_level = level + 1 + next_level_indent = self.create_indent_string(indent * next_level) + print_string += ">\n" + next_level_indent + failure_indent = self.create_indent_string(indent * (next_level + 1)) + if self.result == "Fail": + print_string += ( + '\n' + ) + print_string += failure_indent + ( + f"{self.results['num_failures']}" + f" of {self.results['num_tests']} events failed to be" + " processed " + "correctly by the PV\n" + ) + print_string += next_level_indent + ("\n") + else: + print_string += ( + '\n' + ) + print_string += failure_indent + ( + f"{self.results['num_errors']}" + f" of {self.results['num_tests']} events failed to be sent " + "correctly by the Test Harness\n" + ) + print_string += next_level_indent + "\n" + print_string += indent_string + "" + return print_string + + def calc_result(self) -> Literal["Pass", "Error", "Fail"]: + """Method to calculate the result of the test given the results + + :return: Returns: + * "Pass" + * "Error" + * "Fail" + :rtype: :class:`Literal`[`'Pass'`, `'Error'`, `'Fail'`] + """ + if self.results["num_failures"] + self.results["num_errors"] == 0: + return "Pass" + if self.results["num_errors"] > 0: + return "Error" + return "Fail" + + def create_tag_start(self) -> str: + """Method to create the starting tag for the string representation of + the test case + + :raises RuntimeError: Raises a :class:`RuntimeError` is there is no + parent + :return: Returns the test case starting tag + :rtype: `str` + """ + if not self.parent: + raise RuntimeError("Cannot create tag without parent") + return f' None: + """Constructor method""" + super().__init__(name) + self.result = result + self.pv_failure_reason = None + self.file_name = file_name + + @property + def pv_failure_reason(self) -> list | None: + """Property for protocol verifier failure reasons + + :return: Returns the list of failure reasons or `None` if there aren't + any + :rtype: `list` | `None` + """ + return self._pv_failure_reason + + @pv_failure_reason.setter + def pv_failure_reason(self, reasons: list[Any] | None) -> None: + """Setter for the property `pv_failure_reason` + + :param reasons: The list of reasons. 
Filters out non string values + :type reasons: `list`[`Any`] | `None` + """ + if not reasons: + self._pv_failure_reason = None + else: + self._pv_failure_reason = list( + set( + reason + ";\n" + for reason in reasons + if isinstance(reason, str) + ) + ) + + def print_case(self, indent: int = 4, level: int = 0) -> str: + """Method to print the test case + + :param indent: Indent used in the output, defaults to `4` + :type indent: `int`, optional + :param level: The level of indent the test case is on, defaults to `0` + :type level: `int`, optional + :return: Returns the string representation of the test case + :rtype: `str` + """ + print_string = "" + indent_string = self.create_indent_string(indent * level) + print_string += indent_string + print_string += self.create_tag_start() + if self.result == "Pass": + print_string += " />" + return print_string + # anything other than pass + next_level = level + 1 + next_level_indent = self.create_indent_string(indent * next_level) + print_string += ">\n" + next_level_indent + failure_indent = self.create_indent_string(indent * (next_level + 1)) + if self.result == "Fail": + print_string += ( + '\n' + ) + if self.pv_failure_reason: + print_string += failure_indent + ( + "PV Result was a fail when sequence is valid. PV failure " + "reasons below:\n" + ) + print_string += "".join( + self.create_indent_string(indent * (next_level + 2)) + + reason + for reason in self.pv_failure_reason + ) + else: + print_string += failure_indent + ( + "PV Result was a success when sequence is invalid\n" + ) + print_string += next_level_indent + ("\n") + elif self.result == ( + "Inconclusive|No SVDC Success|No Notification Failure" + ): + print_string += ( + '\n' + ) + print_string += failure_indent + ( + "Time out was allowed but " + "Protocol Verifier showed no success or failure messages\n" + ) + print_string += next_level_indent + "\n" + else: + print_string += ( + '\n' + ) + print_string += failure_indent + ( + "Protocol Verifier showed success and failure messages. 
PV " + "Failure reasons below:\n" + ) + if self.pv_failure_reason: + print_string += "".join( + self.create_indent_string(indent * (next_level + 2)) + + reason + for reason in self.pv_failure_reason + ) + print_string += next_level_indent + "\n" + print_string += indent_string + "" + return print_string + + def create_tag_start(self) -> str: + """Method to create the starting tag for the string representation of + the test case + + :raises RuntimeError: Raises a :class:`RuntimeError` is there is no + parent + :return: Returns the test case starting tag + :rtype: `str` + """ + if not self.parent: + raise RuntimeError("Cannot create tag without parent") + return f' tuple[int, int, int]: + """Method to count the number of tests failures and errors + + :return: Returns the tests failures and errors + :rtype: tuple[`int`, `int`, `int`] + """ + tests, failures, errors = 1, 0, 0 + if self.result == "Fail": + failures = 1 + elif "Inconclusive" in self.result: + errors = 1 + return tests, failures, errors + + +class TestSuite(TestPrint): + """Class to hold information and children of a xml test suite + Subclass of :class:`TestPrint` + """ + + def __init__( + self, + name: str, + is_suites: bool = False, + properties: dict[str, Any] | None = None, + ) -> None: + """Constructor method""" + super().__init__(name) + self.children: list[TestCase | TestSuite] = [] + self.is_suites = is_suites + self.properties = properties + + def add_child(self, child: TestCase | TestSuite) -> None: + """Method to adda child to the instancees children + + :param child: A child test suite or test case + :type child: :class:`TestCase` | :class:`TestSuite` + """ + self.children.append(child) + child.parent = self + + def add_children(self, children: list[TestCase | TestSuite]) -> None: + """Method to add multiple children to the test case + + :param children: A list of children + :type children: `list`[:class:`TestCase` | :class:`TestSuite`] + """ + for child in children: + self.add_child(child) + + def print_case(self, indent: int = 4, level: int = 0) -> str: + """Method to provide a string representation of the instance + + :param indent: Indent used in the output, defaults to `4` + :type indent: `int`, optional + :param level: The level of indent the test case is on, defaults to `0` + :type level: `int`, optional + :return: Returns the string representation of the test suite + :rtype: `str` + """ + print_string = "" + indent_string = self.create_indent_string(indent * level) + print_string += indent_string + print_string += self.create_tag() + if self.properties: + print_string += self.create_properties_string( + indent=indent, level=level + 1 + ) + print_string += "\n".join( + child.print_case(indent=indent, level=level + 1) + for child in self.children + ) + print_string += ( + f"\n{indent_string}" f'' + ) + return print_string + + def create_tag(self) -> str: + """MEthod to create the starting tag for the test suite + + :return: Returns the xml testsuite starting tag + :rtype: `str` + """ + tests, failures, errors = self.count_tests() + tag = ( + f'\n' + ) + return tag + + def count_tests(self) -> tuple[int, int, int]: + """Method to count test, failure and error numbers + + :return: Returns a tuple of counts for test, failure and error numbers + :rtype: `tuple`[`int`, `int`, `int`] + """ + tests = 0 + failures = 0 + errors = 0 + for child in self.children: + child_tests, child_failures, child_errors = child.count_tests() + tests += child_tests + failures += child_failures + errors += child_errors + return tests, 
failures, errors + + def create_properties_string(self, indent: int = 4, level: int = 0) -> str: + """Method to provide a string representation of the properties + + :param indent: Indent used in the output, defaults to `4` + :type indent: `int`, optional + :param level: The level of indent the test case is on, defaults to `0` + :type level: `int`, optional + :return: Returns the string representation of the test suite + :rtype: `str` + """ + indent_string = self.create_indent_string(indent * level) + sub_indent_string = self.create_indent_string(indent * (level + 1)) + properties_string = indent_string + "\n" + properties_string += "".join( + sub_indent_string + f'\n' + for name, value in self.properties.items() + ) + properties_string += indent_string + "\n" + return properties_string + + +def generate_performance_test_reports( + results: dict[str, int], properties: dict[str, Any] | None = None +) -> tuple[str, str]: + """Method to generate perfromance test: + * xml junit report + * html report based off the xml + + :param results: The results of the test + :type results: `dict`[`str`, `int`] + :param properties: Extra properties to be written as results, defaults to + `None` + :type properties: `dict`[`str`, `Any`] | `None`, optional + :return: Returns a tuple of: + * html report string + * xml report string + :rtype: `tuple`[`str`, `str`] + """ + suites = TestSuite( + name="Performance tests run", + is_suites=True, + ) + if properties is None: + properties = {} + suite = TestSuite( + name="Performance test run", properties={**results, **properties} + ) + suite.add_child(PerformanceTestCase("Run Result", results=results)) + suites.add_child(suite) + xml_string = '\n' + xml_string += suites.print_case() + report = Junit(xmlstring=xml_string) + html_string = report.html() + return html_string, xml_string + + +def generate_html_report_string( + results_df: pd.DataFrame, fields: list[str], field_depth: int = 0 +) -> tuple[str, str]: + """Method to generate an html report string from a results dataframe and + its junit xml it was generated from + + :param results_df: Dataframe containing test results + :type results_df: :class:`pd`.`DataFrame` + :param fields: The list of fields with which to group tests into + :type fields: `list`[`str`] + :param field_depth: The depth of the fields list with which to create + nested test suite, defaults to `0` + :type field_depth: `int`, optional + :return: Returns a generated html report and the junit xml it was + generated from + :rtype: `tuple`[`str`, `str`] + """ + xml_string = generate_junit_xml( + results_df=results_df, fields=fields, field_depth=field_depth + ) + report = Junit(xmlstring=xml_string) + html_string = report.html() + return html_string, xml_string + + +def generate_junit_xml( + results_df: pd.DataFrame, fields: list[str], field_depth: int = 0 +) -> str: + """Method to generate a a junit xml string from a results dataframe + + :param results_df: DataFrame of results + :type results_df: :class:`pd`.`DataFrame` + :param fields: The list of fields with which to group tests into + :type fields: `list`[`str`] + :param field_depth: The depth of the fields list with which to create + nested test suites. 
`field_depth = 0` represents creating nested suits for + all fields and `field_depth = len(fields)` would be no nesting of test + cases, defaults to `0` + :type field_depth: `int`, optional + :return: Returns a xml string representation of the results + :rtype: `str` + """ + suites = TestSuite(name="Tests Run", is_suites=True) + children = get_test_suites_from_results_dataframe( + results_df=results_df, fields=fields, nth_field=field_depth + ) + suites.add_children(children) + junit_string = '\n' + junit_string += suites.print_case() + return junit_string + + +def get_test_suites_from_results_dataframe( + results_df: pd.DataFrame, fields: list[str], nth_field: int = 0 +) -> list[TestSuite | TestCase]: + """Method to obtain test suites and test cases from results dataframe + + :param results_df: Dataframe of results + :type results_df: :class:`pd`.`DataFrame` + :param fields: The fields with which to categorise the results + :type fields: `list`[`str`] + :param nth_field: Integer to indicate at what index of fields list to + begin, defaults to `0` + :type nthe_field: `int`, optional + :return: Returns a list of :class:`TestSuite`'s or :class:`TestCase`'s + :rtype: `list`[:class:`TestSuite` | :class:`TestCase`] + """ + nth_field += 1 + children: list[TestSuite | TestCase] = [] + if nth_field <= len(fields): + for key, idx in results_df.groupby(fields[:nth_field]).groups.items(): + if isinstance(key, tuple): + name = ".".join(str(col_val) for col_val in key) + else: + name = str(key) + child = TestSuite(name=name) + child_children = get_test_suites_from_results_dataframe( + results_df=results_df.loc[idx], + fields=fields, + nth_field=nth_field, + ) + child.add_children(child_children) + children.append(child) + else: + for idx, row in results_df.iterrows(): + child = TestCase( + name=( + f"JobId={str(idx)}" + + ( + f", FileName={row['FileName']}" + if "FileName" in row + else "" + ) + ), + result=row["TestResult"], + file_name=row["FileName"] if "FileName" in row else None, + ) + if ( + (row["TestResult"] == "Fail" and row["Validity"]) + or ( + (row["TestResult"]) + == ("Inconclusive|SVDC Success|Notified Failure") + ) + or (row["TestResult"] == "Pass" and not row["Validity"]) + ): + if "FailureReason" in row: + child.pv_failure_reason = row["FailureReason"] + children.append(child) + return children + + +def generate_html_from_csv_report( + test_report_csv_path: str, html_report_file_path: str +) -> None: + """Method to generate and html file from csv report + + :param test_report_csv_path: The path to the csv report + :type test_report_csv_path: `str` + :param html_report_file_path: The output path of the html report + :type html_report_file_path: str + """ + results_df = pd.read_csv(test_report_csv_path, index_col="JobId") + html_string, _ = generate_html_report_string( + results_df=results_df, + fields=["SequenceName", "Validity", "Category"], + field_depth=2, + ) + with open(html_report_file_path, "w", encoding="utf-8") as file: + file.write(html_string) + + +def get_report_files_mapping_from_dataframe_report( + results_df: pd.DataFrame, results_prefix: str +) -> dict[str, str | pd.DataFrame]: + """Method to get report files mapping from a results dataframe and a + prefix for the tests + + :param results_df: :class:`pd`.`DataFrame` of results + :type results_df: :class:`pd`.`DataFrame` + :param results_prefix: The prefix for the results file names + :type results_prefix: `str` + :return: Returns a dictionary mapping file name to file + :rtype: `dict`[`str`, `str` | 
:class:`pd`.`DataFrame`] + """ + html_string, xml_string = generate_html_report_string( + results_df=results_df, + fields=["SequenceName", "Validity", "Category"], + field_depth=2, + ) + return { + f"{results_prefix}.html": html_string, + f"{results_prefix}.xml": xml_string, + f"{results_prefix}.csv": results_df, + } + + +if __name__ == "__main__": + import sys + + args = sys.argv[1:] + generate_html_from_csv_report(args[0], args[1]) diff --git a/test_harness/protocol_verifier/test_utils.py b/test_harness/protocol_verifier/test_utils.py index 0479de9..7d40623 100644 --- a/test_harness/protocol_verifier/test_utils.py +++ b/test_harness/protocol_verifier/test_utils.py @@ -57,8 +57,8 @@ ) from test_harness.message_buses.message_buses import get_producer_context from test_harness.reporting.report_delivery import deliver_test_report_files -from test_harness.reporting import create_report_files -from test_harness.reporting.report_results import ( +from test_harness.protocol_verifier.reporting import create_report_files +from test_harness.protocol_verifier.reporting.report_results import ( generate_performance_test_reports, ) from test_harness.requests_th import send_get_request diff --git a/test_harness/protocol_verifier/tests/conftest.py b/test_harness/protocol_verifier/tests/conftest.py index 71aabc7..0612288 100644 --- a/test_harness/protocol_verifier/tests/conftest.py +++ b/test_harness/protocol_verifier/tests/conftest.py @@ -295,9 +295,13 @@ def mock_stop(*agrs, **kwargs): action_list.append("stop") return None + def mock_del(*args, **kwargs): + pass + monkeypatch.setattr(kafka3.KafkaProducer, "send", mock_send) monkeypatch.setattr(kafka3.KafkaProducer, "__init__", mock_start) monkeypatch.setattr(kafka3.KafkaProducer, "close", mock_stop) + monkeypatch.setattr(kafka3.KafkaProducer, "__del__", mock_del) return action_list diff --git a/test_harness/protocol_verifier/tests/reporting/conftest.py b/test_harness/protocol_verifier/tests/reporting/conftest.py new file mode 100644 index 0000000..4550f4f --- /dev/null +++ b/test_harness/protocol_verifier/tests/reporting/conftest.py @@ -0,0 +1,628 @@ +# pylint: disable=W0621 +# pylint: disable=C0301 +"""Fixtures for reporting tests""" +import pytest +import pandas as pd +import numpy as np +from typing import Literal + + +@pytest.fixture +def validity_df() -> pd.DataFrame: + """Fixture provide a dataframe with validity and job id amogsnt other + fields + + :return: Returns a dataframe of validity and job id + :rtype: :class:`pd`.`DataFrame` + """ + data = [ + ["job_1", "job_name", True, "ValidSols", "file_1"], + ["job_2", "job_name", True, "ValidSols", "file_2"], + ["job_3", "job_name", False, "StackedSols", "file_3"], + ["job_4", "job_name", False, "ANDConstraintBreak", "file_4"], + ["job_5", "job_name", False, "MissingEdges", "file_5"], + ] + validity = pd.DataFrame( + data, + columns=["JobId", "SequenceName", "Validity", "Category", "FileName"], + ) + validity.set_index("JobId", inplace=True) + return validity + + +@pytest.fixture +def validity_df_json_validity() -> pd.DataFrame: + """Fixture provide a dataframe with validity and job id amogsnt other + + :return: Returns a dataframe of validity and job id + :rtype: :class:`pd`.`DataFrame` + """ + data = [ + ["job_1", "job_name", True, "ValidJSON", "file_1"], + ["job_2", "job_name", True, "ValidJSON", "file_2"], + ["job_3", "job_name", False, "InvalidJSON", "file_3"], + ["job_4", "job_name", False, "InvalidJSON", "file_4"], + ] + validity = pd.DataFrame( + data, + columns=["JobId", "SequenceName", 
"Validity", "Category", "FileName"], + ) + validity.set_index("JobId", inplace=True) + return validity + + +@pytest.fixture +def pv_results_df() -> pd.DataFrame: + """Fixture to provide a mocked proctocol verifier results dataframe + + :return: Mocked PV results dataframe + :rtype: :class:`pd`.`DataFrame` + """ + data = [ + ["job_1", [np.nan], [True]], + ["job_2", ["It Failed"], [False]], + ["job_3", ["It Failed", "It Failed"], [False] * 2], + ["job_4", ["It Failed", np.nan], [False, True]], + ] + results_df = pd.DataFrame( + data=data, columns=["JobId", "FailureReason", "PVResult"] + ) + results_df.set_index("JobId", inplace=True) + return results_df + + +@pytest.fixture +def expected_results() -> pd.DataFrame: + """Fixture providing dataframe of expected results + + :return: Returns dataframe of expected results + :rtype: :class:`pd`.`DataFrame` + """ + data = [ + [ + "job_1", + "job_name", + True, + "ValidSols", + "file_1", + [np.nan], + [True], + "Pass", + ], + [ + "job_2", + "job_name", + True, + "ValidSols", + "file_2", + ["It Failed"], + [False], + "Fail", + ], + [ + "job_3", + "job_name", + False, + "StackedSols", + "file_3", + ["It Failed"] * 2, + [False, False], + "Pass", + ], + [ + "job_4", + "job_name", + False, + "ANDConstraintBreak", + "file_4", + ["It Failed", np.nan], + [False, True], + "Inconclusive|SVDC Success|Notified Failure", + ], + [ + "job_5", + "job_name", + False, + "MissingEdges", + "file_5", + np.nan, + np.nan, + "Inconclusive|No SVDC Success|No Notification Failure", + ], + ] + results_df = pd.DataFrame( + data, + columns=[ + "JobId", + "SequenceName", + "Validity", + "Category", + "FileName", + "FailureReason", + "PVResult", + "TestResult", + ], + ) + results_df.set_index("JobId", inplace=True) + return results_df + + +@pytest.fixture +def expected_junit_string() -> str: + """Fixture providing an expected J-Unit xml string from expected results + + :return: Returns a string representation of the xml file + :rtype: `str` + """ + return ( + '\n\n \n \n \n ' + " Protocol Verifier showed success and failure messages. PV" + " Failure reasons below:\n It Failed;\n " + " \n \n \n \n \n \n ' + " Time out was allowed but Protocol Verifier showed no" + " success or failure messages\n \n " + " \n \n \n \n \n ' + ' \n \n \n \n' + " PV Result was a fail when sequence is valid. 
PV" + " failure reasons below:\n It Failed;\n " + " \n \n \n" + ) + + +@pytest.fixture +def expected_html_string() -> str: + """Fixture providing an html string of expected results + + :return: Returns a string representation of the html file + :rtype: `str` + """ + return ( + '\n\n\n \n Test Results\n \n body {\n background-color: white;\n ' + " padding-bottom: 20em;\n margin: 0;\n min-height:" + " 15cm;\n}\n\nh1, h2, h3, h4, h5, h6, h7 {\n font-family:" + " sans-serif;\n}\n\nh1 {\n background-color: #007acc;\n color:" + " white;\n padding: 3mm;\n margin-top: 0;\n margin-bottom:" + " 1mm;\n}\n\n.footer {\n font-style: italic;\n font-size:" + " small;\n text-align: right;\n padding: 1em;\n}\n\n.testsuite" + " {\n padding-bottom: 2em;\n margin-left: 1em;\n}\n\n.proplist" + " {\n width: 100%;\n margin-bottom: 2em;\n border-collapse:" + " collapse;\n border: 1px solid grey;\n}\n\n.proplist th {\n " + " background-color: silver;\n width: 5em;\n padding: 2px;\n " + " padding-right: 1em;\n text-align: left;\n}\n\n.proplist td {\n " + " padding: 2px;\n}\n\n.index-table {\n width: 90%;\n " + " margin-left: 1em;\n}\n\n.index-table td {\n vertical-align:" + " top;\n width: 50%;\n}\n\n.failure-index {\n\n}\n\n.toc {\n " + " margin-bottom: 2em;\n font-family: monospace;\n}\n\n.stdio, pre" + " {\n min-height: 1em;\n background-color: #1e1e1e;\n color:" + " silver;\n padding: 0.5em;\n}\n.tdpre {\n background-color:" + " #1e1e1e;\n}\n\n.test {\n margin-left: 0.5cm;\n}\n\n.outcome {\n " + " border-left: 1em;\n padding: 2px;\n}\n\n.outcome-failed {\n " + " border-left: 1em solid lightcoral;\n}\n\n.outcome-passed {\n " + " border-left: 1em solid lightgreen;\n}\n\n.outcome-skipped {\n " + " border-left: 1em solid lightyellow;\n}\n\n.stats-table" + " {\n}\n\n.stats-table td {\n min-width: 4em;\n text-align:" + " right;\n}\n\n.stats-table .failed {\n background-color:" + " lightcoral;\n}\n\n.stats-table .passed {\n background-color:" + " lightgreen;\n}\n\n.matrix-table {\n table-layout: fixed;\n " + " border-spacing: 0;\n width: available;\n margin-left:" + " 1em;\n}\n\n.matrix-table td {\n vertical-align:" + " center;\n}\n\n.matrix-table td:last-child {\n width:" + " 0;\n}\n\n.matrix-table tr:hover {\n background-color:" + " yellow;\n}\n\n.matrix-axis-name {\n white-space: nowrap;\n " + " padding-right: 0.5em;\n border-left: 1px solid black;\n " + " border-top: 1px solid black;\n text-align:" + " right;\n}\n\n.matrix-axis-line {\n border-left: 1px solid" + " black;\n width: 0.5em;\n}\n\n.matrix-classname {\n text-align:" + " left;\n width: 100%;\n border-top: 2px solid grey;\n " + " border-bottom: 1px solid silver;\n}\n\n.matrix-casename {\n " + " text-align: left;\n font-weight: normal;\n font-style:" + " italic;\n padding-left: 1em;\n border-bottom: 1px solid" + " silver;\n}\n\n.matrix-result {\n display: block;\n width:" + " 1em;\n text-align: center;\n padding: 1mm;\n margin:" + " 0;\n}\n\n.matrix-result-combined {\n white-space: nowrap;\n " + " padding-right: 0.2em;\n text-align:" + " right;\n}\n\n.matrix-result-failed {\n background-color:" + " lightcoral;\n}\n\n.matrix-result-passed {\n background-color:" + " lightgreen;\n}\n\n.matrix-result-skipped {\n background-color:" + " lightyellow;\n}\n\n.matrix-even {\n background-color:" + " lightgray;\n}\n \n\n\n \n

[expected HTML report body: a "Test Report : Test Results" page listing the test suites job_name.False.ANDConstraintBreak, job_name.False.MissingEdges, job_name.False.StackedSols and job_name.True.ValidSols, each with its Results table (Duration 0.0 sec, Tests/Failures counts) and test cases JobId=job_1..job_5 with FileName=file_1..file_5, Passed/Failed outcomes, and the PV failure messages "Protocol Verifier showed success and failure messages. PV Failure reasons below: It Failed;", "Time out was allowed but Protocol Verifier showed no success or failure messages" and "PV Result was a fail when sequence is valid. PV failure reasons below: It Failed;"]
    \n \n " + ' \n \n \n \n\n\n\n\n\n" + ) + + +@pytest.fixture +def report_files_mapping( + expected_results: pd.DataFrame, + expected_junit_string: str, + expected_html_string: str, +) -> dict[str, str | pd.DataFrame]: + """Fixture providing a dictionary mapping file name to file + + :param expected_results: Fixture providing expected results dataframe + :type expected_results: :class:`pd`.`DataFrame` + :param expected_junit_string: Fixture providing expected junit string + :type expected_junit_string: `str` + :param expected_html_string: Fixture providing expected html string + :type expected_html_string: `str` + :return: Dictionary mapping filename to file + :rtype: `dict`[`str`, `str` | :class:`pd`.`DataFrame`] + """ + return { + "test.csv": expected_results, + "test.xml": expected_junit_string, + "test.html": expected_html_string, + } + + +@pytest.fixture +def pass_performance_results() -> dict[str, int]: + """Fixture to provide performance test results + + :return: The results + :rtype: `dict`[`str`, `int`] + """ + return {"num_tests": 5, "num_failures": 0, "num_errors": 0} + + +@pytest.fixture +def expected_performance_xml_pass() -> str: + """Expected xml for a pass + + :return: The xml string + :rtype: `str` + """ + return ( + '\n' + '\n' + ' \n' + " \n" + ' \n' + ' \n' + ' \n' + " \n" + ' \n" + " \n" + "" + ) + + +@pytest.fixture +def fail_performance_results() -> dict[str, int]: + """Fixture to provide performance test results with a fail + + :return: The results + :rtype: `dict`[`str`, `int`] + """ + return {"num_tests": 5, "num_failures": 1, "num_errors": 0} + + +@pytest.fixture +def expected_performance_xml_fail() -> str: + """Expected xml for a fail + + :return: The xml string + :rtype: `str` + """ + return ( + '\n' + '\n' + ' \n' + " \n" + ' \n' + ' \n' + ' \n' + " \n" + ' \n' + ' \n' + " 1 of 5 events failed to be processed correctly by the" + " PV\n" + " \n" + " \n" + " \n" + "" + ) + + +@pytest.fixture +def error_performance_results() -> dict[str, int]: + """Fixture to provide performance test results with a error + + :return: The results + :rtype: `dict`[`str`, `int`] + """ + return {"num_tests": 5, "num_failures": 1, "num_errors": 1} + + +@pytest.fixture +def expected_performance_xml_error() -> str: + """Expected xml for an error + + :return: The xml string + :rtype: `str` + """ + return ( + '\n' + '\n' + ' \n' + " \n" + ' \n' + ' \n' + ' \n' + " \n" + ' \n' + ' \n' + " 1 of 5 events failed to be sent correctly by the Test" + " Harness\n" + " \n" + " \n" + " \n" + "" + ) + + +@pytest.fixture +def performance_junit_properties() -> dict[str, float]: + """Extra properties for the junit xml + + :return: Dictionary of properties + :rtype: `dict`[`str`, `float`] + """ + return { + "th_end_time": 20.0, + "aer_end_time": 30.0, + "pv_end_time": 40.0, + "average_sent_per_sec": 12.0, + "average_processed_per_sec": 12.0, + "average_queue_time": 1.0, + "average_response_time": 1.0, + } + + +@pytest.fixture +def expected_performance_xml_properties() -> str: + """Expected xml for with extra properties + + :return: The xml string + :rtype: `str` + """ + return ( + '\n' + '\n' + ' \n' + " \n" + ' \n' + ' \n' + ' \n' + ' \n' + ' \n' + ' \n' + ' \n' + ' \n' + ' \n' + ' \n' + " \n" + ' \n" + " \n" + "" + ) + + +@pytest.fixture +def log_file_string() -> ( + Literal["2023-09-28T19:27:23.434758Z 1 svdc_new_job_started…"] +): + """Fixture providing a log file string + + :return: Returns the log file string + :rtype: `str` + """ + return ( + "2023-09-28T19:27:23.434758Z 1 svdc_new_job_started 
: JobId =" + " eeba705f-eac4-467c-8826-bf31673e745f : EventId =" + " 3cf78438-8084-494d-8d7b-efd7ea46f7d4 : EventType = A" + ) diff --git a/tests/test_harness/reporting/test_log_analyser.py b/test_harness/protocol_verifier/tests/reporting/test_log_analyser.py similarity index 99% rename from tests/test_harness/reporting/test_log_analyser.py rename to test_harness/protocol_verifier/tests/reporting/test_log_analyser.py index db19715..f409407 100644 --- a/tests/test_harness/reporting/test_log_analyser.py +++ b/test_harness/protocol_verifier/tests/reporting/test_log_analyser.py @@ -8,7 +8,7 @@ import pytest from pygrok import Grok -from test_harness.reporting.log_analyser import ( +from test_harness.protocol_verifier.reporting.log_analyser import ( check_test_result, get_job_id_failure_successes, grok_line_priority, diff --git a/tests/test_harness/reporting/test_report_results.py b/test_harness/protocol_verifier/tests/reporting/test_report_results.py similarity index 98% rename from tests/test_harness/reporting/test_report_results.py rename to test_harness/protocol_verifier/tests/reporting/test_report_results.py index fddd135..2c4ae97 100644 --- a/tests/test_harness/reporting/test_report_results.py +++ b/test_harness/protocol_verifier/tests/reporting/test_report_results.py @@ -2,7 +2,7 @@ """ import re from pandas import DataFrame -from test_harness.reporting.report_results import ( +from test_harness.protocol_verifier.reporting.report_results import ( generate_junit_xml, generate_html_report_string, get_report_files_mapping_from_dataframe_report, diff --git a/test_harness/protocol_verifier/tests/test_send_events.py b/test_harness/protocol_verifier/tests/test_send_events.py index 8e5da45..8a7c30b 100644 --- a/test_harness/protocol_verifier/tests/test_send_events.py +++ b/test_harness/protocol_verifier/tests/test_send_events.py @@ -508,6 +508,8 @@ def mock_send(*args, **kwargs): assert isinstance(results[5], datetime) assert sync_kafka_producer_mock[0] == "start" assert sync_kafka_producer_mock[-1] == "stop" + # Debug print + print("sync_kafka_producer_mock:", sync_kafka_producer_mock) assert all( action == "send" for action in sync_kafka_producer_mock[1:-1] ) diff --git a/test_harness/reporting/__init__.py b/test_harness/reporting/__init__.py index bd44963..e69de29 100644 --- a/test_harness/reporting/__init__.py +++ b/test_harness/reporting/__init__.py @@ -1,68 +0,0 @@ -"""__init__ file. 
Contains methods to create and save report files -""" -from pandas import DataFrame - -from test_harness.reporting.log_analyser import logs_validity_df_to_results -from test_harness.reporting.report_results import ( - get_report_files_mapping_from_dataframe_report -) -from test_harness.reporting.report_delivery import deliver_test_report_files - - -def create_and_save_report_files( - log_string: str, - validity_df: DataFrame, - test_name: str, - output_directory_path: str -) -> None: - """Method to create report files from logs and validity dataframe and save - report files with a prefix in an output directory - - :param log_string: String representing the log files - :type log_string: `str` - :param validity_df: :class:`DataFrame` holding the information on the test - files - :type validity_df: :class:`DataFrame` - :param test_name: The test name (or prefix) to give to the report files - :type test_name: `str` - :param output_directory_path: The path of the output directory to store - the results - :type output_directory_path: `str` - """ - report_files_mapping = create_report_files( - log_string=log_string, - validity_df=validity_df, - test_name=test_name - ) - deliver_test_report_files( - report_files_mapping=report_files_mapping, - output_directory=output_directory_path - ) - - -def create_report_files( - log_string: str, - validity_df: DataFrame, - test_name: str, - event_id_job_id_map: dict[str, str] | None = None -) -> dict[str, str | DataFrame]: - """Method to create report files from logs and validity dataframe - - :param log_string: String representing the log files - :type log_string: `str` - :param validity_df: :class:`DataFrame` holding the information on the test - files - :type validity_df: :class:`DataFrame` - :param test_name: The test name (or prefix) to give to the report files - :type test_name: `str` - """ - results_df = logs_validity_df_to_results( - log_string=log_string, - validity_df=validity_df, - event_id_job_id_map=event_id_job_id_map - ) - report_files_mapping = get_report_files_mapping_from_dataframe_report( - results_df=results_df, - results_prefix=test_name - ) - return report_files_mapping diff --git a/test_harness/reporting/report_results.py b/test_harness/reporting/report_results.py index 46ff914..21add91 100644 --- a/test_harness/reporting/report_results.py +++ b/test_harness/reporting/report_results.py @@ -1,10 +1,8 @@ -"""Methods to create junit report into html +""" +Contains base class for printing test cases """ from __future__ import annotations -from typing import Any, Literal from abc import ABC, abstractmethod -import pandas as pd -from junit2htmlreport.parser import Junit class TestPrint(ABC): @@ -72,635 +70,3 @@ def count_tests(self) -> tuple[int, int, int]: :rtype: `tuple`[`int`, `int`, `int`] """ return (0, 0, 0) - - -class PerformanceTestCase(TestPrint): - """Sub class of :class:`TestPrint` to create xml - Test cases for a peformance test - - :param name: The name of the test - :type name: `str` - :param results: The given results: - * 'num_tests' - * 'num_failures' - * 'num_errors' - :type results: `dict`[`str`, `int`] - """ - def __init__( - self, - name: str, - results: dict[str, int] - ) -> None: - """Constructor method - """ - super().__init__( - name=name - ) - self.results = results - self.result = self.calc_result() - - def count_tests(self) -> tuple[int, int, int]: - """Method to count the number of tests failures and errors - - :return: Returns the tests failures and errors - :rtype: tuple[`int`, `int`, `int`] - """ - return ( - 
self.results["num_tests"], - self.results["num_failures"], - self.results["num_errors"] - ) - - def print_case(self, indent: int = 4, level: int = 0) -> str: - """Method to print the test case - - :param indent: Indent used in the output, defaults to `4` - :type indent: `int`, optional - :param level: The level of indent the test case is on, defaults to `0` - :type level: `int`, optional - :return: Returns the string representation of the test case - :rtype: `str` - """ - print_string = "" - indent_string = self.create_indent_string(indent * level) - print_string += indent_string - print_string += self.create_tag_start() - if self.result == "Pass": - print_string += ' />' - return print_string - # anything other than pass - next_level = level + 1 - next_level_indent = self.create_indent_string(indent * next_level) - print_string += '>\n' + next_level_indent - failure_indent = self.create_indent_string(indent * (next_level + 1)) - if self.result == "Fail": - print_string += ( - '\n' - ) - print_string += failure_indent + ( - f"{self.results['num_failures']}" - f" of {self.results['num_tests']} events failed to be" - " processed " - "correctly by the PV\n" - ) - print_string += next_level_indent + ( - '\n' - ) - else: - print_string += ( - '\n' - ) - print_string += failure_indent + ( - f"{self.results['num_errors']}" - f" of {self.results['num_tests']} events failed to be sent " - "correctly by the Test Harness\n" - ) - print_string += next_level_indent + "\n" - print_string += indent_string + "
    " - return print_string - - def calc_result(self) -> Literal['Pass', 'Error', 'Fail']: - """Method to calculate the result of the test given the results - - :return: Returns: - * "Pass" - * "Error" - * "Fail" - :rtype: :class:`Literal`[`'Pass'`, `'Error'`, `'Fail'`] - """ - if self.results["num_failures"] + self.results["num_errors"] == 0: - return "Pass" - if self.results["num_errors"] > 0: - return "Error" - return "Fail" - - def create_tag_start(self) -> str: - """Method to create the starting tag for the string representation of - the test case - - :raises RuntimeError: Raises a :class:`RuntimeError` is there is no - parent - :return: Returns the test case starting tag - :rtype: `str` - """ - if not self.parent: - raise RuntimeError("Cannot create tag without parent") - return f' None: - """Constructor method - """ - super().__init__(name) - self.result = result - self.pv_failure_reason = None - self.file_name = file_name - - @property - def pv_failure_reason(self) -> list | None: - """Property for protocol verifier failure reasons - - :return: Returns the list of failure reasons or `None` if there aren't - any - :rtype: `list` | `None` - """ - return self._pv_failure_reason - - @pv_failure_reason.setter - def pv_failure_reason(self, reasons: list[Any] | None) -> None: - """Setter for the property `pv_failure_reason` - - :param reasons: The list of reasons. Filters out non string values - :type reasons: `list`[`Any`] | `None` - """ - if not reasons: - self._pv_failure_reason = None - else: - self._pv_failure_reason = list(set( - reason + ';\n' - for reason in reasons - if isinstance(reason, str) - )) - - def print_case(self, indent: int = 4, level: int = 0) -> str: - """Method to print the test case - - :param indent: Indent used in the output, defaults to `4` - :type indent: `int`, optional - :param level: The level of indent the test case is on, defaults to `0` - :type level: `int`, optional - :return: Returns the string representation of the test case - :rtype: `str` - """ - print_string = "" - indent_string = self.create_indent_string(indent * level) - print_string += indent_string - print_string += self.create_tag_start() - if self.result == "Pass": - print_string += ' />' - return print_string - # anything other than pass - next_level = level + 1 - next_level_indent = self.create_indent_string(indent * next_level) - print_string += '>\n' + next_level_indent - failure_indent = self.create_indent_string(indent * (next_level + 1)) - if self.result == "Fail": - print_string += ( - '\n' - ) - if self.pv_failure_reason: - print_string += failure_indent + ( - "PV Result was a fail when sequence is valid. PV failure " - "reasons below:\n" - ) - print_string += "".join( - self.create_indent_string(indent * (next_level + 2)) - + reason - for reason in self.pv_failure_reason - ) - else: - print_string += failure_indent + ( - "PV Result was a success when sequence is invalid\n" - ) - print_string += next_level_indent + ( - '\n' - ) - elif self.result == ( - "Inconclusive|No SVDC Success|No Notification Failure" - ): - print_string += ( - '\n' - ) - print_string += failure_indent + ( - "Time out was allowed but " - "Protocol Verifier showed no success or failure messages\n" - ) - print_string += next_level_indent + "\n" - else: - print_string += ( - '\n' - ) - print_string += failure_indent + ( - "Protocol Verifier showed success and failure messages. 
PV " - "Failure reasons below:\n" - ) - if self.pv_failure_reason: - print_string += "".join( - self.create_indent_string(indent * (next_level + 2)) - + reason - for reason in self.pv_failure_reason - ) - print_string += next_level_indent + "\n" - print_string += indent_string + "" - return print_string - - def create_tag_start(self) -> str: - """Method to create the starting tag for the string representation of - the test case - - :raises RuntimeError: Raises a :class:`RuntimeError` is there is no - parent - :return: Returns the test case starting tag - :rtype: `str` - """ - if not self.parent: - raise RuntimeError("Cannot create tag without parent") - return f' tuple[int, int, int]: - """Method to count the number of tests failures and errors - - :return: Returns the tests failures and errors - :rtype: tuple[`int`, `int`, `int`] - """ - tests, failures, errors = 1, 0, 0 - if self.result == "Fail": - failures = 1 - elif "Inconclusive" in self.result: - errors = 1 - return tests, failures, errors - - -class TestSuite(TestPrint): - """Class to hold information and children of a xml test suite - Subclass of :class:`TestPrint` - """ - def __init__( - self, - name: str, - is_suites: bool = False, - properties: dict[str, Any] | None = None - ) -> None: - """Constructor method - """ - super().__init__(name) - self.children: list[TestCase | TestSuite] = [] - self.is_suites = is_suites - self.properties = properties - - def add_child( - self, - child: TestCase | TestSuite - ) -> None: - """Method to adda child to the instancees children - - :param child: A child test suite or test case - :type child: :class:`TestCase` | :class:`TestSuite` - """ - self.children.append(child) - child.parent = self - - def add_children( - self, - children: list[TestCase | TestSuite] - ) -> None: - """Method to add multiple children to the test case - - :param children: A list of children - :type children: `list`[:class:`TestCase` | :class:`TestSuite`] - """ - for child in children: - self.add_child(child) - - def print_case(self, indent: int = 4, level: int = 0) -> str: - """Method to provide a string representation of the instance - - :param indent: Indent used in the output, defaults to `4` - :type indent: `int`, optional - :param level: The level of indent the test case is on, defaults to `0` - :type level: `int`, optional - :return: Returns the string representation of the test suite - :rtype: `str` - """ - print_string = "" - indent_string = self.create_indent_string(indent * level) - print_string += indent_string - print_string += self.create_tag() - if self.properties: - print_string += self.create_properties_string( - indent=indent, - level=level + 1 - ) - print_string += "\n".join( - child.print_case(indent=indent, level=level + 1) - for child in self.children - ) - print_string += ( - f'\n{indent_string}' - f'' - ) - return print_string - - def create_tag(self) -> str: - """MEthod to create the starting tag for the test suite - - :return: Returns the xml testsuite starting tag - :rtype: `str` - """ - tests, failures, errors = self.count_tests() - tag = ( - f'\n' - ) - return tag - - def count_tests(self) -> tuple[int, int, int]: - """Method to count test, failure and error numbers - - :return: Returns a tuple of counts for test, failure and error numbers - :rtype: `tuple`[`int`, `int`, `int`] - """ - tests = 0 - failures = 0 - errors = 0 - for child in self.children: - child_tests, child_failures, child_errors = ( - child.count_tests() - ) - tests += child_tests - failures += child_failures - errors += 
child_errors - return tests, failures, errors - - def create_properties_string(self, indent: int = 4, level: int = 0) -> str: - """Method to provide a string representation of the properties - - :param indent: Indent used in the output, defaults to `4` - :type indent: `int`, optional - :param level: The level of indent the test case is on, defaults to `0` - :type level: `int`, optional - :return: Returns the string representation of the test suite - :rtype: `str` - """ - indent_string = self.create_indent_string(indent * level) - sub_indent_string = self.create_indent_string(indent * (level + 1)) - properties_string = indent_string + "\n" - properties_string += "".join( - sub_indent_string - + f'\n' - for name, value in self.properties.items() - ) - properties_string += indent_string + "\n" - return properties_string - - -def generate_performance_test_reports( - results: dict[str, int], - properties: dict[str, Any] | None = None -) -> tuple[str, str]: - """Method to generate perfromance test: - * xml junit report - * html report based off the xml - - :param results: The results of the test - :type results: `dict`[`str`, `int`] - :param properties: Extra properties to be written as results, defaults to - `None` - :type properties: `dict`[`str`, `Any`] | `None`, optional - :return: Returns a tuple of: - * html report string - * xml report string - :rtype: `tuple`[`str`, `str`] - """ - suites = TestSuite( - name="Performance tests run", - is_suites=True, - ) - if properties is None: - properties = {} - suite = TestSuite( - name="Performance test run", - properties={ - **results, - **properties - } - - ) - suite.add_child( - PerformanceTestCase( - "Run Result", - results=results - ) - ) - suites.add_child(suite) - xml_string = '\n' - xml_string += suites.print_case() - report = Junit(xmlstring=xml_string) - html_string = report.html() - return html_string, xml_string - - -def generate_html_report_string( - results_df: pd.DataFrame, - fields: list[str], - field_depth: int = 0 -) -> tuple[str, str]: - """Method to generate an html report string from a results dataframe and - its junit xml it was generated from - - :param results_df: Dataframe containing test results - :type results_df: :class:`pd`.`DataFrame` - :param fields: The list of fields with which to group tests into - :type fields: `list`[`str`] - :param field_depth: The depth of the fields list with which to create - nested test suite, defaults to `0` - :type field_depth: `int`, optional - :return: Returns a generated html report and the junit xml it was - generated from - :rtype: `tuple`[`str`, `str`] - """ - xml_string = generate_junit_xml( - results_df=results_df, - fields=fields, - field_depth=field_depth - ) - report = Junit(xmlstring=xml_string) - html_string = report.html() - return html_string, xml_string - - -def generate_junit_xml( - results_df: pd.DataFrame, - fields: list[str], - field_depth: int = 0 -) -> str: - """Method to generate a a junit xml string from a results dataframe - - :param results_df: DataFrame of results - :type results_df: :class:`pd`.`DataFrame` - :param fields: The list of fields with which to group tests into - :type fields: `list`[`str`] - :param field_depth: The depth of the fields list with which to create - nested test suites. 
`field_depth = 0` represents creating nested suits for - all fields and `field_depth = len(fields)` would be no nesting of test - cases, defaults to `0` - :type field_depth: `int`, optional - :return: Returns a xml string representation of the results - :rtype: `str` - """ - suites = TestSuite( - name="Tests Run", - is_suites=True - ) - children = get_test_suites_from_results_dataframe( - results_df=results_df, - fields=fields, - nth_field=field_depth - ) - suites.add_children(children) - junit_string = '\n' - junit_string += suites.print_case() - return junit_string - - -def get_test_suites_from_results_dataframe( - results_df: pd.DataFrame, - fields: list[str], - nth_field: int = 0 -) -> list[TestSuite | TestCase]: - """Method to obtain test suites and test cases from results dataframe - - :param results_df: Dataframe of results - :type results_df: :class:`pd`.`DataFrame` - :param fields: The fields with which to categorise the results - :type fields: `list`[`str`] - :param nth_field: Integer to indicate at what index of fields list to - begin, defaults to `0` - :type nthe_field: `int`, optional - :return: Returns a list of :class:`TestSuite`'s or :class:`TestCase`'s - :rtype: `list`[:class:`TestSuite` | :class:`TestCase`] - """ - nth_field += 1 - children: list[TestSuite | TestCase] = [] - if nth_field <= len(fields): - for key, idx in results_df.groupby( - fields[:nth_field] - ).groups.items(): - if isinstance(key, tuple): - name = ".".join(str(col_val) for col_val in key) - else: - name = str(key) - child = TestSuite( - name=name - ) - child_children = get_test_suites_from_results_dataframe( - results_df=results_df.loc[idx], - fields=fields, - nth_field=nth_field - ) - child.add_children(child_children) - children.append(child) - else: - for idx, row in results_df.iterrows(): - child = TestCase( - name=( - f"JobId={str(idx)}" + ( - f", FileName={row['FileName']}" - if "FileName" in row else "" - ) - ), - result=row["TestResult"], - file_name=row["FileName"] if "FileName" in row else None - ) - if ( - (row["TestResult"] == "Fail" and row["Validity"]) - or ( - ( - row["TestResult"] - ) == ( - "Inconclusive|SVDC Success|Notified Failure" - ) - ) - or (row["TestResult"] == "Pass" and not row["Validity"]) - ): - if "FailureReason" in row: - child.pv_failure_reason = row["FailureReason"] - children.append(child) - return children - - -def generate_html_from_csv_report( - test_report_csv_path: str, - html_report_file_path: str -) -> None: - """Method to generate and html file from csv report - - :param test_report_csv_path: The path to the csv report - :type test_report_csv_path: `str` - :param html_report_file_path: The output path of the html report - :type html_report_file_path: str - """ - results_df = pd.read_csv( - test_report_csv_path, - index_col="JobId" - ) - html_string, _ = generate_html_report_string( - results_df=results_df, - fields=["SequenceName", "Validity", "Category"], - field_depth=2 - ) - with open(html_report_file_path, 'w', encoding="utf-8") as file: - file.write(html_string) - - -def get_report_files_mapping_from_dataframe_report( - results_df: pd.DataFrame, - results_prefix: str -) -> dict[str, str | pd.DataFrame]: - """Method to get report files mapping from a results dataframe and a - prefix for the tests - - :param results_df: :class:`pd`.`DataFrame` of results - :type results_df: :class:`pd`.`DataFrame` - :param results_prefix: The prefix for the results file names - :type results_prefix: `str` - :return: Returns a dictionary mapping file name to file - :rtype: 
-    :rtype: `dict`[`str`, `str` | :class:`pd`.`DataFrame`]
-    """
-    html_string, xml_string = generate_html_report_string(
-        results_df=results_df,
-        fields=["SequenceName", "Validity", "Category"],
-        field_depth=2
-    )
-    return {
-        f"{results_prefix}.html": html_string,
-        f"{results_prefix}.xml": xml_string,
-        f"{results_prefix}.csv": results_df,
-
-    }
-
-
-if __name__ == "__main__":
-    import sys
-    args = sys.argv[1:]
-    generate_html_from_csv_report(
-        args[0],
-        args[1]
-    )
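
Note (illustrative, not part of the patch): the report helpers deleted above now live in test_harness/protocol_verifier/reporting/report_results.py. The sketch below shows how the grouping fields and `field_depth` described in the docstrings are expected to interact; the import path and the sample data are assumptions made for illustration only.

# Illustrative sketch only: assumes the helpers keep the signatures shown in
# the diff above after the move; the sample results below are invented.
import pandas as pd

from test_harness.protocol_verifier.reporting.report_results import (
    generate_html_report_string,
)

# Results are indexed by JobId and carry the columns the helpers group on.
results_df = pd.DataFrame(
    [
        ["job_1", "job_name", True, "ValidSols", "Pass"],
        ["job_2", "job_name", False, "StackedSols", "Fail"],
    ],
    columns=["JobId", "SequenceName", "Validity", "Category", "TestResult"],
).set_index("JobId")

# With three fields and field_depth=2, the grouping collapses into a single
# suite level named "SequenceName.Validity.Category"; field_depth=0 would
# instead nest one suite level per field.
html_string, xml_string = generate_html_report_string(
    results_df=results_df,
    fields=["SequenceName", "Validity", "Category"],
    field_depth=2,
)

For one-off conversions, the `__main__` block above reads a csv report indexed by JobId and writes the html report, e.g. `python report_results.py results.csv results.html`, assuming the entry point is kept in the relocated module.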
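Note (illustrative, not part of the patch): the performance-report path takes a results dictionary plus optional extra properties. The key names below are taken from the conftest fixtures removed later in this patch, while the import path is an assumption about the relocated module.

# Illustrative sketch only: results keys and property names come from the
# conftest fixtures in this patch; the import path is assumed.
from test_harness.protocol_verifier.reporting.report_results import (
    generate_performance_test_reports,
)

results = {"num_tests": 5, "num_failures": 1, "num_errors": 0}
extra_properties = {
    "th_end_time": 20.0,
    "aer_end_time": 30.0,
    "pv_end_time": 40.0,
    "average_sent_per_sec": 12.0,
    "average_processed_per_sec": 12.0,
    "average_queue_time": 1.0,
    "average_response_time": 1.0,
}

# Returns the html report string and the junit xml it was rendered from; the
# extra properties are written into the suite's properties block.
html_string, xml_string = generate_performance_test_reports(
    results=results,
    properties=extra_properties,
)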
diff --git a/tests/test_harness/conftest.py b/tests/test_harness/conftest.py
index b783de5..3e7d899 100644
--- a/tests/test_harness/conftest.py
+++ b/tests/test_harness/conftest.py
@@ -65,22 +65,6 @@ def runner(test_app: HarnessApp) -> FlaskCliRunner:
     return test_app.test_cli_runner()
 
 
-@pytest.fixture
-def log_file_string() -> (
-    Literal["2023-09-28T19:27:23.434758Z 1 svdc_new_job_started…"]
-):
-    """Fixture providing a log file string
-
-    :return: Returns the log file string
-    :rtype: `str`
-    """
-    return (
-        "2023-09-28T19:27:23.434758Z 1 svdc_new_job_started : JobId ="
-        " eeba705f-eac4-467c-8826-bf31673e745f : EventId ="
-        " 3cf78438-8084-494d-8d7b-efd7ea46f7d4 : EventType = A"
-    )
-
-
 @pytest.fixture
 def grok_priority_patterns() -> list[Grok]:
     """Fixture providing a list of grok patterns in priority order
diff --git a/tests/test_harness/reporting/conftest.py b/tests/test_harness/reporting/conftest.py
index 4a46898..a56eb75 100644
--- a/tests/test_harness/reporting/conftest.py
+++ b/tests/test_harness/reporting/conftest.py
@@ -7,70 +7,6 @@
 import numpy as np
 
 
-@pytest.fixture
-def validity_df() -> pd.DataFrame:
-    """Fixture providing a dataframe with validity and job id amongst other
-    fields
-
-    :return: Returns a dataframe of validity and job id
-    :rtype: :class:`pd`.`DataFrame`
-    """
-    data = [
-        ["job_1", "job_name", True, "ValidSols", "file_1"],
-        ["job_2", "job_name", True, "ValidSols", "file_2"],
-        ["job_3", "job_name", False, "StackedSols", "file_3"],
-        ["job_4", "job_name", False, "ANDConstraintBreak", "file_4"],
-        ["job_5", "job_name", False, "MissingEdges", "file_5"],
-    ]
-    validity = pd.DataFrame(
-        data,
-        columns=["JobId", "SequenceName", "Validity", "Category", "FileName"],
-    )
-    validity.set_index("JobId", inplace=True)
-    return validity
-
-
-@pytest.fixture
-def validity_df_json_validity() -> pd.DataFrame:
-    """Fixture providing a dataframe with validity and job id amongst other
-
-    :return: Returns a dataframe of validity and job id
-    :rtype: :class:`pd`.`DataFrame`
-    """
-    data = [
-        ["job_1", "job_name", True, "ValidJSON", "file_1"],
-        ["job_2", "job_name", True, "ValidJSON", "file_2"],
-        ["job_3", "job_name", False, "InvalidJSON", "file_3"],
-        ["job_4", "job_name", False, "InvalidJSON", "file_4"],
-    ]
-    validity = pd.DataFrame(
-        data,
-        columns=["JobId", "SequenceName", "Validity", "Category", "FileName"],
-    )
-    validity.set_index("JobId", inplace=True)
-    return validity
-
-
-@pytest.fixture
-def pv_results_df() -> pd.DataFrame:
-    """Fixture to provide a mocked protocol verifier results dataframe
-
-    :return: Mocked PV results dataframe
-    :rtype: :class:`pd`.`DataFrame`
-    """
-    data = [
-        ["job_1", [np.nan], [True]],
-        ["job_2", ["It Failed"], [False]],
-        ["job_3", ["It Failed", "It Failed"], [False] * 2],
-        ["job_4", ["It Failed", np.nan], [False, True]],
-    ]
-    results_df = pd.DataFrame(
-        data=data, columns=["JobId", "FailureReason", "PVResult"]
-    )
-    results_df.set_index("JobId", inplace=True)
-    return results_df
-
-
 @pytest.fixture
 def expected_results() -> pd.DataFrame:
     """Fixture providing dataframe of expected results
@@ -186,6 +122,30 @@ def expected_junit_string() -> str:
     )
 
 
+@pytest.fixture
+def report_files_mapping(
+    expected_results: pd.DataFrame,
+    expected_junit_string: str,
+    expected_html_string: str,
+) -> dict[str, str | pd.DataFrame]:
+    """Fixture providing a dictionary mapping file name to file
+
+    :param expected_results: Fixture providing expected results dataframe
+    :type expected_results: :class:`pd`.`DataFrame`
+    :param expected_junit_string: Fixture providing expected junit string
+    :type expected_junit_string: `str`
+    :param expected_html_string: Fixture providing expected html string
+    :type expected_html_string: `str`
+    :return: Dictionary mapping filename to file
+    :rtype: `dict`[`str`, `str` | :class:`pd`.`DataFrame`]
+    """
+    return {
+        "test.csv": expected_results,
+        "test.xml": expected_junit_string,
+        "test.html": expected_html_string,
+    }
+
+
 @pytest.fixture
 def expected_html_string() -> str:
     """Fixture providing an html string of expected results
@@ -418,207 +378,3 @@ def expected_html_string() -> str:
         ' \n \n \n \n\n\n\n\n\n"
     )
-
-
-@pytest.fixture
-def report_files_mapping(
-    expected_results: pd.DataFrame,
-    expected_junit_string: str,
-    expected_html_string: str,
-) -> dict[str, str | pd.DataFrame]:
-    """Fixture providing a dictionary mapping file name to file
-
-    :param expected_results: Fixture providing expected results dataframe
-    :type expected_results: :class:`pd`.`DataFrame`
-    :param expected_junit_string: Fixture providing expected junit string
-    :type expected_junit_string: `str`
-    :param expected_html_string: Fixture providing expected html string
-    :type expected_html_string: `str`
-    :return: Dictionary mapping filename to file
-    :rtype: `dict`[`str`, `str` | :class:`pd`.`DataFrame`]
-    """
-    return {
-        "test.csv": expected_results,
-        "test.xml": expected_junit_string,
-        "test.html": expected_html_string,
-    }
-
-
-@pytest.fixture
-def pass_performance_results() -> dict[str, int]:
-    """Fixture to provide performance test results
-
-    :return: The results
-    :rtype: `dict`[`str`, `int`]
-    """
-    return {
-        "num_tests": 5,
-        "num_failures": 0,
-        "num_errors": 0
-    }
-
-
-@pytest.fixture
-def expected_performance_xml_pass() -> str:
-    """Expected xml for a pass
-
-    :return: The xml string
-    :rtype: `str`
-    """
-    return (
-        '\n'
-        '\n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ''
-    )
-
-
-@pytest.fixture
-def fail_performance_results() -> dict[str, int]:
-    """Fixture to provide performance test results with a fail
-
-    :return: The results
-    :rtype: `dict`[`str`, `int`]
-    """
-    return {
-        "num_tests": 5,
-        "num_failures": 1,
-        "num_errors": 0
-    }
-
-
-@pytest.fixture
-def expected_performance_xml_fail() -> str:
-    """Expected xml for a fail
-
-    :return: The xml string
-    :rtype: `str`
-    """
-    return (
-        '\n'
-        '\n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' 1 of 5 events failed to be processed correctly by the'
-        ' PV\n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ''
-    )
-
-
-@pytest.fixture
-def error_performance_results() -> dict[str, int]:
-    """Fixture to provide performance test results with an error
-
-    :return: The results
-    :rtype: `dict`[`str`, `int`]
-    """
-    return {
-        "num_tests": 5,
-        "num_failures": 1,
-        "num_errors": 1
-    }
-
-
-@pytest.fixture
-def expected_performance_xml_error() -> str:
-    """Expected xml for an error
-
-    :return: The xml string
-    :rtype: `str`
-    """
-    return (
-        '\n'
-        '\n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' 1 of 5 events failed to be sent correctly by the Test'
-        ' Harness\n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ''
-    )
-
-
-@pytest.fixture
-def performance_junit_properties() -> dict[str, float]:
-    """Extra properties for the junit xml
-
-    :return: Dictionary of properties
-    :rtype: `dict`[`str`, `float`]
-    """
-    return {
-        "th_end_time": 20.0,
-        "aer_end_time": 30.0,
-        "pv_end_time": 40.0,
-        "average_sent_per_sec": 12.0,
-        "average_processed_per_sec": 12.0,
-        "average_queue_time": 1.0,
-        "average_response_time": 1.0
-    }
-
-
-@pytest.fixture
-def expected_performance_xml_properties() -> str:
-    """Expected xml with extra properties
-
-    :return: The xml string
-    :rtype: `str`
-    """
-    return (
-        '\n'
-        '\n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ' \n'
-        ''
-    )
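
Note (illustrative, not part of the patch): the relocated report_files_mapping fixture maps file names to either strings or DataFrames. A hypothetical test using it might look like the sketch below; the test body and the use of tmp_path are not taken from the patch, only the fixture contract and the deliver_test_report_files keyword arguments are.

# Hypothetical test sketch: only the fixture contract and the
# deliver_test_report_files keyword arguments come from this patch.
from pathlib import Path

import pandas as pd

from test_harness.reporting.report_delivery import deliver_test_report_files


def test_deliver_test_report_files(
    report_files_mapping: dict[str, str | pd.DataFrame],
    tmp_path: Path,
) -> None:
    """Check each mapped report file is written to the output directory."""
    deliver_test_report_files(
        report_files_mapping=report_files_mapping,
        output_directory=str(tmp_path),
    )
    for file_name in report_files_mapping:
        assert (tmp_path / file_name).exists()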