From 19440773a0b18de86c9c1ab048b74db5125c785a Mon Sep 17 00:00:00 2001
From: Misbah Anjum N
Date: Thu, 9 May 2024 20:34:16 +0530
Subject: [PATCH] Generate .html analysis file for avocado runs

In addition to Analysis.xlsx, this patch generates an Analysis.html file
as output. It can be generated for a new analysis as well as a comparison
analysis: the Analysis.xlsx file is converted to Analysis.html, and both
outputs are produced. The .html file can be used to display a summary of
the avocado-based results on a web page for a dashboard-style view. As an
enhancement over the .xlsx output, the .html output provides filter
drop-downs on the tables, which can be used to further break down the
results.

Signed-off-by: Misbah Anjum N
---
 README.md   |   6 +-
 analysis.py | 326 +++++++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 315 insertions(+), 17 deletions(-)

diff --git a/README.md b/README.md
index fc71124..4630306 100644
--- a/README.md
+++ b/README.md
@@ -297,7 +297,7 @@ So, if such tests are identified and need to be "not run" for a particular envir
 -----
 ### Analysis of results:
-analysis.py script is used to simplify analysis/comparison of avocado test runs by generating an excel file (.xlsx) of the same. The results.json file which gets created after avocado run, is passed as input in command line. Depending on the flag/options provided, the excel sheet will be generated.
+analysis.py script is used to simplify analysis/comparison of avocado test runs by generating excel and html files (.xlsx and .html) of the same. The results.json file, which gets created after an avocado run, is passed as input on the command line. Depending on the flags/options provided, the excel and html files will be generated.
 
 > `$ python3 analysis.py [--options] [input(s)]`
 
@@ -316,7 +316,9 @@ analysis.py script is used to simplify analysis/comparison of avocado test runs
 
 2. `--add-to-existing`:
-   > This flag is used to append new results.json dat to an already existing excel file. This flag can be used to compare new runs with the previous runs so that we can have our own analysis.
+   > Work in Progress (Future Enhancement)
+
+   > This flag is used to append new results.json data to an already existing excel file. This flag can be used to compare multiple new runs with the previous runs so that we can perform our own analysis.
 
    > `$ python3 analysis.py --add-to-existing [xlsx_file] [json_file]`
 
diff --git a/analysis.py b/analysis.py
index 3304a51..e31d67e 100644
--- a/analysis.py
+++ b/analysis.py
@@ -17,10 +17,10 @@
 '''
 analysis.py script is aimed to help in analysis/comparison of avocado test runs
-by generating a simple excel file (.xlsx). The results.json file which gets created
-after avocado run is passed as input in command line while running this script and
-depending on the flag/options provided, the excel analysis/omparison sheet will be
-generated.
+by generating simple excel (.xlsx) and html (.html) files. The results.json file
+which gets created after avocado run is passed as input in command line while running
+this script and depending on the flag/options provided, the excel analysis/comparison
+sheet will be generated.
 
 Prerequsites:-
     pip3 install pandas[excel]
@@ -29,11 +29,9 @@
 
 flags/options:-
     1. --new-analysis
-    2. --add-to-existing
-    3. --compare-two-results
+    2. --compare-two-results
 
     python3 analysis.py --new-analysis <json_file>
-    python3 analysis.py --add-to-existing <xlsx_file> <json_file>
     python3 analysis.py --compare-two-results <json_file1> <json_file2>
 
 Check README.md for more explanation
@@ -77,6 +75,28 @@ def test_analysis(data):
     # Save the DataFrame to a Excel file
     dataframe.to_excel('Analysis.xlsx', index=False)
 
+    # Save the DataFrame to an HTML page
+    summary = {
+        'Analysis_Type': 'New',
+        'Regression': None,
+        'Solved': None,
+        'Diff': None,
+        'New': {
+            'Name': dataframe.loc[0].iat[-1],
+            'Fail': dataframe.loc[1].iat[-1],
+            'Error': dataframe.loc[2].iat[-1],
+            'Skip': dataframe.loc[3].iat[-1],
+            'Interrupt': dataframe.loc[4].iat[-1],
+            'Cancel': dataframe.loc[5].iat[-1],
+            'Pass': dataframe.loc[6].iat[-1],
+            'White-Board': dataframe.loc[7].iat[-1]
+        }
+    }
+    json_object = json.dumps(summary, indent=4)
+    if "--new-analysis" in sys.argv:
+        print(json_object)
+        analysis_to_html(summary)
+
 
 def comparison_analysis(excel, data):
     '''
@@ -124,11 +144,15 @@
             new_dataframe.loc[len(old_dataframe.index) - 1] = [test['status'],
                                                                test['fail_reason']]
 
+    # Save the DataFrame to an Excel file
     final_res = pd.concat([old_dataframe, new_dataframe], axis=1)
     final_res.to_excel(excel, index=False)
 
     # Add the Result column to compare two results
     if "--compare-two-results" in sys.argv:
+        regression_count = 0
+        solved_count = 0
+        difference_count = 0
         dataframe = pd.read_excel(excel)
         results = []
         for i in range(len(dataframe.index)):
@@ -137,20 +161,45 @@
             else:
                 if dataframe.loc[i].iat[-4] == "PASS" and not pd.isnull(dataframe.loc[i].iat[-2]):
                     results.append("REGRESSION")
+                    regression_count += 1
                 elif dataframe.loc[i].iat[-2] == "PASS" and not pd.isnull(dataframe.loc[i].iat[-4]):
                     results.append("SOLVED")
+                    solved_count += 1
                 elif pd.isnull(dataframe.loc[i].iat[-4]) or pd.isnull(dataframe.loc[i].iat[-2]):
                     results.append("")
                 else:
                     results.append("DIFF")
+                    difference_count += 1
 
         result_dataframe = pd.DataFrame(columns=['Result'])
         for i in range(8, len(results)):
             result_dataframe.loc[i] = results[i]
 
+        # Save the DataFrame to an Excel file
         final_dataframe = pd.concat([dataframe, result_dataframe], axis=1)
         final_dataframe.to_excel(excel, index=False)
 
+        # Save the DataFrame to an HTML page
+        summary = {
+            'Analysis_Type': 'Comparison',
+            'Regression': regression_count,
+            'Solved': solved_count,
+            'Diff': difference_count,
+            'New': {
+                'Name': new_dataframe.loc[0].iat[-1],
+                'Fail': new_dataframe.loc[1].iat[-1],
+                'Error': new_dataframe.loc[2].iat[-1],
+                'Skip': new_dataframe.loc[3].iat[-1],
+                'Interrupt': new_dataframe.loc[4].iat[-1],
+                'Cancel': new_dataframe.loc[5].iat[-1],
+                'Pass': new_dataframe.loc[6].iat[-1],
+                'White-Board': new_dataframe.loc[7].iat[-1]
+            }
+        }
+        json_object = json.dumps(summary, indent=4)
+        print(json_object)
+        analysis_to_html(summary)
+
 
 def deco(excel):
     '''
@@ -215,6 +264,261 @@
     workbook.save(excel)
 
+
+def analysis_to_html(summary):
+    '''
+    This function is used to convert the .xlsx output to .html
+    '''
+
+    analysis_type = summary['Analysis_Type']
+
+    # Read Excel file
+    excel_file = pd.ExcelFile('Analysis.xlsx')
+    sheet_names = excel_file.sheet_names
+
+    # Function to apply color based on status
+    def apply_formatting(status):
+        if status == 'PASS':
+            return ('green', 'lightgreen')  # Light green
+        elif status == 'FAIL':
+            return ('red', 'lightcoral')  # Light red
+        elif status == 'ERROR':
+            return ('orange', 'lightsalmon')  # Light orange
+        else:
+            return ('black', 'white')
+
+    # Read each sheet and convert to HTML with custom formatting
+    html_tables = {}
+    summary_tables = {}
+    for sheet_name in sheet_names:
+        dataFrame = excel_file.parse(sheet_name)
+
+        # Replace NaN values with blank cells
+        dataFrame = dataFrame.fillna('')
+
+        # Apply formatting to "Status" columns
+        for col in dataFrame.columns:
+            if col.startswith('Status'):
+                dataFrame.loc[8:, col] = dataFrame.loc[8:, col].apply(
+                    lambda x: f'<span style="color: {apply_formatting(x)[0]};'
+                              f' background-color: {apply_formatting(x)[1]};">{x}</span>')
+
+        # Apply formatting to "Result" column
+        if 'Result' in dataFrame.columns:
+            dataFrame['Result'] = dataFrame['Result'].apply(
+                lambda x: f'<b>{x}</b>' if x else x)
+
+        # Convert DataFrame to HTML Tables
+        if analysis_type == "New":
+            summary_table = dataFrame.iloc[:8, 1:].to_html(index=False, header=False)
+        else:
+            summary_table = dataFrame.iloc[:8, 1:-1].to_html(index=False, header=False)
+        summary_table = summary_table.replace('<table border="1" class="dataframe">',
+                                              '<table class="summary-table">')
+        summary_tables[sheet_name] = summary_table
+
+        html_table = dataFrame.iloc[8:].to_html(escape=False, index=False, border=1)
+        html_table = html_table.replace('<table border="1" class="dataframe">',
+                                        '<table border="1" class="data-table">')
+        html_tables[sheet_name] = html_table
+
+    # Apply styling to the html table
+    css_style = """
+    <style>
+        body { font-family: Arial, sans-serif; }
+        h1 { text-align: center; }
+        table { border-collapse: collapse; margin: 8px; }
+        th, td { border: 1px solid #999; padding: 4px 8px; text-align: left; }
+        .filter { display: inline-block; margin-right: 16px; }
+    </style>
+    """
+
+    # JavaScript code to handle dropdown menu selection and filter the table
+    js_script = """
+    <script>
+    function filterTable() {
+        var status = document.getElementById("statusFilter").value;
+        var statusOld = document.getElementById("statusFilter1");
+        var status1 = statusOld ? statusOld.value : "";
+        var result = document.getElementById("resultFilter").value;
+        var tables = document.getElementsByClassName("data-table");
+        for (var t = 0; t < tables.length; t++) {
+            var rows = tables[t].getElementsByTagName("tr");
+            // Row 0 is the header row; keep it visible
+            for (var i = 1; i < rows.length; i++) {
+                var text = rows[i].innerText;
+                var show = (status === "" || text.indexOf(status) !== -1) &&
+                           (status1 === "" || text.indexOf(status1) !== -1) &&
+                           (result === "" || text.indexOf(result) !== -1);
+                rows[i].style.display = show ? "" : "none";
+            }
+        }
+    }
+    </script>
+    """
+
+    # Dropdown menu HTML code
+    dropdown_result = """
+    <div class="filter">
+        <label for="resultFilter">Result: </label>
+        <select id="resultFilter" onchange="filterTable()">
+            <option value="">All</option>
+            <option value="REGRESSION">REGRESSION</option>
+            <option value="SOLVED">SOLVED</option>
+            <option value="DIFF">DIFF</option>
+        </select>
+    </div>
+    """
+
+    dropdown_status = """
+    <div class="filter">
+        <label for="statusFilter">Status: </label>
+        <select id="statusFilter" onchange="filterTable()">
+            <option value="">All</option>
+            <option value="PASS">PASS</option>
+            <option value="FAIL">FAIL</option>
+            <option value="ERROR">ERROR</option>
+            <option value="SKIP">SKIP</option>
+            <option value="INTERRUPT">INTERRUPT</option>
+            <option value="CANCEL">CANCEL</option>
+        </select>
+    </div>
+    """
+
+    dropdown_status1 = """
+    <div class="filter">
+        <label for="statusFilter1">Status (previous run): </label>
+        <select id="statusFilter1" onchange="filterTable()">
+            <option value="">All</option>
+            <option value="PASS">PASS</option>
+            <option value="FAIL">FAIL</option>
+            <option value="ERROR">ERROR</option>
+            <option value="SKIP">SKIP</option>
+            <option value="INTERRUPT">INTERRUPT</option>
+            <option value="CANCEL">CANCEL</option>
+        </select>
+    </div>
+    """
+
+    quick_summary = f"""
+    <div class="quick-summary">
+        <table>
+            <tr>
+                <th>REGRESSION</th>
+                <th>SOLVED</th>
+                <th>DIFF</th>
+            </tr>
+            <tr>
+                <td>{summary["Regression"]}</td>
+                <td>{summary["Solved"]}</td>
+                <td>{summary["Diff"]}</td>
+            </tr>
+        </table>
+    </div>
+    """
+
+    # Save combined HTML table with JavaScript to 'Analysis.html'
+    with open('Analysis.html', 'w') as file:
+        file.write(css_style)
+        file.write('<h1>Analysis</h1>')
+
+        if analysis_type == "Comparison":
+            file.write(quick_summary)
+
+        file.write('<div class="filters">')
+        file.write(dropdown_status)
+        if analysis_type == "Comparison":
+            file.write(dropdown_status1)
+        file.write(dropdown_result)
+        file.write('</div><br>')
+
+        file.write('<div class="summaries">')
+        for sheet_name, summary_table in summary_tables.items():
+            file.write(summary_table)
+        file.write('</div><br>')
+
+        file.write('<div class="tables">')
+        for sheet_name, html_table in html_tables.items():
+            file.write(html_table)
+        file.write(js_script)
+        file.write('</div>')
+
+
 def main():
     try:
         if "--new-analysis" in sys.argv:
@@ -223,13 +527,6 @@
             test_analysis(data)
             deco("Analysis.xlsx")
 
-        elif "--add-to-existing" in sys.argv:
-            with open(sys.argv[-1], 'r') as json_file:
-                data = json.load(json_file)
-            excel = sys.argv[-2]
-            comparison_analysis(excel, data)
-            deco(excel)
-
         elif "--compare-two-results" in sys.argv:
             with open(sys.argv[-2], 'r') as json_file:
                 data = json.load(json_file)
@@ -252,7 +549,6 @@
 
 def usage():
     return ("python3 analysis.py --new-analysis <json_file> \n\
-python3 analysis.py --add-to-existing <xlsx_file> <json_file> \n\
 python3 analysis.py --compare-two-results <json_file1> <json_file2> \n")
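
For reference, a quick sketch of how the comparison flow is exercised and the
shape of the summary it prints. The JSON keys mirror the summary dict built in
comparison_analysis(); the file names and every value below are illustrative
placeholders, not output from a real run:

    $ python3 analysis.py --compare-two-results results_old.json results_new.json
    {
        "Analysis_Type": "Comparison",
        "Regression": 2,
        "Solved": 1,
        "Diff": 0,
        "New": {
            "Name": "avocado-run-2024-05-09",
            "Fail": 3,
            "Error": 0,
            "Skip": 5,
            "Interrupt": 0,
            "Cancel": 1,
            "Pass": 91,
            "White-Board": ""
        }
    }

Both Analysis.xlsx and Analysis.html are written to the current working
directory, and Analysis.html can be opened directly in a browser to use the
filter drop-downs.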