Commit

Merge branch 'lop-devops:master' into jenkins
misanjumn authored Apr 5, 2024
2 parents 615b7b8 + 1a0bbd2 commit 24c9cbb
Showing 14 changed files with 678 additions and 160 deletions.
35 changes: 35 additions & 0 deletions README.md
@@ -296,6 +296,41 @@ So, if such tests are identified and need to be "not run" for a particular envir

-----

### Analysis of results:
The `analysis.py` script simplifies the analysis/comparison of avocado test runs by generating an Excel file (.xlsx) of the results. The `results.json` file created after an avocado run is passed as input on the command line, and the Excel sheet is generated according to the flag/options provided.

> `$ python3 analysis.py [--options] [input(s)]`
#### Prerequisites:
1. `python3`
2. `pandas` with its Excel dependencies
> `$ pip3 install pandas[excel]`
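> Alternatively, on RPM-based distributions (as noted in the script's docstring): `$ dnf install python3-pandas python3-numpy python3-openpyxl`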
#### Inputs:
> The `results.json` file generated after an avocado run
#### Options:
1. `--new-analysis`:
> This flag generates an Excel file containing an analysis with the test case name as "ID", the test status as "Status", and the fail reason, if any, as "Fail Reason". It can be used to generate an analysis of a single test run's results.json file.
> `$ python3 analysis.py --new-analysis [json_file]`
2. `--add-to-existing`:
> This flag appends new results.json data to an already existing Excel file. It can be used to compare new runs with previous runs, accumulating your own analysis in one sheet.
> `$ python3 analysis.py --add-to-existing [xlsx_file] [json_file]`
3. `--compare-two-results`:
> This flag compares exactly two results.json files by adding a new column, "Result", to the Excel file. This column indicates which results differ from the previous run of the same test name (ID).
> 1. "DIFF" represents a different test status (Status)
> 2. "REGRESSION" represents tests which passed in the previous run but do not pass now
> 3. "SOLVED" represents tests which pass now but did not earlier
> 4. "" (empty) represents no change in the test status.
> `$ python3 analysis.py --compare-two-results [old_json_file] [new_json_file]`
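For reference, below is a minimal sketch of the `results.json` fields the script reads (the field names match the code in `analysis.py`; the values and paths are purely illustrative):

```json
{
  "debuglog": "/root/avocado/job-results/job-2024-04-05T00.00-example/job.log",
  "failures": 1,
  "errors": 0,
  "skip": 0,
  "interrupt": 0,
  "cancel": 0,
  "pass": 1,
  "tests": [
    {
      "name": "avocado-misc-tests/generic/example.py:Example.test",
      "status": "PASS",
      "fail_reason": "None",
      "whiteboard": ""
    }
  ]
}
```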
-----

### References:
* [Avocado Test Framework](https://github.com/avocado-framework/avocado)
* [Avocado Test Framework documentation](http://avocado-framework.readthedocs.org)
248 changes: 248 additions & 0 deletions analysis.py
@@ -0,0 +1,248 @@
#!/usr/bin/env python3

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2023 IBM
# Author: Misbah Anjum N <[email protected]>


'''
The analysis.py script is aimed at helping with the analysis/comparison of
avocado test runs by generating a simple excel file (.xlsx). The results.json
file which gets created after an avocado run is passed as input on the command
line while running this script and, depending on the flag/options provided,
the excel analysis/comparison sheet will be generated.

Prerequisites:-
pip3 install pandas[excel]
(or)
dnf install python3-pandas python3-numpy python3-openpyxl

flags/options:-
1. --new-analysis
2. --add-to-existing
3. --compare-two-results

python3 analysis.py --new-analysis <json_file>
python3 analysis.py --add-to-existing <xlsx_file> <json_file>
python3 analysis.py --compare-two-results <old_json_file> <new_json_file>

Check README.md for more explanation
'''


import sys
import json
import pandas as pd
from openpyxl.styles import Font, Border, Side, PatternFill, Alignment


def test_analysis(data):
    '''
    This function is used to generate an excel file which contains a summary of
    avocado based test runs. It reads results.json as input and generates a .xlsx
    file as output.
    '''

    # Extract job name from debuglog attribute
    job_name = data['debuglog'].split('/')[-2]

    # Create a DataFrame
    dataframe = pd.DataFrame(columns=['Name', 'Status', 'Fail Reason'])

    # Add rows for job name and other attributes
    dataframe.loc[0] = ['', 'Job Name', job_name]
    dataframe.loc[1] = ['', 'Fail', data['failures']]
    dataframe.loc[2] = ['', 'Error', data['errors']]
    dataframe.loc[3] = ['', 'Skip', data['skip']]
    dataframe.loc[4] = ['', 'Interrupt', data['interrupt']]
    dataframe.loc[5] = ['', 'Cancel', data['cancel']]
    dataframe.loc[6] = ['', 'Pass', data['pass']]
    dataframe.loc[7] = ['', 'White-Board', data['tests'][0]['whiteboard']]

    # Loop through the 'tests' list in the JSON data and add rows
    for i, test in enumerate(data['tests']):
        dataframe.loc[i + 8] = [test['name'], test['status'], test['fail_reason']]

    # Save the DataFrame to an Excel file
    dataframe.to_excel('Analysis.xlsx', index=False)


def Comparison_Analysis(excel, data):
    '''
    This function is used to generate an excel sheet which gives a delta comparison
    of two avocado based test runs. Using the excel sheet produced by
    test_analysis(data) and results.json as inputs, it generates a .xlsx
    file as output.
    '''

    # Store the test names found in the existing excel file
    old_dataframe = pd.read_excel(excel)
    test_names = old_dataframe[old_dataframe.columns[0]]
    test_names = [test_names[x] for x in range(8, len(old_dataframe.index))]

    # Extract job name from debuglog attribute
    job_name = data['debuglog'].split('/')[-2]

    # Create a DataFrame
    new_dataframe = pd.DataFrame(columns=['Status', 'Fail Reason'])

    # Add rows for job name and other attributes
    new_dataframe.loc[0] = ['Job Name', job_name]
    new_dataframe.loc[1] = ['Fail', data['failures']]
    new_dataframe.loc[2] = ['Error', data['errors']]
    new_dataframe.loc[3] = ['Skip', data['skip']]
    new_dataframe.loc[4] = ['Interrupt', data['interrupt']]
    new_dataframe.loc[5] = ['Cancel', data['cancel']]
    new_dataframe.loc[6] = ['Pass', data['pass']]
    new_dataframe.loc[7] = ['White-Board', data['tests'][0]['whiteboard']]

    # Loop through the 'tests' list in the JSON data and add rows
    for i, test in enumerate(data['tests']):
        found = 0
        for j in range(len(test_names)):
            if test['name'] == test_names[j]:
                new_dataframe.loc[j + 8] = [test['status'], test['fail_reason']]
                found = 1
                break
        if found == 0:
            # Test is new in this run: append it to the old sheet, padding
            # the remaining columns with empty cells
            new = [test['name']]
            for _ in range(len(old_dataframe.columns) - 1):
                new.append("")
            old_dataframe.loc[len(old_dataframe.index)] = new
            new_dataframe.loc[len(old_dataframe.index) - 1] = [test['status'], test['fail_reason']]

    final_res = pd.concat([old_dataframe, new_dataframe], axis=1)
    final_res.to_excel(excel, index=False)

    # Add the Result column to compare two results
    if "--compare-two-results" in sys.argv:
        dataframe = pd.read_excel(excel)
        results = []
        for i in range(len(dataframe.index)):
            if dataframe.loc[i].iat[-2] == dataframe.loc[i].iat[-4]:
                results.append("")
            else:
                if dataframe.loc[i].iat[-4] == "PASS" and not pd.isnull(dataframe.loc[i].iat[-2]):
                    results.append("REGRESSION")
                elif dataframe.loc[i].iat[-2] == "PASS" and not pd.isnull(dataframe.loc[i].iat[-4]):
                    results.append("SOLVED")
                else:
                    results.append("DIFF")

        result_dataframe = pd.DataFrame(columns=['Result'])
        for i in range(8, len(results)):
            result_dataframe.loc[i] = results[i]

        final_dataframe = pd.concat([dataframe, result_dataframe], axis=1)
        final_dataframe.to_excel(excel, index=False)


def deco(excel):
    '''
    This function is used to conditionally format the xlsx file
    Libraries used: ExcelWriter, openpyxl
    '''

    # Read the generated analysis back into a DataFrame
    dataframe = pd.read_excel(excel)

    # Create a Pandas ExcelWriter object and write to the Excel file
    excel_writer = pd.ExcelWriter(excel, engine='openpyxl')
    dataframe.to_excel(excel_writer, sheet_name='Sheet1', index=False)

    # Access the workbook and worksheet objects
    workbook = excel_writer.book
    worksheet = excel_writer.sheets['Sheet1']

    # Column Width
    worksheet.column_dimensions['A'].width = 60
    worksheet.column_dimensions['B'].width = 20
    worksheet.column_dimensions['C'].width = 80
    worksheet.column_dimensions['D'].width = 20
    worksheet.column_dimensions['E'].width = 80
    worksheet.column_dimensions['F'].width = 20

    # Apply styles to the entire sheet
    for row in worksheet.iter_rows(min_row=2, max_row=len(dataframe) + 1):
        for cell in row:
            cell.font = Font(size=15)
            cell.border = Border(left=Side(border_style='thin', color='000000'),
                                 right=Side(border_style='thin', color='000000'),
                                 top=Side(border_style='thin', color='000000'),
                                 bottom=Side(border_style='thin', color='000000'))
            cell.alignment = Alignment(wrap_text=True, vertical='center')

    # Apply header formatting
    for cell in worksheet[1]:
        cell.font = Font(size=18, bold=True)  # Bold header text
        cell.fill = PatternFill(start_color='ADD8E6', end_color='ADD8E6', fill_type='solid')  # Light blue background

    # Conditional formatting for the "Result" column if present
    try:
        for idx, value in enumerate(dataframe['Result'], start=2):
            cell = worksheet.cell(row=idx, column=6)
            if value == 'DIFF':
                cell.fill = PatternFill(start_color='FF0000', end_color='FF0000', fill_type='solid')  # Red
            elif value == 'SOLVED':
                cell.fill = PatternFill(start_color='39E75F', end_color='39E75F', fill_type='solid')  # Green
            elif value == 'REGRESSION':
                cell.fill = PatternFill(start_color='FFA500', end_color='FFA500', fill_type='solid')  # Orange
    except KeyError:
        # No "Result" column (single-run analysis); skip conditional colors
        pass

    # Save the styled Excel file
    workbook.save(excel)


def main():
    try:
        if "--new-analysis" in sys.argv:
            with open(sys.argv[-1], 'r') as json_file:
                data = json.load(json_file)
            test_analysis(data)
            deco("Analysis.xlsx")

        elif "--add-to-existing" in sys.argv:
            with open(sys.argv[-1], 'r') as json_file:
                data = json.load(json_file)
            excel = sys.argv[-2]
            Comparison_Analysis(excel, data)
            deco(excel)

        elif "--compare-two-results" in sys.argv:
            with open(sys.argv[-2], 'r') as json_file:
                data = json.load(json_file)
            test_analysis(data)
            deco("Analysis.xlsx")

            with open(sys.argv[-1], 'r') as json_file:
                data = json.load(json_file)
            Comparison_Analysis("Analysis.xlsx", data)
            deco("Analysis.xlsx")

        else:
            raise Exception("no valid option provided")

    except Exception:
        print("\nPay attention to the usage:\n" + usage())
        sys.exit(1)


def usage():
    return ("python3 analysis.py --new-analysis <json_file>\n"
            "python3 analysis.py --add-to-existing <xlsx_file> <json_file>\n"
            "python3 analysis.py --compare-two-results <old_json_file> <new_json_file>\n")


if __name__ == '__main__':
    main()
9 changes: 9 additions & 0 deletions avocado-setup.py
100755 → 100644
@@ -626,6 +626,10 @@ def parse_test_config(test_config_file, avocado_bin, enable_kvm):
parser.add_argument('--nrunner', dest="nrunner", action='store_true',
default=False, help='enable Parallel run')

parser.add_argument('--run-tests', dest="run_tests", action='store',
                    default=None,
                    help="Run the host tests provided in this option and publish the result [Note: test names (full path), separated by commas]")

args = parser.parse_args()
if helper.get_machine_type() == 'pHyp':
args.enable_kvm = False
@@ -668,6 +672,11 @@ def parse_test_config(test_config_file, avocado_bin, enable_kvm):
bootstrap(args.enable_kvm)
bootstraped = True

if args.run_tests:
    with open("%s/host/dynamic_test_suite.cfg" % TEST_CONF_PATH, "w+") as fp:
        fp.write('\n'.join(args.run_tests.split(",")))
    args.run_suite = str(args.run_suite) + ",host_dynamic_test_suite"
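For example, a run using this new option might look like the following (the test paths are purely illustrative):

> `$ python3 avocado-setup.py --run-tests /path/to/host/test_one.py,/path/to/host/test_two.py`

The named tests are written to a dynamically generated `dynamic_test_suite.cfg` under the host config directory, and `host_dynamic_test_suite` is then appended to the list of suites to run.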

if args.inputfile:
if not os.path.isfile(args.inputfile):
logger.debug(
3 changes: 3 additions & 0 deletions config/inputs/io_fc_input.txt
@@ -14,6 +14,9 @@ fsstress_loop = 10
num_of_dlpar = 10
iteration = 10
run_type = 'rpm'
max_freeze = 6
function = 4
err = 1
manageSystem = ""
userid = ""
password = ""
26 changes: 13 additions & 13 deletions config/inputs/io_nvme_input.txt
@@ -1,26 +1,26 @@
[io_nvme]
device = "nvme0"
disk = "/dev/nvme0n1"
disks = "/dev/nvme0n2 /dev/nvme0n3 /dev/nvme0n4 /dev/nvme0n5"
lv_disks = "/dev/nvme0n6 /dev/nvme0n7 /dev/nvme0n8"
htx_disks = "/dev/nvme0n9 /dev/nvme0n10"
namespace = 10
firmware_url = ""
utilization = "100"
disks = "/dev/nvme0n2 /dev/nvme0n3 /dev/nvme0n4 /dev/nvme0n5"
pci_device = ""
pci_devices = ""
only_io = True
iteration = 20
module = "nvme_core"
count = 10
fsstress_loop = 3
num_of_dlpar = 20
num_of_hotplug = 20
iteration = 20
run_type = "rpm"
max_freeze = 6
function = 4
err = 1
hmc_pwd =
hmc_username =
namespace = 10
utilization = "100"
only_io = True
manageSystem = ""
fsstress_loop = 3
num_of_hotplug = 20
num_of_dlpar = 20
count = 10
hmc_username =
hmc_pwd =
htx_rpm_link = ""
run_type = "rpm"
firmware_url = ""