adding more generic way of comparing with working ingress
Signed-off-by: Paige Rubendall <[email protected]>
paigerube14 committed Jan 31, 2024
1 parent 6ed1919 commit 30b6c09
Showing 3 changed files with 103 additions and 52 deletions.
45 changes: 34 additions & 11 deletions README.md
@@ -19,20 +19,43 @@ tests :
     # ipsec: false

     metrics :
-    - metric : podReadyLatency
-      metricType : latency
+    - name: podReadyLatency
+      metricName: podLatencyQuantilesMeasurement
+      quantileName: Ready
+      metric_of_interest: P99
+      not:
+        - jobConfig.name: "garbage-collection"
-    - metric : apiserverCPU
-      metricType : cpu
-      namespace: openshift-kube-apiserver
+    - name: apiserverCPU
+      metricName : containerCPU
+      labels.namespace: openshift-kube-apiserver
+      metric_of_interest: value
+      agg:
+        value: cpu
+        agg_type: avg
-    - metric: ovnCPU
-      metricType: cpu
-      namespace: openshift-ovn-kubernetes
+    - name: ovnCPU
+      metricName : containerCPU
+      labels.namespace: openshift-ovn-kubernetes
+      metric_of_interest: value
+      agg:
+        value: cpu
+        agg_type: avg
-    - metric: etcdCPU
-      metricType: cpu
-      namespace: openshift-ovn-kubernetes
+    - name: etcdCPU
+      metricName : containerCPU
+      labels.namespace: openshift-etcd
+      metric_of_interest: value
+      agg:
+        value: cpu
+        agg_type: avg
+    - name: etcdDisck
+      metricName : 99thEtcdDiskBackendCommitDurationSeconds
+      metric_of_interest: value
+      agg:
+        value: duration
+        agg_type: avg
 ```
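The new configuration is generic across benchmarks: each entry names the metric document to query (`metricName`), optional filters such as `labels.namespace` or a `not:` exclusion, the field to compare (`metric_of_interest`), and an optional `agg` block for aggregated values. As a rough illustration (not part of the commit), the sketch below shows how an `agg` entry maps to a column name in the results dataframe, mirroring the renaming done in `get_metric_data` further down.

```python
# Illustrative sketch only: derive the output column name for an aggregated
# metric entry, the same way get_metric_data renames the dataframe column.
metric = {
    "name": "apiserverCPU",
    "metricName": "containerCPU",
    "labels.namespace": "openshift-kube-apiserver",
    "metric_of_interest": "value",
    "agg": {"value": "cpu", "agg_type": "avg"},
}

agg_name = metric["agg"]["value"] + "_" + metric["agg"]["agg_type"]  # "cpu_avg"
column = metric["name"] + "_" + agg_name                             # "apiserverCPU_cpu_avg"
print(column)
```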
108 changes: 68 additions & 40 deletions orion.py
@@ -19,7 +19,6 @@ def cli():
     cli function to group commands
     """

-
 # pylint: disable=too-many-locals
 @click.command()
 @click.option("--config", default="config.yaml", help="Path to the configuration file")
@@ -60,46 +59,19 @@ def orion(config, debug, output):
         print("No UUID present for given metadata")
         sys.exit()

-    runs = match.match_kube_burner(uuids)
-    ids = match.filter_runs(runs, runs)
+    if metadata["benchmark"] == "k8s-netperf" :
+        index = "k8s-netperf"
+        ids = uuids
+    elif metadata["benchmark"] == "ingress-perf" :
+        index = "ingress-performance"
+        ids = uuids
+    else:
+        index = "ripsaw-kube-burner"
+        runs = match.match_kube_burner(uuids)
+        ids = match.filter_runs(runs, runs)

     metrics = test["metrics"]
-    dataframe_list = []
-
-    for metric in metrics:
-        logger.info("Collecting %s", metric["metric"])
-        if metric["metricType"] == "latency":
-            if metric["metric"] == "podReadyLatency":
-                try:
-                    podl = match.burner_results("", ids, "ripsaw-kube-burner*")
-                    podl_df = match.convert_to_df(
-                        podl, columns=["uuid", "timestamp", "P99"]
-                    )
-                    dataframe_list.append(podl_df)
-                    logger.debug(podl_df)
-                except Exception as e:  # pylint: disable=broad-exception-caught
-                    logger.error(
-                        "The namespace %s does not exist, exception %s",
-                        metric["namespace"],
-                        e,
-                    )
-
-        elif metric["metricType"] == "cpu":
-            try:
-                cpu = match.burner_cpu_results(
-                    ids, metric["namespace"], "ripsaw-kube-burner*"
-                )
-                cpu_df = match.convert_to_df(cpu, columns=["uuid", "cpu_avg"])
-                cpu_df = cpu_df.rename(
-                    columns={"cpu_avg": metric["metric"] + "_cpu_avg"}
-                )
-                dataframe_list.append(cpu_df)
-                logger.debug(cpu_df)
-            except Exception as e:  # pylint: disable=broad-exception-caught
-                logger.error(
-                    "The namespace %s does not exist, exception %s",
-                    metric["namespace"],
-                    e,
-                )
+    dataframe_list = get_metric_data(ids, index, metrics, match, logger)

     merged_df = reduce(
         lambda left, right: pd.merge(left, right, on="uuid", how="inner"),
@@ -108,6 +80,62 @@ def orion(config, debug, output):
     match.save_results(merged_df, csv_file_path=output)


+def get_metric_data(ids, index, metrics, match, logger):
+    """Gets details of each metric based on the metric yaml list
+    Args:
+        ids (list): list of all uuids
+        index (str): index in es of where to find data
+        metrics (list): metrics to gather data on
+        match (Matcher): current matcher instance
+        logger (logger): log data to one output
+    Returns:
+        dataframe_list: list of dataframes, one per metric
+    """
+    dataframe_list = []
+    for metric in metrics:
+        metric_name = metric['name']
+        logger.info("Collecting %s", metric_name)
+        metric_of_interest = metric['metric_of_interest']
+
+        if "agg" in metric.keys():
+            try:
+                cpu = match.get_agg_metric_query(
+                    ids, index, metric
+                )
+                agg_value = metric['agg']['value']
+                agg_type = metric['agg']['agg_type']
+                agg_name = agg_value + "_" + agg_type
+                cpu_df = match.convert_to_df(cpu, columns=["uuid", agg_name])
+                cpu_df = cpu_df.rename(
+                    columns={agg_name: metric_name + "_" + agg_name}
+                )
+                dataframe_list.append(cpu_df)
+                logger.debug(cpu_df)
+
+            except Exception as e:  # pylint: disable=broad-exception-caught
+                logger.error(
+                    "Couldn't get agg metrics %s, exception %s",
+                    metric_name,
+                    e,
+                )
+        else:
+            try:
+                podl = match.getResults("", ids, index, metric)
+                podl_df = match.convert_to_df(
+                    podl, columns=["uuid", "timestamp", metric_of_interest]
+                )
+                dataframe_list.append(podl_df)
+                logger.debug(podl_df)
+            except Exception as e:  # pylint: disable=broad-exception-caught
+                logger.error(
+                    "Couldn't get metrics %s, exception %s",
+                    metric_name,
+                    e,
+                )
+    return dataframe_list
+
 def get_metadata(test,logger):
     """Gets metadata of the run from each test
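For reference, `get_metric_data` returns one dataframe per configured metric, each keyed by `uuid`, and `orion()` then inner-joins them with `functools.reduce` as shown in the hunk above. Below is a minimal, self-contained sketch of that merge step; the column names and values are invented for illustration, and no Elasticsearch or fmatch access is needed to run it.

```python
# Sketch of the merge performed in orion(): per-metric dataframes sharing a
# "uuid" column are inner-joined into a single row per run. Data is made up.
from functools import reduce

import pandas as pd

dataframe_list = [
    pd.DataFrame({"uuid": ["u1", "u2"], "P99": [12000, 13500]}),
    pd.DataFrame({"uuid": ["u1", "u2"], "apiserverCPU_cpu_avg": [0.82, 0.91]}),
    pd.DataFrame({"uuid": ["u1", "u2"], "etcdDisck_duration_avg": [0.011, 0.012]}),
]

merged_df = reduce(
    lambda left, right: pd.merge(left, right, on="uuid", how="inner"),
    dataframe_list,
)
print(merged_df)
```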
2 changes: 1 addition & 1 deletion requirements.txt
@@ -3,7 +3,7 @@ click==8.1.7
 elastic-transport==8.11.0
 elasticsearch==8.11.1
 elasticsearch7==7.13.0
-fmatch==0.0.3
+fmatch==0.0.4
 numpy==1.26.3
 pandas==2.1.4
 python-dateutil==2.8.2