eval.py (forked from CGCL-codes/naturalcc)
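
"""Evaluate the StaticMapping baseline on the OpenCL device-mapping task:
for each platform (AMD, NVIDIA), compare predicted CPU/GPU mappings against
the oracle labels and report mean accuracy and speedup over the platform's
default device, per benchmark suite and per platform."""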
import os

import numpy as np
import pandas as pd
import torch

from dataset.opencl_large import ATTRIBUTES_DIR
from ncc.eval.mapping import mapping_metrics
from ncc.models.mapping import StaticMapping
from ncc.utils.file_ops import json_io

SEED = 204


def cli_main():
    data = []
    for platform in ['amd', 'nvidia']:

        def get_attr(attr):
            oracle_file = os.path.join(ATTRIBUTES_DIR, f'{platform}.{attr}')
            with open(oracle_file, 'r') as reader:
                out = [json_io.json_loads(line) for line in reader]
            return np.asarray(out)
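
        # Load the per-kernel attributes for this platform: oracle
        # (best-device) labels, benchmark names, and measured CPU/GPU
        # runtimes, each stored as one JSON value per line.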
        platform_name = mapping_metrics.platform2str(platform)
        devices = get_attr('oracle')
        benchmarks = get_attr('benchmark')
        runtime_cpus = get_attr('runtime_cpu')
        runtime_gpus = get_attr('runtime_gpu')

        # static mapping model
        model = StaticMapping.build_model(devices)

        # optimal (oracle) mappings vs. the model's predictions
        src_tokens = torch.from_numpy(np.zeros(len(devices)))
        ground_truth = torch.from_numpy(np.array([1 if x == 1 else 0 for x in devices]))
        predictions = model(src_tokens)
        accuracy = (predictions == ground_truth).tolist()

        # runtimes of the baseline mapping (CPU on AMD, GPU on NVIDIA)
        gt_runtimes = (runtime_cpus if platform == "amd" else runtime_gpus)
        pred_runtimes = [
            (runtime_cpus if pred == 0 else runtime_gpus)[idx]
            for idx, pred in enumerate(predictions)
        ]
        speedup = gt_runtimes / pred_runtimes
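        # speedup > 1 means the predicted mapping beats the platform default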

        # record results
        for benchmark_, o_, p_, accuracy_, p_speedup_ in \
                zip(benchmarks, ground_truth, predictions, accuracy, speedup):
            data.append({
                "Model": model.__class__.__name__,
                "Platform": platform_name,
                "Benchmark": mapping_metrics.escape_benchmark_name(benchmark_),
                "Benchmark Suite": mapping_metrics.escape_suite_name(benchmark_),
                "Oracle Mapping": o_,
                "Predicted Mapping": p_,
                "Accuracy": accuracy_,
                "Speedup": p_speedup_,
            })

    performance = pd.DataFrame(
        data, index=range(1, len(data) + 1), columns=[
            "Model",
            "Platform",
            "Benchmark",
            "Benchmark Suite",
            "Oracle Mapping",
            "Predicted Mapping",
            "Accuracy",
            "Speedup",
        ])
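
    # Aggregate mean accuracy and speedup per benchmark suite, then per platform.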
    benchmark_out = performance.groupby(['Platform', 'Benchmark Suite'])[['Accuracy', 'Speedup']].mean()
    benchmark_out['Accuracy'] = round(benchmark_out['Accuracy'] * 100, 2)
    print(benchmark_out)

    out = performance.groupby(['Platform'])[['Accuracy', 'Speedup']].mean()
    out['Accuracy'] = round(out['Accuracy'] * 100, 2)
    print(out)


if __name__ == '__main__':
    cli_main()
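
# To run (a sketch; assumes naturalcc and its `dataset` package are on
# PYTHONPATH and the opencl_large attribute files exist under ATTRIBUTES_DIR):
#   python eval.py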