forked from PaddlePaddle/continuous_evaluation
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy patheva.xsh
executable file
·122 lines (104 loc) · 3.06 KB
/
eva.xsh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
#!/usr/bin/env xonsh
# Abort the whole script when any subprocess exits non-zero
# (xonsh's equivalent of shell `set -e`).
$RAISE_SUBPROC_ERROR = True
# Show full Python tracebacks for xonsh errors instead of short summaries.
$XONSH_SHOW_TRACEBACK = True
# Make the current directory importable so the local helper modules
# (_config, utils, analysis_kpis) resolve.
import sys; sys.path.insert(0, '')
import subprocess
import _config
from _config import pjoin
from utils import PathRecover, log
import os
import argparse
from analysis_kpis import AnalysisKpiData
# Expose the CE workspace root both as a xonsh environment variable and to
# child processes spawned by the task run scripts.
$ceroot=_config.workspace
os.environ['ceroot'] = _config.workspace
def parse_args():
    """Parse command-line options for the model benchmark runner.

    Returns:
        argparse.Namespace with ``task_dir`` (str or None) and
        ``times`` (int, default 5).
    """
    parser = argparse.ArgumentParser("model benchmark")
    parser.add_argument('--task_dir', type=str, help='The model dir.')
    parser.add_argument('--times', type=int, default=5, help='The run times')
    return parser.parse_args()
def get_changed_tasks(args):
    """Return the list of task (model) names to evaluate.

    When ``--task_dir`` was supplied, it is treated as a whitespace-separated
    list of task names and used verbatim. Otherwise the tasks are derived
    from ``git diff master`` inside the baseline repository: for every
    ``diff --git a/<path> b/<path>`` header, the second path component of
    the ``b/`` side is taken as the task name.
    """
    tasks = []
    print (args.task_dir, args.times)
    if args.task_dir:
        tasks = args.task_dir.split()
        return tasks
    cd @(_config.baseline_path)
    out = $(git diff master | grep "diff --git")
    out = out.strip()
    for item in out.split('\n'):
        # A header line looks like: diff --git a/tasks/<task>/f b/tasks/<task>/f
        # Field 3 is the "b/<path>" token; path component 1 is the task name.
        # NOTE(review): assumes every changed file lives at least two path
        # components deep -- a change to a top-level file would raise IndexError.
        task = item.split()[3].split('/')[1]
        if task not in tasks:
            tasks.append(task)
    log.warn("changed tasks: %s" % tasks)
    return tasks
def main():
    """Evaluate every changed task and print per-task KPI analysis.

    Runs each task ``args.times`` times via ``run_task``, feeds the collected
    KPI data through ``AnalysisKpiData``, and exits with status 1 if any
    task failed.
    """
    args = parse_args()
    suc = True
    fail_models = []
    tasks = get_changed_tasks(args)
    times = args.times
    for task in tasks:
        try:
            kpis_status, kpis_list = run_task(task, times)
            print(kpis_list)
            ana = AnalysisKpiData(kpis_status, kpis_list)
            ana.analysis_data()
            ana.print_result()
        except Exception as e:
            # Record the failure but keep evaluating the remaining tasks.
            print(e)
            suc = False
            fail_models.append(task)
    if suc:
        print("all changed models success!")
    else:
        # BUG FIX: log.warn was previously called with two positional
        # arguments ("failed models:", fail_models), which does not match the
        # single-string "%s"-interpolation style used everywhere else in this
        # file and would mis-format under a stdlib-style logger. Interpolate
        # the list into the message instead.
        log.warn("failed models: %s" % fail_models)
        sys.exit(1)
def run_task(task_name, times):
    '''
    Run the model task.

    Imports the task's KPI definitions (preferring
    ``tasks/<task>/continuous_evaluation.py`` and falling back to the legacy
    ``tasks/<task>/_ce.py``), executes the task's ``run.xsh`` up to ``times``
    times (only once when no KPI is actived), and collects the current KPI
    values after each run.

    Returns a tuple ``(kpis_status, kpis_list)`` where ``kpis_status`` maps
    KPI name -> actived flag and ``kpis_list`` holds one ``{name: cur_data}``
    dict per run.
    '''
    task_dir = pjoin(_config.baseline_path, task_name)
    # NOTE(review): log.warn is called with two positional args here, unlike
    # the "%s"-interpolation style used elsewhere in this file -- confirm
    # utils.log accepts multiple arguments.
    log.warn('run model', task_name)
    cd @(_config.workspace)
    env = {}
    try:
        # Prefer the new-style KPI definition module...
        exec('from tasks.%s.continuous_evaluation import tracking_kpis'
             % task_name, env)
        log.info("import from continuous_evaluation suc.")
    except Exception as e:
        log.warn("import failed. %s" % e)
        # ...and fall back to the legacy module name.
        exec('from tasks.%s._ce import tracking_kpis' % task_name, env)
        log.info("import from _ce suc")
    tracking_kpis = env['tracking_kpis']
    kpis_status = get_kpis_status(tracking_kpis)
    # Repeat the run only when at least one KPI is actived; otherwise a
    # single run is sufficient.
    need_mul_times = False
    for actived in kpis_status.values():
        if actived:
            need_mul_times = True
            break
    if not need_mul_times:
        times = 1
    kpis_list = []
    for i in range(times):
        # PathRecover presumably restores the working directory on exit --
        # confirmed only by its use pattern here.
        with PathRecover():
            cd @(task_dir)
            ./run.xsh
            cd @(_config.workspace)
        kpis = {}
        for kpi in tracking_kpis:
            # Point the KPI at this task's directory so cur_data reads the
            # freshly produced result files.
            kpi.root = task_dir
            kpis[kpi.name] = kpi.cur_data
        kpis_list.append(kpis)
    return kpis_status, kpis_list
def get_kpis_status(tracking_kpis):
    """Map each KPI's name to its ``actived`` flag.

    Args:
        tracking_kpis: iterable of KPI objects exposing ``name`` and
            ``actived`` attributes.

    Returns:
        dict mapping KPI name -> actived flag.
    """
    kpis_status = {kpi.name: kpi.actived for kpi in tracking_kpis}
    print (kpis_status)
    return kpis_status
main()