# multi_evaluate.py — forked from yuting-wei/AC-EVAL
# (GitHub page chrome and line-number gutter from the web scrape removed so
#  the file is valid Python again.)
from dotenv import load_dotenv
load_dotenv()
import os
import logging
from datetime import datetime
from arg_parser import parse_args
from evaluate.eval import Evaluator
# model_list = [
# "Qwen-7B-Chat",
# "Qwen-14B-Chat",
# "Qwen-72B-Chat",
# "Xunzi-Qwen-Chat",
# "qwen-max",
# "ernie-bot",
# "ernie-bot-4.0",
# "glm-4",
# "glm-3-turbo",
# "chatglm3-6b",
# "Yi-6B-Chat",
# "Yi-34B-Chat",
# "Baichuan2-7B-Chat",
# "Baichuan2-13B-Chat",
# "gpt-3.5-turbo-0125",
# "gpt-4-0125-preview",
# ]
# --- Run configuration -------------------------------------------------------
args = parse_args()

# Models come as a comma-separated CLI string, e.g. "glm-4,qwen-max".
model_list: list[str] = [model.strip() for model in args.models.split(",")]
# str.split(",") always yields >= 1 element, so check for *empty names*
# instead of using assert (which is stripped under `python -O`).
if not all(model_list):
    raise ValueError("Select at least one non-empty model name for evaluation.")

data_dir: str = args.data_dir
mode: str = args.mode
times: int = args.times

# Dataset names are the file stems of every file in the data directory,
# in sorted (deterministic) order.
dataset_list = [os.path.splitext(file)[0] for file in sorted(os.listdir(data_dir))]

# Map each prompting mode to its output root. The original if/elif chain
# silently left `out_root` unbound on an unknown mode and crashed later
# with NameError; fail fast with a clear message instead.
_OUT_ROOTS = {
    "zero-shot": "output",
    "zero-shot-cot": "output_zero_cot",
    "few-shot": "output_few",
    "few-shot-cot": "output_few_cot",
}
try:
    out_root = _OUT_ROOTS[mode]
except KeyError:
    raise ValueError(
        f"Unknown mode {mode!r}; expected one of {sorted(_OUT_ROOTS)}"
    ) from None

# Log configuration. FileHandler raises FileNotFoundError if the directory
# does not exist yet, so create the output root first.
os.makedirs(out_root, exist_ok=True)
formatted_time = datetime.now().strftime("%m%d%H%M")
run_log = os.path.join(out_root, f"run_{model_list[0]}_{formatted_time}.log")
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[logging.StreamHandler(), logging.FileHandler(run_log, encoding="utf-8")],
)

# Run every model over every dataset file, `times` rounds in total.
for count in range(1, times + 1):
    for model in model_list:
        # Lazy %-style args so logging formats only when the record is emitted.
        logging.info("Model %s is being evaluated...", model)
        out_dir = os.path.join(out_root, model)
        evaluator = Evaluator(
            model_name=model,
            output_dir=out_dir,
            mode=mode,
        )
        for file in dataset_list:
            logging.info("Current evaluation dataset: %s", file)
            evaluator.update_data(data_dir, file)
            evaluator.call_and_save()
            evaluator.calculate_accuracy()