# eval_hyperparameters.py (forked from DLR-RM/rl-baselines3-zoo)
import json
import sys

import supersuit as ss
from pettingzoo.butterfly import pistonball_v6
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.preprocessing import is_image_space, is_image_space_channels_first
from stable_baselines3.common.vec_env import VecMonitor, VecTransposeImage
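
# Train and evaluate PPO on Pistonball for one tuned hyperparameter set,
# selected by index from ./hyperparameter_jsons/. Policies averaging above
# 90 reward over 25 evaluation episodes are saved to ./mature_policies/.
# Usage: python eval_hyperparameters.py <num>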

num = sys.argv[1]  # index of the hyperparameter JSON to load
n_evaluations = 20
n_agents = 20  # Pistonball spawns 20 pistons, each treated as an agent
n_envs = 4
n_timesteps = 2_000_000
with open(f"./hyperparameter_jsons/hyperparameters_{num}.json") as f:
    params = json.load(f)
print(params)
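
# The JSON holds keyword arguments forwarded directly to PPO. A hypothetical
# example (the actual keys are whatever was tuned for this run):
# {"learning_rate": 0.0003, "n_steps": 256, "gamma": 0.99, "ent_coef": 0.01}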

def image_transpose(env):
    # SB3 CNN policies expect channels-first images; transpose if needed.
    if is_image_space(env.observation_space) and not is_image_space_channels_first(
        env.observation_space
    ):
        env = VecTransposeImage(env)
    return env

# Training environment: reduce frames to a single color channel, downscale
# to 84x84, stack 3 frames, then convert the parallel PettingZoo env into a
# Stable-Baselines3 vectorized env.
env = pistonball_v6.parallel_env()
env = ss.color_reduction_v0(env, mode="B")
env = ss.resize_v0(env, x_size=84, y_size=84)
env = ss.frame_stack_v1(env, 3)
env = ss.pettingzoo_env_to_vec_env_v1(env)
env = ss.concat_vec_envs_v1(env, n_envs, num_cpus=1, base_class="stable_baselines3")
env = VecMonitor(env)
env = image_transpose(env)
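
# After channel reduction and frame stacking, observations are (84, 84, 3)
# in channels-last order, so image_transpose wraps the env in
# VecTransposeImage before it reaches the CnnPolicy.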

# Evaluation environment: identical preprocessing, single vectorized copy.
eval_env = pistonball_v6.parallel_env()
eval_env = ss.color_reduction_v0(eval_env, mode="B")
eval_env = ss.resize_v0(eval_env, x_size=84, y_size=84)
eval_env = ss.frame_stack_v1(eval_env, 3)
eval_env = ss.pettingzoo_env_to_vec_env_v1(eval_env)
eval_env = ss.concat_vec_envs_v1(
    eval_env, 1, num_cpus=1, base_class="stable_baselines3"
)
eval_env = VecMonitor(eval_env)
eval_env = image_transpose(eval_env)

# EvalCallback counts per-environment steps, so scale the evaluation
# interval down by the number of parallel envs times agents per env.
eval_freq = int(n_timesteps / n_evaluations)
eval_freq = max(eval_freq // (n_envs * n_agents), 1)
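
# With the defaults above: 2_000_000 / 20 = 100_000 timesteps between
# evaluations, and 100_000 // (4 * 20) = 1250 per-env steps.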
all_mean_rewards = []
for i in range(10):
    try:
        model = PPO("CnnPolicy", env, verbose=1, **params)
        eval_callback = EvalCallback(
            eval_env,
            best_model_save_path=f"./eval_logs/{num}/{i}/",
            log_path=f"./eval_logs/{num}/{i}/",
            eval_freq=eval_freq,
            deterministic=True,
            render=False,
        )
        model.learn(total_timesteps=n_timesteps, callback=eval_callback)
        # Reload the best checkpoint found during training, not the final model.
        model = PPO.load(f"./eval_logs/{num}/{i}/best_model")
        mean_reward, std_reward = evaluate_policy(
            model, eval_env, deterministic=True, n_eval_episodes=25
        )
        print(mean_reward)
        print(std_reward)
        all_mean_rewards.append(mean_reward)
        # Policies averaging above 90 reward are considered mature and saved.
        if mean_reward > 90:
            model.save(
                f"./mature_policies/{num}/{i}_{str(mean_reward).split('.')[0]}.zip"
            )
    except Exception as e:
        print(f"Error occurred during evaluation: {e}")

if len(all_mean_rewards) > 0:
    print(sum(all_mean_rewards) / len(all_mean_rewards))
else:
    print("No runs completed successfully")