"""
In this file, you may edit the hyperparameters used for different environments.
memory_size: Maximum size of the replay memory.
n_episodes: Number of episodes to train for.
batch_size: Batch size used for training DQN.
target_update_frequency: How often to update the target network.
train_frequency: How often to train the DQN.
gamma: Discount factor.
lr: Learning rate used for optimizer.
eps_start: Starting value for epsilon (linear annealing).
eps_end: Final value for epsilon (linear annealing).
anneal_length: How many steps to anneal epsilon for.
n_actions: The number of actions. This can normally be read from env.action_space.n,
    but we engineer it manually to account for the fact that Pong has duplicate actions.
"""
# Hyperparameters for the CartPole-v0 environment.
# Field meanings are described in the module docstring above.
CartPole = dict(
    memory_size=50000,            # maximum replay-memory size
    n_episodes=10000,             # number of training episodes
    batch_size=32,                # minibatch size for DQN updates
    target_update_frequency=100,  # steps between target-network syncs
    train_frequency=1,            # train the DQN every step
    gamma=0.95,                   # discount factor
    lr=1e-4,                      # optimizer learning rate
    eps_start=1.0,                # initial epsilon (linear annealing)
    eps_end=0.1,                  # final epsilon
    anneal_length=10_000,         # steps over which epsilon is annealed
    n_actions=3,                  # manually chosen action count (see docstring)
)
# Hyperparameters for the Pong-v0 environment.
# Field meanings are described in the module docstring above.
Pong = dict(
    memory_size=50000,            # maximum replay-memory size
    n_episodes=10000,             # number of training episodes
    batch_size=32,                # minibatch size for DQN updates
    target_update_frequency=500,  # steps between target-network syncs
    train_frequency=4,            # train the DQN every 4 steps
    gamma=0.99,                   # discount factor
    lr=1e-4,                      # optimizer learning rate
    eps_start=1.0,                # initial epsilon (linear annealing)
    eps_end=0.01,                 # final epsilon
    anneal_length=1_000_000,      # steps over which epsilon is annealed
    n_actions=2,                  # reduced action count (Pong has duplicate actions)
    obs_stack_size=4,             # number of frames stacked per observation
    screen_size=84,               # side length of the (square) preprocessed frame
)