a3c_display.py
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import random
from game_state import GameState
from game_ac_network import GameACFFNetwork, GameACLSTMNetwork
from a3c_training_thread import A3CTrainingThread
from rmsprop_applier import RMSPropApplier
from constants import ACTION_SIZE
from constants import PARALLEL_SIZE
from constants import CHECKPOINT_DIR
from constants import RMSP_EPSILON
from constants import RMSP_ALPHA
from constants import GRAD_NORM_CLIP
from constants import USE_GPU
from constants import USE_LSTM
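# Sample an action index from the policy's output probability distribution.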
def choose_action(pi_values):
  return np.random.choice(range(len(pi_values)), p=pi_values)
# use CPU for display tool
device = "/cpu:0"
if USE_LSTM:
  global_network = GameACLSTMNetwork(ACTION_SIZE, -1, device)
else:
  global_network = GameACFFNetwork(ACTION_SIZE, -1, device)
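# The learning-rate placeholder and RMSProp applier mirror the training-time graph; they are not used in the display loop below.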
learning_rate_input = tf.placeholder("float")
grad_applier = RMSPropApplier(learning_rate = learning_rate_input,
                              decay = RMSP_ALPHA,
                              momentum = 0.0,
                              epsilon = RMSP_EPSILON,
                              clip_norm = GRAD_NORM_CLIP,
                              device = device)
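# Create a session and initialize all graph variables before restoring the checkpoint.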
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
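# Restore trained weights from the most recent checkpoint in CHECKPOINT_DIR.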
saver = tf.train.Saver()
checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if checkpoint and checkpoint.model_checkpoint_path:
  saver.restore(sess, checkpoint.model_checkpoint_path)
  print("checkpoint loaded:", checkpoint.model_checkpoint_path)
else:
  print("Could not find old checkpoint")
game_state = GameState(0, display=True, no_op_max=0)
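# Main display loop: sample an action from the current policy, step the game, and reset when an episode ends.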
while True:
  pi_values = global_network.run_policy(sess, game_state.s_t)

  action = choose_action(pi_values)
  game_state.process(action)

  if game_state.terminal:
    game_state.reset()
  else:
    game_state.update()