# test.py (forked from notadamking/RLTrader)
# Evaluates a trained PPO2 trading agent on the held-out portion of the data.
import os

import optuna
import pandas as pd
from stable_baselines import PPO2
from stable_baselines.common.vec_env import DummyVecEnv

from env.BitcoinTradingEnv import BitcoinTradingEnv
from util.indicators import add_indicators
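
# Run configuration: which saved checkpoint to load, which reward function the
# agent was tuned for, where the data lives, and where Optuna stored trials.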
curr_idx = 0
reward_strategy = 'sortino'
input_data_file = os.path.join('data', 'coinbase_hourly.csv')
params_db_file = 'sqlite:///params.db'
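
# Load the best hyperparameters found by the optimization run; this assumes
# the study already exists in params.db. The tuning objective was the negated
# reward, hence the -1 when reporting the best value below.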
study_name = 'ppo2_' + reward_strategy
study = optuna.load_study(study_name=study_name, storage=params_db_file)
params = study.best_trial.params
print("Testing PPO2 agent with params:", params)
print("Best trial:", -1 * study.best_trial.value)
df = pd.read_csv(input_data_file)
df = df.drop(['Symbol'], axis=1)
df = df.sort_values(['Date'])
df = add_indicators(df.reset_index())
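
# Hold out the last 20% of the rows as the test set so the agent is
# evaluated on data it was not trained on.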
test_len = int(len(df) * 0.2)
train_len = len(df) - test_len
test_df = df[train_len:]
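
# Wrap the trading environment in a single-process VecEnv; stable-baselines
# models operate on the vectorized environment interface.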
test_env = DummyVecEnv([lambda: BitcoinTradingEnv(
    test_df,
    reward_func=reward_strategy,
    forecast_len=int(params['forecast_len']),
    confidence_interval=params['confidence_interval'])])
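
# The best trial's PPO2 hyperparameters, cast to the types PPO2 expects.
# Note: PPO2.load restores the saved model's settings, so this dict is kept
# for reference only and is not passed to the model below.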
model_params = {
'n_steps': int(params['n_steps']),
'gamma': params['gamma'],
'learning_rate': params['learning_rate'],
'ent_coef': params['ent_coef'],
'cliprange': params['cliprange'],
'noptepochs': int(params['noptepochs']),
'lam': params['lam'],
}
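
# Restore the trained agent from disk and attach the test environment.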
model_path = os.path.join(
    '.', 'agents', 'ppo2_' + reward_strategy + '_' + str(curr_idx) + '.pkl')
model = PPO2.load(model_path, env=test_env)
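
# Run a single episode over the test data, rendering each step. The VecEnv
# returns batched values, so with one environment `done` is a length-1 array,
# which still works as a loop condition here.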
obs, done = test_env.reset(), False
while not done:
    action, _states = model.predict(obs)
    obs, reward, done, info = test_env.step(action)
    test_env.render(mode="human")