import argparse
import warnings
from typing import Any, Dict

import gymnasium as gym
import numpy as np
import stable_baselines3 as sb3
import torch as th
import wandb
from gymnasium.envs.registration import register
from stable_baselines3.common.vec_env import SubprocVecEnv

from constants import ClassicalPolicy

warnings.filterwarnings("ignore")
# Note that we use the training environment from envs/training_ewn.py
register(
    id='EWN-v0',
    # entry_point='envs:EinsteinWuerfeltNichtEnv'
    entry_point='envs:MiniMaxHeuristicEnv'
)
register(
    id='EWN-eval-v0',
    entry_point='envs:EinsteinWuerfeltNichtEnv'
)


def return_model(config: Dict[str, Any],
                 env: SubprocVecEnv) -> sb3.A2C | sb3.PPO:
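    """Build the A2C or PPO model named in config["algorithm"] on top of
    the given vectorized environment."""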
    assert config["algorithm"] in ["A2C", "PPO"]
    model_cls = getattr(sb3, config["algorithm"])
    if config["algorithm"] == "PPO":
        model = model_cls(
            "MultiInputPolicy",
            env,
            # verbose=1,
            batch_size=config["batch_size"],
            learning_rate=config["learning_rate"],
            policy_kwargs=dict(activation_fn=th.nn.Tanh),
            # tensorboard_log=my_config["run_id"]
            seed=config["model_seed"]
        )
    else:
        # A2C has no minibatching; n_steps sets the rollout length instead
        model = model_cls(
            "MultiInputPolicy",
            env,
            # verbose=1,
            n_steps=config["n_steps"],
            learning_rate=config["learning_rate"],
            policy_kwargs=dict(activation_fn=th.nn.Tanh),
            # tensorboard_log=my_config["run_id"]
            seed=config["model_seed"]
        )
    return model


def evaluate(config: Dict[str, Any],
             model: sb3.A2C | sb3.PPO, current_best: float, epoch: int) -> float:
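    """Evaluate the agent on 'EWN-eval-v0' against a depth-5 minimax
    opponent, one seeded episode per evaluation index. Saves the model
    whenever the win rate beats ``current_best`` and returns the best
    win rate seen so far."""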
    # Evaluate the agent using the original (non-training) env
    avg_score = 0.0
    episode_num = config["eval_episode_num"]
    reward_list = np.zeros(episode_num)
    env = gym.make(
        'EWN-eval-v0',
        cube_layer=config["cube_layer"],
        board_size=config["board_size"],
        # to evaluate against a random opponent instead:
        # opponent_policy=ClassicalPolicy.random,
        opponent_policy=ClassicalPolicy.minimax,
        max_depth=5
    )
    for seed in range(episode_num):
        done = False
        # Interact with the env using the Gymnasium API; the episode index
        # doubles as the environment seed
        obs, info = env.reset(seed=seed)
        while not done:
            action, _state = model.predict(obs, deterministic=True)
            obs, reward, terminated, truncated, info = env.step(action)
            done = terminated or truncated
        avg_score += reward / episode_num
        # record the final reward of the episode
        reward_list[seed] = reward
    print("Avg_score: ", avg_score)
    print("Reward_list (first 10): ", reward_list[:10])
    winrate: float = np.count_nonzero(reward_list > 0) / episode_num
    print("Win rate: ", winrate)
    print()
    wandb.log(
        {"win_rate": winrate,
         "avg_score": avg_score}
    )
    # Save the best model, i.e. the one with the highest win rate so far
    if current_best < winrate:
        print("Saving Model")
        save_path = f"models/{wandb.run.id}"
        model.save(f"{save_path}/{epoch}")
        print("---------------")
        return winrate
    else:
        print("---------------")
        return current_best


def train(config: Dict[str, Any] | None = None) -> None:
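    """wandb-tracked training loop: build the vectorized training env,
    create or load the model, then alternate learning and evaluation for
    config["epoch_num"] epochs."""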
    with wandb.init(project="ewn-gym", config=config):
        config = dict(wandb.config)
        # del config["opponent_policy"]

        def make_env():
            # the full config is forwarded to the env constructor as kwargs
            env = gym.make(
                'EWN-v0',
                # opponent_policy=ClassicalPolicy.random,
                **config
            )
            return env

        env = SubprocVecEnv([make_env] * config["num_envs"])
        env.seed(config["env_seed"])
        env.reset()

        if config["checkpoint"] is not None:
            model = return_model(config, env)
            # load() is a classmethod; pass env so the loaded model trains
            # on the freshly created vectorized env
            model = model.load(config["checkpoint"], env=env)
        else:
            model = return_model(config, env)

        current_best = 0.0
        for epoch in range(config["epoch_num"]):
            # Train the agent using SB3
            # Uncomment the callback to enable wandb gradient logging
            model.learn(
                total_timesteps=config["timesteps_per_epoch"],
                reset_num_timesteps=False,
                # callback=WandbCallback(
                #     gradient_save_freq=100,
                #     verbose=2,
                # ),
            )
            # Evaluation
            print("Run id: ", wandb.run.id)
            print("Epoch: ", epoch)
            current_best = evaluate(config, model, current_best, epoch)


def parse_args() -> argparse.Namespace:
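    """Parse CLI arguments. The algorithm (A2C or PPO) is a required
    subcommand; shared flags must precede it on the command line, and
    algorithm-specific flags follow it."""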
    parser = argparse.ArgumentParser(
        "Trainer for EWN",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    sub_parsers = parser.add_subparsers(
        dest="algorithm",
        required=True,
        help="Algorithm to use for training")
    parser_a2c = sub_parsers.add_parser("A2C", help="A2C algorithm")
    parser_a2c.add_argument(
        "-n",
        "--n_steps",
        type=int,
        default=1,
        help="Number of steps to run for each environment per update "
             "(i.e. the batch size is n_steps * n_envs, where n_envs is "
             "the number of environment copies running in parallel)")
    parser_ppo = sub_parsers.add_parser("PPO", help="PPO algorithm")
    parser_ppo.add_argument(
        "-b",
        "--batch_size",
        type=int,
        default=8,
        help="Minibatch size")
    parser.add_argument("--board_size", type=int, default=5, help="Board size")
    parser.add_argument("--cube_layer", type=int, default=3, help="Cube layer")
    parser.add_argument(
        "-op",
        "--opponent_policy",
        type=ClassicalPolicy.from_string,
        default=ClassicalPolicy.random,
        choices=list(ClassicalPolicy),
        help="The policy of the opponent")
    parser.add_argument(
        "--max_depth",
        type=int,
        default=3,
        help="Max search depth of the minimax opponent")
    parser.add_argument(
        "-e",
        "--epoch_num",
        type=int,
        default=5,
        help="Number of epochs to train")
    parser.add_argument(
        "-t",
        "--timesteps_per_epoch",
        type=int,
        default=200000,
        help="Number of timesteps to train per epoch")
    parser.add_argument(
        "--eval_episode_num",
        type=int,
        default=20,
        help="Number of episodes to evaluate per epoch; each episode index "
             "is also used as the seed for the environment")
    parser.add_argument(
        "--model_seed",
        type=int,
        default=9487,
        help="Random seed for the SB3 model (currently not supported)")
    parser.add_argument(
        "--env_seed",
        type=int,
        default=9487,
        help="Random seed for the environment")
    parser.add_argument(
        "-lr",
        "--learning_rate",
        type=float,
        default=3e-4,
        help="Learning rate")
    parser.add_argument(
        "--num_envs",
        type=int,
        default=8,
        help="Number of environments to run in parallel")
    parser.add_argument(
        "--illegal_move_reward",
        type=float,
        default=-1.0,
        help="Reward given to the agent when it makes an illegal move")
    parser.add_argument(
        "--illegal_move_tolerance",
        type=int,
        default=10,
        help="Number of illegal moves the agent may make before it loses")
    parser.add_argument(
        "--checkpoint",
        type=str,
        default=None,
        help="Path to a checkpoint to load from")
    parser.add_argument(
        "--alpha_model_name",
        type=str,
        default=None,
        dest="model_name",
        help="Model name of the AlphaZero model")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    # Namespace -> plain dict so it can be handed to wandb.init as config
    config = vars(args)
    train(config)
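
# Example invocations (hypothetical hyperparameter values):
#   python train.py -lr 3e-4 --num_envs 8 -e 5 PPO -b 64
#   python train.py --checkpoint models/<run_id>/<epoch> A2C -n 8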