trainer.py
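
"""Training, saving, loading, and evaluation utilities for DQN agents
on the gym_repoman CollectEnv task, including composition of trained
Q-networks via ComposedDQN."""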
import numpy as np
import torch
from gym.wrappers import Monitor

from dqn import Agent, DQN, FloatTensor, ComposedDQN, get_action
from gym_repoman.envs import CollectEnv
from wrappers import WarpFrame


def video_callable(episode_id):
    # Record an evaluation video every 500th episode, skipping episode 0.
    return episode_id > 1 and episode_id % 500 == 0


def train(path, env):
    # Wrap the environment in a Monitor so periodic videos and statistics
    # are written to `path`, then train a DQN agent on it.
    env = Monitor(env, path, video_callable=video_callable, force=True)
    agent = Agent(env)
    agent.train()
    return agent


def save(path, agent):
    # Persist only the trained Q-network's weights.
    torch.save(agent.q_func.state_dict(), path)


def load(path, env):
    # Rebuild a DQN sized to the environment's action space, then restore its weights.
    dqn = DQN(env.action_space.n)
    dqn.load_state_dict(torch.load(path))
    return dqn


def enjoy(dqn, env, timesteps):
    # Roll out the policy for `timesteps` steps, rendering every frame and
    # resetting the environment whenever an episode ends.
    obs = env.reset()
    env.render()
    for _ in range(timesteps):
        obs = np.array(obs)
        obs = torch.from_numpy(obs).type(FloatTensor).unsqueeze(0)
        # Alternative: sample actions from a softmax over Q-values rather than
        # acting greedily (written for the old PyTorch Variable/volatile API):
        # weights = nn.Softmax()(dqn(Variable(obs, volatile=True)))
        # action = torch.multinomial(weights, 1, replacement=True).data[0][0]
        action = get_action(dqn, obs)
        obs, _, done, _ = env.step(action)
        env.render()
        if done:
            obs = env.reset()
            env.render()


def compose(dqns, weights):
    # Combine several trained Q-networks into a single weighted ComposedDQN.
    return ComposedDQN(dqns, weights)
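

# Usage sketch (illustrative; not executed by this script): load two trained
# models and run their composition. The model paths and the equal 0.5 weights
# are assumptions for illustration, not values taken from this file.
#
#   env = WarpFrame(CollectEnv(goal_condition=lambda x: x.shape == 'circle'))
#   dqn_purple = load('./models/purplecircle/model.dqn', env)
#   dqn_blue = load('./models/bluecircle/model.dqn', env)
#   composed = compose([dqn_purple, dqn_blue], [0.5, 0.5])
#   enjoy(composed, env, timesteps=1000)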


def learn(colour, shape, condition):
    # Train an agent whose goal is any object satisfying `condition`, then
    # save the resulting model under ./models/<colour><shape>/.
    name = colour + shape
    base_path = './models/{}/'.format(name)
    env = WarpFrame(CollectEnv(goal_condition=condition))
    agent = train(base_path + 'results', env)
    save(base_path + 'model.dqn', agent)


if __name__ == '__main__':
    # Train an agent to collect purple circles.
    learn('purple', 'circle', lambda x: x.colour == 'purple' and x.shape == 'circle')