# 3-A2C.py: Advantage Actor-Critic (A2C) on gym's InvertedPendulum-v2
import os
import numpy as np
import time
import random
import gym
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
env = gym.make('InvertedPendulum-v2')  # MuJoCo task; requires mujoco_py

# Optional smoke test: run random actions with rendering to verify the env works
test_env = False
if test_env:
    env.reset()
    for _ in range(1000):
        env.render()
        env.step(env.action_space.sample())  # take random actions
    env.close()
print('Observation Shape:', env.observation_space.shape, '\nAction Shape:', env.action_space.shape)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Code is running on:", device)
############ PARAMETERS ####################
N_OBS = env.observation_space.shape[0]
N_ACT = env.action_space.shape[0]
N_EPISODE = 1500
LEARNING_RATE = 0.001
DISCOUNT = 0.99
############### Network for A2C ####################
class ACNet(nn.Module):
    """Actor-critic network: the actor branch outputs the mean and std of a
    Gaussian policy, the critic branch outputs a state-value estimate."""
    def __init__(self, observations, actions):
        super(ACNet, self).__init__()
        self.actor = nn.Sequential(
            nn.Linear(observations, 32),
            nn.ReLU(),
            nn.Linear(32, 16),
            nn.ReLU()
        )
        self.mu = nn.Linear(16, actions)
        self.sigma = nn.Linear(16, actions)
        self.critic = nn.Sequential(
            nn.Linear(observations, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 1)
        )

    def forward(self, x):
        act = self.actor(x)
        mean = self.mu(act)
        # softplus keeps the std positive; the epsilon guards against a
        # collapsed (near-zero) std, which would blow up log-probabilities
        std = F.softplus(self.sigma(act)) + 1e-5
        value = self.critic(x)
        return mean, std, value
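# Quick shape check (illustrative only, not executed as part of training):
#   net = ACNet(4, 1)
#   mu, std, v = net(torch.zeros(4))
#   # mu, std and v each have shape torch.Size([1])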
ac_network = ACNet(N_OBS, N_ACT).to(device)
optimizer = optim.Adam(ac_network.parameters(), lr=LEARNING_RATE)
writer = SummaryWriter('run/using_tensorboard')
os.makedirs('Models', exist_ok=True)  # checkpoint directory for torch.save below
def choose_action(state):
    mu, sigma, value = ac_network(state)
    m = torch.distributions.Normal(mu, sigma)
    action = m.sample()
    log_prob = m.log_prob(action)
    return action.detach().cpu().numpy(), log_prob, value
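# At evaluation time one could act deterministically by taking the Gaussian's
# mean instead of sampling. A minimal sketch (assumed helper, not used by the
# training loop below):
def choose_action_greedy(state):
    with torch.no_grad():
        mu, _, _ = ac_network(state)
    return mu.cpu().numpy()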
def compute_returns(next_state, rewards, done, discount=DISCOUNT):
    next_state = torch.FloatTensor(next_state).to(device)
    with torch.no_grad():  # the bootstrap target should not carry gradients
        _, _, next_q_val = ac_network(next_state)
    returns = []
    for step in reversed(range(len(rewards))):
        next_q_val = rewards[step] + discount * next_q_val * (1 - done[step])
        returns.append(next_q_val)
    returns.reverse()
    return returns
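# The backward loop above implements the one-step bootstrapped return
#   Q(s_t) = r_t + discount * Q(s_{t+1}) * (1 - done_t),
# so the critic's estimate past a terminal step is masked out.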
def ACupdate(log_probs, q_vals, values):
    optimizer.zero_grad()
    advantage = q_vals - values
    actor_loss = -(log_probs * advantage.detach()).mean()
    critic_loss = advantage.pow(2).mean()
    ac_loss = actor_loss + critic_loss
    ac_loss.backward()
    optimizer.step()
    return ac_loss.item()
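# The actor maximizes log_prob * advantage with the advantage detached, so
# policy gradients do not flow into the critic; the critic minimizes the
# squared advantage, i.e. regresses V(s) towards the bootstrapped returns.
# A common extension (not used in this script) is an entropy bonus, e.g.
# adding `- 0.01 * dist.entropy().mean()` to the loss, to delay the policy's
# std from collapsing.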
for i in range(1, N_EPISODE + 1):
    ep_rewards = []
    log_probs = []
    done_states = []
    values = []
    total_reward = 0
    done = False
    state = env.reset()
    while not done:
        state = torch.FloatTensor(state).to(device)
        action, log_prob, value = choose_action(state)
        next_state, reward, done, info = env.step(action)
        ep_rewards.append(torch.tensor([reward], dtype=torch.float, device=device))
        log_probs.append(log_prob)
        done_states.append(torch.tensor([done], dtype=torch.float, device=device))
        values.append(value)
        total_reward += reward
        state = next_state
    q_vals = compute_returns(next_state, ep_rewards, done_states)
    q_vals = torch.stack(q_vals)
    values = torch.stack(values)
    log_probs = torch.stack(log_probs)
    loss = ACupdate(log_probs, q_vals, values)
    writer.add_scalar('Attr/Training loss', loss, i)
    writer.add_scalar('Attr/Episode reward', total_reward, i)
    print('Episode Trained:', i)
    if i % 1000 == 0:
        torch.save(ac_network.state_dict(), 'Models/ACNet_' + str(i) + '.pth')
        print('Model Saved')

print('Done Training')
env.close()
writer.close()
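# Usage (assuming gym with mujoco_py installed for InvertedPendulum-v2):
#   python 3-A2C.py
# Inspect the logged training curves with:
#   tensorboard --logdir run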