agent.py
import torch as T
import numpy as np
import matplotlib.pyplot as plt
from CNN import CNN
import pickle
from replay_memory import ReplayMemory


class Agent:
    def __init__(self, gamma=0.99, epsilon=1.0, input_dims=[240, 256, 3], batch_size=8, n_actions=12,
                 max_mem_size=1000, eps_end=0.05, eps_dec=5e-4, lr=0.001, device=None):
        self.gamma = gamma              # discount factor for future rewards
        self.epsilon = epsilon          # exploration rate for the epsilon-greedy policy
        self.eps_min = eps_end          # lower bound that epsilon decays towards
        self.eps_dec = eps_dec          # amount epsilon is reduced per learning step
        self.lr = lr
        self.action_space = [i for i in range(n_actions)]
        self.replay_memory = ReplayMemory(max_mem_size, input_dims, batch_size)
        self.batch_size = batch_size
        self.device = device
        self.Q_eval = CNN(action_size=n_actions, learning_rate=lr, device=self.device)
        self.loss_history = []

    def save_memory(self):
        # Persist the network weights, the agent object itself, and the replay buffer.
        T.save(self.Q_eval.state_dict(), 'CNN_model.pth')
        with open("agent.pkl", "wb") as f:
            pickle.dump(self, f)
        self.replay_memory.save()

    def load_memory(self):
        # Restore the network weights and replay buffer written by save_memory().
        self.Q_eval.load_state_dict(T.load('CNN_model.pth', map_location=self.device))
        self.replay_memory = self.replay_memory.load()

    def choose_action(self, observation):
        # Epsilon-greedy policy: exploit with probability 1 - epsilon, otherwise explore.
        if np.random.random() > self.epsilon:
            # Add a batch dimension and cast to float for the convolutional layers.
            state = T.tensor(np.array([observation]), dtype=T.float).to(self.Q_eval.device)
            actions = self.Q_eval.forward(state)
            action = T.argmax(actions).item()
        else:
            action = np.random.choice(self.action_space)
        return action

    def play(self, observation):
        # Purely greedy policy (no exploration), used for evaluation/playback.
        state = T.tensor(np.array([observation]), dtype=T.float).to(self.Q_eval.device)
        actions = self.Q_eval.forward(state)
        action = T.argmax(actions).item()
        return action

    def plot_loss(self):
        plt.plot(self.loss_history)
        plt.show()

    def learn(self):
        # Wait until the replay buffer holds at least one full batch.
        if self.replay_memory.mem_cntr < self.batch_size:
            return
        self.Q_eval.optimizer.zero_grad()
        state_batch, new_state_batch, action_batch, reward_batch, terminal_batch = \
            self.replay_memory.sample_buffer(self.device)
        batch_index = np.arange(self.batch_size, dtype=np.int32)
        # Q-values of the actions that were actually taken in the sampled transitions.
        q_eval = self.Q_eval.forward(state_batch)[batch_index, action_batch]
        # Q-values of the next states; terminal states contribute nothing to the target.
        q_next = self.Q_eval.forward(new_state_batch)
        q_next[terminal_batch] = 0.0
        # TD target: r + gamma * max_a' Q(s', a')
        q_target = reward_batch + self.gamma * T.max(q_next, dim=1)[0]
        loss = self.Q_eval.loss(q_target, q_eval).to(self.Q_eval.device)
        self.loss_history.append(loss.item())
        loss.backward()
        self.Q_eval.optimizer.step()
        # Linearly decay epsilon down to its minimum value.
        self.epsilon = self.epsilon - self.eps_dec \
            if self.epsilon > self.eps_min else self.eps_min
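

# --- Usage sketch (not part of the original file) ---
# A minimal illustration of how this Agent might be driven in a training loop.
# It assumes a gym-style environment `env` with reset()/step(); the environment
# setup, episode count, and the step at which transitions are written into
# agent.replay_memory are assumptions, not confirmed parts of this repository.
#
# if __name__ == "__main__":
#     agent = Agent(device="cuda" if T.cuda.is_available() else "cpu")
#     for episode in range(10):
#         observation = env.reset()
#         done = False
#         while not done:
#             action = agent.choose_action(observation)
#             new_observation, reward, done, info = env.step(action)
#             # ... store (observation, action, reward, new_observation, done)
#             #     in agent.replay_memory here, using the API in replay_memory.py ...
#             agent.learn()
#             observation = new_observation
#     agent.save_memory()
#     agent.plot_loss()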