training.py
# imports
import torch
import torch.nn as nn
from torch.nn import functional as F
import random
import mmap
import pickle
import argparse
# defining hyperparameters
block_size = 32
batch_size = 128
max_iters = 3000
learning_rate = 2e-5
eval_iters = 100
eval_interval = 100
n_embd = 384 # dimensionality of the token embedding vectors
n_layer = 8 # number of decoder blocks
n_head = 8 # number of attention heads per block
dropout = 0.2 # randomly zero 20% of activations so the model doesn't overfit
# device selection: prefer CUDA, fall back to Apple's Metal Performance Shaders (MPS) on Apple silicon, otherwise use the CPU
if torch.cuda.is_available():
    device = torch.device('cuda')
elif torch.backends.mps.is_available():
    device = torch.device('mps')
else:
    device = torch.device('cpu')
print(device)
# Optional: parse the batch size from the command line (left commented out)
# parser = argparse.ArgumentParser(description='demo')
# parser.add_argument('-batch_size', type=int, required=True, help='Please provide a batch size')
# args = parser.parse_args()
# # the parsed value is now usable in the program
# print(f'batch size: {args.batch_size}')
# batch_size = args.batch_size
chars = ""
# Read the vocab file and collect the set of characters
with open("data/vocab.txt", 'r', encoding='utf-8') as f:
    text = f.read()
    chars = sorted(list(set(text)))
# Ensure common whitespace characters are included;
# if you expect any additional special characters, add them here
additional_chars = [' ', '\n', '\t']
# Merge the additional characters with those found in vocab.txt
for char in additional_chars:
    if char not in chars:
        chars.append(char)
# Re-sort the character list after adding the new characters
chars = sorted(chars)
vocab_size = len(chars)
print(chars)
string_to_int = { ch:i for i,ch in enumerate(chars) }
int_to_string = { i:ch for i,ch in enumerate(chars) }
encode = lambda s: [string_to_int[c] for c in s]
decode = lambda l: ''.join([int_to_string[i] for i in l])
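# Optional sanity check (an illustrative addition, not part of the original pipeline):
# encoding then decoding should round-trip exactly for any string built from vocab characters.
_sample = ''.join(chars[:5])
assert decode(encode(_sample)) == _sample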
# memory-map the data file so we can sample small random snippets from a file of any size
def get_random_chunk(split):
    filename = "data/output_train_0.txt" if split == 'train' else "data/output_val_0.txt"
    with open(filename, 'rb') as f:
        with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
            # Determine the file size and pick a random position to start reading
            file_size = len(mm)
            start_pos = random.randint(0, file_size - block_size*batch_size)
            # Seek to the random position and read a block of text
            mm.seek(start_pos)
            block = mm.read(block_size*batch_size-1)
            # Decode the block to a string, ignoring any invalid byte sequences
            decoded_block = block.decode('utf-8', errors='ignore').replace('\r', '')
            # Encode the chunk into a tensor of token ids
            data = torch.tensor(encode(decoded_block), dtype=torch.long)
    return data
def get_batch(split):
data = get_random_chunk(split)
ix = torch.randint(len(data) - block_size, (batch_size,))
x = torch.stack([data[i:i+block_size] for i in ix])
y = torch.stack([data[i+1:i+block_size+1] for i in ix])
x, y = x.to(device), y.to(device)
return x, y
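# Optional shape check (an illustrative addition, not part of the original script):
# x and y are both (batch_size, block_size) tensors of token ids, with y shifted one position right of x.
_xb, _yb = get_batch('train')
print('sample batch shapes:', _xb.shape, _yb.shape)  # expected: (128, 32) and (128, 32)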
class Head(nn.Module):
""" one head of self-attention """
def __init__(self, head_size):
super().__init__()
self.key = nn.Linear(n_embd, head_size, bias=False)
self.query = nn.Linear(n_embd, head_size, bias=False)
self.value = nn.Linear(n_embd, head_size, bias=False)
self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
self.dropout = nn.Dropout(dropout)
def forward(self, x):
# input of size (batch, time-step, channels)
# output of size (batch, time-step, head size)
B,T,C = x.shape
k = self.key(x) # (B,T,hs)
q = self.query(x) # (B,T,hs)
# compute attention scores ("affinities")
wei = q @ k.transpose(-2,-1) * k.shape[-1]**-0.5 # (B, T, hs) @ (B, hs, T) -> (B, T, T)
wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) # (B, T, T)
wei = F.softmax(wei, dim=-1) # (B, T, T)
wei = self.dropout(wei)
# perform the weighted aggregation of the values
v = self.value(x) # (B,T,hs)
out = wei @ v # (B, T, T) @ (B, T, hs) -> (B, T, hs)
return out
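# A small worked illustration of the causal mask above (an explanatory sketch, assuming block_size were 4):
# tril = [[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]]
# masked_fill turns the zeros into -inf, so after the softmax each time step
# can only attend to itself and to earlier positions.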
class MultiHeadAttention(nn.Module):
""" multiple heads of self-attention in parallel """
def __init__(self, num_heads, head_size):
super().__init__()
self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
self.proj = nn.Linear(head_size * num_heads, n_embd)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
        out = torch.cat([h(x) for h in self.heads], dim=-1) # concatenate per-head outputs along the feature dim: num_heads x (B, T, head_size) -> (B, T, num_heads*head_size)
out = self.dropout(self.proj(out))
return out
class FeedForward(nn.Module):
""" a simple linear layer followed by a non-linearity """
def __init__(self, n_embd):
super().__init__()
self.net = nn.Sequential(
nn.Linear(n_embd, 4 * n_embd),
nn.ReLU(),
nn.Linear(4 * n_embd, n_embd),
nn.Dropout(dropout),
)
def forward(self, x):
return self.net(x)
class Block(nn.Module):
""" Transformer block: communication followed by computation """
def __init__(self, n_embd, n_head):
# n_embd: embedding dimension, n_head: the number of heads we'd like
super().__init__()
head_size = n_embd // n_head
self.sa = MultiHeadAttention(n_head, head_size)
        self.ffwd = FeedForward(n_embd)
self.ln1 = nn.LayerNorm(n_embd)
self.ln2 = nn.LayerNorm(n_embd)
def forward(self, x):
y = self.sa(x)
x = self.ln1(x + y)
y = self.ffwd(x)
x = self.ln2(x + y)
return x
class GPTLanguageModel(nn.Module):
def __init__(self, vocab_size):
super().__init__()
self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
self.position_embedding_table = nn.Embedding(
block_size, n_embd) # Adding positional embedding
# Number of decoder blocks we have running sequentially
self.blocks = nn.Sequential(
*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])
self.ln_f = nn.LayerNorm(n_embd) # final layer normalization
# transforming the decoder output to make it workable with softmax
self.lm_head = nn.Linear(n_embd, vocab_size)
self.apply(self._init_weights)
    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias) # initialize biases to zero rather than re-initializing the weights
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
def forward(self, index, targets=None):
B, T = index.shape
# idx and targets are both (B, T) tensor of integers
tok_emb = self.token_embedding_table(index) # (B, T, C)
pos_emb = self.position_embedding_table(
torch.arange(T, device=device)) # (T, C)
x = tok_emb + pos_emb # (B, T, C)
x = self.blocks(x) # (B, T, C)
logits = self.lm_head(x) # (B, T, vocab_size)
B, T, C = logits.shape
if targets is None:
loss = None
else:
logits = logits.view(B*T, C)
targets = targets.view(B*T)
loss = F.cross_entropy(logits, targets)
return logits, loss # Return both logits and loss
def generate(self, index, max_new_tokens):
# index is (B, T) array of indices in the current context
        for _ in range(max_new_tokens):
            # crop the context to the last block_size tokens so the position
            # embedding table is never indexed beyond block_size
            index_cond = index[:, -block_size:]
            # get the predictions
            logits, _ = self.forward(index_cond) # unpack logits and ignore loss
# focus only on the last time step
logits = logits[:, -1, :] # becomes (B, C)
# apply softmax to get probabilities
probs = F.softmax(logits, dim=-1) # (B, C)
# sample from the distribution
index_next = torch.multinomial(probs, num_samples=1) # (B, 1)
# append sampled index to the running sequence
index = torch.cat((index, index_next), dim=1) # (B, T+1)
return index
model = GPTLanguageModel(vocab_size)
# resume from a previous checkpoint if one exists, otherwise start from the freshly initialized model
try:
    print('loading model parameters...')
    with open('model-01.pkl', 'rb') as f:
        model = pickle.load(f)
    print('Loaded successfully')
except FileNotFoundError:
    print('no saved model found, training from scratch')
m = model.to(device)
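# Optional: report the model size (an illustrative addition, not part of the original script)
print(sum(p.numel() for p in m.parameters()) / 1e6, 'M parameters')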
@torch.no_grad()
def estimate_loss():
out = {}
model.eval()
for split in ['train', 'val']:
losses = torch.zeros(eval_iters)
for k in range(eval_iters):
X, Y = get_batch(split)
logits, loss = model(X, Y)
losses[k] = loss.item()
out[split] = losses.mean()
model.train()
return out
# create a PyTorch optimizer
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
for iter in range(max_iters):
print(iter)
    if iter % eval_interval == 0: # evaluate every eval_interval steps
losses = estimate_loss()
print(f"step: {iter}, train loss: {losses['train']:.3f}, val loss: {losses['val']:.3f}")
# sample a batch of data
xb, yb = get_batch('train')
# evaluate the loss
    logits, loss = model(xb, yb)
optimizer.zero_grad(set_to_none=True)
loss.backward()
optimizer.step()
print(loss.item())
# saving the trained model
with open('model-01.pkl', 'wb') as f:
pickle.dump(model, f)
print('model saved')
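# Example: sample from the trained model (a hedged sketch, not part of the original script;
# the seed character and token count are arbitrary illustrative choices)
m.eval()  # disable dropout for sampling
context = torch.tensor([encode(chars[0])], dtype=torch.long, device=device)
generated = m.generate(context, max_new_tokens=200)
print(decode(generated[0].tolist()))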