model.py
"""
Model definition. To use, check out train.py and sample.py. I have written
these files without looking at NanoGPT, but I did try to follow the same
class interfaces. Most notably, GPTConfig and GPT are imported from this
module and then used to train and sample from the trained model.
"""
from dataclasses import dataclass
import math
import torch
import torch.nn as nn
from torch.nn import functional as F

# Copied CausalSelfAttention from nanogpt for its speed. I got this working with
# my previous bigram implementation from the course, but it was too slow since:
#
# 1. It used separate k, q, v tensors for each head instead of one batched
#    projection (see the sketch just after this class)
# 2. It did not take advantage of flash attention (scaled_dot_product_attention)
class CausalSelfAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, but in a batch
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        # regularization
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
        if not self.flash:
            print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
        # causal mask to ensure that attention is only applied to the left in the input sequence
        self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                     .view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        if self.flash:
            # efficient attention using Flash Attention CUDA kernels
            y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True)
        else:
            # manual implementation of attention
            att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))
            att = F.softmax(att, dim=-1)
            att = self.attn_dropout(att)
            y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
        # output projection
        y = self.resid_dropout(self.c_proj(y))
        return y
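
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: point 1 in the comment
# above CausalSelfAttention says a single batched c_attn projection replaced
# separate per-head q/k/v tensors. The hypothetical helper below shows how one
# nn.Linear(n_embd, 3 * n_embd) is split into per-head (B, nh, T, hs) views,
# which is exactly what c_attn + view + transpose do inside the class.
def demo_batched_qkv_projection(n_embd=8, n_head=2, T=4):
    head_size = n_embd // n_head
    x = torch.randn(1, T, n_embd)                         # (B, T, C)
    c_attn = nn.Linear(n_embd, 3 * n_embd, bias=False)
    q, k, v = c_attn(x).split(n_embd, dim=2)              # three (B, T, C) tensors from one matmul
    q = q.view(1, T, n_head, head_size).transpose(1, 2)   # (B, nh, T, hs)
    return q.shape                                         # torch.Size([1, 2, 4, 4])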

class Block(nn.Module):

    def __init__(self, config):
        super().__init__()
        n_embd = config.n_embd
        bias = config.bias
        self.ln1 = nn.LayerNorm(n_embd, bias=bias)
        self.attention = CausalSelfAttention(config)
        self.ln2 = nn.LayerNorm(n_embd)
        self.ffwd = FeedForward(config)

    def forward(self, x):
        # note: "x +" is a residual connection: the input flows through
        # unchanged, giving gradients an identity path around each sub-layer.
        # The projection layers at the end of the attention and ffwd blocks
        # let the model learn how much to add on top of this identity
        # flow-through for the training data at hand.
        x = x + self.attention(self.ln1(x))
        x = x + self.ffwd(self.ln2(x))
        return x
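
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: a quick autograd check of
# the residual-connection comment above. With y = x + f(x), dy/dx = 1 + f'(x),
# so the upstream gradient always keeps an identity term even when f's own
# gradient is small. The helper name below is hypothetical and exists only for
# this example.
def demo_residual_gradient():
    x = torch.randn(4, requires_grad=True)
    f = nn.Linear(4, 4)
    y = (x + f(x)).sum()
    y.backward()
    # x.grad == 1 + column sums of f.weight: the "1" is the identity path
    return x.grad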

class FeedForward(nn.Module):

    def __init__(self, config):
        super().__init__()
        n_embd = config.n_embd
        # core ffwd: expand to a 4x inner dimension, per the GPT-1 paper
        self.ffwd_linear = nn.Linear(n_embd, 4 * n_embd, bias=config.bias)
        self.gelu = nn.GELU()
        # learned residual projection back down to n_embd
        self.proj_linear = nn.Linear(4 * n_embd, n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        x = self.ffwd_linear(x)
        x = self.gelu(x)
        x = self.proj_linear(x)
        x = self.dropout(x)
        return x

# largely copied from nanogpt
@dataclass
class GPTConfig:
    device: str = "cpu"      # cpu or cuda
    block_size: int = 1024   # context length
    vocab_size: int = 50257
    n_layer: int = 12
    n_head: int = 12
    n_embd: int = 768
    dropout: float = 0.0
    bias: bool = True        # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
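
# ---------------------------------------------------------------------------
# Note, not part of the original file: the defaults above are roughly the
# GPT-2 small (~124M parameter) configuration. For a quick CPU experiment one
# might override them, e.g. a hypothetical character-level setup:
#
#   config = GPTConfig(device="cpu", block_size=256, vocab_size=65,
#                      n_layer=6, n_head=6, n_embd=384, dropout=0.2)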

class GPT(nn.Module):

    def __init__(self, config):
        super().__init__()
        n_embd = config.n_embd
        block_size = config.block_size
        vocab_size = config.vocab_size
        n_layer = config.n_layer
        dropout = config.dropout
        bias = config.bias
        self.device = config.device
        self.block_size = block_size  # kept so generate() can crop the context window
        self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
        self.position_embedding_table = nn.Embedding(block_size, n_embd)
        self.embedding_dropout = nn.Dropout(dropout)
        self.blocks = nn.Sequential(*[Block(config) for _ in range(n_layer)])
        self.lm_head = nn.Linear(n_embd, vocab_size)
        self.ln_f = nn.LayerNorm(n_embd, bias=bias) # final layer norm
        # weight sharing between the token embedding and language model head
        # ^ used in the "Attention Is All You Need" paper (section 3.4)
        #
        # personally, I still don't understand why this is better. semantically, I
        # guess the output of the head will encode the same information about a given
        # token as the token embedding table (with tied weights, the logit for token i
        # is just the dot product of the final hidden state with token i's embedding),
        # but it's interesting that it actually works. I need to read the paper.
        self.token_embedding_table.weight = self.lm_head.weight
        # initialize weights with a std of 0.02 (per the gpt-2 paper)
        # note that this is roughly (but not exactly) equivalent to scaling by
        # 1/sqrt(fan_in): for n_embd=768, 1/sqrt(768) ~= 0.036, the same order
        # of magnitude as 0.02
        self.apply(self.__init_weights)

    def __init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        B, T = idx.shape
        # idx and targets are both (B,T) tensors of integers
        tok_emb = self.token_embedding_table(idx) # (B,T,C)
        pos_embd = self.position_embedding_table(torch.arange(T, device=self.device)) # (T,C)
        x = self.embedding_dropout(tok_emb + pos_embd) # (B,T,C)
        x = self.blocks(x) # (B,T,C)
        x = self.ln_f(x) # (B,T,C)
        logits = self.lm_head(x) # (B,T,vocab_size)
        if targets is None:
            loss = None
        else:
            # conform to what pytorch expects the matrix dims to be
            B, T, C = logits.shape
            logits = logits.view(B*T, C)
            targets = targets.view(B*T)
            loss = F.cross_entropy(logits, targets)
        return logits, loss

    def generate(self, idx, max_new_tokens):
        for _ in range(max_new_tokens):
            # crop the context to the last block_size tokens so the position
            # embedding table (and causal mask) are never indexed past block_size
            idx_cond = idx[:, -self.block_size:]
            # get the predictions
            logits, loss = self.forward(idx_cond)
            # focus only on the last time step
            logits = logits[:, -1, :] # becomes (B,C)
            # apply softmax to get probabilities
            probs = F.softmax(logits, dim=-1) # (B,C)
            # sample from the distribution
            idx_next = torch.multinomial(probs, num_samples=1) # (B,1)
            # append sampled index to the running sequence
            idx = torch.cat((idx, idx_next), dim=1) # (B,T+1)
        return idx
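
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: a tiny smoke test of the
# interfaces that train.py and sample.py (per the module docstring) would use.
# All sizes below are hypothetical and chosen only to run quickly on CPU.
if __name__ == "__main__":
    config = GPTConfig(device="cpu", block_size=32, vocab_size=100,
                       n_layer=2, n_head=2, n_embd=32, dropout=0.0)
    model = GPT(config)
    # one forward/backward pass on random token ids
    idx = torch.randint(0, config.vocab_size, (4, config.block_size))
    targets = torch.randint(0, config.vocab_size, (4, config.block_size))
    logits, loss = model(idx, targets)
    loss.backward()
    print(f"loss: {loss.item():.4f}")  # roughly ln(100) ~ 4.6 at initialization
    # sample a few tokens from a single-token prompt
    out = model.generate(torch.zeros((1, 1), dtype=torch.long), max_new_tokens=10)
    print(out.shape)                   # torch.Size([1, 11])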