feat: Added GPT model code
fix: Changed .pre-commit-config.yaml to stop hook conflicts
docs: Updated README.md due to the pre-commit hooks
.pre-commit-config.yaml
@@ -1,11 +1,19 @@
 repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v3.4.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: check-yaml
+
   - repo: https://github.com/psf/black
-    rev: 24.4.2
+    rev: 22.3.0
     hooks:
       - id: black
-        language_version: python3.10.6
+        args: [--line-length=79]

   - repo: https://github.com/pycqa/flake8
-    rev: 7.0.0 # Use the latest revision
+    rev: 4.0.1
     hooks:
       - id: flake8
+        args: [--max-line-length=79, --ignore=E203]
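(Note: flake8's E203, "whitespace before ':'", is ignored because black deliberately produces that spacing when it formats slices; with both tools pinned and capped at 79 characters, the hooks stop fighting each other.)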
README.md
@@ -2,4 +2,4 @@

 # About Me
 Hi there! My name is Phoebe! I am a 20-year-old college student who is currently working on my degree in Machine Learning. I am a bit of a shy gal and like to observe everyone from a distance. My best friend is Daniel (@advtech, as he goes by on Discord). I am looking forward to getting to know you!
phoebe/neural/gpt_model.py (new file, 216 lines)
@@ -0,0 +1,216 @@
import mmap
import random

import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyperparameters
batch_size = 64  # independent sequences processed per step
block_size = 256  # maximum context length in characters
max_iters = 200  # total training iterations
learning_rate = 2e-5
eval_iters = 100  # batches averaged per loss estimate
num_embed = 384  # embedding width; ensure consistency in naming
num_heads = 8
num_layers = 8
dropout = 0.2

# Build a character-level vocabulary from the corpus.
chars = ""
with open("vocab.txt", "r", encoding="utf-8") as f:
    text = f.read()
    chars = sorted(list(set(text)))
vocab_size = len(chars)

string_to_int = {ch: i for i, ch in enumerate(chars)}
int_to_string = {i: ch for i, ch in enumerate(chars)}

def encode(s):
    return [string_to_int[c] for c in s]


def decode(lst):
    return "".join([int_to_string[i] for i in lst])

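# For example, with chars == ["a", "b", "c"], encode("cab") returns
# [2, 0, 1] and decode([2, 0, 1]) returns "cab"; encode raises KeyError
# for any character that never appears in vocab.txt.
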
def get_random_chunk(split):
    # Memory-map the corpus and sample a random slice so the whole file
    # never has to be loaded at once.
    filename = "train_split.txt" if split == "train" else "eval_split.txt"
    with open(filename, "rb") as f:
        with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mm:
            file_size = len(mm)
            start = random.randint(0, file_size - block_size * batch_size)
            mm.seek(start)
            block = mm.read(block_size * batch_size - 1)
            decoded_block = block.decode("utf-8", errors="ignore").replace(
                "\r", ""
            )
            data = torch.tensor(encode(decoded_block), dtype=torch.long)
    return data

def get_batch(split):
    data = get_random_chunk(split)
    ix = torch.randint(len(data) - block_size, (batch_size,))
    x = torch.stack([data[i : i + block_size] for i in ix])
    y = torch.stack([data[i + 1 : i + block_size + 1] for i in ix])
    x, y = x.to(device), y.to(device)
    return x, y

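# Each call returns x and y of shape (batch_size, block_size); y is x
# shifted one character to the right, i.e. the next-token targets.
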
class Head(nn.Module):
    # One head of causal (masked) self-attention.
    def __init__(self, head_size):
        super().__init__()
        self.key = nn.Linear(num_embed, head_size)
        self.query = nn.Linear(num_embed, head_size)
        self.value = nn.Linear(num_embed, head_size)
        self.register_buffer(
            "tril", torch.tril(torch.ones(block_size, block_size))
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        B, T, C = x.shape
        k = self.key(x)
        q = self.query(x)
        wei = q @ k.transpose(-2, -1) * C**-0.5
        # Mask out future positions so each token attends only to the past.
        wei = wei.masked_fill(self.tril[:T, :T] == 0, float("-inf"))
        wei = F.softmax(wei, dim=-1)
        wei = self.dropout(wei)
        v = self.value(x)
        out = wei @ v
        return out

class MultiHeadAttention(nn.Module):
    def __init__(self, num_heads, head_size):
        super().__init__()
        self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
        self.proj = nn.Linear(num_embed, num_embed)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        out = torch.cat([h(x) for h in self.heads], dim=-1)
        out = self.dropout(self.proj(out))
        return out

class FeedForward(nn.Module):
    def __init__(self, num_embed):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(num_embed, 4 * num_embed),
            nn.ReLU(),
            nn.Linear(4 * num_embed, num_embed),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)

class Block(nn.Module):
    def __init__(self, num_embed, num_head):
        super().__init__()
        head_size = num_embed // num_head
        self.sa = MultiHeadAttention(num_head, head_size)
        self.ff = FeedForward(num_embed)
        self.ln1 = nn.LayerNorm(num_embed)
        self.ln2 = nn.LayerNorm(num_embed)

    def forward(self, x):
        y = self.sa(x)
        x = self.ln1(x + y)
        y = self.ff(x)
        x = self.ln2(x + y)
        return x

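# Note: LayerNorm is applied after each residual addition (post-norm, as in
# the original Transformer), rather than the pre-norm layout GPT-2 uses.
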
class GPT(nn.Module):
    def __init__(self):
        super().__init__()
        self.token_embedding_table = nn.Embedding(vocab_size, num_embed)
        self.position_embedding_table = nn.Embedding(block_size, num_embed)
        self.blocks = nn.Sequential(
            *[Block(num_embed, num_heads) for _ in range(num_layers)]
        )
        self.ln = nn.LayerNorm(num_embed)
        self.lm_head = nn.Linear(num_embed, vocab_size)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        B, T = idx.shape
        # Sum token and position embeddings, run the transformer stack,
        # then project back to vocabulary logits.
        tok_emb = self.token_embedding_table(idx)
        pos_emb = self.position_embedding_table(
            torch.arange(T, device=idx.device)
        )
        x = tok_emb + pos_emb
        x = self.blocks(x)
        x = self.ln(x)
        logits = self.lm_head(x)

        if targets is None:
            loss = None
        else:
            B, T, C = logits.shape
            logits = logits.view(B * T, C)
            targets = targets.view(B * T)
            loss = F.cross_entropy(logits, targets)
        return logits, loss

    def generate(self, idx, max_new_tokens):
        for _ in range(max_new_tokens):
            # Crop the context to the last block_size tokens, sample the
            # next token from the softmax over logits, and append it.
            idx_cond = idx[:, -block_size:]
            logits, loss = self(idx_cond)
            logits = logits[:, -1, :]
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)
            idx = torch.cat((idx, idx_next), dim=1)
        return idx

model = GPT().to(device)


@torch.no_grad()
def estimate_loss():
    # Average the loss over eval_iters batches for each split.
    out = {}
    model.eval()
    for split in ["train", "val"]:
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            X, Y = get_batch(split)
            logits, loss = model(X, Y)
            losses[k] = loss.item()
        out[split] = losses.mean().item()
    model.train()
    return out

optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
for iter in range(max_iters):
    # Periodically report the estimated train/val losses.
    if iter % eval_iters == 0:
        losses = estimate_loss()
        print(
            f"step {iter}: train loss {losses['train']:.3f}, "
            f"val loss {losses['val']:.3f}"
        )
    xb, yb = get_batch("train")
    logits, loss = model(xb, yb)
    optimizer.zero_grad(set_to_none=True)
    loss.backward()
    optimizer.step()

print(loss.item())

torch.save(model.state_dict(), "phoebe_model.pt")
print("Model Saved!")
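For reference, a minimal inference sketch (not part of the commit): it assumes the definitions above are in scope, that phoebe_model.pt and vocab.txt are present, and that every character of the hypothetical "Hello" prompt appears in vocab.txt. Importing gpt_model.py as-is would rerun the training loop, so the training code would first need an `if __name__ == "__main__":` guard.

# Reload the saved weights and sample from the trained model.
model = GPT().to(device)
model.load_state_dict(torch.load("phoebe_model.pt", map_location=device))
model.eval()

# Hypothetical prompt, encoded as a (1, T) batch of character indices.
context = torch.tensor([encode("Hello")], dtype=torch.long, device=device)
with torch.no_grad():
    generated = model.generate(context, max_new_tokens=100)
print(decode(generated[0].tolist()))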