Feat!: Add train_gpt_model.py

BREAKING CHANGE: the code is split into two files (gpt_model.py and train_gpt_model.py), so any code that imported the old single-file module will break.
docs: add phoebe_model.pt (trained model for Phoebe)
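For downstream code, the visible break is that GPT no longer reads the vocabulary size from module-level state; the caller must build the vocabulary and pass its size in. A minimal migration sketch (illustrative only; the names mirror the diff below):

    # Before this commit, the model file built the vocabulary itself:
    #     model = GPT().to(device)
    # After it, the caller owns the vocabulary:
    import torch
    from gpt_model import GPT

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    with open("vocab.txt", "r", encoding="utf-8") as f:
        chars = sorted(set(f.read()))
    model = GPT(len(chars)).to(device)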
phoebe/neural/gpt_model.py
@@ -1,63 +1,15 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-import mmap
-import random
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # Hyperparameters
-batch_size = 64
 block_size = 256
-max_iters = 200
-learning_rate = 2e-5
-eval_iters = 100
 num_embed = 384  # Ensure consistency in naming
 num_heads = 8
 num_layers = 8
 dropout = 0.2
 
-chars = ""
-with open("vocab.txt", "r", encoding="utf-8") as f:
-    text = f.read()
-    chars = sorted(list(set(text)))
-vocab_size = len(chars)
-
-string_to_int = {ch: i for i, ch in enumerate(chars)}
-int_to_string = {i: ch for i, ch in enumerate(chars)}
-
-
-def encode(s):
-    return [string_to_int[c] for c in s]
-
-
-def decode(lst):
-    return "".join([int_to_string[i] for i in lst])
-
-
-def get_random_chunk(split):
-    filename = "train_split.txt" if split == "train" else "eval_split.txt"
-    with open(filename, "rb") as f:
-        with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mm:
-            file_size = len(mm)
-            start = random.randint(0, file_size - block_size * batch_size)
-            mm.seek(start)
-            block = mm.read(block_size * batch_size - 1)
-            decoded_block = block.decode("utf-8", errors="ignore").replace(
-                "\r", ""
-            )
-            data = torch.tensor(encode(decoded_block), dtype=torch.long)
-    return data
-
-
-def get_batch(split):
-    data = get_random_chunk(split)
-    ix = torch.randint(len(data) - block_size, (batch_size,))
-    x = torch.stack([data[i : i + block_size] for i in ix])
-    y = torch.stack([data[i + 1 : i + block_size + 1] for i in ix])
-    x, y = x.to(device), y.to(device)
-    return x, y
 
 
 class Head(nn.Module):
     def __init__(self, head_size):
@@ -128,7 +80,7 @@ class Block(nn.Module):
 
 
 class GPT(nn.Module):
-    def __init__(self):
+    def __init__(self, vocab_size):
         super().__init__()
         self.token_embedding_table = nn.Embedding(vocab_size, num_embed)
         self.position_embedding_table = nn.Embedding(block_size, num_embed)
@@ -170,7 +122,7 @@ class GPT(nn.Module):
     def generate(self, idx, max_new_tokens):
         for _ in range(max_new_tokens):
             idx_cond = idx[:, -block_size:]
-            logits, loss = self(idx_cond)
+            logits, _ = self(idx_cond)
             logits = logits[:, -1, :]
             probs = F.softmax(logits, dim=-1)
             idx_next = torch.multinomial(probs, num_samples=1)
@@ -178,39 +130,9 @@ class GPT(nn.Module):
         return idx
 
 
-model = GPT().to(device)
+def encode(s, string_to_int):
+    return [string_to_int[c] for c in s]
 
 
-@torch.no_grad()
-def estimate_loss():
-    out = {}
-    model.eval()
-    for split in ["train", "val"]:
-        losses = torch.zeros(eval_iters)
-        for k in range(eval_iters):
-            X, Y = get_batch(split)
-            logits, loss = model(X, Y)
-            losses[k] = loss.item()
-        out[split] = losses.mean().item()
-    model.train()
-    return out
-
-
-optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
-for iter in range(max_iters):
-    if iter % eval_iters == 0:
-        losses = estimate_loss()
-        print(
-            f"step {iter}: train loss {losses['train']:.3f}, "
-            f"val loss {losses['val']:.3f}"
-        )
-    xb, yb = get_batch("train")
-    logits, loss = model(xb, yb)
-    optimizer.zero_grad(set_to_none=True)
-    loss.backward()
-    optimizer.step()
-
-print(loss.item())
-
-torch.save(model.state_dict(), "phoebe_model.pt")
-print("Model Saved!")
+def decode(lst, int_to_string):
+    return "".join([int_to_string[i] for i in lst])
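encode and decode likewise now take their character mappings as arguments instead of closing over module globals. A small usage sketch (assumes every character of the input appears in vocab.txt):

    from gpt_model import encode, decode

    with open("vocab.txt", "r", encoding="utf-8") as f:
        chars = sorted(set(f.read()))
    string_to_int = {ch: i for i, ch in enumerate(chars)}
    int_to_string = {i: ch for i, ch in enumerate(chars)}

    tokens = encode("hello", string_to_int)          # str -> list of int ids
    assert decode(tokens, int_to_string) == "hello"  # ids -> str round-trip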
phoebe/neural/train_gpt_model.py (new file, 93 lines)
@@ -0,0 +1,93 @@
+import torch
+import mmap
+import random
+from gpt_model import GPT, encode
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Hyperparameters
+batch_size = 64
+block_size = 256
+max_iters = 200
+learning_rate = 2e-5
+eval_iters = 100
+dropout = 0.2
+
+chars = ""
+with open("vocab.txt", "r", encoding="utf-8") as f:
+    text = f.read()
+    chars = sorted(list(set(text)))
+
+# Ensure that space and other special characters are included
+required_chars = " \n\r\t"
+for char in required_chars:
+    if char not in chars:
+        chars.append(char)
+
+vocab_size = len(chars)
+string_to_int = {ch: i for i, ch in enumerate(chars)}
+int_to_string = {i: ch for i, ch in enumerate(chars)}
+
+
+def get_random_chunk(split):
+    filename = "train_split.txt" if split == "train" else "eval_split.txt"
+    with open(filename, "rb") as f:
+        with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mm:
+            file_size = len(mm)
+            start = random.randint(0, file_size - block_size * batch_size)
+            mm.seek(start)
+            block = mm.read(block_size * batch_size - 1)
+            decoded_block = block.decode("utf-8", errors="ignore").replace(
+                "\r", ""
+            )
+            data = torch.tensor(
+                encode(decoded_block, string_to_int), dtype=torch.long
+            )
+    return data
+
+
+def get_batch(split):
+    data = get_random_chunk(split)
+    ix = torch.randint(len(data) - block_size, (batch_size,))
+    x = torch.stack([data[i : i + block_size] for i in ix])
+    y = torch.stack([data[i + 1 : i + block_size + 1] for i in ix])
+    x, y = x.to(device), y.to(device)
+    return x, y
+
+
+model = GPT(vocab_size).to(device)
+
+
+@torch.no_grad()
+def estimate_loss():
+    out = {}
+    model.eval()
+    for split in ["train", "val"]:
+        losses = torch.zeros(eval_iters)
+        for k in range(eval_iters):
+            X, Y = get_batch(split)
+            logits, loss = model(X, Y)
+            losses[k] = loss.item()
+        out[split] = losses.mean().item()
+    model.train()
+    return out
+
+
+optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
+for iter in range(max_iters):
+    if iter % eval_iters == 0:
+        losses = estimate_loss()
+        print(
+            f"step {iter}: train loss {losses['train']:.3f}, "
+            f"val loss {losses['val']:.3f}"
+        )
+    xb, yb = get_batch("train")
+    logits, loss = model(xb, yb)
+    optimizer.zero_grad(set_to_none=True)
+    loss.backward()
+    optimizer.step()
+
+print(loss.item())
+
+torch.save(model.state_dict(), "phoebe_model.pt")
+print("Model Saved!")
phoebe_model.pt (new binary file)
Binary file not shown.
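The training script above ends by saving the model's state_dict to phoebe_model.pt; this is that artifact. A minimal sketch of reloading it for generation (illustrative; it assumes vocab.txt is the same file used in training, and rebuilds the vocabulary exactly as train_gpt_model.py does so the token ids line up):

    import torch
    from gpt_model import GPT, decode

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Rebuild the vocabulary the same way as train_gpt_model.py:
    # sort the unique characters, then append any missing specials.
    with open("vocab.txt", "r", encoding="utf-8") as f:
        chars = sorted(set(f.read()))
    for char in " \n\r\t":
        if char not in chars:
            chars.append(char)
    int_to_string = {i: ch for i, ch in enumerate(chars)}

    model = GPT(len(chars)).to(device)
    model.load_state_dict(torch.load("phoebe_model.pt", map_location=device))
    model.eval()

    # Seed generation with a single newline token and sample 100 new tokens.
    context = torch.tensor([[chars.index("\n")]], dtype=torch.long, device=device)
    out = model.generate(context, max_new_tokens=100)
    print(decode(out[0].tolist(), int_to_string))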