diff --git a/phoebe/neural/gpt_model.py b/phoebe/neural/gpt_model.py
index 5d7c583..1b46fcf 100644
--- a/phoebe/neural/gpt_model.py
+++ b/phoebe/neural/gpt_model.py
@@ -1,63 +1,15 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-import mmap
-import random
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # Hyperparameters
 batch_size = 64
 block_size = 256
-max_iters = 200
-learning_rate = 2e-5
-eval_iters = 100
 num_embed = 384  # Ensure consistency in naming
 num_heads = 8
 num_layers = 8
 dropout = 0.2
 
-chars = ""
-with open("vocab.txt", "r", encoding="utf-8") as f:
-    text = f.read()
-    chars = sorted(list(set(text)))
-vocab_size = len(chars)
-
-string_to_int = {ch: i for i, ch in enumerate(chars)}
-int_to_string = {i: ch for i, ch in enumerate(chars)}
-
-
-def encode(s):
-    return [string_to_int[c] for c in s]
-
-
-def decode(lst):
-    return "".join([int_to_string[i] for i in lst])
-
-
-def get_random_chunk(split):
-    filename = "train_split.txt" if split == "train" else "eval_split.txt"
-    with open(filename, "rb") as f:
-        with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mm:
-            file_size = len(mm)
-            start = random.randint(0, file_size - block_size * batch_size)
-            mm.seek(start)
-            block = mm.read(block_size * batch_size - 1)
-            decoded_block = block.decode("utf-8", errors="ignore").replace(
-                "\r", ""
-            )
-            data = torch.tensor(encode(decoded_block), dtype=torch.long)
-    return data
-
-
-def get_batch(split):
-    data = get_random_chunk(split)
-    ix = torch.randint(len(data) - block_size, (batch_size,))
-    x = torch.stack([data[i : i + block_size] for i in ix])
-    y = torch.stack([data[i + 1 : i + block_size + 1] for i in ix])
-    x, y = x.to(device), y.to(device)
-    return x, y
-
 
 class Head(nn.Module):
     def __init__(self, head_size):
@@ -128,7 +80,7 @@ class Block(nn.Module):
 
 
 class GPT(nn.Module):
-    def __init__(self):
+    def __init__(self, vocab_size):
         super().__init__()
         self.token_embedding_table = nn.Embedding(vocab_size, num_embed)
         self.position_embedding_table = nn.Embedding(block_size, num_embed)
@@ -170,7 +122,7 @@ class GPT(nn.Module):
     def generate(self, idx, max_new_tokens):
         for _ in range(max_new_tokens):
             idx_cond = idx[:, -block_size:]
-            logits, loss = self(idx_cond)
+            logits, _ = self(idx_cond)
             logits = logits[:, -1, :]
             probs = F.softmax(logits, dim=-1)
             idx_next = torch.multinomial(probs, num_samples=1)
@@ -178,39 +130,9 @@ class GPT(nn.Module):
         return idx
 
 
-model = GPT().to(device)
+def encode(s, string_to_int):
+    return [string_to_int[c] for c in s]
 
 
-@torch.no_grad()
-def estimate_loss():
-    out = {}
-    model.eval()
-    for split in ["train", "val"]:
-        losses = torch.zeros(eval_iters)
-        for k in range(eval_iters):
-            X, Y = get_batch(split)
-            logits, loss = model(X, Y)
-            losses[k] = loss.item()
-        out[split] = losses.mean().item()
-    model.train()
-    return out
-
-
-optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
-for iter in range(max_iters):
-    if iter % eval_iters == 0:
-        losses = estimate_loss()
-        print(
-            f"step {iter}: train loss {losses['train']:.3f}, "
-            f"val loss {losses['val']:.3f}"
-        )
-    xb, yb = get_batch("train")
-    logits, loss = model(xb, yb)
-    optimizer.zero_grad(set_to_none=True)
-    loss.backward()
-    optimizer.step()
-
-print(loss.item())
-
-torch.save(model.state_dict(), "phoebe_model.pt")
-print("Model Saved!")
+def decode(lst, int_to_string):
+    return "".join([int_to_string[i] for i in lst])
diff --git a/phoebe/neural/train_gpt_model.py b/phoebe/neural/train_gpt_model.py
new file mode 100644
index 0000000..958d1e9
--- /dev/null
+++ b/phoebe/neural/train_gpt_model.py
@@ -0,0 +1,93 @@
+import torch
+import mmap
+import random
+from gpt_model import GPT, encode
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Hyperparameters
+batch_size = 64
+block_size = 256
+max_iters = 200
+learning_rate = 2e-5
+eval_iters = 100
+dropout = 0.2
+
+chars = ""
+with open("vocab.txt", "r", encoding="utf-8") as f:
+    text = f.read()
+    chars = sorted(list(set(text)))
+
+# Ensure that space and other special characters are included
+required_chars = " \n\r\t"
+for char in required_chars:
+    if char not in chars:
+        chars.append(char)
+
+vocab_size = len(chars)
+string_to_int = {ch: i for i, ch in enumerate(chars)}
+int_to_string = {i: ch for i, ch in enumerate(chars)}
+
+
+def get_random_chunk(split):
+    filename = "train_split.txt" if split == "train" else "eval_split.txt"
+    with open(filename, "rb") as f:
+        with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mm:
+            file_size = len(mm)
+            start = random.randint(0, file_size - block_size * batch_size)
+            mm.seek(start)
+            block = mm.read(block_size * batch_size - 1)
+            decoded_block = block.decode("utf-8", errors="ignore").replace(
+                "\r", ""
+            )
+            data = torch.tensor(
+                encode(decoded_block, string_to_int), dtype=torch.long
+            )
+    return data
+
+
+def get_batch(split):
+    data = get_random_chunk(split)
+    ix = torch.randint(len(data) - block_size, (batch_size,))
+    x = torch.stack([data[i : i + block_size] for i in ix])
+    y = torch.stack([data[i + 1 : i + block_size + 1] for i in ix])
+    x, y = x.to(device), y.to(device)
+    return x, y
+
+
+model = GPT(vocab_size).to(device)
+
+
+@torch.no_grad()
+def estimate_loss():
+    out = {}
+    model.eval()
+    for split in ["train", "val"]:
+        losses = torch.zeros(eval_iters)
+        for k in range(eval_iters):
+            X, Y = get_batch(split)
+            logits, loss = model(X, Y)
+            losses[k] = loss.item()
+        out[split] = losses.mean().item()
+    model.train()
+    return out
+
+
+optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
+for iter in range(max_iters):
+    if iter % eval_iters == 0:
+        losses = estimate_loss()
+        print(
+            f"step {iter}: train loss {losses['train']:.3f}, "
+            f"val loss {losses['val']:.3f}"
+        )
+    xb, yb = get_batch("train")
+    logits, loss = model(xb, yb)
+    optimizer.zero_grad(set_to_none=True)
+    loss.backward()
+    optimizer.step()
+
+print(loss.item())
+
+torch.save(model.state_dict(), "phoebe_model.pt")
+print("Model Saved!")
diff --git a/phoebe_model.pt b/phoebe_model.pt
new file mode 100644
index 0000000..9388979
Binary files /dev/null and b/phoebe_model.pt differ
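
Usage note (not part of the diff): after this refactor, gpt_model.py exposes GPT(vocab_size) plus pure encode/decode helpers, while train_gpt_model.py owns the data pipeline, vocabulary construction, and checkpointing. The sketch below shows how the checkpoint saved by train_gpt_model.py could be loaded for inference; it is illustrative only, and the seed token and max_new_tokens value are arbitrary choices, not anything specified by this PR.

# Inference sketch (illustrative, not part of this PR). Assumes the
# refactored gpt_model.py above and a checkpoint written by
# train_gpt_model.py to phoebe_model.pt.
import torch
from gpt_model import GPT, decode

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Rebuild the vocabulary exactly as train_gpt_model.py does, including the
# forced whitespace characters, so vocab_size matches the checkpoint.
with open("vocab.txt", "r", encoding="utf-8") as f:
    chars = sorted(list(set(f.read())))
for char in " \n\r\t":
    if char not in chars:
        chars.append(char)
int_to_string = {i: ch for i, ch in enumerate(chars)}

model = GPT(len(chars)).to(device)
model.load_state_dict(torch.load("phoebe_model.pt", map_location=device))
model.eval()

# Seed generation with a single token (index 0 here, chosen arbitrarily);
# generate() trims the context to block_size and samples autoregressively.
context = torch.zeros((1, 1), dtype=torch.long, device=device)
with torch.no_grad():
    output = model.generate(context, max_new_tokens=200)
print(decode(output[0].tolist(), int_to_string))

Rebuilding the vocabulary with the same required_chars padding matters: if the character set drifts between training and inference, vocab_size changes and load_state_dict fails on the embedding and output-head shapes.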