Feat: Phoebe replies, but it's gibberish

This is a version break because of the file structure change.
import mmap
import random

import torch

from gpt_model import GPT, encode, decode

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyperparameters
batch_size = 64
block_size = 256
max_iters = 500
learning_rate = 2e-5
eval_iters = 250
dropout = 0.2

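# Build the character-level vocabulary from vocab.txt.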
chars = ""
with open("vocab.txt", "r", encoding="utf-8") as f:
    text = f.read()
    chars = sorted(list(set(text)))

# Ensure that space and other special characters are included
required_chars = " \n\r\t"
for char in required_chars:
    if char not in chars:
        chars.append(char)

# Add a special token for unknown characters
special_token = "<unk>"
if special_token not in chars:
    chars.append(special_token)

vocab_size = len(chars)
string_to_int = {ch: i for i, ch in enumerate(chars)}
int_to_string = {i: ch for i, ch in enumerate(chars)}


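# Memory-map the split file and sample a random contiguous chunk of text,
# so the whole corpus never has to be loaded into RAM at once.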
def get_random_chunk(split):
    filename = "train_split.txt" if split == "train" else "eval_split.txt"
    with open(filename, "rb") as f:
        with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mm:
            file_size = len(mm)
            start = random.randint(0, file_size - block_size * batch_size)
            mm.seek(start)
            block = mm.read(block_size * batch_size - 1)
            decoded_block = block.decode("utf-8", errors="ignore").replace(
                "\r", ""
            )
            data = torch.tensor(
                encode(decoded_block, string_to_int), dtype=torch.long
            )
    return data


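# Draw batch_size random windows from a fresh chunk; the targets are the
# inputs shifted one character to the right.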
def get_batch(split):
    data = get_random_chunk(split)
    ix = torch.randint(len(data) - block_size, (batch_size,))
    x = torch.stack([data[i : i + block_size] for i in ix])
    y = torch.stack([data[i + 1 : i + block_size + 1] for i in ix])
    x, y = x.to(device), y.to(device)
    return x, y


model = GPT(vocab_size).to(device)


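# Average the loss over eval_iters batches per split, with gradients
# disabled and the model temporarily in eval mode.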
@torch.no_grad()
def estimate_loss():
    out = {}
    model.eval()
    for split in ["train", "val"]:
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            X, Y = get_batch(split)
            logits, loss = model(X, Y)
            losses[k] = loss.item()
        out[split] = losses.mean().item()
    model.train()
    return out


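# Standard training loop: AdamW updates with a periodic train/val loss
# report, then a checkpoint of the final weights.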
def train_model():
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    for step in range(max_iters):
        if step % eval_iters == 0:
            losses = estimate_loss()
            print(
                f"step {step}: train loss {losses['train']:.3f}, "
                f"val loss {losses['val']:.3f}"
            )
        xb, yb = get_batch("train")
        logits, loss = model(xb, yb)
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()

    print(loss.item())
    torch.save(model.state_dict(), "phoebe_model.pt")
    print("Model Saved!")


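# List any characters in s that the vocabulary cannot encode.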
def check_input_chars(s, string_to_int):
    unknown_chars = [c for c in s if c not in string_to_int]
    if unknown_chars:
        print(f"Unknown characters in input: {unknown_chars}")
    return unknown_chars


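# Encode a chat message, run generation, and decode the reply. Returns an
# error string for empty input or characters outside the vocabulary.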
def process_message(message):
    if not message.strip():
        return "Message is empty or invalid."

    # Check for unknown characters
    unknown_chars = check_input_chars(message, string_to_int)
    if unknown_chars:
        return f"Message contains unknown characters: {unknown_chars}"

    encoded_text = torch.tensor(
        [encode(message, string_to_int)], dtype=torch.long
    ).to(device)
    print(f"Encoded text shape: {encoded_text.shape}")  # Debug print
    if encoded_text.size(1) == 0:
        return "Message could not be processed."

    response = model.generate(encoded_text, max_new_tokens=50)
    decoded_response = decode(response[0].tolist(), int_to_string)
    return decoded_response


# train_model()
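
# Example usage sketch (hypothetical): reload the saved weights and chat.
# Assumes phoebe_model.pt exists, i.e. train_model() has been run at some point.
# model.load_state_dict(torch.load("phoebe_model.pt", map_location=device))
# model.eval()
# print(process_message("Hello, Phoebe!"))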