import torch
import torch.nn as nn
import random

from model.tokenizer import Tokenizer

# Run on the GPU when one is available, otherwise fall back to the CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = Tokenizer()
VOCAB_SIZE = 10000  # Temporary cap; the tokenizer's vocabulary grows dynamically.
EMBED_DIM = 128

class TinyTransformer(nn.Module):
    """A minimal embedding -> layer-norm -> linear language-model head (no attention blocks)."""

    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(VOCAB_SIZE, EMBED_DIM)
        self.ln1 = nn.LayerNorm(EMBED_DIM)
        self.fc = nn.Linear(EMBED_DIM, VOCAB_SIZE)

    def forward(self, x):
        x = self.embed(x)  # (batch, seq) -> (batch, seq, EMBED_DIM)
        x = self.ln1(x)
        return self.fc(x)  # per-position logits over the vocabulary: (batch, seq, VOCAB_SIZE)

model = TinyTransformer().to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
loss_fn = nn.CrossEntropyLoss()
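
# Minimal sketch of how the optimizer and loss above could drive one next-token
# training step. `train_step` is an illustrative helper, not an API from the rest
# of the repo; it assumes `tokens` is a 1-D LongTensor of at least two token ids
# produced by the tokenizer. The project's actual training loop may differ.
def train_step(tokens):
    inputs = tokens[:-1].unsqueeze(0).to(DEVICE)   # (1, seq_len - 1)
    targets = tokens[1:].to(DEVICE)                # (seq_len - 1,)
    logits = model(inputs).squeeze(0)              # (seq_len - 1, VOCAB_SIZE)
    loss = loss_fn(logits, targets)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()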

def generate_response():
    """Pick a random known token and return the model's single-step prediction as text."""
    seed = torch.tensor([random.randint(0, tokenizer.next_id - 1)], device=DEVICE)
    with torch.no_grad():  # inference only, no gradient tracking needed
        output = model(seed.unsqueeze(0))  # add a batch dimension: shape (1, 1)
    pred = torch.argmax(output, dim=-1).squeeze().tolist()
    if not isinstance(pred, list):  # a single position squeezes down to a bare int
        pred = [pred]
    return tokenizer.detokenize(pred)
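
if __name__ == "__main__":
    # Example usage (sketch): smoke-test one-step generation with the untrained model.
    # Assumes the Tokenizer has already registered at least one token (tokenizer.next_id > 0).
    print(generate_response())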