Ruby/model/brain.py

import random
import time

import torch
import torch.nn as nn
import torch.nn.functional as F

from model.tokenizer import Tokenizer
from model.memory import save_dream

# Rolling buffer of the most recent (score, sentence) daydreams.
recent_dreams = []

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = Tokenizer()

VOCAB_SIZE = 10000  # Temporary cap; the tokenizer's vocabulary grows dynamically.
EMBED_DIM = 128
class TinyTransformer(nn.Module):
    """Despite the name, a minimal embedding -> layer norm -> vocab projection (no attention yet)."""

    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(VOCAB_SIZE, EMBED_DIM)
        self.ln1 = nn.LayerNorm(EMBED_DIM)
        self.fc = nn.Linear(EMBED_DIM, VOCAB_SIZE)

    def forward(self, x):
        # x: [batch, seq_len] token ids -> per-position logits over the vocabulary.
        x = self.embed(x)
        x = self.ln1(x)
        return self.fc(x)


model = TinyTransformer().to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
loss_fn = nn.CrossEntropyLoss()
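
# Illustrative sketch only (not part of the original file): one way the optimizer and
# loss_fn above are typically wired for next-token prediction. The project's real
# training code (e.g. train_on_message, not shown in this excerpt) may differ.
def _example_train_step(token_ids):
    """Hypothetical helper: one gradient step teaching each token to predict its successor.

    Expects a list of at least two token ids, each smaller than VOCAB_SIZE.
    """
    model.train()
    ids = torch.tensor(token_ids, device=DEVICE).unsqueeze(0)  # [1, seq_len]
    logits = model(ids[:, :-1])                                # predict token t+1 from token t
    targets = ids[:, 1:]                                       # shifted-by-one targets
    loss = loss_fn(logits.reshape(-1, VOCAB_SIZE), targets.reshape(-1))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()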
def generate_response():
    """Seed with one random known token and return the model's greedy next-token guess."""
    seed = torch.tensor([random.randint(0, tokenizer.next_id - 1)], device=DEVICE)
    with torch.no_grad():
        output = model(seed.unsqueeze(0))
    pred = torch.argmax(output, dim=-1).squeeze().tolist()
    if not isinstance(pred, list):
        pred = [pred]  # A single token squeezes down to a bare int.
    return tokenizer.detokenize(pred)
def score_sentence(sentence: str) -> float:
    """Crude quality score: reward longer sentences (capped at 20 words) with more unique words."""
    words = sentence.strip().split()
    length = len(words)
    diversity = len(set(words)) / (length + 1)
    if length < 4:
        return 0.0
    return diversity * min(length, 20)
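
# Worked example (illustrative): "the cat sat on the mat" has 6 words, 5 of them unique,
# so diversity = 5 / (6 + 1) ≈ 0.714 and the score is 0.714 * min(6, 20) ≈ 4.29;
# anything under four words scores 0.0.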
def daydream():
    """Sample a short sequence from the model, score it, and save/train on the good ones."""
    model.eval()
    seed = torch.tensor([random.randint(0, tokenizer.next_id - 1)], device=DEVICE).unsqueeze(0)
    dream = []

    with torch.no_grad():
        for _ in range(12):
            out = model(seed)
            logits = out[:, -1, :]  # logits for the last position only
            probs = F.softmax(logits, dim=-1)
            token = torch.multinomial(probs, num_samples=1)
            dream.append(token.item())
            seed = torch.cat([seed, token], dim=1)  # feed the sampled token back in

    sentence = tokenizer.detokenize(dream)
    score = score_sentence(sentence)

    if score > 0.3:
        save_dream(sentence, score)
        # train_on_message is not defined or imported in the excerpt above;
        # it is presumably provided further down in this file.
        train_on_message(sentence)
        recent_dreams.append((score, sentence))
        if len(recent_dreams) > 10:
            recent_dreams.pop(0)
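

# Minimal usage sketch (an assumption, not part of the original file): warm the tokenizer
# up so next_id > 0, then ask for a reply and let the model daydream once.
# tokenizer.tokenize() is a hypothetical call here; use whatever method this project's
# Tokenizer exposes for ingesting text. daydream() also assumes train_on_message is
# available at call time.
if __name__ == "__main__":
    tokenizer.tokenize("hello ruby how are you today")  # hypothetical warm-up
    print("reply:", generate_response())
    daydream()
    print("recent dreams:", recent_dreams)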