# RubyOld/ruby.py
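"""Ruby: an experimental Discord chatbot that learns online.

Each incoming message is combined with a short rolling memory, used to
generate an internal "thought" and a reply, and then replayed through a
single online training step. A background "dream" loop keeps training
between messages.

Usage (expects DISCORD_TOKEN in the environment or a .env file):
    python ruby.py
"""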
import discord
import torch
from debug import DebugMonitor
from dream import run_dream_loop
from model import MiniTransformer
from train_step import online_train_step
from tokenizer import ChildTokenizer
from feedback import basic_self_feedback
from memory import MemoryBuffer
from utils import update_model_vocab, track_loss, sample_reply, sample_thought
from personality import Personality
from dotenv import load_dotenv
import os
import logging
import threading
# Configure logging
logging.basicConfig(filename='ruby.log', level=logging.ERROR)
# Load environment variables
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
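# Fail fast if the token is missing, rather than erroring later inside client.run.
if TOKEN is None:
    raise RuntimeError("DISCORD_TOKEN is not set; add it to your environment or .env file.")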
# Initialize personality
personality = Personality()
# Initialize debug monitor
debug = DebugMonitor()
# Initialize model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = ChildTokenizer()
memory = MemoryBuffer(max_len=3)
model = MiniTransformer(vocab_size=tokenizer.vocab_size()).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
# Initialize Discord client
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
# Start the dream loop in a separate thread
dream_thread = threading.Thread(
    target=run_dream_loop,
    args=(model, tokenizer, device, optimizer, online_train_step),
    daemon=True
)
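# daemon=True: the dream loop is torn down automatically when the main
# process exits, so it never blocks shutdown.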
dream_thread.start()
# Event handlers
@client.event
async def on_ready():
    print(f"{client.user} is ready and learning!")

@client.event
async def on_message(message):
    try:
        # Ignore the bot's own messages
        if message.author == client.user:
            return

        # Combine recent memory with the new user input
        user_input = message.content
        context = memory.get_context()
        full_input = ' '.join(context + [user_input])

        # 🔍 Debug: log context
        debug.log_context(full_input)

        # Ensure the model's embeddings match the (possibly grown) tokenizer vocab
        update_model_vocab(model, tokenizer)

        # Encode the input; skip messages too short to learn from
        input_ids = tokenizer.encode(full_input, return_tensors=True, freeze=True).to(device)
        if input_ids.size(1) < 2:
            return

        # 💭 Generate internal thought
        thought = sample_thought(model, tokenizer, device, full_input)
        debug.log_thought(thought)

        # 🗣️ Generate reply from Ruby
        reply = sample_reply(model, tokenizer, input_ids)
        debug.log_context(reply)

        # ✅ Send the reply, falling back to "..." if it came back empty
        await message.channel.send(reply if reply.strip() else "...")

        # Add the exchange to memory
        memory.add(user_input, reply)

        # 📉 Train on the exchange and log the loss
        # (note: this step runs synchronously and blocks the event loop while it trains)
        training_example = f"User: {user_input}\nRuby: {reply}"
        loss = online_train_step(model, optimizer, tokenizer, training_example, device)
        debug.log_loss(loss)

    except Exception:
        logging.exception("Error in on_message")
        await message.channel.send("Oops, I had a brain freeze.")

client.run(TOKEN)