Adjusted Dream States, added a dashboard to monitor her

This commit is contained in:
Dani 2025-04-15 16:14:11 -04:00
parent 3829ca8d01
commit 6c9dde2289
4 changed files with 96 additions and 10 deletions

60
dashboard.py Normal file
View File

@ -0,0 +1,60 @@
"""Minimal Flask dashboard for monitoring Ruby: recent dreams, messages, vocab size."""
from flask import Flask, render_template_string
from datetime import datetime
# NOTE(review): `datetime` appears unused in this module — confirm before removing.
import os

# Single Flask application instance; served by start_dashboard().
app = Flask(__name__)
def _tail(path, limit=10):
    """Return the last *limit* stripped lines of *path*, or [] if the file is absent.

    Reads the whole file; acceptable here because the logs are small.
    """
    if not os.path.exists(path):
        return []
    with open(path, encoding="utf-8") as f:
        return [line.strip() for line in f.readlines()[-limit:]]


@app.route("/")
def home():
    """Render the auto-refreshing dashboard page.

    Shows the tokenizer vocabulary size plus the ten most recent daydreams
    and messages (newest first), re-read from disk on every request so the
    page stays live via the 5-second meta refresh.
    """
    dreams = _tail("logs/dreams.log")
    messages = _tail("logs/messages.log")
    vocab_size = 0
    if os.path.exists("tokenizer_vocab.txt"):
        with open("tokenizer_vocab.txt", encoding="utf-8") as f:
            vocab_size = sum(1 for _ in f)  # one vocab entry per line
    return render_template_string("""
    <!DOCTYPE html>
    <html>
    <head>
        <title>Ruby Dashboard</title>
        <meta http-equiv="refresh" content="5">
        <style>
            body { background: #121212; color: #eee; font-family: sans-serif; padding: 20px; }
            h1, h3 { color: #e48bf8; }
            li { margin-bottom: 4px; }
        </style>
    </head>
    <body>
        <h1>🌸 Ruby's Dashboard</h1>
        <p><b>Vocabulary Size:</b> {{ vocab_size }}</p>
        <h3>🧠 Recent Daydreams</h3>
        <ul>
        {% for dream in dreams %}
            <li>{{ dream }}</li>
        {% endfor %}
        </ul>
        <h3>📨 Recent Messages</h3>
        <ul>
        {% for msg in messages %}
            <li>{{ msg }}</li>
        {% endfor %}
        </ul>
    </body>
    </html>
    """, dreams=dreams[::-1], messages=messages[::-1], vocab_size=vocab_size)
def start_dashboard():
    """Serve the dashboard on port 5000, listening on every interface.

    Blocking call — intended to be launched in a background/daemon thread.
    """
    app.run(host="0.0.0.0", port=5000, debug=False)

View File

@ -2,8 +2,10 @@ import discord
import asyncio import asyncio
import atexit import atexit
import os import os
import threading
from dotenv import load_dotenv from dotenv import load_dotenv
from datetime import datetime, timedelta from datetime import datetime, timedelta
from dashboard import start_dashboard
from tokenizer import Tokenizer from tokenizer import Tokenizer
from model import RubyTrainer from model import RubyTrainer
@ -110,6 +112,7 @@ class Ruby(discord.Client):
client = None client = None
try: try:
client = Ruby() client = Ruby()
def on_exit(): def on_exit():
if client: if client:
print("[EXIT] Ruby is gracefully shutting down...") print("[EXIT] Ruby is gracefully shutting down...")
@ -117,6 +120,8 @@ try:
client.trainer.daydream(rounds=10) client.trainer.daydream(rounds=10)
atexit.register(on_exit) atexit.register(on_exit)
dashboard_thread = threading.Thread(target=start_dashboard, daemon=True)
dashboard_thread.start()
client.run(TOKEN) client.run(TOKEN)
finally: finally:
if client is not None: if client is not None:

View File

@ -3,6 +3,7 @@ import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
import os import os
from datetime import datetime from datetime import datetime
from collections import Counter
class MiniGPT(nn.Module): class MiniGPT(nn.Module):
@ -95,29 +96,32 @@ class RubyTrainer:
input_ids = torch.tensor([[self.tokenizer.vocab["<START>"]]], dtype=torch.long, device=self.device) input_ids = torch.tensor([[self.tokenizer.vocab["<START>"]]], dtype=torch.long, device=self.device)
token_freq = Counter()
for _ in range(max_tokens): for _ in range(max_tokens):
with torch.no_grad(): with torch.no_grad():
out = self.model(input_ids) out = self.model(input_ids)
logits = out[:, -1, :] / temperature logits = out[:, -1, :] / temperature
if top_k > 0: # 💡 Apply repetition penalty
top_k_logits, top_k_indices = torch.topk(logits, top_k) for token_id, freq in token_freq.items():
probs = F.softmax(top_k_logits, dim=-1) if freq > 0:
next_token = top_k_indices[0][torch.multinomial(probs, 1)] logits[0, token_id] *= 0.7 ** freq # dampens reused tokens
else:
probs = F.softmax(logits, dim=-1) probs = F.softmax(logits, dim=-1)
if top_k > 0:
top_k_logits, top_k_indices = torch.topk(probs, top_k)
next_token = top_k_indices[0][torch.multinomial(top_k_logits, 1)]
else:
next_token = torch.multinomial(probs, 1)[0] next_token = torch.multinomial(probs, 1)[0]
# ⬇️ Fix here: reshape next_token to (1, 1) token_freq[next_token.item()] += 1
next_token = next_token.view(1, 1) next_token = next_token.view(1, 1)
input_ids = torch.cat([input_ids, next_token], dim=1) input_ids = torch.cat([input_ids, next_token], dim=1)
if input_ids.size(1) < 5: # prevent ending too early
logits[0, self.tokenizer.vocab["<END>"]] = float("-inf")
if next_token.item() == self.tokenizer.vocab["<END>"]: if next_token.item() == self.tokenizer.vocab["<END>"]:
break break
token_ids = input_ids.squeeze(0).tolist()[1:] # skip <START> token_ids = input_ids.squeeze(0).tolist()[1:] # skip <START>
reply_tokens = [tid for tid in token_ids if tid != self.tokenizer.vocab.get("<END>")] reply_tokens = [tid for tid in token_ids if tid != self.tokenizer.vocab.get("<END>")]
return self.tokenizer.detokenize(reply_tokens) return self.tokenizer.detokenize(reply_tokens)
@ -154,7 +158,7 @@ class RubyTrainer:
thought = self.generate_reply() thought = self.generate_reply()
attempts += 1 attempts += 1
if thought and len(thought.strip().split()) >= 4: if thought and len(set(thought.lower().split())) >= 3:
self.train_on_tokens_from_text(thought) self.train_on_tokens_from_text(thought)
thoughts.append(thought) thoughts.append(thought)

17
state_tracker.py Normal file
View File

@ -0,0 +1,17 @@
from datetime import datetime, timezone
class RubyState:
    """Mutable snapshot of Ruby's runtime state for the dashboard.

    Retains only the most recent entries per log so memory stays bounded.
    Timestamps are timezone-aware UTC (`datetime.utcnow()` is deprecated
    since Python 3.12 and returned naive datetimes).
    NOTE(review): callers comparing these timestamps against naive datetimes
    must be updated to aware datetimes — confirm at the call sites.
    """

    # Maximum number of recent entries retained per log.
    _HISTORY = 10

    def __init__(self):
        # When the last message was seen (aware UTC datetime).
        self.last_message_time = datetime.now(timezone.utc)
        # Human-readable description of what Ruby is doing right now.
        self.current_activity = "Booting up..."
        # Recent (timestamp, thought_text) pairs, oldest first, max _HISTORY.
        self.latest_thoughts = []
        # Recent (timestamp, loss_value) pairs, oldest first, max _HISTORY.
        self.latest_losses = []
        # Tokenizer vocabulary size; updated externally.
        self.vocab_size = 0

    def log_thought(self, thought):
        """Record *thought* with a UTC timestamp, keeping only the newest entries."""
        self.latest_thoughts.append((datetime.now(timezone.utc), thought))
        self.latest_thoughts = self.latest_thoughts[-self._HISTORY:]

    def log_loss(self, value):
        """Record training loss *value* with a UTC timestamp, keeping only the newest entries."""
        self.latest_losses.append((datetime.now(timezone.utc), value))
        self.latest_losses = self.latest_losses[-self._HISTORY:]