diff --git a/dashboard.py b/dashboard.py
new file mode 100644
index 0000000..d42d310
--- /dev/null
+++ b/dashboard.py
@@ -0,0 +1,60 @@
+from flask import Flask, render_template_string
+from datetime import datetime
+import os
+
+app = Flask(__name__)
+
+
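+# Home view: show the ten most recent daydreams and messages (newest first) and the current vocabulary size.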
+@app.route("/")
+def home():
+    dreams = []
+    if os.path.exists("logs/dreams.log"):
+        with open("logs/dreams.log", encoding="utf-8") as f:
+            dreams = [line.strip() for line in f.readlines()[-10:]]
+
+    messages = []
+    if os.path.exists("logs/messages.log"):
+        with open("logs/messages.log", encoding="utf-8") as f:
+            messages = [line.strip() for line in f.readlines()[-10:]]
+
+    vocab_size = 0
+    if os.path.exists("tokenizer_vocab.txt"):
+        with open("tokenizer_vocab.txt", encoding="utf-8") as f:
+            vocab_size = sum(1 for _ in f)
+
+    return render_template_string("""
+    <html>
+    <head>
+        <title>Ruby Dashboard</title>
+    </head>
+    <body>
+        <h1>🌸 Ruby's Dashboard</h1>
+        <p>Vocabulary Size: {{ vocab_size }}</p>
+
+        <h2>🧠 Recent Daydreams</h2>
+        <ul>
+        {% for dream in dreams %}
+            <li>{{ dream }}</li>
+        {% endfor %}
+        </ul>
+
+        <h2>📨 Recent Messages</h2>
+        <ul>
+        {% for msg in messages %}
+            <li>{{ msg }}</li>
+        {% endfor %}
+        </ul>
+    </body>
+    </html>
+    """, dreams=dreams[::-1], messages=messages[::-1], vocab_size=vocab_size)
+
+
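+# Called from main.py in a daemon thread so the web server never blocks the Discord client.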
+def start_dashboard():
+    app.run(debug=False, host="0.0.0.0", port=5000)
diff --git a/main.py b/main.py
index 085a087..2a10d7c 100644
--- a/main.py
+++ b/main.py
@@ -2,8 +2,10 @@ import discord
import asyncio
import atexit
import os
+import threading
from dotenv import load_dotenv
from datetime import datetime, timedelta
+from dashboard import start_dashboard
from tokenizer import Tokenizer
from model import RubyTrainer
@@ -110,6 +112,7 @@ class Ruby(discord.Client):
 client = None
 try:
     client = Ruby()
+
     def on_exit():
         if client:
             print("[EXIT] Ruby is gracefully shutting down...")
@@ -117,6 +120,8 @@ try:
             client.trainer.daydream(rounds=10)
 
     atexit.register(on_exit)
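+
+    # Serve the Flask dashboard from a background daemon thread so client.run() keeps the main thread.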
+    dashboard_thread = threading.Thread(target=start_dashboard, daemon=True)
+    dashboard_thread.start()
     client.run(TOKEN)
 finally:
     if client is not None:
diff --git a/model.py b/model.py
index 915a467..b78731d 100644
--- a/model.py
+++ b/model.py
@@ -3,6 +3,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 import os
 from datetime import datetime
+from collections import Counter
 
 
 class MiniGPT(nn.Module):
@@ -95,29 +96,32 @@ class RubyTrainer:
         input_ids = torch.tensor([[self.tokenizer.vocab["<START>"]]], dtype=torch.long, device=self.device)
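+        # Count how often each token has been sampled so repeats can be penalized below.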
+        token_freq = Counter()
+
         for _ in range(max_tokens):
             with torch.no_grad():
                 out = self.model(input_ids)
                 logits = out[:, -1, :] / temperature
+                # 💡 Apply repetition penalty: shrink positive logits and push negative
+                # logits further down so reused tokens always become less likely
+                for token_id, freq in token_freq.items():
+                    penalty = 0.7 ** freq  # dampens reused tokens
+                    if logits[0, token_id] > 0:
+                        logits[0, token_id] *= penalty
+                    else:
+                        logits[0, token_id] /= penalty
+
+                probs = F.softmax(logits, dim=-1)
+
                 if top_k > 0:
-                    top_k_logits, top_k_indices = torch.topk(logits, top_k)
-                    probs = F.softmax(top_k_logits, dim=-1)
-                    next_token = top_k_indices[0][torch.multinomial(probs, 1)]
+                    top_k_probs, top_k_indices = torch.topk(probs, top_k)
+                    next_token = top_k_indices[0][torch.multinomial(top_k_probs, 1)]
                 else:
-                    probs = F.softmax(logits, dim=-1)
                     next_token = torch.multinomial(probs, 1)[0]
-                # ⬇️ Fix here: reshape next_token to (1, 1)
+                token_freq[next_token.item()] += 1
                 next_token = next_token.view(1, 1)
                 input_ids = torch.cat([input_ids, next_token], dim=1)
-                if input_ids.size(1) < 5:  # prevent ending too early
-                    logits[0, self.tokenizer.vocab["<END>"]] = float("-inf")
-
                 if next_token.item() == self.tokenizer.vocab["<END>"]:
                     break
-
         token_ids = input_ids.squeeze(0).tolist()[1:]  # skip <START>
         reply_tokens = [tid for tid in token_ids if tid != self.tokenizer.vocab.get("<END>")]
         return self.tokenizer.detokenize(reply_tokens)
@@ -154,7 +158,7 @@ class RubyTrainer:
             thought = self.generate_reply()
             attempts += 1
 
-            if thought and len(thought.strip().split()) >= 4:
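+            # Keep a thought only if it contains at least three distinct words,
+            # which filters out short or highly repetitive generations.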
+            if thought and len(set(thought.lower().split())) >= 3:
                 self.train_on_tokens_from_text(thought)
                 thoughts.append(thought)
diff --git a/state_tracker.py b/state_tracker.py
new file mode 100644
index 0000000..987658c
--- /dev/null
+++ b/state_tracker.py
@@ -0,0 +1,17 @@
+from datetime import datetime
+
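+# In-memory snapshot of Ruby's recent activity: last message time, current activity,
+# and the ten most recent thoughts and training losses.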
+class RubyState:
+    def __init__(self):
+        self.last_message_time = datetime.utcnow()
+        self.current_activity = "Booting up..."
+        self.latest_thoughts = []
+        self.latest_losses = []
+        self.vocab_size = 0
+
+    def log_thought(self, thought):
+        self.latest_thoughts.append((datetime.utcnow(), thought))
+        self.latest_thoughts = self.latest_thoughts[-10:]
+
+    def log_loss(self, value):
+        self.latest_losses.append((datetime.utcnow(), value))
+        self.latest_losses = self.latest_losses[-10:]