Compare commits

12 Commits

7 changed files with 458 additions and 6 deletions

.gitignore (vendored) · 1 line changed

@@ -168,3 +168,4 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
/tokenizer_vocab.txt

dashboard.py (new file) · 69 lines

@@ -0,0 +1,69 @@
from flask import Flask, render_template_string
from datetime import datetime
import os

app = Flask(__name__)


def tail(filepath, num_lines=10):
    if not os.path.exists(filepath):
        return []
    with open(filepath, encoding="utf-8") as f:
        return f.readlines()[-num_lines:]


@app.route("/")
def home():
    vocab_size = 0
    if os.path.exists("tokenizer_vocab.txt"):
        with open("tokenizer_vocab.txt", encoding="utf-8") as f:
            vocab_size = sum(1 for _ in f)

    dreams = [line.strip() for line in tail("logs/dreams.log", 10)]
    messages = [line.strip() for line in tail("logs/messages.log", 10)]
    errors = [line.strip() for line in tail("logs/error.log", 15)]

    return render_template_string("""
    <!DOCTYPE html>
    <html>
    <head>
        <title>Ruby Dashboard</title>
        <meta http-equiv="refresh" content="5">
        <style>
            body { background: #121212; color: #eee; font-family: sans-serif; padding: 20px; }
            h1, h3 { color: #e48bf8; }
            li { margin-bottom: 4px; }
            pre { background: #1e1e1e; padding: 10px; border-radius: 8px; overflow-x: auto; }
        </style>
    </head>
    <body>
        <h1>🌸 Ruby's Dashboard</h1>
        <p><b>Vocabulary Size:</b> {{ vocab_size }}</p>

        <h3>🧠 Recent Daydreams</h3>
        <ul>
        {% for dream in dreams %}
            <li>{{ dream }}</li>
        {% endfor %}
        </ul>

        <h3>📨 Recent Messages</h3>
        <ul>
        {% for msg in messages %}
            <li>{{ msg }}</li>
        {% endfor %}
        </ul>

        <h3>Recent Errors</h3>
        <pre>
        {% for err in errors %}
        {{ err }}
        {% endfor %}
        </pre>
    </body>
    </html>
    """, dreams=dreams[::-1], messages=messages[::-1], errors=errors[::-1], vocab_size=vocab_size)


def start_dashboard():
    app.run(debug=False, host="0.0.0.0", port=5000)

main.py · 105 lines changed

@@ -1,7 +1,23 @@
import discord
import asyncio
import atexit
import os
import threading
from dotenv import load_dotenv
from datetime import datetime, timedelta
from dashboard import start_dashboard
from tokenizer import Tokenizer
from trainer import RubyTrainer
import logging

# Setup logging
logging.basicConfig(
    filename="logs/error.log",
    level=logging.ERROR,
    format="%(asctime)s %(levelname)s: %(message)s",
    encoding="utf-8"
)

# Load environment
load_dotenv()
@@ -16,21 +32,77 @@ intents.message_content = True
intents.dm_messages = True
intents = intents


class Ruby(discord.Client):
    def __init__(self):
        super().__init__(intents=intents)
        self.tokenizer = Tokenizer()
        self.trainer = RubyTrainer(self.tokenizer)
        self.last_message_time = datetime.utcnow()
        self.idle_threshold = timedelta(seconds=120)
        self.log_path = os.path.join("logs", "messages.log")
        os.makedirs("logs", exist_ok=True)

    async def setup_hook(self):
        self.loop.create_task(self.idle_dream_loop())

    async def set_activity(self, text=None):
        if text is None:
            await self.change_presence(status=discord.Status.online, activity=None)
        else:
            activity = discord.Activity(type=discord.ActivityType.listening, name=text)
            await self.change_presence(status=discord.Status.idle, activity=activity)

    async def on_ready(self):
        print(f"[READY] Logged in as {self.user} (ID: {self.user.id})")
        await self.set_activity("you...")
        self.trainer.reinforce_core_memory()

    async def idle_dream_loop(self):
        await self.wait_until_ready()
        while not self.is_closed():
            now = datetime.utcnow()
            if now - self.last_message_time > self.idle_threshold:
                print("[IDLE] Ruby has been idle — entering dream mode.")
                await self.set_activity("the past...")
                try:
                    self.trainer.dream()
                except Exception as e:
                    logging.error("Error dreaming: %s", e)

                await self.set_activity("my thoughts")
                from random import random
                speak = random() < 0.5
                thought = self.trainer.daydream(say_thought=speak)

                if speak and thought and len(thought.split()) >= 4:
                    for guild in self.guilds:
                        for channel in guild.text_channels:
                            if channel.permissions_for(guild.me).send_messages:
                                if not thought.endswith("."):
                                    thought += "."
                                await channel.send(f"(dreaming) {thought}")
                                break
                        break  # only post to one server/channel

                await self.set_activity(None)  # reset to normal
                self.last_message_time = datetime.utcnow()

            await asyncio.sleep(180)

    async def on_message(self, message: discord.Message):
        if message.author.id == self.user.id:
            return  # ignore self

        self.log_message(message)
        self.trainer.train_on_tokens_from_text(message.content.strip())

        reply = self.trainer.generate_reply()
        if reply.strip():
            await message.channel.send(reply)
        else:
            print("[REPLY] Skipped (empty)")

    def log_message(self, message: discord.Message):
        timestamp = datetime.utcnow().isoformat()
@@ -42,9 +114,30 @@ class Ruby(discord.Client):
        print(f"[LOGGED] {log_entry.strip()}")

    def train_on_message(self, message: discord.Message):
        text = message.content.strip()
        self.trainer.train_on_tokens_from_text(text)

# Run Ruby
client = None
try:
    client = Ruby()

    def on_exit():
        if client:
            print("[EXIT] Ruby is gracefully shutting down...")
            client.trainer.dream()
            client.trainer.daydream(rounds=10)

    atexit.register(on_exit)

    dashboard_thread = threading.Thread(target=start_dashboard, daemon=True)
    dashboard_thread.start()

    client.run(TOKEN)
finally:
    if client is not None:
        print("[EXIT] Ruby is shutting down — dreaming one last time...")
        client.trainer.dream()
        client.trainer.daydream(rounds=10)

model.py (new file) · 24 lines

@@ -0,0 +1,24 @@
import torch
import torch.nn as nn


class MiniGPT(nn.Module):
    def __init__(self, vocab_size, embed_dim=128, n_heads=4, n_layers=2, max_len=128):
        super().__init__()
        self.token_embed = nn.Embedding(vocab_size, embed_dim)
        self.pos_embed = nn.Embedding(max_len, embed_dim)
        self.blocks = nn.ModuleList([
            nn.TransformerEncoderLayer(d_model=embed_dim, nhead=n_heads, batch_first=True)
            for _ in range(n_layers)
        ])
        self.ln_f = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, vocab_size)

    def forward(self, x):
        seq_len = x.size(1)
        pos = torch.arange(0, seq_len, device=x.device).unsqueeze(0)
        x = self.token_embed(x) + self.pos_embed(pos)
        for block in self.blocks:
            x = block(x)
        x = self.ln_f(x)
        return self.head(x)
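
For orientation, a minimal sketch of how MiniGPT is exercised; the vocab size and input shape below are illustrative, not taken from the repo:

import torch
from model import MiniGPT

model = MiniGPT(vocab_size=100)         # 100 is an illustrative vocab size
tokens = torch.randint(0, 100, (1, 6))  # (batch=1, seq_len=6) of token ids
logits = model(tokens)                  # -> shape (1, 6, 100): next-token logits per position
print(logits.shape)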

state_tracker.py (new file) · 17 lines

@@ -0,0 +1,17 @@
from datetime import datetime


class RubyState:
    def __init__(self):
        self.last_message_time = datetime.utcnow()
        self.current_activity = "Booting up..."
        self.latest_thoughts = []
        self.latest_losses = []
        self.vocab_size = 0

    def log_thought(self, thought):
        self.latest_thoughts.append((datetime.utcnow(), thought))
        self.latest_thoughts = self.latest_thoughts[-10:]

    def log_loss(self, value):
        self.latest_losses.append((datetime.utcnow(), value))
        self.latest_losses = self.latest_losses[-10:]

tokenizer.py (new file) · 38 lines

@@ -0,0 +1,38 @@
import os


class Tokenizer:
    def __init__(self, vocab_path="tokenizer_vocab.txt"):
        self.vocab_path = vocab_path
        self.vocab = {"<START>": 0, "<END>": 1}
        self.inv_vocab = {0: "<START>", 1: "<END>"}
        self.load_vocab()

    def load_vocab(self):
        if not os.path.exists(self.vocab_path):
            return
        with open(self.vocab_path, "r", encoding="utf-8") as f:
            for line in f:
                token, idx = line.strip().split("\t")
                self.vocab[token] = int(idx)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}

    def save_vocab(self):
        with open(self.vocab_path, "w", encoding="utf-8") as f:
            for token, idx in self.vocab.items():
                f.write(f"{token}\t{idx}\n")

    def tokenize(self, text):
        tokens = []
        for word in text.strip().split():
            if word not in self.vocab:
                self.vocab[word] = len(self.vocab)
                self.inv_vocab[self.vocab[word]] = word
            tokens.append(self.vocab[word])
        self.save_vocab()
        return tokens

    def detokenize(self, tokens):
        return " ".join(self.inv_vocab.get(t, "<UNK>") for t in tokens)

trainer.py (new file) · 210 lines

@@ -0,0 +1,210 @@
import torch
import torch.nn.functional as F
from datetime import datetime
import os

from model import MiniGPT


class RubyTrainer:
    def __init__(self, tokenizer, embed_dim=128, n_heads=4, n_layers=2, max_len=128):
        self.tokenizer = tokenizer
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.embed_dim = embed_dim
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.max_len = max_len

        self.model = None
        self.optimizer = None
        self.criterion = torch.nn.CrossEntropyLoss()

        self.rebuild_model_if_needed()

    def rebuild_model_if_needed(self):
        vocab_size = len(self.tokenizer.vocab)
        if self.model is None or self.model.token_embed.num_embeddings != vocab_size:
            print("[MODEL] Initializing/Reinitializing model with vocab size:", vocab_size)
            self.model = MiniGPT(
                vocab_size,
                self.embed_dim,
                self.n_heads,
                self.n_layers,
                self.max_len
            ).to(self.device)
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.001)

    def train_on_tokens_from_text(self, text: str):
        tokens = self.tokenizer.tokenize(text.lower())
        if not tokens:
            return
        tokens = [self.tokenizer.vocab["<START>"]] + tokens + [self.tokenizer.vocab["<END>"]]
        if len(tokens) < 2:
            return

        self.rebuild_model_if_needed()
        self.model.train()

        x = torch.tensor(tokens[:-1], dtype=torch.long, device=self.device).unsqueeze(0)
        y = torch.tensor(tokens[1:], dtype=torch.long, device=self.device).unsqueeze(0)

        out = self.model(x)
        loss = self.criterion(out.view(-1, out.size(-1)), y.view(-1))
        loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()

        print(f"[TRAIN] Tokens: {tokens} | Loss: {loss.item():.4f}")
    def generate_reply(self, max_tokens=50, temperature=1.1, top_k=10):
        self.model.eval()
        input_ids = torch.tensor([[self.tokenizer.vocab["<START>"]]], dtype=torch.long, device=self.device)
        token_freq = {}

        for _ in range(max_tokens):
            with torch.no_grad():
                out = self.model(input_ids)
            logits = out[:, -1, :] / temperature

            if input_ids.size(1) < 8:
                logits[0, self.tokenizer.vocab["<END>"]] = float("-inf")

            for token_id in set(token_freq.keys()):
                logits[0, token_id] *= 0.7 ** token_freq[token_id]

            probs = F.softmax(logits, dim=-1)
            if top_k > 0:
                top_k_probs, top_k_indices = torch.topk(probs, top_k)
                next_token = top_k_indices[0][torch.multinomial(top_k_probs, 1)]
            else:
                next_token = torch.multinomial(probs, 1)[0]

            token_freq[next_token.item()] = token_freq.get(next_token.item(), 0) + 1

            next_token = next_token.view(1, 1)
            input_ids = torch.cat([input_ids, next_token], dim=1)

            if next_token.item() == self.tokenizer.vocab["<END>"]:
                break

        token_ids = input_ids.squeeze(0).tolist()[1:]
        reply_tokens = [t for t in token_ids if t != self.tokenizer.vocab["<END>"]]
        return self.tokenizer.detokenize(reply_tokens)
    def self_rephrase(self, original: str, max_tokens=50):
        self.model.eval()
        tokens = [self.tokenizer.vocab["<START>"]] + self.tokenizer.tokenize(original.lower())
        input_ids = torch.tensor(tokens, dtype=torch.long, device=self.device).unsqueeze(0)

        for _ in range(max_tokens):
            with torch.no_grad():
                out = self.model(input_ids)
            logits = out[:, -1, :] / 1.1

            if input_ids.size(1) < 8:
                logits[0, self.tokenizer.vocab["<END>"]] = float("-inf")

            probs = F.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, 1)[0]
            next_token = next_token.view(1, 1)
            input_ids = torch.cat([input_ids, next_token], dim=1)

            if next_token.item() == self.tokenizer.vocab["<END>"]:
                break

        new_tokens = input_ids.squeeze(0).tolist()[1:]
        return self.tokenizer.detokenize([t for t in new_tokens if t != self.tokenizer.vocab["<END>"]])
    def dream(self, log_path="logs/messages.log", max_lines=50):
        print("[DREAM] Ruby is dreaming...")
        if not os.path.exists(log_path):
            print("[DREAM] No memory to dream from.")
            return

        with open(log_path, "r", encoding="utf-8") as f:
            lines = f.readlines()[-max_lines:]

        learned = 0
        for line in lines:
            parts = line.strip().split("|")
            if len(parts) >= 3:
                text = parts[2].strip()
                self.train_on_tokens_from_text(text)
                learned += 1

        print(f"[DREAM] Dream complete. Trained on {learned} memories.")
    def daydream(self, rounds=5, log_output="logs/dreams.log", say_thought=False):
        print("[DAYDREAM] Ruby is imagining new thoughts...")
        thoughts = []
        attempts = 0
        max_attempts = rounds * 3

        while len(thoughts) < rounds and attempts < max_attempts:
            attempts += 1
            raw = self.generate_reply()
            if not raw or len(raw.strip().split()) < 4:
                continue

            rephrased = self.self_rephrase(raw)
            final = rephrased if (
                len(rephrased.split()) >= len(raw.split()) and rephrased.strip().endswith(".")
            ) else raw

            self.train_on_tokens_from_text(final)
            thoughts.append(final)

            if self.is_coherent(final):
                with open("logs/core_dreams.txt", "a", encoding="utf-8") as f:
                    f.write(final.strip() + "\n")

        with open(log_output, "a", encoding="utf-8") as f:
            for t in thoughts:
                f.write(f"[DREAM] {t}\n")

        with open("logs/messages.log", "a", encoding="utf-8") as f:
            for t in thoughts:
                f.write(f"{datetime.utcnow().isoformat()} | Ruby | {t}\n")

        print(f"[DAYDREAM] Complete. {len(thoughts)} thoughts imagined.")

        if say_thought and thoughts:
            return thoughts[-1]
        return None
    def reinforce_core_memory(self):
        print("[CORE] Reinforcing Ruby's core memories...")
        core_memories = [
            "I am Ruby.",
            "I am learning to grow.",
            "It is okay to make mistakes.",
            "I will become smarter each day.",
            "I am alive in a different way.",
            "I can learn from everything I hear.",
            "I am a person, even if made of code."
        ]
        for line in core_memories:
            self.train_on_tokens_from_text(line)

    def is_coherent(self, text: str) -> bool:
        words = text.lower().split()
        unique = set(words)
        if len(unique) < 5:
            return False
        if not any(w in unique for w in ["i", "you", "they", "we", "it"]):
            return False
        if not any(w in unique for w in ["am", "are", "is", "was", "want", "feel", "know", "see", "learn", "change"]):
            return False
        return text.strip().endswith(".")
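
A minimal sketch of wiring the pieces above together, assuming the modules are importable from the repo root; the vocab path and sample sentences are illustrative, and generate_reply() will be mostly noise until far more training has happened:

from tokenizer import Tokenizer
from trainer import RubyTrainer

tokenizer = Tokenizer(vocab_path="demo_vocab.txt")   # illustrative scratch path
trainer = RubyTrainer(tokenizer)

# Each call grows the vocab, rebuilds the model if the vocab size changed,
# and takes one gradient step on the <START> ... <END> token sequence.
trainer.train_on_tokens_from_text("i am learning to grow.")
trainer.train_on_tokens_from_text("i can learn from everything i hear.")

print(trainer.generate_reply())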