diff --git a/.gitignore b/.gitignore
index 79d8774..4f5689d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -174,4 +174,5 @@ cython_debug/
/data/memory/context.json
/data/memory/dreams.json
data/memory/brainmap.json
-/data/memory/vocab.json
\ No newline at end of file
+/data/memory/vocab.json
+data/memory/book_progress.json
diff --git a/dashboard/dashboard.py b/dashboard/dashboard.py
index 8884fe0..6ede7f0 100644
--- a/dashboard/dashboard.py
+++ b/dashboard/dashboard.py
@@ -5,6 +5,7 @@ from model.memory import load_dreams
from model.tokenizer import Tokenizer
from model.abstraction import cluster_vocab
from model.memory import load_dreams
+from model.scheduler import get_time_until_next_action, get_next_action_label
from context.context import load_context
import json
import os
@@ -37,13 +38,16 @@ def index():
top_dreams = dreams[:5]
memory_size = len(load_context())
loss_data = load_loss_data()
- remaining = max(0, int(next_cycle_time - time.time()))
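+    # Countdown and label now come from the shared scheduler (model/scheduler.py).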
+ next_cycle = get_time_until_next_action()
+ next_action_label = get_next_action_label()
+
return render_template("index.html",
vocab_size=len(tokenizer.vocab),
top_dreams=top_dreams,
memory_size=memory_size,
loss_data=loss_data,
- next_cycle=remaining)
+ next_cycle=next_cycle,
+ next_action_label=next_action_label)
@app.route("/growth")
diff --git a/dashboard/templates/brainmap.html b/dashboard/templates/brainmap.html
index 5c6fba7..381ef1d 100644
--- a/dashboard/templates/brainmap.html
+++ b/dashboard/templates/brainmap.html
@@ -2,6 +2,7 @@
 <title>Ruby's Brain Map</title>
diff --git a/dashboard/templates/index.html b/dashboard/templates/index.html
--- a/dashboard/templates/index.html
+++ b/dashboard/templates/index.html
 <h1>Ruby is Running 🧠</h1>
-<h2>⏳ Next Cycle Countdown</h2>
+<h2>⏳ Next Cycle</h2>
+<p>Next: {{ next_action_label }}</p>
 <p>{{ next_cycle }} seconds</p>
diff --git a/main.py b/main.py
index 76e11f1..69ed472 100644
--- a/main.py
+++ b/main.py
@@ -8,8 +8,10 @@ from model.brain import generate_response
from model.cleanup import full_cleanup
from model.dream_replay import replay_dreams
from model.rehearsal import simulate_conversation
+from model.scheduler import set_next_action
from reader.reader import read_books_forever
from dashboard.dashboard import run_dashboard
+import threading
load_dotenv()
TOKEN = os.getenv("DISCORD_TOKEN")
@@ -43,27 +45,38 @@ threading.Thread(target=run_dashboard, daemon=True).start()
async def background_cleanup_loop():
while True:
full_cleanup()
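+        # Publish the next wakeup so the dashboard countdown stays accurate.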
+ set_next_action(300, "Cleaning up")
await asyncio.sleep(300) # 5 minutes
async def dream_replay_loop():
while True:
replay_dreams()
+ set_next_action(900, "Dreaming new dreams")
await asyncio.sleep(900) # Replay every 15 minutes
async def rehearsal_loop():
while True:
simulate_conversation()
+ set_next_action(1200, "Practicing Conversations")
await asyncio.sleep(1200) # Every 20 minutes
-# Launch background tasks
-loop = asyncio.get_event_loop()
-loop.create_task(read_books_forever()) # Book reader task
-loop.create_task(background_cleanup_loop())
-loop.create_task(dream_replay_loop())
-loop.create_task(rehearsal_loop())
+# Start Ruby's Brain Loops in a separate thread
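+# client.run() blocks and owns the main thread, so Ruby's asyncio tasks get
+# their own event loop on a daemon thread.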
+def start_brain_loops():
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+
+ loop.create_task(read_books_forever())
+ loop.create_task(dream_replay_loop())
+ loop.create_task(background_cleanup_loop())
+ loop.create_task(rehearsal_loop())
+
+ loop.run_forever()
+
+
+threading.Thread(target=start_brain_loops, daemon=True).start()
# Launch Discord bot (blocking)
client.run(TOKEN)
diff --git a/model/dream_replay.py b/model/dream_replay.py
index 34a3e76..4f93380 100644
--- a/model/dream_replay.py
+++ b/model/dream_replay.py
@@ -1,9 +1,12 @@
import random
from model.memory import load_dreams
from model.trainer import train_on_message
+from model.dynamic_expand import expand_model_if_needed
def replay_dreams():
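+    # Expand first: replayed dreams may contain tokens minted after the model was built.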
+ expand_model_if_needed()
+
dreams = load_dreams()
if not dreams:
return
diff --git a/model/dynamic_expand.py b/model/dynamic_expand.py
index f6f8e92..5b69c68 100644
--- a/model/dynamic_expand.py
+++ b/model/dynamic_expand.py
@@ -6,18 +6,17 @@ optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
def get_optimizer():
- global optimizer
return optimizer
def expand_model_if_needed():
global model, optimizer
- current_vocab_size = len(tokenizer.vocab) + 10 # Buffer
+    current_vocab_size = len(tokenizer.vocab) + 10  # small buffer so we expand less often
old_vocab_size = model.head.out_features
if current_vocab_size <= old_vocab_size:
- return
+ return # No expansion needed
print(f"Expanding model from {old_vocab_size} -> {current_vocab_size}")
@@ -31,6 +30,6 @@ def expand_model_if_needed():
param.copy_(old_state[name])
model = new_model
- opt = get_optimizer()
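+    # Rebuild the optimizer around the new parameters (this resets Adam's moment state).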
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
- print("Model expanded and optimizer rebuilt.")
+ print("Expansion complete.")
diff --git a/model/rehearsal.py b/model/rehearsal.py
index 088b0f1..0515ef5 100644
--- a/model/rehearsal.py
+++ b/model/rehearsal.py
@@ -1,13 +1,18 @@
import torch
from model.brain import model, tokenizer, DEVICE
from model.trainer import train_on_message
+from model.dynamic_expand import expand_model_if_needed
def simulate_conversation():
- seed = torch.randint(0, tokenizer.next_id, (1, 5), device=DEVICE)
- output = model(seed)
- preds = torch.argmax(output, dim=-1).squeeze().tolist()
+ expand_model_if_needed()
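+    # Generate in eval mode; train_on_message() flips the model back to train().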
+ model.eval()
+ seed = torch.randint(0, tokenizer.next_id, (1, 5), device=DEVICE)
+    seed = seed[:, -128:]  # Safety clamp, mirroring the 128-token limit in trainer.py
+ output = model(seed)
+
+ preds = torch.argmax(output, dim=-1).squeeze().tolist()
if isinstance(preds, int):
preds = [preds]
diff --git a/model/scheduler.py b/model/scheduler.py
new file mode 100644
index 0000000..269b5b5
--- /dev/null
+++ b/model/scheduler.py
@@ -0,0 +1,19 @@
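+# Shared countdown state for the dashboard. Each background loop publishes its
+# next wakeup via set_next_action(); last writer wins, so the dashboard shows
+# the most recently scheduled action, not necessarily the soonest one.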
+import time
+
+_next_action_time = time.time() + 60 # default 1 minute from now
+_next_action_label = "Waiting"
+
+
+def set_next_action(seconds_from_now: int, label: str = "Thinking"):
+ global _next_action_time, _next_action_label
+ _next_action_time = time.time() + seconds_from_now
+ _next_action_label = label
+
+
+def get_time_until_next_action() -> int:
+ remaining = int(_next_action_time - time.time())
+ return max(0, remaining)
+
+
+def get_next_action_label() -> str:
+ return _next_action_label
diff --git a/model/trainer.py b/model/trainer.py
index 22e6623..155298f 100644
--- a/model/trainer.py
+++ b/model/trainer.py
@@ -1,9 +1,8 @@
import torch
import time
-from model.brain_state import model, tokenizer, DEVICE,loss_fn
-from context.context import add_to_context, get_recent_context
from model.dynamic_expand import expand_model_if_needed, get_optimizer
-from model.brainmap import update_brainmap
+from model.brain_state import model, tokenizer, DEVICE, loss_fn
+from context.context import add_to_context, get_recent_context
LOSS_FILE = "data/logs/loss.log"
@@ -15,6 +14,7 @@ def log_loss(value: float):
def train_on_message(text: str):
expand_model_if_needed()
+
model.train()
context_texts = get_recent_context(3)
augmented_text = " ".join(context_texts + [text])
@@ -23,16 +23,23 @@ def train_on_message(text: str):
if len(tokens) < 2:
return
- tokens = tokens[:128]
- words = tokenizer.detokenize(tokens).split()
- update_brainmap(words)
+    # ✅ Clamp to model's known vocab: the tokenizer may have grown since the
+    # last expansion, and out-of-range ids would crash the forward pass
+ max_token_id = model.head.out_features - 1
+ tokens = [t for t in tokens if t <= max_token_id]
+
+ if len(tokens) < 2:
+ return # after filtering, too short to train
+
+ tokens = tokens[:128] # safety clamp
input_tensor = torch.tensor(tokens[:-1], dtype=torch.long, device=DEVICE).unsqueeze(0)
target_tensor = torch.tensor(tokens[1:], dtype=torch.long, device=DEVICE).unsqueeze(0)
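+    # Fetch the optimizer fresh: expand_model_if_needed() may have rebuilt it.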
+ opt = get_optimizer()
+
output = model(input_tensor)
+
loss = loss_fn(output.view(-1, output.size(-1)), target_tensor.view(-1))
- opt = get_optimizer()
opt.zero_grad()
loss.backward()
opt.step()
diff --git a/reader/reader.py b/reader/reader.py
index be7dc2c..d8b0166 100644
--- a/reader/reader.py
+++ b/reader/reader.py
@@ -1,7 +1,9 @@
import os
import asyncio
from model.trainer import train_on_message
+from model.scheduler import set_next_action
from reader.filter import is_valid_line
+import json
BOOK_DIR = "data/books"
PROGRESS_FILE = "data/memory/book_progress.json"
@@ -45,4 +47,5 @@ async def read_books_forever():
if is_valid_line(line):
train_on_message(line)
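+        # Keep the dashboard countdown ticking while Ruby reads.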
+ set_next_action(READ_DELAY, "Reading")
await asyncio.sleep(READ_DELAY)