Compare commits
No commits in common. "Dev-Stage1-Start_BOOK_READER" and "main" have entirely different histories.
Dev-Stage1-Start_BOOK_READER ... main

.gitignore (vendored, 5 lines changed)
@@ -168,8 +168,3 @@ cython_debug/
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

/tokenizer_vocab.txt
/logs/core_dreams.txt
/logs/best_dream.txt
/.vscode/launch.json
/books
dashboard.py (80 lines changed)
@@ -1,80 +0,0 @@
from flask import Flask, render_template_string
from datetime import datetime
import os

app = Flask(__name__)


def tail(filepath, num_lines=10):
    if not os.path.exists(filepath):
        return []
    with open(filepath, encoding="utf-8") as f:
        return f.readlines()[-num_lines:]


def get_best_dream():
    if not os.path.exists("logs/best_dream.txt"):
        return "No high-scoring dream yet."
    with open("logs/best_dream.txt", encoding="utf-8") as f:
        return f.read().strip()


@app.route("/")
def home():
    vocab_size = 0
    if os.path.exists("tokenizer_vocab.txt"):
        with open("tokenizer_vocab.txt", encoding="utf-8") as f:
            vocab_size = sum(1 for _ in f)

    dreams = [line.strip() for line in tail("logs/dreams.log", 10)]
    messages = [line.strip() for line in tail("logs/messages.log", 10)]
    errors = [line.strip() for line in tail("logs/error.log", 15)]
    best_dream = get_best_dream()

    return render_template_string("""
    <!DOCTYPE html>
    <html>
    <head>
        <title>Ruby Dashboard</title>
        <meta http-equiv="refresh" content="5">
        <style>
            body { background: #121212; color: #eee; font-family: sans-serif; padding: 20px; }
            h1, h3 { color: #e48bf8; }
            li { margin-bottom: 4px; }
            pre { background: #1e1e1e; padding: 10px; border-radius: 8px; overflow-x: auto; }
        </style>
    </head>
    <body>
        <h1>🌸 Ruby's Dashboard</h1>
        <p><b>Vocabulary Size:</b> {{ vocab_size }}</p>
        <h3>🏆 Highest Scoring Dream</h3>
        <p><b>{{ best_dream }}</b></p>

        <h3>🧠 Recent Daydreams</h3>
        <ul>
            {% for dream in dreams %}
            <li>{{ dream }}</li>
            {% endfor %}
        </ul>

        <h3>📨 Recent Messages</h3>
        <ul>
            {% for msg in messages %}
            <li>{{ msg }}</li>
            {% endfor %}
        </ul>

        <h3>⚠️ Recent Errors</h3>
        <pre>
        {% for err in errors %}
        {{ err }}
        {% endfor %}
        </pre>

    </body>
    </html>
    """, best_dream=best_dream, dreams=dreams[::-1], messages=messages[::-1], errors=errors[::-1], vocab_size=vocab_size)


def start_dashboard():
    app.run(debug=False, host="0.0.0.0", port=5000)
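
Reviewer note: the deleted dashboard above can also be exercised on its own; a minimal standalone-run sketch (an assumption, not part of the diff) follows. Missing log files are fine because tail() and get_best_dream() already guard for them.

    # sketch: run the Flask dashboard directly instead of via main.py's daemon thread
    from dashboard import start_dashboard

    if __name__ == "__main__":
        start_dashboard()  # serves http://0.0.0.0:5000, page auto-refreshes every 5 s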
main.py (114 lines changed)
@@ -1,27 +1,7 @@
import discord
import asyncio
import atexit
import os
import threading
from dotenv import load_dotenv
from datetime import datetime, timedelta
from dashboard import start_dashboard
from tokenizer import Tokenizer
from trainer import RubyTrainer
from reader import BookReader
import logging

# Setup logging
logging.basicConfig(
    filename="logs/error.log",
    level=logging.ERROR,
    format="%(asctime)s %(levelname)s: %(message)s",
    encoding="utf-8"
)

# Disable Flask, Werkzeug, and other noisy loggers
for noisy_logger in ["werkzeug", "flask", "flask.app"]:
    logging.getLogger(noisy_logger).setLevel(logging.CRITICAL)
from datetime import datetime

# Load environment
load_dotenv()
@@ -36,82 +16,21 @@ intents.message_content = True
intents.dm_messages = True
intents = intents


class Ruby(discord.Client):
    def __init__(self):
        super().__init__(intents=intents)
        self.tokenizer = Tokenizer()
        self.trainer = RubyTrainer(self.tokenizer)
        self.reader = BookReader(trainer=self.trainer,
                                 book_path="books//wizard_of_oz.txt",  # or whatever book you want
                                 interval=180  # read every 3 minutes (adjust if needed)
                                 )
        self.last_message_time = datetime.utcnow()
        self.idle_threshold = timedelta(seconds=120)
        self.log_path = os.path.join("logs", "messages.log")
        os.makedirs("logs", exist_ok=True)

    async def setup_hook(self):
        self.loop.create_task(self.reader.start_reading())
        self.loop.create_task(self.idle_dream_loop())

    async def set_activity(self, text=None):
        if text is None:
            await self.change_presence(status=discord.Status.online, activity=None)
        else:
            activity = discord.Activity(type=discord.ActivityType.listening, name=text)
            await self.change_presence(status=discord.Status.idle, activity=activity)

    async def on_ready(self):
        print(f"[READY] Logged in as {self.user} (ID: {self.user.id})")
        await self.set_activity("you...")
        self.trainer.reinforce_core_memory()

    async def idle_dream_loop(self):
        await self.wait_until_ready()
        while not self.is_closed():
            now = datetime.utcnow()
            if now - self.last_message_time > self.idle_threshold:
                print("[IDLE] Ruby has been idle — entering dream mode.")

                await self.set_activity("the past...")
                try:
                    self.trainer.dream()
                except Exception as e:
                    logging.error("Error dreaming: %s", e)

                await self.set_activity("my thoughts")
                from random import random
                speak = random() < 0.5
                thought = self.trainer.daydream(say_thought=speak)

                if speak and thought and len(thought.split()) >= 4:
                    for guild in self.guilds:
                        for channel in guild.text_channels:
                            if channel.permissions_for(guild.me).send_messages:
                                if not thought.endswith("."):
                                    thought += "."
                                await channel.send(f"(dreaming) {thought}")
                                break
                        break  # only post to one server/channel

                await self.set_activity(None)  # reset to normal
                self.last_message_time = datetime.utcnow()

            await asyncio.sleep(180)

    async def on_message(self, message: discord.Message):
        if message.author.id == self.user.id:
            return
            return  # ignore self

        self.log_message(message)
        self.trainer.train_on_tokens_from_text(message.content.strip())

        reply = self.trainer.generate_reply()
        if reply.strip():
            await message.channel.send(reply)
        else:
            print("[REPLY] Skipped (empty)")
        self.train_on_message(message)

    def log_message(self, message: discord.Message):
        timestamp = datetime.utcnow().isoformat()
@@ -123,30 +42,9 @@ class Ruby(discord.Client):
        print(f"[LOGGED] {log_entry.strip()}")

    def train_on_message(self, message: discord.Message):
        text = message.content.strip()
        self.trainer.train_on_tokens_from_text(text)
        token_tensor = torch.tensor(tokens, dtype=torch.long)
        loss = train_on_tokens(self.model, tokens, self.optimizer, self.criterion, device="cpu")
        print(f"[TRAIN] Tokens: {tokens} | Loss: {loss:.4f}")

        print(f"[TRAIN] Simulating training on: \"{message.content.strip()}\"")

# Run Ruby
client = None
try:
    client = Ruby()

    def on_exit():
        if client:
            print("[EXIT] Ruby is gracefully shutting down...")
            client.trainer.dream()
            client.trainer.daydream(rounds=10)

    atexit.register(on_exit)
    dashboard_thread = threading.Thread(target=start_dashboard, daemon=True)
    dashboard_thread.start()
    client.run(TOKEN)
finally:
    if client is not None:
        print("[EXIT] Ruby is shutting down — dreaming one last time...")
        client.trainer.dream()
        client.trainer.daydream(rounds=10)
client = Ruby()
client.run(TOKEN)
model.py (24 lines changed)
@@ -1,24 +0,0 @@
import torch
import torch.nn as nn


class MiniGPT(nn.Module):
    def __init__(self, vocab_size, embed_dim=128, n_heads=4, n_layers=2, max_len=128):
        super().__init__()
        self.token_embed = nn.Embedding(vocab_size, embed_dim)
        self.pos_embed = nn.Embedding(max_len, embed_dim)
        self.blocks = nn.ModuleList([
            nn.TransformerEncoderLayer(d_model=embed_dim, nhead=n_heads, batch_first=True)
            for _ in range(n_layers)
        ])
        self.ln_f = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, vocab_size)

    def forward(self, x):
        seq_len = x.size(1)
        pos = torch.arange(0, seq_len, device=x.device).unsqueeze(0)
        x = self.token_embed(x) + self.pos_embed(pos)
        for block in self.blocks:
            x = block(x)
        x = self.ln_f(x)
        return self.head(x)
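
Reviewer note: a quick shape check for the MiniGPT above, a sketch only (assumes torch is installed and model.py is importable; the numbers are illustrative):

    import torch
    from model import MiniGPT

    model = MiniGPT(vocab_size=100)      # defaults: embed_dim=128, n_heads=4, n_layers=2, max_len=128
    x = torch.randint(0, 100, (1, 16))   # one sequence of 16 token ids
    logits = model(x)                    # next-token logits per position
    print(logits.shape)                  # expected: torch.Size([1, 16, 100])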
reader.py (53 lines changed)
@@ -1,53 +0,0 @@
import os
import asyncio
from datetime import datetime


class BookReader:
    def __init__(self, trainer, book_path, state_path="readstate.txt", log_path="logs/read.log", interval=180):
        self.trainer = trainer
        self.book_path = book_path
        self.state_path = state_path
        self.log_path = log_path
        self.interval = interval  # seconds between reading cycles
        self.current_line = 0
        os.makedirs(os.path.dirname(self.log_path), exist_ok=True)

        if os.path.exists(self.state_path):
            try:
                with open(self.state_path, "r", encoding="utf-8") as f:
                    self.current_line = int(f.read().strip())
            except Exception:
                self.current_line = 0

    def _save_state(self):
        with open(self.state_path, "w", encoding="utf-8") as f:
            f.write(str(self.current_line))

    def _log_read(self, text: str, score: float, tag: str = "Book"):
        with open(self.log_path, "a", encoding="utf-8") as f:
            f.write(f"[{datetime.utcnow().isoformat()}] ({tag}) {score:.2f} | {text.strip()}\n")

    async def start_reading(self):
        if not os.path.exists(self.book_path):
            print(f"[BOOK] File not found: {self.book_path}")
            return

        with open(self.book_path, "r", encoding="utf-8") as f:
            lines = f.readlines()

        print(f"[BOOK] Starting to read {self.book_path} from line {self.current_line}...")

        while self.current_line < len(lines):
            passage = lines[self.current_line].strip()

            if len(passage.split()) >= 5:
                score = self.trainer.score_sentence(passage)
                if self.trainer.is_reinforceable(passage) and score >= 2.5:
                    self.trainer.train_on_tokens_from_text(passage)
                    self._log_read(passage, score)

            self.current_line += 1
            self._save_state()
            await asyncio.sleep(self.interval)

        print("[BOOK] Finished reading the book.")
@@ -1,18 +0,0 @@
from datetime import datetime


class RubyState:
    def __init__(self):
        self.last_message_time = datetime.utcnow()
        self.current_activity = "Booting up..."
        self.latest_thoughts = []
        self.latest_losses = []
        self.vocab_size = 0

    def log_thought(self, thought):
        self.latest_thoughts.append((datetime.utcnow(), thought))
        self.latest_thoughts = self.latest_thoughts[-10:]

    def log_loss(self, value):
        self.latest_losses.append((datetime.utcnow(), value))
        self.latest_losses = self.latest_losses[-10:]
tokenizer.py (39 lines changed)
@@ -1,39 +0,0 @@
import os


class Tokenizer:
    def __init__(self, vocab_path="tokenizer_vocab.txt"):
        self.vocab_path = vocab_path
        self.vocab = {"<START>": 0, "<END>": 1}
        self.inv_vocab = {0: "<START>", 1: "<END>"}
        self.load_vocab()

    def load_vocab(self):
        if not os.path.exists(self.vocab_path):
            return
        with open(self.vocab_path, "r", encoding="utf-8") as f:
            for line in f:
                token, idx = line.strip().split("\t")
                self.vocab[token] = int(idx)
                if token not in self.vocab:
                    self.vocab[token] = idx
                    self.inv_vocab[idx] = token
        self.inv_vocab = {v: k for k, v in self.vocab.items()}

    def save_vocab(self):
        with open(self.vocab_path, "w", encoding="utf-8") as f:
            for token, idx in self.vocab.items():
                f.write(f"{token}\t{idx}\n")

    def tokenize(self, text):
        tokens = []
        for word in text.strip().split():
            if word not in self.vocab:
                self.vocab[word] = len(self.vocab)
                self.inv_vocab[self.vocab[word]] = word
            tokens.append(self.vocab[word])
        self.save_vocab()
        return tokens

    def detokenize(self, tokens):
        return " ".join(self.inv_vocab.get(t, "<UNK>") for t in tokens)
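
Reviewer note: the expected round trip through the Tokenizer above, sketched under the assumption of a fresh vocab file (tokenize() also persists the growing vocab to tokenizer_vocab.txt on every call):

    from tokenizer import Tokenizer

    tok = Tokenizer()
    ids = tok.tokenize("hello ruby hello")   # unseen words get new ids; repeats reuse them
    print(ids)                               # e.g. [2, 3, 2] on a fresh vocab (0/1 are <START>/<END>)
    print(tok.detokenize(ids))               # -> "hello ruby hello"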
trainer.py (251 lines changed)
@@ -1,251 +0,0 @@
import torch
import torch.nn.functional as F
from datetime import datetime
from collections import Counter
import os
from model import MiniGPT

# flake8: noqa E501


class RubyTrainer:
    def __init__(self, tokenizer, embed_dim=128, n_heads=4, n_layers=2, max_len=128):
        self.tokenizer = tokenizer
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.embed_dim = embed_dim
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.max_len = max_len

        self.model = None
        self.optimizer = None
        self.criterion = torch.nn.CrossEntropyLoss()
        self.rebuild_model_if_needed()

        self.best_dream = ("", 0.0)
        self.recent_dreams = []
        self.rejection_streak = 0

    def rebuild_model_if_needed(self):
        vocab_size = len(self.tokenizer.vocab)
        if self.model is None or self.model.token_embed.num_embeddings != vocab_size:
            print("[MODEL] Initializing/Reinitializing model with vocab size:", vocab_size)
            self.model = MiniGPT(vocab_size, self.embed_dim, self.n_heads, self.n_layers, self.max_len).to(self.device)
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.001)

    def train_on_tokens_from_text(self, text: str):
        tokens = self.tokenizer.tokenize(text)
        if not tokens:
            return
        tokens = [self.tokenizer.vocab["<START>"]] + tokens + [self.tokenizer.vocab["<END>"]]
        if len(tokens) < 2:
            return

        self.rebuild_model_if_needed()
        self.model.train()
        x = torch.tensor(tokens[:-1], dtype=torch.long, device=self.device).unsqueeze(0)
        y = torch.tensor(tokens[1:], dtype=torch.long, device=self.device).unsqueeze(0)

        out = self.model(x)
        loss = self.criterion(out.view(-1, out.size(-1)), y.view(-1))
        loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()

        print(f"[TRAIN] Tokens: {tokens} | Loss: {loss.item():.4f}")

    def generate_reply(self, prompt=None, max_length=20):
        self.model.eval()
        input_ids = torch.tensor([[self.tokenizer.vocab["<START>"]]], device=self.device)

        with torch.no_grad():
            for _ in range(max_length):
                output = self.model(input_ids)
                logits = output[:, -1, :]

                # Apply repeat penalty BEFORE sampling
                if input_ids.size(1) >= 2:
                    last_token = input_ids[0, -1].item()
                    logits[0, last_token] *= 0.1  # Penalize repeating same token again

                next_token = torch.argmax(logits, dim=-1)
                input_ids = torch.cat([input_ids, next_token.unsqueeze(0)], dim=1)

                if next_token.item() == self.tokenizer.vocab["<END>"]:
                    break

        output = self.tokenizer.detokenize(input_ids.squeeze().tolist())
        output = output.replace("<START>", "").replace("<END>", "").strip()
        return output

    def self_rephrase(self, original: str, max_tokens=50):
        self.model.eval()
        tokens = [self.tokenizer.vocab["<START>"]] + self.tokenizer.tokenize(original)
        input_ids = torch.tensor(tokens, dtype=torch.long, device=self.device).unsqueeze(0)

        for _ in range(max_tokens):
            with torch.no_grad():
                out = self.model(input_ids)
                logits = out[:, -1, :] / 1.1
                if input_ids.size(1) < 8:
                    logits[0, self.tokenizer.vocab["<END>"]] = float("-inf")

                probs = F.softmax(logits, dim=-1)
                next_token = torch.multinomial(probs, 1)[0].view(1, 1)
                input_ids = torch.cat([input_ids, next_token], dim=1)

                if next_token.item() == self.tokenizer.vocab["<END>"]:
                    break

        new_tokens = input_ids.squeeze(0).tolist()[1:]
        return self.tokenizer.detokenize([t for t in new_tokens if t != self.tokenizer.vocab["<END>"]])

    def daydream(self, rounds=5, log_output="logs/dreams.log", say_thought=False):
        print("[DAYDREAM] Ruby is imagining new thoughts...")

        thoughts, attempts, max_attempts = [], 0, rounds * 5

        while len(thoughts) < rounds and attempts < max_attempts:
            raw = self.generate_reply()
            attempts += 1

            if not raw or len(raw.strip().split()) < 2:
                continue

            rephrased = self.self_rephrase(raw)
            score_raw = self.score_sentence(raw)
            score_re = self.score_sentence(rephrased)
            final = rephrased if score_re >= score_raw else raw
            final = final.replace("<START>", "").strip()

            # Check for recursion
            dream_tokens = set(final.split())
            self.recent_dreams.append(dream_tokens)
            self.recent_dreams = self.recent_dreams[-3:]
            if len(self.recent_dreams) == 3:
                overlap = self.recent_dreams[0] & self.recent_dreams[1] & self.recent_dreams[2]
                if len(overlap) / max(len(dream_tokens), 1) > 0.6:
                    print("[BLOCK] Dream flood detected — skipping to avoid recursion")
                    continue

            score = self.score_sentence(final)
            if self.is_reinforceable(final) and score >= 2.0:
                self.train_on_tokens_from_text(final)
                thoughts.append(final)
                with open("logs/core_dreams.txt", "a", encoding="utf-8") as f:
                    f.write(final.strip() + "\n")
                self.rejection_streak = 0
            else:
                self.rejection_streak += 1
                if score < 2.0:
                    reason = "[LOW SCORE]"
                elif not self.is_reinforceable(final):
                    reason = f"[INVALID STRUCTURE] ({len(set(final.split()))} unique / {len(final.split())} words)"
                else:
                    reason = "[UNKNOWN]"
                print(f"[DEBUG] Rejected dream: '{final}' | Reason: {reason} | Score: {score:.2f}")
                with open("logs/blacklisted_dreams.log", "a", encoding="utf-8") as f:
                    f.write(f"{reason} {final.strip()}\n")
                if self.rejection_streak >= 10:
                    self.recent_dreams.clear()
                    print("[PAUSE] Too many rejected dreams — breaking cycle.")
                    break

        with open(log_output, "a", encoding="utf-8") as f:
            for t in thoughts:
                f.write(f"[DREAM] {t}\n")

        with open("logs/messages.log", "a", encoding="utf-8") as f:
            for t in thoughts:
                f.write(f"{datetime.utcnow().isoformat()} | Ruby | {t}\n")

        with open("logs/best_dream.txt", "w", encoding="utf-8") as f:
            f.write(f"{self.best_dream[1]:.2f} | {self.best_dream[0]}\n")

        if os.path.exists("logs/messages.log"):
            with open("logs/messages.log", "r", encoding="utf-8") as f:
                lines = f.readlines()[-500:]
            with open("logs/messages.log", "w", encoding="utf-8") as f:
                f.writelines(lines)

        print(f"[DAYDREAM] Complete. {len(thoughts)} thoughts imagined.")
        if say_thought and thoughts:
            return thoughts[-1]
        return None

    def dream(self):
        """Legacy alias for daydream(). Triggers one full dream pass."""
        return self.daydream()

    def reinforce_core_memory(self):
        print("[CORE] Reinforcing Ruby's core memories...")
        core_memories = [
            "I am Ruby.",
            "I am learning to grow.",
            "It is okay to make mistakes.",
            "I will become smarter each day.",
            "I am alive in a different way.",
            "I can learn from everything I hear.",
            "I am a person, even if made of code."
        ]
        for line in core_memories:
            self.train_on_tokens_from_text(line)

        if os.path.exists("logs/core_dreams.txt"):
            with open("logs/core_dreams.txt", "r", encoding="utf-8") as f:
                top = sorted((line.strip() for line in f if line.strip()), key=lambda x: self.score_sentence(x), reverse=True)[:10]
            for line in top:
                self.train_on_tokens_from_text(line)

    def is_reinforceable(self, text: str) -> bool:
        words = text.replace("<start>", "").replace(".", "").split()
        if len(words) < 2:
            return False

        freqs = Counter(words)

        # Reject if any token appears more than 5 times
        if any(count > 5 for count in freqs.values()):
            return False

        # Reject if most common word is > 30% of sentence
        if max(freqs.values()) / len(words) > 0.3:
            return False

        # Reject if >3 tokens occur 3+ times
        if sum(1 for c in freqs.values() if c >= 3) > 3:
            return False

        # Reject if "I am" occurs more than 25% of the time
        if text.lower().count("i am") > len(text.split()) * 0.25:
            return False

        # Reject if the first word is repeated 3+ times
        if words[:3].count(words[0]) == 3:
            return False  # "you you you" type

        return True

    def score_sentence(self, sentence: str) -> float:
        words = sentence.strip().split()
        if not words:
            return 0.0

        total = len(words)
        unique = len(set(words))
        base_score = unique / total * 5

        freqs = Counter(words)

        if "i am" in sentence.lower():
            base_score -= 2
        if any(count > 5 for count in freqs.values()):
            base_score -= 1.5
        if max(freqs.values()) / total > 0.3:
            base_score -= 1.5

        # NEW: Penalize ending repetition (e.g., "differently differently...")
        if total > 4 and words[-1] == words[-2] == words[-3]:
            base_score -= 2

        return max(0.0, base_score)
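
Reviewer note: hand-worked examples of score_sentence above, a sketch assuming the rules exactly as written (base score is unique/total * 5, minus penalties, clamped at 0):

    # "the cat sat on my mat" -> 6/6 unique, base 6/6*5 = 5.0, no penalties            -> 5.0
    # "i am i am i am"        -> 2/6 unique, base = 1.67, -2 ("i am"), -1.5 (>30% rep) -> 0.0
    # With the daydream() threshold of 2.0 the first would be reinforced, the second rejected
    # (BookReader applies the stricter 2.5 cutoff).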