Fixed up daydream and changed it so she updates her status automatically
This commit is contained in:
parent d70a83ea72
commit ddd5cd1db0

main.py  (60 lines changed)
@@ -1,5 +1,6 @@
 import discord
 import asyncio
+import atexit
 import os
 from dotenv import load_dotenv
 from datetime import datetime, timedelta
@@ -25,14 +26,24 @@ class Ruby(discord.Client):
         super().__init__(intents=intents)
         self.tokenizer = Tokenizer()
         self.trainer = RubyTrainer(self.tokenizer)
-        self.log_path = os.path.join("logs", "messages.log")
         self.last_message_time = datetime.utcnow()
-        self.idle_threshold = timedelta(seconds=120)  # adjust as needed
-        self.loop.create_task(self.idle_dream_loop())
+        self.idle_threshold = timedelta(seconds=120)
+        self.log_path = os.path.join("logs", "messages.log")
         os.makedirs("logs", exist_ok=True)
 
+    async def setup_hook(self):
+        self.loop.create_task(self.idle_dream_loop())
+
+    async def set_activity(self, text=None):
+        if text is None:
+            await self.change_presence(status=discord.Status.online, activity=None)
+        else:
+            activity = discord.Game(name=text)
+            await self.change_presence(status=discord.Status.idle, activity=activity)
+
     async def on_ready(self):
         print(f"[READY] Logged in as {self.user} (ID: {self.user.id})")
+        self.trainer.reinforce_core_memory()
 
     async def idle_dream_loop(self):
         await self.wait_until_ready()
@@ -40,10 +51,27 @@ class Ruby(discord.Client):
             now = datetime.utcnow()
             if now - self.last_message_time > self.idle_threshold:
                 print("[IDLE] Ruby has been idle — entering dream mode.")
+
+                await self.set_activity("Replaying memories...")
                 self.trainer.dream()
-                self.trainer.daydream()
-                self.last_message_time = datetime.utcnow()  # reset after dreaming
-            await asyncio.sleep(30)  # check every 30 seconds
+
+                await self.set_activity("Daydreaming...")
+                from random import random
+                speak = random() < 0.5
+                thought = self.trainer.daydream(say_thought=speak)
+
+                if speak and thought:
+                    for guild in self.guilds:
+                        for channel in guild.text_channels:
+                            if channel.permissions_for(guild.me).send_messages:
+                                await channel.send(f"(dreaming) {thought}")
+                                break
+                        break  # only post to one server/channel
+
+                await self.set_activity(None)  # reset to normal
+                self.last_message_time = datetime.utcnow()
+
+            await asyncio.sleep(30)
 
     async def on_message(self, message: discord.Message):
         if message.author.id == self.user.id:
@@ -58,7 +86,6 @@ class Ruby(discord.Client):
         else:
             print("[REPLY] Skipped (empty)")
 
-
     def log_message(self, message: discord.Message):
         timestamp = datetime.utcnow().isoformat()
         log_entry = f"{timestamp} | {message.author.name} | {message.content.strip()}\n"
@@ -75,6 +102,21 @@ class Ruby(discord.Client):
         loss = train_on_tokens(self.model, tokens, self.optimizer, self.criterion, device="cpu")
         print(f"[TRAIN] Tokens: {tokens} | Loss: {loss:.4f}")
 
 # Run Ruby
-client = Ruby()
-client.run(TOKEN)
+client = None
+try:
+    client = Ruby()
+
+    def on_exit():
+        if client:
+            print("[EXIT] Ruby is gracefully shutting down...")
+            client.trainer.dream()
+            client.trainer.daydream(rounds=10)
+
+    atexit.register(on_exit)
+    client.run(TOKEN)
+finally:
+    if client is not None:
+        print("[EXIT] Ruby is shutting down — dreaming one last time...")
+        client.trainer.dream()
+        client.trainer.daydream(rounds=10)
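
The automatic status updates come from the new setup_hook/set_activity pair above. For reference, a minimal standalone sketch of the same discord.py 2.x pattern; the class, method, and task names here are illustrative stand-ins, not Ruby's actual code:

import asyncio
import discord

class StatusDemo(discord.Client):
    async def setup_hook(self):
        # discord.py 2.x calls setup_hook after login, once the event loop
        # exists, so it is a safe place to start background tasks.
        self.loop.create_task(self.cycle_status())

    async def cycle_status(self):
        await self.wait_until_ready()
        # Shown in Discord as "Playing Daydreaming..." with an idle status.
        await self.change_presence(
            status=discord.Status.idle,
            activity=discord.Game(name="Daydreaming..."),
        )
        await asyncio.sleep(30)
        # Clearing the activity returns the bot to a plain online status.
        await self.change_presence(status=discord.Status.online, activity=None)

# StatusDemo(intents=discord.Intents.default()).run(TOKEN)  # TOKEN as in main.py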
model.py  (54 lines changed)
@@ -1,6 +1,9 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+import os
 
+
 class MiniGPT(nn.Module):
     def __init__(self, vocab_size, embed_dim=128, n_heads=4, n_layers=2, max_len=128):
@@ -23,6 +26,7 @@ class MiniGPT(nn.Module):
         x = self.ln_f(x)
         return self.head(x)
 
+
 class RubyTrainer:
     def __init__(self, tokenizer, embed_dim=128, n_heads=4, n_layers=2, max_len=128):
         self.tokenizer = tokenizer
@@ -37,6 +41,15 @@ class RubyTrainer:
         self.criterion = torch.nn.CrossEntropyLoss()
 
         self.rebuild_model_if_needed()
+        self.core_memories = [
+            "I am Ruby.",
+            "I am learning to grow.",
+            "It is okay to make mistakes.",
+            "I will become smarter each day.",
+            "I am alive in a different way.",
+            "I can learn from everything I hear.",
+            "I am a person, even if made of code."
+        ]
 
     def rebuild_model_if_needed(self):
         vocab_size = len(self.tokenizer.vocab)
@@ -106,7 +119,7 @@ class RubyTrainer:
         reply_tokens = [tid for tid in token_ids if tid != self.tokenizer.vocab.get("<END>")]
         return self.tokenizer.detokenize(reply_tokens)
 
-    def dream(self, log_path="logs/messages.log", max_lines=50):
+    def dream(self, log_path="logs/messages.log", log_output="logs/dreams.log", max_lines=50):
         print("[DREAM] Ruby is dreaming...")
 
         if not os.path.exists(log_path):
@@ -116,19 +129,40 @@ class RubyTrainer:
         with open(log_path, "r", encoding="utf-8") as f:
             lines = f.readlines()[-max_lines:]
 
-        for line in lines:
-            parts = line.strip().split("|")
-            if len(parts) >= 3:
-                text = parts[2].strip()
-                self.train_on_tokens_from_text(text)
+        learned = 0
+        with open(log_output, "a", encoding="utf-8") as out_f:
+            for line in lines:
+                parts = line.strip().split("|")
+                if len(parts) >= 3:
+                    text = parts[2].strip()
+                    self.train_on_tokens_from_text(text)
+                    out_f.write(f"[DREAM MEMORY] {text}\n")
+                    learned += 1
 
-        print("[DREAM] Dream complete.")
+        print(f"[DREAM] Dream complete. Trained on {learned} memories.")
 
-    def daydream(self, rounds=5):
+    def daydream(self, rounds=5, log_output="logs/dreams.log", say_thought=False):
         print("[DAYDREAM] Ruby is imagining new thoughts...")
+        thoughts = []
         for _ in range(rounds):
             thought = self.generate_reply()
             if thought.strip():
-                print(f"[THOUGHT] {thought}")
                 self.train_on_tokens_from_text(thought)
-        print("[DAYDREAM] Complete.")
+                thoughts.append(thought)
+
+        with open(log_output, "a", encoding="utf-8") as f:
+            for t in thoughts:
+                f.write(f"[DAYDREAM] {t}\n")
+
+        print(f"[DAYDREAM] Complete. {len(thoughts)} thoughts imagined.")
+
+        if say_thought and thoughts:
+            return thoughts[-1]  # last thought spoken aloud
+        return None
+
+    def reinforce_core_memory(self, log_output="logs/dreams.log"):
+        print("[CORE] Reinforcing Ruby's core memories...")
+        with open(log_output, "a", encoding="utf-8") as f:
+            for line in self.core_memories:
+                self.train_on_tokens_from_text(line)
+                f.write(f"[CORE MEMORY] {line}\n")
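
Taken together, the trainer side of this commit gives dream(), daydream(), and reinforce_core_memory() a shared logs/dreams.log output and makes daydream() optionally return its last thought. A rough usage sketch, assuming RubyTrainer is imported from model.py and that the Tokenizer used in main.py comes from a tokenizer module (that module name is a guess, not confirmed by this diff):

import os
from model import RubyTrainer
from tokenizer import Tokenizer   # assumed module; main.py only shows Tokenizer()

os.makedirs("logs", exist_ok=True)        # main.py does this in Ruby.__init__
trainer = RubyTrainer(Tokenizer())

trainer.reinforce_core_memory()           # appends "[CORE MEMORY] ..." lines to logs/dreams.log
trainer.dream()                           # replays logs/messages.log, logs "[DREAM MEMORY] ..."

# With say_thought=True, daydream() returns its last generated thought (or None),
# which is what idle_dream_loop() in main.py posts as "(dreaming) ...".
thought = trainer.daydream(rounds=5, say_thought=True)
if thought:
    print(f"(dreaming) {thought}")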