# body.py
# flake8: noqa
import os
import glob
import json
import threading
import asyncio
from collections import deque
from datetime import datetime, time, timedelta
import logging

import requests
import discord

from nervous_system import NervousSystem
from persona import Persona
from life_log import LifeLog
from plugin_manager import PluginManager
import dashboard
import brain_map

# ─── Config & Paths ────────────────────────────────────────────────────────────
VOCAB_PATH = "vocab.json"
PROGRESS_PATH = "progress.json"
PERSONA_PATH = "persona.json"
LIFELOG_PATH = "life_log.json"

OPENWEATHER_KEY = os.getenv("OPENWEATHER_API_KEY")
WEATHER_CITY = os.getenv("WEATHER_CITY", "New York")
WEATHER_HOUR = 6  # daily at 6 AM

# ─── Discord & System Setup ─────────────────────────────────────────────────────
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)

system = NervousSystem()
persona = Persona(path=PERSONA_PATH)
life_log = LifeLog(path=LIFELOG_PATH)
plugins = PluginManager()

# keep last 6 turns for conversational context
system.history = deque(maxlen=6)

# load/resize vocab
system.sensory.load_vocab(VOCAB_PATH)
system._resize_embeddings()

# resume book-training progress (per-book)
if os.path.isfile(PROGRESS_PATH):
    with open(PROGRESS_PATH, "r", encoding="utf-8") as f:
        prog = json.load(f)
    current_book = prog.get("current_book", 0)
    line_offset = prog.get("line_offset", 0)
else:
    current_book = 0
    line_offset = 0

book_list = sorted(glob.glob("books/*.txt"))
total_books = len(book_list)

# count non-empty lines per book (close each file promptly)
book_line_counts = []
for path in book_list:
    with open(path, encoding="utf-8") as f:
        cnt = sum(1 for line in f if line.strip())
    book_line_counts.append(cnt)

# set up overall progress counters for the dashboard
system.total_lines = sum(book_line_counts)
lines_done = sum(book_line_counts[:current_book]) + line_offset
system.processed_lines = lines_done

print(
    f"Resuming training: book {current_book+1}/{total_books}, "
    f"line {line_offset}/{book_line_counts[current_book] if current_book < total_books else 0}\n"
    f"Overall progress: {system.processed_lines}/{system.total_lines} lines"
)

# inject for dashboard routes
dashboard.system = system
brain_map.system = system

print(
    f"Loaded vocab {len(system.sensory.stoi)}, "
    f"resuming at book {current_book+1}/{total_books}, "
    f"line {line_offset}/{book_line_counts[current_book] if current_book < total_books else 0}"
)


def refine_diary_entry(raw: str) -> str:
    """Rewrite a rough diary draft as a short, polished first-person entry."""
    prompt = (
        "Here is a rough diary draft. Rewrite it as a clear, first-person "
        "diary entry in 2–3 sentences:\n\n"
        f"Draft:\n{raw}\n\nRefined entry:"
    )
    return system.generate(prompt, max_len=100, temperature=0.7, top_p=0.9)


# ─── Seed Book-Title Diary Entries ──────────────────────────────────────────────
for path in book_list:
    title = os.path.splitext(os.path.basename(path))[0]
    fact = f"I just finished reading “{title}.”"
    system.train("", fact)
    draft = system.generate(
        f"Diary prompt: Stream-of-thought draft about reading “{title}.”",
        max_len=100, temperature=0.9, top_p=0.95
    )
    entry = refine_diary_entry(draft)
    life_log.add(entry)


# ─── Idle Book-Training Task (per-book) ─────────────────────────────────────────
async def train_books_idle():
    global current_book, line_offset
    await client.wait_until_ready()
    await asyncio.sleep(5)

    # process one book at a time
    while current_book < total_books:
        path = book_list[current_book]
        processed = 0
        cnt = book_line_counts[current_book]

        with open(path, encoding="utf-8") as f:
            for raw in f:
                text = raw.strip()
                if not text:
                    continue
                if processed < line_offset:
                    processed += 1
                    continue

                # train on this line
                await asyncio.to_thread(system.train, text, text)
                processed += 1
                line_offset = processed

                # checkpoint every 200 lines or at end
                if processed % 200 == 0 or processed == cnt:
                    system.sensory.save_vocab(VOCAB_PATH)
                    with open(PROGRESS_PATH, "w", encoding="utf-8") as pf:
                        json.dump({
                            "current_book": current_book,
                            "line_offset": line_offset
                        }, pf, indent=2)

        # finished current book
        print(f"Finished book {current_book+1}/{total_books}: {path}")

        # reset for next book
        current_book += 1
        line_offset = 0

        # save progress
        with open(PROGRESS_PATH, "w", encoding="utf-8") as pf:
            json.dump({
                "current_book": current_book,
                "line_offset": 0
            }, pf, indent=2)

        # optional small break between books
        await asyncio.sleep(2)

    # all books done; bootstrap persona
    await asyncio.to_thread(persona.bootstrap, system)
    print("All books trained. Persona bootstrapped:", persona.traits)


# ─── Idle Weather-Ingestion & Diary Task ────────────────────────────────────────
async def ingest_weather_idle():
    await client.wait_until_ready()

    # sleep until next WEATHER_HOUR
    now = datetime.now()
    target = datetime.combine(now.date(), time(WEATHER_HOUR))
    if now >= target:
        target += timedelta(days=1)
    await asyncio.sleep((target - now).total_seconds())

    while True:
        if OPENWEATHER_KEY:
            try:
                url = (
                    "https://api.openweathermap.org/data/2.5/weather"
                    f"?q={WEATHER_CITY}&units=metric&appid={OPENWEATHER_KEY}"
                )
                data = requests.get(url, timeout=5).json()
                desc = data["weather"][0]["description"]
                temp = data["main"]["temp"]
                fact = f"Current weather: {desc}, {temp:.1f}°C."
                await asyncio.to_thread(system.train, "", fact)

                draft = system.generate(
                    f"Diary prompt: Draft about today’s weather: {desc}, {temp:.1f}°C.",
                    max_len=100, temperature=0.9, top_p=0.95
                )
                entry = refine_diary_entry(draft)
                life_log.add(entry)
                print("Journaled weather:", entry)
            except Exception:
                pass
        await asyncio.sleep(24 * 3600)


# ─── Idle Self-Reflection Task ──────────────────────────────────────────────────
async def reflect_idle():
    await client.wait_until_ready()
    while True:
        await asyncio.sleep(600)
        await asyncio.to_thread(persona.bootstrap, system)
        print("Persona adapted:", persona.traits)


@client.event
async def on_ready():
    print(f"Ruby is online as {client.user}!")
    asyncio.create_task(train_books_idle())
    asyncio.create_task(ingest_weather_idle())
    asyncio.create_task(reflect_idle())


@client.event
async def on_message(message: discord.Message):
    if message.author == client.user or not message.content:
        return
    user_text = message.content.strip()

    # 1) Diary: top 5 recent
    entries = life_log.recent(5)
    diary_sec = "### Diary Entries\n"
    for e in reversed(entries):
        diary_sec += f"- {e}\n"

    # 2) Persona
    persona_sec = "\n### Persona\n" + persona.summary()

    # 3) Conversation
    convo_sec = "\n### Conversation\n"
    for turn in system.history:
        convo_sec += f"User: {turn['user']}\nRuby: {turn['bot']}\n"
    convo_sec += f"User: {user_text}\nRuby:"

    prompt = diary_sec + persona_sec + convo_sec
    reply = system.generate(prompt)
    await message.channel.send(reply)

    # record & train
    system.history.append({"user": user_text, "bot": reply})
    asyncio.create_task(
        asyncio.to_thread(system.train, user_text, reply)
    )


# ─── Silence Flask/Werkzeug request logs ────────────────────────────────────────
logging.getLogger('werkzeug').setLevel(logging.ERROR)


def run_dashboard():
    dashboard.app.run(
        host="0.0.0.0", port=5000,
        debug=False, use_reloader=False
    )


threading.Thread(target=run_dashboard, daemon=True).start()

dashboard.system = system
dashboard.persona = persona
dashboard.life_log = life_log
dashboard.plugins = plugins
dashboard.brain_map = brain_map

print("Dashboard available at http://127.0.0.1:5000")

token = os.getenv("DISCORD_TOKEN")
if not token:
    raise RuntimeError("Please set DISCORD_TOKEN in env")
client.run(token)