added contextual awareness

parent 699a21ad84
commit 2cf713ca97
@@ -0,0 +1,34 @@
+import json
+import os
+import time
+from typing import List
+
+CONTEXT_FILE = "data/memory/context.json"
+MAX_MEMORY = 100
+
+
+def load_context() -> List[dict]:
+    if os.path.exists(CONTEXT_FILE):
+        with open(CONTEXT_FILE, "r", encoding="utf-8") as f:
+            return json.load(f)
+    return []
+
+
+def save_context(mem: List[dict]):
+    with open(CONTEXT_FILE, "w", encoding="utf-8") as f:
+        json.dump(mem[-MAX_MEMORY:], f, indent=2)
+
+
+def add_to_context(text: str, source: str = "user"):
+    mem = load_context()
+    mem.append({
+        "timestamp": time.time(),
+        "source": source,
+        "text": text
+    })
+    save_context(mem)
+
+
+def get_recent_context(n: int = 5) -> List[str]:
+    mem = load_context()
+    return [entry["text"] for entry in mem[-n:]]
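A quick usage sketch of the new context module (not part of the commit). It assumes the data/memory/ directory already exists, since save_context() opens the file for writing without creating parent directories; the source labels below are just illustrative values:

from context.context import add_to_context, get_recent_context

# Append two entries to the rolling memory (each entry is stored with a
# timestamp, a source label, and the raw text; only the newest
# MAX_MEMORY entries are kept on disk).
add_to_context("hello ruby", source="user")
add_to_context("hello! what should we talk about?", source="ruby")

# Pull back the text of the most recent entries, oldest first.
print(get_recent_context(2))
# ['hello ruby', 'hello! what should we talk about?']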
@@ -1,6 +1,7 @@
 from flask import Flask, render_template
 from model.memory import load_dreams
 from model.tokenizer import Tokenizer
+from context.context import load_context
 import threading


@@ -12,9 +13,11 @@ tokenizer = Tokenizer()
 def index():
     dreams = load_dreams()
     top_dreams = dreams[:5]
+    memory_size = len(load_context())
     return render_template("index.html",
                            vocab_size=len(tokenizer.vocab),
-                           top_dreams=top_dreams)
+                           top_dreams=top_dreams,
+                           memory_size=memory_size)


 def run_dashboard():
@@ -6,6 +6,7 @@
 <body>
     <h1>Ruby is running</h1>
     <p><strong>Vocabulary Size:</strong> {{ vocab_size }}</p>
+    <p><strong>Memory Entries:</strong> {{ memory_size }}</p>

     <h2>🏆 Highest Scoring Dreams</h2>
     <ul>
@@ -3,6 +3,7 @@ import torch.nn as nn
 import random
 import time
 from model.brain import model, tokenizer, DEVICE, optimizer, loss_fn, daydream
+from context.context import get_recent_context, add_to_context

 _last_thought = time.time()

@@ -10,7 +11,10 @@ _last_thought = time.time()
 def train_on_message(text: str):
     global _last_thought
     model.train()
-    tokens = tokenizer.tokenize(text)
+    context_texts = get_recent_context(3)
+    augmented_text = " ".join(context_texts + [text])
+    tokens = tokenizer.tokenize(augmented_text)
+
     if len(tokens) < 2:
         return

@@ -24,7 +28,8 @@ def train_on_message(text: str):
     loss.backward()
     optimizer.step()

-    # Idle dreaming every 15 seconds
+    add_to_context(text)
+
     now = time.time()
     if now - _last_thought > 15:
         for _ in range(3):
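For illustration only (not part of the commit), this is roughly what the context augmentation in train_on_message() now produces; the remembered texts below are made-up values:

# Hypothetical contents of the last three memory entries:
context_texts = ["the sky is blue", "ruby dreamed about stars", "tell me more"]
text = "what do stars look like?"

# The model trains on the recent context joined in front of the new message...
augmented_text = " ".join(context_texts + [text])
# -> "the sky is blue ruby dreamed about stars tell me more what do stars look like?"

# ...while add_to_context(text) stores only the raw incoming message,
# so the concatenated strings never leak back into data/memory/context.json.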