Updated to dream
This commit is contained in:
parent
a069e9b7dd
commit
699a21ad84
@ -1,13 +1,20 @@
|
||||
from flask import Flask, render_template
|
||||
from model.memory import load_dreams
|
||||
from model.tokenizer import Tokenizer
|
||||
import threading
|
||||
|
||||
|
||||
app = Flask(__name__)
|
||||
tokenizer = Tokenizer()
|
||||
|
||||
|
||||
@app.route("/")
def index():
    """Render the dashboard: vocabulary size plus the five best dreams."""
    top_dreams = load_dreams()[:5]
    return render_template(
        "index.html",
        vocab_size=len(tokenizer.vocab),
        top_dreams=top_dreams,
    )
|
||||
|
||||
|
||||
def run_dashboard():
|
||||
|
@ -5,6 +5,13 @@
|
||||
</head>
|
||||
<body>
|
||||
<h1>Ruby is running</h1>
|
||||
<p>Vocabulary Size: {{ vocab_size }}</p>
|
||||
<p><strong>Vocabulary Size:</strong> {{ vocab_size }}</p>
|
||||
|
||||
<h2>🏆 Highest Scoring Dreams</h2>
|
||||
<ul>
|
||||
{% for dream in top_dreams %}
|
||||
<li><strong>{{ dream.score }}</strong> | {{ dream.sentence }}</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</body>
|
||||
</html>
|
||||
|
@ -2,6 +2,11 @@ import torch
|
||||
import torch.nn as nn
|
||||
import random
|
||||
from model.tokenizer import Tokenizer
|
||||
import torch.nn.functional as F
|
||||
from model.memory import save_dream
|
||||
import time
|
||||
|
||||
recent_dreams = []
|
||||
|
||||
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
||||
tokenizer = Tokenizer()
|
||||
@ -34,3 +39,35 @@ def generate_response():
|
||||
if not isinstance(pred, list):
|
||||
pred = [pred]
|
||||
return tokenizer.detokenize(pred)
|
||||
|
||||
|
||||
def score_sentence(sentence: str) -> float:
    """Heuristically score a generated sentence.

    Rewards lexical diversity (unique words over word count + 1) scaled by
    sentence length, capped at 20 words. Sentences with fewer than four
    words score 0.0.
    """
    tokens = sentence.strip().split()
    word_count = len(tokens)
    if word_count < 4:
        return 0.0
    unique_ratio = len(set(tokens)) / (word_count + 1)
    return unique_ratio * min(word_count, 20)
|
||||
|
||||
|
||||
def daydream():
    """Generate one short "dream" by sampling tokens from the model.

    Picks a random seed token, autoregressively samples 12 more tokens,
    detokenizes the sequence, scores it with score_sentence, and — when the
    score exceeds 0.3 — persists it via save_dream and caches it in the
    module-level recent_dreams list (capped at 10 entries).
    """
    model.eval()
    # Random seed token, shaped (1, 1): batch dimension plus one position.
    seed = torch.tensor([random.randint(0, tokenizer.next_id - 1)], device=DEVICE).unsqueeze(0)
    dream = []

    # Inference only: disable autograd so sampling does not build a graph
    # (the original ran generation with gradient tracking enabled, wasting
    # memory and time for no benefit).
    with torch.no_grad():
        for _ in range(12):  # generate a 12-token thought
            out = model(seed)
            logits = out[:, -1, :]  # logits for the next-token position
            probs = F.softmax(logits, dim=-1)
            token = torch.multinomial(probs, num_samples=1)
            dream.append(token.item())
            seed = torch.cat([seed, token], dim=1)

    sentence = tokenizer.detokenize(dream)
    score = score_sentence(sentence)

    # Keep only reasonably-scoring dreams; bound the in-memory cache.
    if score > 0.3:
        save_dream(sentence, score)
        recent_dreams.append((score, sentence))
        if len(recent_dreams) > 10:
            recent_dreams.pop(0)
|
||||
|
@ -0,0 +1,19 @@
|
||||
import json
|
||||
import os
|
||||
|
||||
DREAM_LOG_PATH = "data/memory/dreams.json"


def load_dreams():
    """Return the persisted dream log as a list of dicts.

    Returns an empty list when the log file is missing or unreadable
    (empty or corrupted JSON), so callers such as the dashboard never
    crash on startup because of a bad log file.
    """
    if not os.path.exists(DREAM_LOG_PATH):
        return []
    try:
        with open(DREAM_LOG_PATH, "r", encoding="utf-8") as f:
            return json.load(f)
    except (json.JSONDecodeError, OSError):
        # A truncated/corrupt log should degrade to "no dreams yet",
        # not take down every consumer of this module.
        return []
|
||||
|
||||
|
||||
def save_dream(sentence: str, score: float):
    """Persist a scored dream, keeping only the 100 highest-scoring.

    Appends the new entry (score rounded to 2 decimals), re-sorts the log
    by score descending, truncates to 100 entries, and rewrites the JSON
    log file.
    """
    dreams = load_dreams()
    dreams.append({"sentence": sentence, "score": round(score, 2)})
    dreams = sorted(dreams, key=lambda x: x["score"], reverse=True)[:100]
    # Create the log directory on first write; the original open() raised
    # FileNotFoundError when data/memory/ did not exist yet.
    os.makedirs(os.path.dirname(DREAM_LOG_PATH), exist_ok=True)
    with open(DREAM_LOG_PATH, "w", encoding="utf-8") as f:
        json.dump(dreams, f, indent=2)
|
@ -1,8 +1,14 @@
|
||||
import torch
|
||||
from model.brain import model, optimizer, loss_fn, tokenizer, DEVICE
|
||||
import torch.nn as nn
|
||||
import random
|
||||
import time
|
||||
from model.brain import model, tokenizer, DEVICE, optimizer, loss_fn, daydream
|
||||
|
||||
_last_thought = time.time()
|
||||
|
||||
|
||||
def train_on_message(text: str):
|
||||
global _last_thought
|
||||
model.train()
|
||||
tokens = tokenizer.tokenize(text)
|
||||
if len(tokens) < 2:
|
||||
@ -17,3 +23,10 @@ def train_on_message(text: str):
|
||||
optimizer.zero_grad()
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
# Idle dreaming every 15 seconds
|
||||
now = time.time()
|
||||
if now - _last_thought > 15:
|
||||
for _ in range(3):
|
||||
daydream()
|
||||
_last_thought = now
|
||||
|
Loading…
x
Reference in New Issue
Block a user