diff --git a/context/context.py b/context/context.py
index 6506866..3ec9697 100644
--- a/context/context.py
+++ b/context/context.py
@@ -3,19 +3,23 @@ import os
import time
from typing import List
-CONTEXT_FILE = "data/memory/context.json"
+CONTEXT_PATH = "data/memory/context.json"
MAX_MEMORY = 100
-def load_context() -> List[dict]:
- if os.path.exists(CONTEXT_FILE):
- with open(CONTEXT_FILE, "r", encoding="utf-8") as f:
+def load_context() -> List[dict]:
+ if not os.path.exists(CONTEXT_PATH):
+ return []
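+ # If the stored JSON is unreadable, warn and fall back to an empty history.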
+ try:
+ with open(CONTEXT_PATH, "r", encoding="utf-8") as f:
return json.load(f)
- return []
+ except json.JSONDecodeError:
+ print("[Context] Corrupted context.json. Returning empty list.")
+ return []
def save_context(mem: List[dict]):
- with open(CONTEXT_FILE, "w", encoding="utf-8") as f:
+ with open(CONTEXT_PATH, "w", encoding="utf-8") as f:
json.dump(mem[-MAX_MEMORY:], f, indent=2)
diff --git a/dashboard/dashboard.py b/dashboard/dashboard.py
index 2aa367a..640e5ed 100644
--- a/dashboard/dashboard.py
+++ b/dashboard/dashboard.py
@@ -9,13 +9,12 @@ from model.journal import read_journal_entries
from model.dreams import load_dreams
from model.tokenizer import Tokenizer
from model.abstraction import cluster_vocab
-from model.dreams import load_dreams
from model.scheduler import get_time_until_next_action, get_next_action_label
from context.context import load_context
from reader.reader import get_books, load_progress
-app = Flask(__name__)
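+# Serve the static/ folder, where refresh_brainmap_cache copies the generated brainmap.json.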
+app = Flask(__name__, static_folder="static")
tokenizer = Tokenizer()
next_cycle_time = time.time() + 900 # Example: 15 minutes from now
@@ -128,7 +127,7 @@ def brainmap():
print(f"[Dashboard] Failed to load brainmap cache: {e}")
nodes, links = [], []
- return render_template("brainmap.html", nodes=json.dumps(nodes), links=json.dumps(links))
+ return render_template("brainmap.html", nodes=nodes, links=links)
@app.route("/journal")
diff --git a/dashboard/templates/brainmap.html b/dashboard/templates/brainmap.html
index 1dd457b..afe6266 100644
--- a/dashboard/templates/brainmap.html
+++ b/dashboard/templates/brainmap.html
@@ -2,16 +2,14 @@
-
Ruby's Brain Map
+
+
diff --git a/model/brainmap.py b/model/brainmap.py
index 8179ccb..c83a7c8 100644
--- a/model/brainmap.py
+++ b/model/brainmap.py
@@ -1,6 +1,9 @@
import re
import json
import os
+import shutil
+from sklearn.cluster import KMeans
+import numpy as np
from utils.unicleaner import clean_unicode
BRAINMAP_PATH = "data/memory/brainmap.json" # actual connection data
@@ -99,7 +102,12 @@ def get_brainmap():
return brainmap
-def refresh_brainmap_cache(min_weight=5, max_nodes=300):
+def refresh_brainmap_cache(min_weight=2, max_nodes=300):
+ """
+ Generate a clustered brainmap view and write it to:
+ - data/memory/brainmap_cache.json (master copy)
+ - static/brainmap.json (served to the frontend)
+ """
map_data = get_brainmap()
links = []
seen_words = set()
@@ -108,7 +116,6 @@ def refresh_brainmap_cache(min_weight=5, max_nodes=300):
if not isinstance(connections, dict):
print(f"[Brainmap] Skipping corrupted entry: {word} => {type(connections)}")
continue
-
for linked_word, weight in connections.items():
if weight >= min_weight:
links.append({
@@ -119,13 +126,44 @@ def refresh_brainmap_cache(min_weight=5, max_nodes=300):
seen_words.add(word)
seen_words.add(linked_word)
- nodes = [{"id": word} for word in seen_words]
-
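+ # Keep only words that actually appear in a retained link.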
+ node_set = {link["source"] for link in links} | {link["target"] for link in links}
+ nodes = sorted(node_set)
if len(nodes) > max_nodes:
nodes = nodes[:max_nodes]
- node_set = {n["id"] for n in nodes}
+ node_set = set(nodes)
links = [l for l in links if l["source"] in node_set and l["target"] in node_set]
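+ # Build one connection-strength vector per node: component j is the link weight to nodes[j].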
+ index_lookup = {word: i for i, word in enumerate(nodes)}
+ word_vectors = []
+ for word in nodes:
+ vec = np.zeros(len(nodes), dtype=np.float32)
+ connections = map_data.get(word, {})
+ for other, strength in connections.items():
+ if other in index_lookup:
+ vec[index_lookup[other]] = strength
+ word_vectors.append(vec)
+
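+ # KMeans needs at least two samples; skip clustering (and the cache refresh) for tiny maps.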
+ if len(word_vectors) < 2:
+ print("[Brainmap] Not enough nodes to cluster.")
+ return
+
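+ # Group nodes by similarity of their connection vectors, using at most 8 clusters.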
+ kmeans = KMeans(n_clusters=min(8, len(nodes)), n_init="auto")
+ labels = kmeans.fit_predict(word_vectors)
+ clustered_nodes = [{"id": word, "group": int(label)} for word, label in zip(nodes, labels)]
+
+ output = {
+ "nodes": clustered_nodes,
+ "links": links
+ }
+
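+ # Ensure both output directories exist, then write the master cache and copy it into static/ for the frontend.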
os.makedirs("data/memory", exist_ok=True)
- with open(BRAINMAP_CACHE_PATH, "w", encoding="utf-8") as f:
- json.dump({"nodes": nodes, "links": links}, f, indent=2)
+ os.makedirs("static", exist_ok=True)
+
+ cache_path = "data/memory/brainmap_cache.json"
+ static_path = "static/brainmap.json"
+
+ with open(cache_path, "w", encoding="utf-8") as f:
+ json.dump(output, f, indent=2)
+
+ shutil.copyfile(cache_path, static_path)
+ # print(f"[Brainmap] Cache written to {cache_path} and copied to {static_path}")
diff --git a/model/cleanup.py b/model/cleanup.py
index 3384570..167f6b6 100644
--- a/model/cleanup.py
+++ b/model/cleanup.py
@@ -4,7 +4,7 @@ import os
import time
from model.tokenizer import VOCAB_PATH
from model.dreams import DREAM_LOG_PATH
-from context.context import CONTEXT_FILE
+from context.context import CONTEXT_PATH
from model.brainmap import load_brainmap, save_brainmap
CLEANUP_LOG = "data/logs/cleanup.log"
@@ -59,15 +59,15 @@ def cleanup_dreams():
def cleanup_context():
- if not os.path.exists(CONTEXT_FILE):
+ if not os.path.exists(CONTEXT_PATH):
return
- with open(CONTEXT_FILE, "r", encoding="utf-8") as f:
+ with open(CONTEXT_PATH, "r", encoding="utf-8") as f:
context = json.load(f)
filtered = context[-100:]
- with open(CONTEXT_FILE, "w", encoding="utf-8") as f:
+ with open(CONTEXT_PATH, "w", encoding="utf-8") as f:
json.dump(filtered, f, indent=2)
if len(filtered) < len(context):