diff --git a/dashboard.py b/dashboard.py
index bd81818..4407b75 100644
--- a/dashboard.py
+++ b/dashboard.py
@@ -12,6 +12,13 @@ def tail(filepath, num_lines=10):
         return f.readlines()[-num_lines:]
 
 
+def get_best_dream():
+    if not os.path.exists("logs/best_dream.txt"):
+        return "No high-scoring dream yet."
+    with open("logs/best_dream.txt", encoding="utf-8") as f:
+        return f.read().strip()
+
+
 @app.route("/")
 def home():
     vocab_size = 0
@@ -22,6 +29,7 @@ def home():
     dreams = [line.strip() for line in tail("logs/dreams.log", 10)]
     messages = [line.strip() for line in tail("logs/messages.log", 10)]
     errors = [line.strip() for line in tail("logs/error.log", 15)]
+    best_dream = get_best_dream()
 
     return render_template_string("""
     <!DOCTYPE html>
@@ -60,6 +68,8 @@ def home():
         {{ err }}
         {% endfor %}
         </pre>
+        <h3>🏆 Highest Scoring Dream</h3>
+        <p><b>{{ best_dream }}</b></p>
     </body>
     </html>
-    """, dreams=dreams[::-1], messages=messages[::-1], errors=errors[::-1], vocab_size=vocab_size)
+    """, dreams=dreams[::-1], messages=messages[::-1], errors=errors[::-1], vocab_size=vocab_size, best_dream=best_dream)
diff --git a/trainer.py b/trainer.py
index d1c5135..ed41198 100644
--- a/trainer.py
+++ b/trainer.py
@@ -19,6 +19,7 @@ class RubyTrainer:
         self.criterion = torch.nn.CrossEntropyLoss()
 
         self.rebuild_model_if_needed()
+        self.best_dream = ("", 0.0)
 
     def rebuild_model_if_needed(self):
         vocab_size = len(self.tokenizer.vocab)
@@ -156,7 +157,8 @@ class RubyTrainer:
 
             score_raw = self.score_sentence(raw)
             score_re = self.score_sentence(rephrased)
-
+            if score_re >= self.best_dream[1]:
+                self.best_dream = (rephrased.strip(), score_re)
             final = rephrased if score_re >= score_raw else raw
             self.train_on_tokens_from_text(final)
 
@@ -182,6 +184,8 @@ class RubyTrainer:
                 f.write(f"{datetime.utcnow().isoformat()} | Ruby | {t}\n")
 
         print(f"[DAYDREAM] Complete. {len(thoughts)} thoughts imagined.")
+        with open("logs/best_dream.txt", "w", encoding="utf-8") as f:
+            f.write(f"{self.best_dream[1]:.2f} | {self.best_dream[0]}\n")
 
         if say_thought and thoughts:
             return thoughts[-1]
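
For reference, a minimal sketch of the file contract the two changes share, assuming the `logs/best_dream.txt` path and the "score | text" line format from the diff above. The helper names `write_best_dream` and `read_best_dream`, the sample text, and the sample score are illustrative only and do not appear in either module:

```python
import os


def write_best_dream(text, score, path="logs/best_dream.txt"):
    # Trainer side: persist the highest-scoring dream as "score | text".
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        f.write(f"{score:.2f} | {text}\n")


def read_best_dream(path="logs/best_dream.txt"):
    # Dashboard side: read the line back, falling back to a placeholder.
    if not os.path.exists(path):
        return "No high-scoring dream yet."
    with open(path, encoding="utf-8") as f:
        return f.read().strip()


if __name__ == "__main__":
    write_best_dream("Ruby imagines a quiet morning.", 3.14)
    print(read_best_dream())  # 3.14 | Ruby imagines a quiet morning.
```

Both modules open the file by relative path, so they assume the same working directory; the trainer replaces the file wholesale at the end of each daydream cycle, and the dashboard only ever reads it.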