From a23a2fa7d7017499ca1da5212c429afbe42b0d2a Mon Sep 17 00:00:00 2001 From: Dani Date: Sun, 27 Apr 2025 14:13:49 -0400 Subject: [PATCH] Wiped her memory --- model/brain.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/model/brain.py b/model/brain.py index 74d0c32..e6480aa 100644 --- a/model/brain.py +++ b/model/brain.py @@ -14,7 +14,7 @@ recent_dreams = [] def generate_response(): model.eval() - # Start from an empty tensor: she speaks purely from herself + # Start from an empty input: purely organic thought input_ids = torch.zeros((1, 1), dtype=torch.long, device=DEVICE) output_tokens = [] @@ -25,8 +25,14 @@ def generate_response(): next_token_logits = output[:, -1, :] next_token = torch.argmax(next_token_logits, dim=-1) - # Stop if the model predicts padding or unknown token - if next_token.item() in [tokenizer.token_to_id("<pad>"), tokenizer.token_to_id("<unk>")]: + # Get token id values for special tokens + pad_token_id = tokenizer.vocab.get("<pad>", None) + unk_token_id = tokenizer.vocab.get("<unk>", None) + + # Stop if the model predicts <pad> or <unk> + if pad_token_id is not None and next_token.item() == pad_token_id: + break + if unk_token_id is not None and next_token.item() == unk_token_id: break output_tokens.append(next_token.item())