First good level of progress

2025-06-29 12:36:25 -04:00
commit 159be1eb82
15 changed files with 10628 additions and 0 deletions

208
.gitignore vendored Normal file

@ -0,0 +1,208 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[codz]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py.cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
#poetry.toml
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
#pdm.lock
#pdm.toml
.pdm-python
.pdm-build/
# pixi
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
#pixi.lock
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
# in the .venv directory. It is recommended not to include this directory in version control.
.pixi
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Abstra
# Abstra is an AI-powered process automation framework.
# Ignore directories containing user credentials, local state, and settings.
# Learn more at https://abstra.io/docs
.abstra/
# Visual Studio Code
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
# and can be added to the global gitignore or merged into this file. However, if you prefer,
# you could uncomment the following to ignore the entire vscode folder
# .vscode/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc
# Cursor
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
# refer to https://docs.cursor.com/context/ignore-files
.cursorignore
.cursorindexingignore
# Marimo
marimo/_static/
marimo/_lsp/
__marimo__/
/data/books

21
LICENSE Normal file

@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 [fullname]

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

6
catlin_chatlog.txt Normal file

@ -0,0 +1,6 @@
[2025-06-29T12:13:41.661917] You: Hi, what's your name?
[2025-06-29T12:13:41.661917] Catlin: what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what
[2025-06-29T12:16:56.771441] You: Hi, what is your name?
[2025-06-29T12:16:56.771441] Catlin: what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what what

BIN
catlin_model.pt Normal file

Binary file not shown.

BIN
catlin_tokenizer.pkl Normal file

Binary file not shown.

11
config.py Normal file

@ -0,0 +1,11 @@
# config.py
VOCAB_SIZE = 50000
CONTEXT_SIZE = 128
EMBED_DIM = 256
NUM_HEADS = 8
NUM_LAYERS = 6
BATCH_SIZE = 16
LEARNING_RATE = 3e-4
DEVICE = "cuda" # fallback handled in trainer
MAX_TOKENS = 100_000 # Used to cap input corpus size
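
A quick back-of-the-envelope check of what these settings imply for model size — a sketch that ignores biases and LayerNorm weights, and assumes the GPT class in models/gpt.py below:

# Rough parameter count implied by config.py. Most parameters sit in the
# two vocab-sized matrices (token embedding and output head).
from config import VOCAB_SIZE, CONTEXT_SIZE, EMBED_DIM, NUM_LAYERS

emb = VOCAB_SIZE * EMBED_DIM                 # token embeddings: 12.8M
pos = CONTEXT_SIZE * EMBED_DIM               # positional embeddings: ~33K
attn = 4 * EMBED_DIM ** 2                    # QKV + output projections
ff = 2 * 4 * EMBED_DIM ** 2                  # the two feed-forward matrices
head = EMBED_DIM * VOCAB_SIZE                # output projection: 12.8M
total = emb + pos + NUM_LAYERS * (attn + ff) + head
print(f"~{total / 1e6:.1f}M parameters")     # ≈ 30.4M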

10000
data/openwebtext/owt_20000.jsonl Normal file
File diff suppressed because one or more lines are too long

84
main.py Normal file

@ -0,0 +1,84 @@
# main.py
import os

import torch

from config import *
from tokenizers.word_tokenizer import WordTokenizer
from models.gpt import GPT
from training.trainer import TextDataset, train


def load_texts():
    text = ""

    # --- Books ---
    book_dir = os.path.join("data", "books")
    os.makedirs(book_dir, exist_ok=True)
    print(f"[INFO] Scanning books from: {book_dir}")
    for file in os.listdir(book_dir):
        path = os.path.join(book_dir, file)
        if file.endswith(".txt"):
            print(f"  📚 Loading {file}")
            try:
                with open(path, encoding="utf-8") as f:
                    text += f.read() + "\n"
            except Exception as e:
                print(f"  ❌ Failed to read {file}: {e}")

    # --- OpenWebText ---
    owt_path = os.path.join("data/openwebtext", "owt_20000.jsonl")
    print(f"[INFO] Scanning OpenWebText: {owt_path}")
    if os.path.exists(owt_path):
        # errors="replace" keeps a bad byte sequence from aborting the read;
        # a try/except around the += line cannot catch decode errors, which
        # surface in the file iterator itself.
        with open(owt_path, encoding="utf-8", errors="replace") as f:
            for i, line in enumerate(f):
                if i % 1000 == 0:
                    print(f"{i} lines read...")
                text += line.strip() + "\n"
    else:
        print(f"[WARN] OpenWebText file not found: {owt_path}")

    # --- Chat logs ---
    if os.path.exists("catlin_chatlog.txt"):
        print("[INFO] Appending chat log...")
        with open("catlin_chatlog.txt", encoding="utf-8") as f:
            text += "\n" + f.read()

    print(f"[INFO] Raw text loaded: {len(text)} characters")
    # Rough character cap (assumes roughly 10 characters per word-level token).
    return text[:MAX_TOKENS * 10]


def main():
    print("[INFO] Starting main()")
    raw_text = load_texts()
    print(f"[INFO] Loaded text: {len(raw_text)} characters")

    tokenizer = WordTokenizer(VOCAB_SIZE)
    tokenizer.fit(raw_text)
    tokenizer.save("catlin_tokenizer.pkl")
    print("[INFO] Tokenizer built and saved")

    tokens = tokenizer.encode(raw_text)
    print(f"[INFO] Total tokens: {len(tokens)}")

    dataset = TextDataset(tokens, CONTEXT_SIZE)
    if len(dataset) == 0:
        print("❌ ERROR: Dataset is empty. Aborting.")
        return

    model = GPT(VOCAB_SIZE, CONTEXT_SIZE, EMBED_DIM, NUM_HEADS, NUM_LAYERS)
    print("[INFO] Model initialized")

    train(model, dataset, DEVICE if torch.cuda.is_available() else "cpu",
          LEARNING_RATE, BATCH_SIZE, epochs=10)
    print("[INFO] Training complete")

    torch.save(model.state_dict(), "catlin_model.pt")
    print("[INFO] Model saved to catlin_model.pt")


if __name__ == "__main__":
    main()

44
models/gpt.py Normal file

@ -0,0 +1,44 @@
# models/gpt.py
import torch
import torch.nn as nn


class GPTBlock(nn.Module):
    def __init__(self, embed_dim, num_heads):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)
        self.ln1 = nn.LayerNorm(embed_dim)
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, 4 * embed_dim),
            nn.GELU(),
            nn.Linear(4 * embed_dim, embed_dim)
        )
        self.ln2 = nn.LayerNorm(embed_dim)

    def forward(self, x):
        # Causal mask: position t may only attend to positions <= t. Without
        # it the attention sees future tokens, the next-token objective
        # becomes trivial during training, and generation degenerates.
        T = x.size(1)
        causal_mask = torch.triu(
            torch.ones(T, T, device=x.device, dtype=torch.bool), diagonal=1
        )
        attn_out, _ = self.attn(x, x, x, attn_mask=causal_mask, need_weights=False)
        x = self.ln1(x + attn_out)
        x = self.ln2(x + self.ff(x))
        return x


class GPT(nn.Module):
    def __init__(self, vocab_size, context_size, embed_dim, num_heads, num_layers, dropout=0.1):
        super().__init__()
        self.token_emb = nn.Embedding(vocab_size, embed_dim)
        self.pos_emb = nn.Parameter(torch.zeros(1, context_size, embed_dim))
        self.dropout = nn.Dropout(dropout)
        self.blocks = nn.ModuleList([GPTBlock(embed_dim, num_heads) for _ in range(num_layers)])
        self.ln_f = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, vocab_size)

    def forward(self, x):
        B, T = x.size()
        tok_emb = self.token_emb(x)
        x = tok_emb + self.pos_emb[:, :T, :]
        x = self.dropout(x)
        for block in self.blocks:
            x = block(x)
        x = self.ln_f(x)
        return self.head(x)
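
A minimal shape check for the model, assuming the values in config.py: a batch of token ids of shape (batch, seq) should come back as logits of shape (batch, seq, vocab).

# Smoke-test sketch: run a dummy batch through an untrained GPT.
import torch
from config import VOCAB_SIZE, CONTEXT_SIZE, EMBED_DIM, NUM_HEADS, NUM_LAYERS
from models.gpt import GPT

model = GPT(VOCAB_SIZE, CONTEXT_SIZE, EMBED_DIM, NUM_HEADS, NUM_LAYERS)
x = torch.randint(0, VOCAB_SIZE, (2, CONTEXT_SIZE))   # dummy token ids
logits = model(x)
assert logits.shape == (2, CONTEXT_SIZE, VOCAB_SIZE)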

0
requirements.txt Normal file

95
talk_to_catlin.py Normal file

@ -0,0 +1,95 @@
# talk_to_catlin.py
import os
from datetime import datetime

import torch

from config import *
from tokenizers.word_tokenizer import WordTokenizer
from models.gpt import GPT

CHAT_LOG_PATH = "catlin_chatlog.txt"


def load_model(device):
    model = GPT(VOCAB_SIZE, CONTEXT_SIZE, EMBED_DIM, NUM_HEADS, NUM_LAYERS)
    model.load_state_dict(torch.load("catlin_model.pt", map_location=device))
    model.to(device)
    model.eval()
    return model


def load_tokenizer(path="catlin_tokenizer.pkl"):
    if not os.path.exists(path):
        print(f"[ERROR] Tokenizer file '{path}' not found. Please train first.")
        exit(1)
    return WordTokenizer.load(path)


def top_p_sampling(logits, p=0.9):
    # Nucleus sampling: keep the smallest set of tokens whose cumulative
    # probability reaches p, then sample from within that set.
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
    sorted_indices_to_keep = cumulative_probs <= p
    # Shift right so the token that crosses the threshold is kept too,
    # and always keep the single most likely token.
    sorted_indices_to_keep[1:] = sorted_indices_to_keep[:-1].clone()
    sorted_indices_to_keep[0] = True
    filtered_logits = sorted_logits[sorted_indices_to_keep]
    filtered_indices = sorted_indices[sorted_indices_to_keep]
    probs = torch.nn.functional.softmax(filtered_logits, dim=-1)
    next_token = filtered_indices[torch.multinomial(probs, 1)]
    return next_token.item()


def generate_response(model, tokenizer, prompt, device, max_tokens=50, temperature=1.0):
    tokens = tokenizer.encode(prompt)[-CONTEXT_SIZE:]
    input_ids = torch.tensor([tokens], dtype=torch.long).to(device)
    # Collect generated tokens separately: slicing input_ids by the prompt
    # length breaks as soon as the context window below starts sliding.
    generated = []
    for _ in range(max_tokens):
        with torch.no_grad():
            logits = model(input_ids)
        next_token_logits = logits[0, -1, :] / temperature
        next_token = top_p_sampling(next_token_logits, p=0.9)
        generated.append(next_token)
        input_ids = torch.cat([input_ids, torch.tensor([[next_token]], device=device)], dim=1)
        if input_ids.shape[1] > CONTEXT_SIZE:
            input_ids = input_ids[:, -CONTEXT_SIZE:]
        word = tokenizer.id_to_word.get(next_token, "")
        # Note: WordTokenizer's \w+ regex drops punctuation, so with the
        # current tokenizer this stop condition can never fire.
        if word in {".", "!", "?"}:
            break
    return tokenizer.decode(generated)


def log_chat(user_input, catlin_response):
    with open(CHAT_LOG_PATH, "a", encoding="utf-8") as f:
        f.write(f"[{datetime.now().isoformat()}] You: {user_input}\n")
        f.write(f"[{datetime.now().isoformat()}] Catlin: {catlin_response}\n\n")


def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = load_model(device)
    tokenizer = load_tokenizer()
    print("🤖 Catlin is online. Type 'exit' to end the conversation.\n")
    history = []
    while True:
        user_input = input("You: ")
        if user_input.strip().lower() == "exit":
            break
        history.append(f"You: {user_input}")
        memory_text = " ".join(history[-10:])  # memory window: last 10 lines
        catlin_response = generate_response(model, tokenizer, memory_text, device)
        print(f"Catlin: {catlin_response}\n")
        history.append(f"Catlin: {catlin_response}")
        log_chat(user_input, catlin_response)


if __name__ == "__main__":
    main()
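
To see the nucleus-sampling cutoff in action, here is a small standalone check of top_p_sampling on toy logits — a sketch that assumes importing talk_to_catlin succeeds (it pulls in torch and the local modules):

# With p=0.9 only the top three of these five tokens survive the cutoff,
# so the two low-probability tail tokens should essentially never be drawn.
import torch
from collections import Counter
from talk_to_catlin import top_p_sampling

logits = torch.tensor([4.0, 3.5, 3.0, -2.0, -2.0])
draws = Counter(top_p_sampling(logits, p=0.9) for _ in range(1000))
print(draws)   # expect counts only for indices 0, 1 and 2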

42
tokenizers/word_tokenizer.py Normal file

@ -0,0 +1,42 @@
# tokenizers/word_tokenizer.py
import re
import pickle
from collections import Counter


class WordTokenizer:
    def __init__(self, vocab_size=50000):
        self.vocab_size = vocab_size
        self.word_to_id = {"<PAD>": 0, "<UNK>": 1}
        self.id_to_word = {0: "<PAD>", 1: "<UNK>"}

    def fit(self, texts):
        # Two slots are reserved for <PAD> and <UNK>.
        words = re.findall(r"\b\w+\b", texts.lower())
        freq = Counter(words).most_common(self.vocab_size - 2)
        for idx, (word, _) in enumerate(freq, start=2):
            self.word_to_id[word] = idx
            self.id_to_word[idx] = word

    def encode(self, text):
        return [self.word_to_id.get(word, 1) for word in re.findall(r"\b\w+\b", text.lower())]

    def decode(self, tokens):
        return " ".join([self.id_to_word.get(token, "<UNK>") for token in tokens])

    def save(self, path):
        with open(path, "wb") as f:
            pickle.dump({
                "vocab_size": self.vocab_size,
                "word_to_id": self.word_to_id,
                "id_to_word": self.id_to_word
            }, f)

    @classmethod
    def load(cls, path):
        with open(path, "rb") as f:
            data = pickle.load(f)
        obj = cls(data["vocab_size"])
        obj.word_to_id = data["word_to_id"]
        obj.id_to_word = data["id_to_word"]
        return obj
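
A round-trip sketch of the tokenizer: fit on a tiny made-up corpus, encode a sentence containing an out-of-vocabulary word, and decode it back.

# "flew" is not in the fitted vocabulary, so it encodes to <UNK> (id 1).
from tokenizers.word_tokenizer import WordTokenizer

tok = WordTokenizer(vocab_size=100)
tok.fit("the cat sat on the mat")
ids = tok.encode("the cat flew")
print(ids)               # [2, 3, 1] -- ids 0 and 1 are reserved
print(tok.decode(ids))   # "the cat <UNK>"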

38
tools/book_downloader.py Normal file

@ -0,0 +1,38 @@
# tools/book_downloader.py
import os

import requests

DATA_DIR = os.path.join("data", "books")
GUTENBERG_URL = "https://www.gutenberg.org/files/{id}/{id}-0.txt"


def download_book(gutenberg_id, title_hint="book"):
    os.makedirs(DATA_DIR, exist_ok=True)
    url = GUTENBERG_URL.format(id=gutenberg_id)
    try:
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            print(f"❌ Failed to download book ID {gutenberg_id}")
            return
        filename = os.path.join(DATA_DIR, f"{title_hint}_{gutenberg_id}.txt")
        with open(filename, "w", encoding="utf-8") as f:
            f.write(response.text)
        print(f"✅ Saved: {filename}")
    except Exception as e:
        print(f"❌ Error: {e}")


if __name__ == "__main__":
    books = [
        (1342, "PrideAndPrejudice"),  # Jane Austen
        (11, "AliceInWonderland"),    # Lewis Carroll
        (98, "AesopFables"),          # note: Gutenberg ID 98 is actually Dickens's "A Tale of Two Cities"
        (1661, "SherlockHolmes"),     # Doyle
        (76, "HuckFinn")              # Mark Twain
    ]
    for gutenberg_id, name in books:
        download_book(gutenberg_id, name)

21
tools/openwebtext_fetcher.py Normal file

@ -0,0 +1,21 @@
# tools/openwebtext_fetcher.py
import os

from datasets import load_dataset
from tqdm import tqdm

TARGET_DIR = "data/openwebtext"
os.makedirs(TARGET_DIR, exist_ok=True)


def fetch_subset(n=10000, split="train"):
    ds = load_dataset("stas/openwebtext-10k", split=split)
    # Note: despite the .jsonl suffix, each line is written as plain text
    # with newlines flattened to spaces -- which is how main.py reads it back.
    with open(os.path.join(TARGET_DIR, f"owt_{n}.jsonl"), "w", encoding="utf-8") as f:
        for i, item in tqdm(enumerate(ds), total=n, desc="Writing JSONL"):
            f.write(f"{item['text'].replace(chr(10), ' ')}\n")
            if i + 1 >= n:
                break


if __name__ == "__main__":
    # Asks for 20K examples, but stas/openwebtext-10k contains only 10K,
    # so the resulting owt_20000.jsonl holds 10,000 lines.
    fetch_subset(20000)

58
training/trainer.py Normal file

@ -0,0 +1,58 @@
# training/trainer.py
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm


class TextDataset(Dataset):
    def __init__(self, tokens, context_size):
        self.tokens = tokens
        self.context_size = context_size

    def __len__(self):
        return len(self.tokens) - self.context_size

    def __getitem__(self, idx):
        # Next-token prediction: y is x shifted one position to the right.
        x = torch.tensor(self.tokens[idx:idx + self.context_size], dtype=torch.long)
        y = torch.tensor(self.tokens[idx + 1:idx + self.context_size + 1], dtype=torch.long)
        return x, y


def train(model, dataset, device, lr, batch_size, epochs=1):
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    if len(loader) == 0:
        print("❌ No data to train on. Check your token count or dataset.")
        return

    optimizer = optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    model.to(device)
    model.train()

    # Debugging aid for tracking down NaNs; it slows training considerably,
    # so disable it once training is stable.
    torch.autograd.set_detect_anomaly(True)

    for epoch in range(epochs):
        total_loss = 0.0
        progress = tqdm(loader, desc=f"Epoch {epoch+1}/{epochs}", unit="batch", dynamic_ncols=True)
        for x, y in progress:
            x, y = x.to(device), y.to(device)
            logits = model(x)
            loss = loss_fn(logits.view(-1, logits.size(-1)), y.view(-1))
            if torch.isnan(loss):
                print("❌ Loss is NaN! Aborting training.")
                exit(1)
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()
            total_loss += loss.item()
            progress.set_postfix(loss=loss.item())
        print(f"[Epoch {epoch+1}] Avg Loss: {total_loss / len(loader):.4f}")