diff --git a/Dockerfile b/Dockerfile index 5f29ff3..d0e3b9f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -31,7 +31,7 @@ FROM gcr.io/distroless/base-debian12:nonroot WORKDIR /app COPY --from=build /out/greencoast-shard /app/greencoast-shard COPY configs/shard.sample.yaml /app/shard.yaml -COPY client /app/client +COPY client/ /opt/greencoast/client/ VOLUME ["/var/lib/greencoast"] EXPOSE 8080 8081 8443 9443 USER nonroot:nonroot diff --git a/client/app.js b/client/app.js index 1d60f68..6b8274e 100644 --- a/client/app.js +++ b/client/app.js @@ -2,18 +2,15 @@ import { encryptString, decryptToString, toBlob } from "./crypto.js"; // ---- Helpers ---- function defaultApiBase() { - // 1) URL query override: …/index.html?api=https://api.domain try { const qs = new URLSearchParams(window.location.search); const qApi = qs.get("api"); if (qApi) return qApi.replace(/\/+$/, ""); } catch {} - // 2) Meta override in index.html: const m = document.querySelector('meta[name="gc-api-base"]'); if (m && m.content) return m.content.replace(/\/+$/, ""); - // 3) Heuristic from frontend origin try { const u = new URL(window.location.href); const proto = u.protocol; @@ -33,6 +30,8 @@ function defaultApiBase() { } } +const LOCAL_TZ = Intl.DateTimeFormat().resolvedOptions().timeZone || "UTC"; + // ---- DOM refs ---- const els = { shardUrl: document.getElementById("shardUrl"), @@ -47,13 +46,12 @@ const els = { publishStatus: document.getElementById("publishStatus"), posts: document.getElementById("posts"), discordStart: document.getElementById("discordStart"), + shareTZ: document.getElementById("shareTZ"), }; // ---- Config + state ---- const LS_KEY = "gc_client_config_v1"; const POSTS_KEY = "gc_posts_index_v1"; - -// IMPORTANT: define before sse() is ever called let sseCtrl = null; // ---- Boot ---- @@ -63,20 +61,18 @@ checkHealth(); syncIndex(); sse(); - -els.saveConn.onclick = async () => { - const c = { url: norm(els.shardUrl.value), bearer: els.bearer.value.trim(), passphrase: els.passphrase.value }; - saveConfig(c); await checkHealth(); await syncIndex(); sse(true); -}; - -els.publish.onclick = publish; -els.discordStart.onclick = discordStart; - +// ---- Storage helpers ---- function loadConfig(){ try { return JSON.parse(localStorage.getItem(LS_KEY)) ?? {}; } catch { return {}; } } function saveConfig(c){ localStorage.setItem(LS_KEY, JSON.stringify(c)); Object.assign(cfg, c); } function getPosts(){ try { return JSON.parse(localStorage.getItem(POSTS_KEY)) ?? []; } catch { return []; } } function setPosts(v){ localStorage.setItem(POSTS_KEY, JSON.stringify(v)); renderPosts(); } function norm(u){ return (u||"").replace(/\/+$/,""); } +function fmtWhen(ts, tz) { + try { + return new Intl.DateTimeFormat(undefined, { dateStyle:"medium", timeStyle:"short", timeZone: tz }).format(new Date(ts)); + } catch { return ts; } +} + function applyConfig() { if (!cfg.url) { const detected = defaultApiBase(); @@ -88,11 +84,23 @@ function applyConfig() { els.passphrase.value = cfg.passphrase ?? ""; } +els.saveConn.onclick = async () => { + const c = { url: norm(els.shardUrl.value), bearer: els.bearer.value.trim(), passphrase: els.passphrase.value }; + saveConfig(c); await checkHealth(); await syncIndex(); sse(true); +}; + +els.publish.onclick = publish; +els.discordStart.onclick = discordStart; async function checkHealth() { - if (!cfg.url) return; els.health.textContent = "Checking…"; - try { const r = await fetch(cfg.url + "/healthz"); els.health.textContent = r.ok ? 
"Connected ✔" : `Error: ${r.status}`; } - catch { els.health.textContent = "Not reachable"; } + if (!cfg.url) { els.health.textContent = "No API base set"; return; } + els.health.textContent = "Checking…"; + try { + const r = await fetch(cfg.url + "/healthz", { mode:"cors" }); + els.health.textContent = r.ok ? "Connected ✔" : `Error: ${r.status}`; + } catch (e) { + els.health.textContent = "Not reachable"; + } } async function publish() { @@ -104,15 +112,19 @@ async function publish() { if (!cfg.passphrase) return msg("Set a passphrase for private posts.", true); const payload = await encryptString(JSON.stringify({ title, body }), cfg.passphrase); blob = toBlob(payload); enc=true; - } else { blob = toBlob(JSON.stringify({ title, body })); } + } else { + blob = toBlob(JSON.stringify({ title, body })); + } const headers = { "Content-Type":"application/octet-stream" }; if (cfg.bearer) headers["Authorization"] = "Bearer " + cfg.bearer; if (enc) headers["X-GC-Private"] = "1"; + if (els.shareTZ && els.shareTZ.checked && LOCAL_TZ) headers["X-GC-TZ"] = LOCAL_TZ; // NEW + const r = await fetch(cfg.url + "/v1/object", { method:"PUT", headers, body: blob }); if (!r.ok) throw new Error(await r.text()); const j = await r.json(); const posts = getPosts(); - posts.unshift({ hash:j.hash, title: title || "(untitled)", bytes:j.bytes, ts:j.stored_at, enc }); + posts.unshift({ hash:j.hash, title: title || "(untitled)", bytes:j.bytes, ts:j.stored_at, enc, creator_tz: j.creator_tz || "" }); setPosts(posts); els.body.value = ""; msg(`Published ${enc?"private":"public"} post. Hash: ${j.hash}`); } catch(e){ msg("Publish failed: " + (e?.message||e), true); } @@ -127,13 +139,13 @@ async function syncIndex() { const r = await fetch(cfg.url + "/v1/index", { headers }); if (!r.ok) throw new Error("index fetch failed"); const entries = await r.json(); - setPosts(entries.map(e => ({ hash:e.hash, title:"(title unknown — fetch)", bytes:e.bytes, ts:e.stored_at, enc:e.private }))); + setPosts(entries.map(e => ({ hash:e.hash, title:"(title unknown — fetch)", bytes:e.bytes, ts:e.stored_at, enc:e.private, creator_tz: e.creator_tz || "" }))); } catch(e){ console.warn("index sync failed", e); } } -function sse(){ +function sse(forceRestart=false){ if (!cfg.url) return; - if (sseCtrl) { sseCtrl.abort(); sseCtrl = undefined; } + if (sseCtrl) { sseCtrl.abort(); sseCtrl = null; } sseCtrl = new AbortController(); const url = cfg.url + "/v1/index/stream"; const headers = {}; if (cfg.bearer) headers["Authorization"] = "Bearer " + cfg.bearer; @@ -154,7 +166,7 @@ function sse(){ const e = ev.data; const posts = getPosts(); if (!posts.find(p => p.hash === e.hash)) { - posts.unshift({ hash:e.hash, title:"(title unknown — fetch)", bytes:e.bytes, ts:e.stored_at, enc:e.private }); + posts.unshift({ hash:e.hash, title:"(title unknown — fetch)", bytes:e.bytes, ts:e.stored_at, enc:e.private, creator_tz: e.creator_tz || "" }); setPosts(posts); } } else if (ev.event === "delete") { @@ -204,18 +216,15 @@ async function delServer(p) { } async function discordStart() { - // Last-resort auto-fill if user didn’t hit Save if (!cfg.url) { const derived = defaultApiBase(); if (derived) { - cfg.url = derived; - try { localStorage.setItem(LS_KEY, JSON.stringify(cfg)); } catch {} + cfg.url = derived; try { localStorage.setItem(LS_KEY, JSON.stringify(cfg)); } catch {} els.shardUrl.value = derived; } } if (!cfg.url) { alert("Set shard URL first."); return; } - - const r = await fetch(cfg.url + "/v1/auth/discord/start", { headers: { "X-GC-3P-Assent": "1" }}); + 
const r = await fetch(cfg.url + "/v1/auth/discord/start", { headers: { "X-GC-3P-Assent":"1" }}); if (!r.ok) { alert("Discord SSO not available"); return; } const j = await r.json(); location.href = j.url; @@ -224,10 +233,17 @@ async function discordStart() { function renderPosts() { const posts = getPosts(); els.posts.innerHTML = ""; for (const p of posts) { + const localStr = fmtWhen(p.ts, LOCAL_TZ) + ` (${LOCAL_TZ})`; + let creatorStr = ""; + if (p.creator_tz && p.creator_tz !== LOCAL_TZ) { + creatorStr = ` · creator: ${fmtWhen(p.ts, p.creator_tz)} (${p.creator_tz})`; + } const div = document.createElement("div"); div.className = "post"; const badge = p.enc ? `private` : `public`; div.innerHTML = ` -
${p.hash.slice(0,10)}… · ${p.bytes} bytes · ${p.ts} ${badge}
+
+ ${p.hash.slice(0,10)}… · ${p.bytes} bytes · ${localStr}${creatorStr} ${badge} +
diff --git a/client/index.html b/client/index.html
index e35f7d1..e34ec37 100644
--- a/client/index.html
+++ b/client/index.html
@@ -4,9 +4,8 @@
[hunk body lost — the <head> markup was stripped during extraction; only the title text "GreenCoast — Client" and the -/+ markers (3 lines removed, 2 added) survive]
@@ -15,13 +14,9 @@
[hunk body lost — Connect-form markup stripped during extraction; only the visible label "Connect" survives]
@@ -61,6 +56,9 @@
[hunk body lost — markup stripped; judging by client/app.js, this hunk adds the shareTZ checkbox read via getElementById("shareTZ")]
@@ -72,16 +70,5 @@
- diff --git a/cmd/shard/main.go b/cmd/shard/main.go index a2f1124..e6295b1 100644 --- a/cmd/shard/main.go +++ b/cmd/shard/main.go @@ -1,89 +1,154 @@ package main import ( - "flag" "log" - "path/filepath" + "net/http" + "os" + "strconv" + "time" "greencoast/internal/api" - "greencoast/internal/config" - "greencoast/internal/federation" "greencoast/internal/index" "greencoast/internal/storage" ) -func main() { - cfgPath := flag.String("config", "shard.yaml", "path to config") - flag.Parse() - - cfg, err := config.Load(*cfgPath) - if err != nil { - log.Fatalf("config error: %v", err) +func getenvBool(key string, def bool) bool { + v := os.Getenv(key) + if v == "" { + return def } - - store, err := storage.NewFSStore(cfg.Storage.Path, cfg.Storage.MaxObjectKB) + b, err := strconv.ParseBool(v) if err != nil { - log.Fatalf("storage error: %v", err) + return def } + return b +} - dataRoot := filepath.Dir(cfg.Storage.Path) - idx := index.New(dataRoot) +func staticHeaders(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Same security posture as API + w.Header().Set("Referrer-Policy", "no-referrer") + w.Header().Set("Cross-Origin-Opener-Policy", "same-origin") + w.Header().Set("Cross-Origin-Resource-Policy", "same-site") + w.Header().Set("Permissions-Policy", "camera=(), microphone=(), geolocation=(), interest-cohort=(), browsing-topics=()") + w.Header().Set("X-Frame-Options", "DENY") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.Header().Set("Strict-Transport-Security", "max-age=15552000; includeSubDomains; preload") - srv := api.New( - store, idx, - cfg.Privacy.RetainTimestamps == "coarse", - cfg.Security.ZeroTrust, - api.AuthProviders{ - SigningSecretHex: cfg.Auth.SigningSecret, - Discord: api.DiscordProvider{ - Enabled: cfg.Auth.SSO.Discord.Enabled, - ClientID: cfg.Auth.SSO.Discord.ClientID, - ClientSecret: cfg.Auth.SSO.Discord.ClientSecret, - RedirectURI: cfg.Auth.SSO.Discord.RedirectURI, - }, - GoogleEnabled: cfg.Auth.SSO.Google.Enabled, - FacebookEnabled: cfg.Auth.SSO.Facebook.Enabled, - WebAuthnEnabled: cfg.Auth.TwoFactor.WebAuthnEnabled, - TOTPEnabled: cfg.Auth.TwoFactor.TOTPEnabled, - }, - ) - - // Optional: also mount static under API mux (subpath) if you later want that. 
- // srv.MountStatic(cfg.UI.Path, "/app") - - // Start federation mTLS (if enabled) - if cfg.Federation.MTLSEnable { - tlsCfg, err := federation.ServerTLSConfig( - cfg.Federation.CertFile, - cfg.Federation.KeyFile, - cfg.Federation.ClientCAFile, - ) - if err != nil { - log.Fatalf("federation tls config error: %v", err) + // Basic CORS for client assets + w.Header().Set("Access-Control-Allow-Origin", "*") + if r.Method == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Methods", "GET, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Content-Type") + w.WriteHeader(http.StatusNoContent) + return } - go func() { - if err := srv.ListenMTLS(cfg.Federation.Listen, tlsCfg); err != nil { - log.Fatalf("federation mTLS listener error: %v", err) - } - }() + next.ServeHTTP(w, r) + }) +} + +func main() { + // ---- Config via env ---- + httpAddr := os.Getenv("GC_HTTP_ADDR") + if httpAddr == "" { + httpAddr = ":9080" // API } - // Start FRONTEND listener (separate port) if enabled - if cfg.UI.Enable && cfg.UI.FrontendHTTP != "" { - go func() { - if err := srv.ListenFrontendHTTP(cfg.UI.FrontendHTTP, cfg.UI.Path, cfg.UI.BaseURL); err != nil { - log.Fatalf("frontend listener error: %v", err) - } - }() + // Optional TLS for API + httpsAddr := os.Getenv("GC_HTTPS_ADDR") // leave empty for HTTP + certFile := os.Getenv("GC_TLS_CERT") + keyFile := os.Getenv("GC_TLS_KEY") + + dataDir := os.Getenv("GC_DATA_DIR") + if dataDir == "" { + dataDir = "/var/lib/greencoast" } - // Choose ONE foreground listener for API: HTTPS if enabled, else HTTP. - if cfg.TLS.Enable && cfg.Listen.HTTPS != "" { - log.Fatal(srv.ListenHTTPS(cfg.Listen.HTTPS, cfg.TLS.CertFile, cfg.TLS.KeyFile)) + // Static dir + port (frontend) + staticDir := os.Getenv("GC_STATIC_DIR") + if staticDir == "" { + staticDir = "/opt/greencoast/client" + } + staticAddr := os.Getenv("GC_STATIC_ADDR") + if staticAddr == "" { + staticAddr = ":9082" + } + + coarseTS := getenvBool("GC_COARSE_TS", false) + zeroTrust := getenvBool("GC_ZERO_TRUST", true) + signingSecretHex := os.Getenv("GC_SIGNING_SECRET_HEX") + + // Discord SSO + discID := os.Getenv("GC_DISCORD_CLIENT_ID") + discSecret := os.Getenv("GC_DISCORD_CLIENT_SECRET") + discRedirect := os.Getenv("GC_DISCORD_REDIRECT_URI") + + // ---- Storage ---- + store, err := storage.NewFS(dataDir) + if err != nil { + log.Fatalf("storage init: %v", err) + } + + // ---- Index ---- + ix := index.New() + + // Optional: auto-reindex from disk on boot + if w, ok := any(store).(interface { + Walk(func(hash string, size int64, mod time.Time) error) error + }); ok { + if err := w.Walk(func(hash string, size int64, mod time.Time) error { + return ix.Put(index.Entry{ + Hash: hash, + Bytes: size, + StoredAt: mod.UTC().Format(time.RFC3339Nano), + Private: false, + }) + }); err != nil { + log.Printf("reindex on boot: %v", err) + } + } + + // ---- Auth/Providers ---- + ap := api.AuthProviders{ + SigningSecretHex: signingSecretHex, + Discord: api.DiscordProvider{ + Enabled: discID != "" && discSecret != "" && discRedirect != "", + ClientID: discID, + ClientSecret: discSecret, + RedirectURI: discRedirect, + }, + } + + // ---- API server (9080/HTTPS optional) ---- + srv := api.New(store, ix, coarseTS, zeroTrust, ap) + + // Serve the static client in a goroutine on 9082 + go func() { + if st, err := os.Stat(staticDir); err != nil || !st.IsDir() { + log.Printf("WARN: GC_STATIC_DIR %q not found or not a dir; client may 404", staticDir) + } + mux := http.NewServeMux() + mux.Handle("/", http.FileServer(http.Dir(staticDir))) + 
log.Printf("static listening on %s (dir=%s)", staticAddr, staticDir) + if err := http.ListenAndServe(staticAddr, staticHeaders(mux)); err != nil { + log.Fatalf("static server: %v", err) + } + }() + + // Prefer HTTPS if configured + if httpsAddr != "" && certFile != "" && keyFile != "" { + log.Printf("starting HTTPS API on %s", httpsAddr) + if err := srv.ListenHTTPS(httpsAddr, certFile, keyFile); err != nil { + log.Fatal(err) + } return } - if cfg.Listen.HTTP == "" { - log.Fatal("no API listeners configured (set listen.http or listen.https)") + + // Otherwise HTTP + log.Printf("starting HTTP API on %s", httpAddr) + if err := srv.ListenHTTP(httpAddr); err != nil { + log.Fatal(err) } - log.Fatal(srv.ListenHTTP(cfg.Listen.HTTP)) + + _ = time.Second } diff --git a/configs/shard.test.yaml b/configs/shard.test.yaml index 038b7c8..cdf0487 100644 --- a/configs/shard.test.yaml +++ b/configs/shard.test.yaml @@ -45,8 +45,8 @@ auth: sso: discord: enabled: true - client_id: "REPLACE" - client_secret: "REPLACE" + client_id: "1408292766319906946" + client_secret: "zJ6GnUUykHbMFbWsPPneNxNK-PtOXYg1" # must exactly match your Discord app's allowed redirect redirect_uri: "https://greencoast.fullmooncyberworks.com/auth-callback.html" google: diff --git a/docker-compose.test.yml b/docker-compose.test.yml index b9086e0..d63edd4 100644 --- a/docker-compose.test.yml +++ b/docker-compose.test.yml @@ -11,7 +11,7 @@ services: - "9080:9080" # API - "9082:9082" # Frontend environment: - - GC_DEV_ALLOW_UNAUTH=false + - GC_DEV_ALLOW_UNAUTH=true volumes: - ./testdata:/var/lib/greencoast - ./configs/shard.test.yaml:/app/shard.yaml:ro diff --git a/internal/api/http.go b/internal/api/http.go index 1c13536..16f6806 100644 --- a/internal/api/http.go +++ b/internal/api/http.go @@ -1,31 +1,44 @@ package api import ( + "bytes" + "context" "crypto/hmac" - "crypto/rand" "crypto/sha256" - "crypto/tls" + "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "io" "log" - "net" + "mime" "net/http" "net/url" "os" - "sort" - "strconv" + "path" "strings" "sync" "time" "greencoast/internal/index" - "greencoast/internal/storage" ) -// ----------- Auth Providers & config (SSO / 2FA stubs) ------------ +// BlobStore is the minimal storage interface the API needs. 
+type BlobStore interface { + Put(hash string, r io.Reader) error + Get(hash string) (io.ReadCloser, int64, error) + Delete(hash string) error +} + +// optional capability for stores that can enumerate blobs +type blobWalker interface { + Walk(func(hash string, size int64, mod time.Time) error) error +} + +// ----------------------------- +// Public wiring +// ----------------------------- type DiscordProvider struct { Enabled bool @@ -35,526 +48,675 @@ type DiscordProvider struct { } type AuthProviders struct { - SigningSecretHex string + SigningSecretHex string // HMAC secret in hex Discord DiscordProvider - GoogleEnabled bool // placeholder - FacebookEnabled bool // placeholder - WebAuthnEnabled bool // placeholder - TOTPEnabled bool // placeholder + + GoogleEnabled bool + FacebookEnabled bool + + WebAuthnEnabled bool + TOTPEnabled bool } -// ----------- SSE hub (live index) ------------ - -type sseEvent struct { - Event string `json:"event"` // "put" | "delete" - Data interface{} `json:"data"` -} - -type hub struct { - mu sync.Mutex - subs map[chan []byte]struct{} -} - -func newHub() *hub { return &hub{subs: make(map[chan []byte]struct{})} } - -func (h *hub) subscribe() (ch chan []byte, cancel func()) { - ch = make(chan []byte, 16) - h.mu.Lock() - h.subs[ch] = struct{}{} - h.mu.Unlock() - cancel = func() { - h.mu.Lock() - if _, ok := h.subs[ch]; ok { - delete(h.subs, ch) - close(ch) - } - h.mu.Unlock() - } - return ch, cancel -} -func (h *hub) broadcast(ev sseEvent) { - b, _ := json.Marshal(ev) - line := append([]byte("data: "), b...) - line = append(line, '\n', '\n') - h.mu.Lock() - for ch := range h.subs { - select { - case ch <- line: - default: - } - } - h.mu.Unlock() -} - -// ----------- Server ------------ - type Server struct { - mux *http.ServeMux - store *storage.FSStore - idx *index.Index + mux *http.ServeMux + + store BlobStore + idx *index.Index + coarseTS bool zeroTrust bool - signingSecret []byte - discord DiscordProvider + allowClientSignedTokens bool // accept self-signed tokens (no DB) + signingKey []byte - devAllow bool - devToken string + // dev flags (from env) + allowUnauth bool + devBearer string - live *hub + // SSE fanout (in-process) + sseMu sync.Mutex + sseSubs map[chan []byte]struct{} + sseClosed bool + + // SSO ephemeral state + stateMu sync.Mutex + states map[string]time.Time } -func New(store *storage.FSStore, idx *index.Index, coarseTimestamps bool, zeroTrust bool, auth AuthProviders) *Server { - devAllow := strings.ToLower(os.Getenv("GC_DEV_ALLOW_UNAUTH")) == "true" - devToken := os.Getenv("GC_DEV_BEARER") - if devToken == "" { - devToken = "dev-local-token" - } - - sec := make([]byte, 0) - if auth.SigningSecretHex != "" { - if b, err := hex.DecodeString(auth.SigningSecretHex); err == nil { - sec = b - } - } - +// New constructs the API server and registers routes. 
+func New(store BlobStore, idx *index.Index, coarseTS bool, zeroTrust bool, providers AuthProviders) *Server { + key, _ := hex.DecodeString(strings.TrimSpace(providers.SigningSecretHex)) s := &Server{ - mux: http.NewServeMux(), - store: store, - idx: idx, - coarseTS: coarseTimestamps, - zeroTrust: zeroTrust, - signingSecret: sec, - discord: auth.Discord, - devAllow: devAllow, - devToken: devToken, - live: newHub(), + mux: http.NewServeMux(), + store: store, + idx: idx, + coarseTS: coarseTS, + zeroTrust: zeroTrust, + allowClientSignedTokens: true, + signingKey: key, + allowUnauth: os.Getenv("GC_DEV_ALLOW_UNAUTH") == "true", + devBearer: os.Getenv("GC_DEV_BEARER"), + sseSubs: make(map[chan []byte]struct{}), + states: make(map[string]time.Time), } - s.routes() + + // MIME safety (minimal base images can be sparse) + _ = mime.AddExtensionType(".js", "application/javascript; charset=utf-8") + _ = mime.AddExtensionType(".css", "text/css; charset=utf-8") + _ = mime.AddExtensionType(".html", "text/html; charset=utf-8") + _ = mime.AddExtensionType(".map", "application/json; charset=utf-8") + + // Core + s.mux.HandleFunc("/healthz", s.handleHealthz) + + // Objects + s.mux.Handle("/v1/object", s.withCORS(http.HandlerFunc(s.handlePutObject))) + s.mux.Handle("/v1/object/", s.withCORS(http.HandlerFunc(s.handleObjectByHash))) + + // Index + SSE + s.mux.Handle("/v1/index", s.withCORS(http.HandlerFunc(s.handleIndex))) + s.mux.Handle("/v1/index/stream", s.withCORS(http.HandlerFunc(s.handleIndexSSE))) + + // GDPR+policy endpoint (minimal; no PII) + s.mux.Handle("/v1/gdpr/policy", s.withCORS(http.HandlerFunc(s.handleGDPRPolicy))) + + // Admin: reindex from disk if store supports Walk + s.mux.Handle("/v1/admin/reindex", s.withCORS(http.HandlerFunc(s.handleAdminReindex))) + + // Discord SSO + s.mux.Handle("/v1/auth/discord/start", s.withCORS(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s.handleDiscordStart(w, r, providers.Discord) + }))) + s.mux.Handle("/v1/auth/discord/callback", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s.handleDiscordCallback(w, r, providers.Discord) + })) + return s } -// ---------- helpers (privacy headers, CORS, util) ---------- - -func (s *Server) secureHeaders(w http.ResponseWriter) { - // Anti-fingerprinting posture - w.Header().Set("Referrer-Policy", "no-referrer") - w.Header().Set("Permissions-Policy", "camera=(), microphone=(), geolocation=(), interest-cohort=(), browsing-topics=()") - w.Header().Set("X-Content-Type-Options", "nosniff") - w.Header().Set("X-Frame-Options", "DENY") - w.Header().Set("Cross-Origin-Opener-Policy", "same-origin") - w.Header().Set("Cross-Origin-Resource-Policy", "same-site") - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, X-GC-Private, X-GC-3P-Assent") - w.Header().Set("Access-Control-Allow-Methods", "GET, PUT, DELETE, OPTIONS") - w.Header().Set("Cache-Control", "no-store") - w.Header().Set("Strict-Transport-Security", "max-age=15552000; includeSubDomains; preload") -} - -func (s *Server) with(w http.ResponseWriter, r *http.Request, handler func(http.ResponseWriter, *http.Request)) { - s.secureHeaders(w) - if r.Method == http.MethodOptions { - w.WriteHeader(http.StatusNoContent) - return - } - handler(w, r) -} - -func urlq(v string) string { return url.QueryEscape(v) } - -// Generic helper must be package-level (methods cannot have type parameters). 
-func ternary[T any](cond bool, a, b T) T { - if cond { - return a - } - return b -} - -func randHex(n int) string { - b := make([]byte, n) - if _, err := rand.Read(b); err != nil { - ts := time.Now().UnixNano() - for i := 0; i < n; i++ { - b[i] = byte(ts >> (8 * (i % 8))) - } - } - return hex.EncodeToString(b) -} - -// ---------- auth middleware ---------- - -func (s *Server) auth(next http.HandlerFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - s.secureHeaders(w) - if !s.zeroTrust { - next.ServeHTTP(w, r) - return - } - authz := r.Header.Get("Authorization") - - // Dev bypass if explicitly enabled - if s.devAllow { - if authz == "" || authz == "Bearer "+s.devToken { - next.ServeHTTP(w, r) - return - } - } - if !strings.HasPrefix(authz, "Bearer ") { - http.Error(w, "unauthorized", http.StatusUnauthorized) - return - } - if len(s.signingSecret) == 0 { - next.ServeHTTP(w, r) - return - } - token := strings.TrimPrefix(authz, "Bearer ") - if ok := s.verifyShardToken(token); !ok { - http.Error(w, "unauthorized", http.StatusUnauthorized) - return - } - next.ServeHTTP(w, r) - } -} - -// ---------- shard token (HMAC, short-lived) ---------- - -// Format: gc|prov|sub|expEpoch|hex(hmacSHA256(secret, prov+'|'+sub+'|'+exp)) -func (s *Server) signShardToken(provider, subject string, exp time.Time) (string, error) { - if len(s.signingSecret) == 0 { - return "", errors.New("signing disabled (missing auth.signing_secret)") - } - msg := provider + "|" + subject + "|" + fmt.Sprint(exp.Unix()) - mac := hmac.New(sha256.New, s.signingSecret) - _, _ = mac.Write([]byte(msg)) - sig := hex.EncodeToString(mac.Sum(nil)) - return "gc|" + msg + "|" + sig, nil -} - -func (s *Server) verifyShardToken(tok string) bool { - parts := strings.Split(tok, "|") - if len(parts) != 5 || parts[0] != "gc" { - return false - } - prov, sub, expStr, sig := parts[1], parts[2], parts[3], parts[4] - _ = prov - _ = sub - msg := prov + "|" + sub + "|" + expStr - mac := hmac.New(sha256.New, s.signingSecret) - _, _ = mac.Write([]byte(msg)) - want := hex.EncodeToString(mac.Sum(nil)) - if !hmac.Equal([]byte(want), []byte(sig)) { - return false - } - expUnix, err := strconv.ParseInt(expStr, 10, 64) - if err != nil { - return false - } - return time.Now().UTC().Unix() < expUnix -} - -// ---------- routes ---------- - -func (s *Server) routes() { - s.mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { - s.with(w, r, func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte("ok")) - }) - }) - - // PUT object (opaque). Client may flag privacy in index via X-GC-Private: 1. 
- s.mux.HandleFunc("/v1/object", s.auth(func(w http.ResponseWriter, r *http.Request) { - s.with(w, r, func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPut { - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) - return - } - isPrivate := strings.TrimSpace(r.Header.Get("X-GC-Private")) == "1" - hash, n, err := s.store.Put(r.Body) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - ts := s.nowCoarse() - _ = s.idx.AppendPut(index.Entry{ - Hash: hash, Bytes: n, StoredAt: s.parseRFC3339(ts), Private: isPrivate, - }) - s.live.broadcast(sseEvent{Event: "put", Data: map[string]any{ - "hash": hash, "bytes": n, "stored_at": ts, "private": isPrivate, - }}) - w.Header().Set("Content-Type", "application/json") - fmt.Fprintf(w, `{"ok":true,"hash":"%s","bytes":%d,"stored_at":"%s"}`, hash, n, ts) - }) - })) - - // GET/DELETE object by hash - s.mux.HandleFunc("/v1/object/", s.auth(func(w http.ResponseWriter, r *http.Request) { - s.with(w, r, func(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case http.MethodGet: - hash := strings.TrimPrefix(r.URL.Path, "/v1/object/") - p, err := s.store.Get(hash) - if err != nil { - http.NotFound(w, r) - return - } - f, err := os.Open(p) - if err != nil { - http.Error(w, "open error", http.StatusInternalServerError) - return - } - defer f.Close() - w.Header().Set("Content-Type", "application/octet-stream") - _, _ = io.Copy(w, f) - case http.MethodDelete: - hash := strings.TrimPrefix(r.URL.Path, "/v1/object/") - if err := s.store.Delete(hash); err != nil { - http.NotFound(w, r) - return - } - _ = s.idx.AppendDelete(hash) - s.live.broadcast(sseEvent{Event: "delete", Data: map[string]any{"hash": hash}}) - w.Header().Set("Content-Type", "application/json") - _, _ = w.Write([]byte(`{"ok":true,"deleted":true}`)) - default: - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) - } - }) - })) - - // Index snapshot - s.mux.HandleFunc("/v1/index", s.auth(func(w http.ResponseWriter, r *http.Request) { - s.with(w, r, func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) - return - } - entries, err := s.idx.Snapshot() - if err != nil { - http.Error(w, err.Error(), 500) - return - } - sort.Slice(entries, func(i, j int) bool { return entries[i].StoredAt.After(entries[j].StoredAt) }) - w.Header().Set("Content-Type", "application/json") - _ = json.NewEncoder(w).Encode(entries) - }) - })) - - // Index live (SSE) - s.mux.HandleFunc("/v1/index/stream", s.auth(func(w http.ResponseWriter, r *http.Request) { - s.secureHeaders(w) - flusher, ok := w.(http.Flusher) - if !ok { - http.Error(w, "stream unsupported", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "text/event-stream") - w.Header().Set("Cache-Control", "no-store") - w.Header().Set("Connection", "keep-alive") - - ch, cancel := s.live.subscribe() - defer cancel() - - _, _ = w.Write([]byte(": ok\n\n")) - flusher.Flush() - ticker := time.NewTicker(25 * time.Second) - defer ticker.Stop() - notify := r.Context().Done() - for { - select { - case <-notify: - return - case <-ticker.C: - _, _ = w.Write([]byte(": ping\n\n")) - flusher.Flush() - case msg, ok := <-ch: - if !ok { - return - } - _, _ = w.Write(msg) - flusher.Flush() - } - } - })) - - // GDPR policy + Third-party disclaimer - s.mux.HandleFunc("/v1/gdpr/policy", func(w http.ResponseWriter, r *http.Request) { - s.with(w, r, func(w http.ResponseWriter, _ 
*http.Request) { - w.Header().Set("Content-Type", "application/json") - _ = json.NewEncoder(w).Encode(map[string]any{ - "collect_ip": false, - "collect_useragent": false, - "timestamp_policy": ternary(s.coarseTS, "coarse-hour", "exact"), - "stores_pii": false, - "erasure": "DELETE /v1/object/{hash}", - "portability": "GET /v1/object/{hash}", - "third_party_auth": "Using external SSO providers is optional. We cannot vouch for their security; proceed only if you trust the provider.", - }) - }) - }) - - // ---------- Discord SSO (first provider) ---------- - - // Start: returns authorization URL. Requires explicit assent. - s.mux.HandleFunc("/v1/auth/discord/start", func(w http.ResponseWriter, r *http.Request) { - s.with(w, r, func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - if !s.discord.Enabled { - http.Error(w, "discord SSO disabled", http.StatusNotImplemented) - return - } - if !assented(r) { - http.Error(w, "third-party assent required (set header X-GC-3P-Assent: 1)", http.StatusPreconditionFailed) - return - } - state := randHex(24) - url := "https://discord.com/api/oauth2/authorize" + - "?response_type=code" + - "&client_id=" + urlq(s.discord.ClientID) + - "&scope=" + urlq("identify") + - "&redirect_uri=" + urlq(s.discord.RedirectURI) + - "&prompt=consent" + - "&state=" + urlq(state) - _ = json.NewEncoder(w).Encode(map[string]any{"url": url, "note": "We cannot vouch for external IdP security."}) - }) - }) - - // Callback: exchanges code for Discord access_token, fetches @me to get subject id - // then issues a short-lived shard token (HMAC). No data persisted. - s.mux.HandleFunc("/v1/auth/discord/callback", func(w http.ResponseWriter, r *http.Request) { - s.with(w, r, func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - if !s.discord.Enabled { - http.Error(w, "discord SSO disabled", http.StatusNotImplemented) - return - } - if !assented(r) { - http.Error(w, "third-party assent required (set header X-GC-3P-Assent: 1)", http.StatusPreconditionFailed) - return - } - code := r.URL.Query().Get("code") - if code == "" { - http.Error(w, "missing code", 400) - return - } - - form := "client_id=" + urlq(s.discord.ClientID) + - "&client_secret=" + urlq(s.discord.ClientSecret) + - "&grant_type=authorization_code" + - "&code=" + urlq(code) + - "&redirect_uri=" + urlq(s.discord.RedirectURI) - req, _ := http.NewRequest(http.MethodPost, "https://discord.com/api/oauth2/token", strings.NewReader(form)) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - resp, err := http.DefaultClient.Do(req) - if err != nil { - http.Error(w, "token exchange failed", 502) - return - } - defer resp.Body.Close() - var tok struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - } - if err := json.NewDecoder(resp.Body).Decode(&tok); err != nil || tok.AccessToken == "" { - http.Error(w, "invalid token response", 502) - return - } - - uReq, _ := http.NewRequest(http.MethodGet, "https://discord.com/api/users/@me", nil) - uReq.Header.Set("Authorization", tok.TokenType+" "+tok.AccessToken) - uResp, err := http.DefaultClient.Do(uReq) - if err != nil { - http.Error(w, "userinfo failed", 502) - return - } - defer uResp.Body.Close() - var me struct { - ID string `json:"id"` - } - if err := json.NewDecoder(uResp.Body).Decode(&me); err != nil || me.ID == "" { - http.Error(w, "userinfo parse failed", 502) - return - } - - exp := time.Now().UTC().Add(30 * time.Minute) - gcTok, 
err := s.signShardToken("discord", me.ID, exp) - if err != nil { - http.Error(w, err.Error(), 500) - return - } - - _ = json.NewEncoder(w).Encode(map[string]any{ - "ok": true, - "token": gcTok, - "expires_at": exp.Format(time.RFC3339), - "disclaimer": "This token is issued after authenticating with a third-party provider (Discord). We cannot vouch for third-party security.", - }) - }) - }) -} - -// ---------- misc helpers ---------- - -func assented(r *http.Request) bool { - if r.Header.Get("X-GC-3P-Assent") == "1" { - return true - } - if r.URL.Query().Get("assent") == "1" { - return true - } - return false -} - -func (s *Server) nowCoarse() string { - ts := time.Now().UTC() - if s.coarseTS { - ts = ts.Truncate(time.Hour) - } - return ts.Format(time.RFC3339) -} - -func (s *Server) parseRFC3339(v string) time.Time { - t, _ := time.Parse(time.RFC3339, v) - return t -} - -// ----- listeners ----- - +// ListenHTTP serves the API on addr. func (s *Server) ListenHTTP(addr string) error { log.Printf("http listening on %s", addr) server := &http.Server{ Addr: addr, - Handler: s.mux, + Handler: s.withCORS(s.mux), ReadHeaderTimeout: 5 * time.Second, } - ln, err := net.Listen("tcp", addr) - if err != nil { - return err - } - return server.Serve(ln) + return server.ListenAndServe() } +// ListenHTTPS serves TLS directly. func (s *Server) ListenHTTPS(addr, certFile, keyFile string) error { log.Printf("https listening on %s", addr) server := &http.Server{ Addr: addr, - Handler: s.mux, + Handler: s.withCORS(s.mux), ReadHeaderTimeout: 5 * time.Second, } return server.ListenAndServeTLS(certFile, keyFile) } -func (s *Server) ListenMTLS(addr string, tlsCfg *tls.Config) error { - log.Printf("federation mTLS listening on %s", addr) - server := &http.Server{ - Addr: addr, - Handler: s.mux, - ReadHeaderTimeout: 5 * time.Second, - TLSConfig: tlsCfg, +// ----------------------------- +// Middleware / headers +// ----------------------------- + +func (s *Server) secureHeaders(w http.ResponseWriter) { + // Privacy / security posture + w.Header().Set("Referrer-Policy", "no-referrer") + w.Header().Set("Cross-Origin-Opener-Policy", "same-origin") + w.Header().Set("Cross-Origin-Resource-Policy", "same-site") + w.Header().Set("Permissions-Policy", "camera=(), microphone=(), geolocation=(), interest-cohort=(), browsing-topics=()") + w.Header().Set("X-Frame-Options", "DENY") + w.Header().Set("X-Content-Type-Options", "nosniff") + // HSTS (harmless over HTTP; browsers only enforce under HTTPS) + w.Header().Set("Strict-Transport-Security", "max-age=15552000; includeSubDomains; preload") +} + +func (s *Server) withCORS(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s.secureHeaders(w) + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Methods", "GET, PUT, DELETE, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, X-GC-Private, X-GC-3P-Assent, X-GC-TZ") + + if r.Method == http.MethodOptions { + w.WriteHeader(http.StatusNoContent) + return + } + next.ServeHTTP(w, r) + }) +} + +// ----------------------------- +// Health & policy +// ----------------------------- + +func (s *Server) handleHealthz(w http.ResponseWriter, r *http.Request) { + s.secureHeaders(w) + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + io.WriteString(w, "ok") +} + +func (s *Server) handleGDPRPolicy(w http.ResponseWriter, r *http.Request) { + s.secureHeaders(w) + w.Header().Set("Content-Type", 
"application/json; charset=utf-8") + type policy struct { + StoresPII bool `json:"stores_pii"` + CollectIP bool `json:"collect_ip"` + CollectUA bool `json:"collect_user_agent"` + Timestamps string `json:"timestamps"` + ZeroTrust bool `json:"zero_trust"` } - ln, err := tls.Listen("tcp", addr, tlsCfg) + resp := policy{ + StoresPII: false, + CollectIP: false, + CollectUA: false, + Timestamps: map[bool]string{true: "coarse_utc", false: "utc"}[s.coarseTS], + ZeroTrust: s.zeroTrust, + } + _ = json.NewEncoder(w).Encode(resp) +} + +// ----------------------------- +// Auth helpers +// ----------------------------- + +func (s *Server) requireAuth(w http.ResponseWriter, r *http.Request) bool { + // Developer bypass + if s.allowUnauth { + return true + } + // Optional dev bearer + if s.devBearer != "" { + h := r.Header.Get("Authorization") + if h == "Bearer "+s.devBearer { + return true + } + } + + // Accept self-signed HMAC tokens if configured + if s.allowClientSignedTokens && len(s.signingKey) > 0 { + h := r.Header.Get("Authorization") + if strings.HasPrefix(h, "Bearer ") { + tok := strings.TrimSpace(strings.TrimPrefix(h, "Bearer ")) + if s.verifyToken(tok) == nil { + return true + } + } + } + + http.Error(w, "unauthorized", http.StatusUnauthorized) + return false +} + +func (s *Server) makeToken(subject string, ttl time.Duration) (string, error) { + if len(s.signingKey) == 0 { + return "", errors.New("signing key not set") + } + type claims struct { + Sub string `json:"sub"` + Exp int64 `json:"exp"` + Iss string `json:"iss"` + } + c := claims{ + Sub: subject, + Exp: time.Now().Add(ttl).Unix(), + Iss: "greencoast", + } + body, _ := json.Marshal(c) + mac := hmac.New(sha256.New, s.signingKey) + mac.Write(body) + sig := mac.Sum(nil) + return "gc1." + base64.RawURLEncoding.EncodeToString(body) + "." 
+ base64.RawURLEncoding.EncodeToString(sig), nil +} + +func (s *Server) verifyToken(tok string) error { + if !strings.HasPrefix(tok, "gc1.") { + return errors.New("bad prefix") + } + parts := strings.Split(tok, ".") + if len(parts) != 3 { + return errors.New("bad parts") + } + body, err := base64.RawURLEncoding.DecodeString(parts[1]) if err != nil { return err } - return server.Serve(ln) + want, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + mac := hmac.New(sha256.New, s.signingKey) + mac.Write(body) + if !hmac.Equal(want, mac.Sum(nil)) { + return errors.New("bad sig") + } + var c struct { + Sub string `json:"sub"` + Exp int64 `json:"exp"` + } + if err := json.Unmarshal(body, &c); err != nil { + return err + } + if time.Now().Unix() > c.Exp { + return errors.New("expired") + } + return nil +} + +// ----------------------------- +// Objects & Index +// ----------------------------- + +func (s *Server) handlePutObject(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPut { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + if !s.requireAuth(w, r) { + return + } + + isPrivate := r.Header.Get("X-GC-Private") == "1" + creatorTZ := strings.TrimSpace(r.Header.Get("X-GC-TZ")) + if creatorTZ != "" && !isReasonableTZ(creatorTZ) { + creatorTZ = "" + } + + // Write to store; compute hash while streaming + var buf bytes.Buffer + n, err := io.Copy(&buf, r.Body) + if err != nil { + http.Error(w, "read error", 500) + return + } + sum := sha256.Sum256(buf.Bytes()) + hash := hex.EncodeToString(sum[:]) + + // Persist + if err := s.store.Put(hash, bytes.NewReader(buf.Bytes())); err != nil { + http.Error(w, "store error", 500) + return + } + + // Index + when := time.Now().UTC() + if s.coarseTS { + when = when.Truncate(time.Minute) + } + entry := index.Entry{ + Hash: hash, + Bytes: n, + StoredAt: when.Format(time.RFC3339Nano), + Private: isPrivate, + CreatorTZ: creatorTZ, + } + if err := s.idx.Put(entry); err != nil { + http.Error(w, "index error", 500) + return + } + s.sseBroadcast(map[string]interface{}{"event": "put", "data": entry}) + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + _ = json.NewEncoder(w).Encode(entry) +} + +func (s *Server) handleObjectByHash(w http.ResponseWriter, r *http.Request) { + // path: /v1/object/{hash} + parts := strings.Split(strings.TrimPrefix(r.URL.Path, "/v1/object/"), "/") + if len(parts) == 0 || parts[0] == "" { + http.NotFound(w, r) + return + } + hash := parts[0] + + switch r.Method { + case http.MethodGet: + if !s.requireAuth(w, r) { + return + } + rc, n, err := s.store.Get(hash) + if err != nil { + http.Error(w, "not found", http.StatusNotFound) + return + } + defer rc.Close() + w.Header().Set("Content-Type", "application/octet-stream") + if n > 0 { + w.Header().Set("Content-Length", fmt.Sprintf("%d", n)) + } + _, _ = io.Copy(w, rc) + + case http.MethodDelete: + if !s.requireAuth(w, r) { + return + } + if err := s.store.Delete(hash); err != nil { + http.Error(w, "delete error", 500) + return + } + // prune index if present + _ = s.idx.Delete(hash) + s.sseBroadcast(map[string]interface{}{"event": "delete", "data": map[string]string{"hash": hash}}) + w.WriteHeader(http.StatusNoContent) + + default: + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + } +} + +func (s *Server) handleIndex(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + 
if !s.requireAuth(w, r) { + return + } + items, err := s.idx.List() + if err != nil { + http.Error(w, "index error", 500) + return + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + _ = json.NewEncoder(w).Encode(items) +} + +// Simple in-process SSE fanout. +func (s *Server) handleIndexSSE(w http.ResponseWriter, r *http.Request) { + if !s.requireAuth(w, r) { + return + } + flusher, ok := w.(http.Flusher) + if !ok { + http.Error(w, "stream unsupported", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "text/event-stream; charset=utf-8") + w.Header().Set("Cache-Control", "no-store") + w.Header().Set("Connection", "keep-alive") + + ch := make(chan []byte, 8) + + // subscribe + s.sseMu.Lock() + if s.sseClosed { + s.sseMu.Unlock() + http.Error(w, "closed", http.StatusGone) + return + } + s.sseSubs[ch] = struct{}{} + s.sseMu.Unlock() + + // Send a hello/heartbeat + fmt.Fprintf(w, "data: %s\n\n", `{"event":"hello","data":"ok"}`) + flusher.Flush() + + // pump + ctx := r.Context() + t := time.NewTicker(25 * time.Second) + defer t.Stop() + + defer func() { + s.sseMu.Lock() + delete(s.sseSubs, ch) + s.sseMu.Unlock() + close(ch) + }() + + for { + select { + case <-ctx.Done(): + return + case b := <-ch: + w.Write(b) + w.Write([]byte("\n\n")) + flusher.Flush() + case <-t.C: + w.Write([]byte("data: {}\n\n")) + flusher.Flush() + } + } +} + +func (s *Server) sseBroadcast(v interface{}) { + b, _ := json.Marshal(v) + s.sseMu.Lock() + for ch := range s.sseSubs { + select { + case ch <- append([]byte("data: "), b...): + default: + } + } + s.sseMu.Unlock() +} + +// ----------------------------- +// Admin: reindex from disk +// ----------------------------- + +func (s *Server) handleAdminReindex(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + if !s.requireAuth(w, r) { + return + } + walker, ok := s.store.(blobWalker) + if !ok { + http.Error(w, "store does not support walk", http.StatusNotImplemented) + return + } + + count := 0 + err := walker.Walk(func(hash string, size int64, mod time.Time) error { + count++ + return s.idx.Put(index.Entry{ + Hash: hash, + Bytes: size, + StoredAt: mod.UTC().Format(time.RFC3339Nano), + Private: false, + }) + }) + if err != nil { + http.Error(w, "walk error: "+err.Error(), 500) + return + } + items, _ := s.idx.List() + w.Header().Set("Content-Type", "application/json; charset=utf-8") + _ = json.NewEncoder(w).Encode(map[string]any{ + "walked": count, + "indexed": len(items), + }) +} + +// ----------------------------- +// Discord SSO (server-side code flow) +// ----------------------------- + +func (s *Server) handleDiscordStart(w http.ResponseWriter, r *http.Request, cfg DiscordProvider) { + if !cfg.Enabled || cfg.ClientID == "" || cfg.ClientSecret == "" || cfg.RedirectURI == "" { + http.Error(w, "discord sso disabled", http.StatusBadRequest) + return + } + // Require explicit 3P assent (UI shows disclaimer) + if r.Header.Get("X-GC-3P-Assent") != "1" { + http.Error(w, "third-party provider not assented", http.StatusForbidden) + return + } + + state := s.newState(5 * time.Minute) + v := url.Values{} + v.Set("response_type", "code") + v.Set("client_id", cfg.ClientID) + v.Set("redirect_uri", cfg.RedirectURI) + v.Set("scope", "identify") + v.Set("prompt", "consent") + v.Set("state", state) + authURL := (&url.URL{ + Scheme: "https", + Host: "discord.com", + Path: "/api/oauth2/authorize", + RawQuery: v.Encode(), + }).String() 
+ + w.Header().Set("Content-Type", "application/json; charset=utf-8") + _ = json.NewEncoder(w).Encode(map[string]string{"url": authURL}) +} + +func (s *Server) handleDiscordCallback(w http.ResponseWriter, r *http.Request, cfg DiscordProvider) { + if !cfg.Enabled { + http.Error(w, "disabled", http.StatusBadRequest) + return + } + + q := r.URL.Query() + code := q.Get("code") + state := q.Get("state") + if code == "" || state == "" || !s.consumeState(state) { + http.Error(w, "invalid state/code", http.StatusBadRequest) + return + } + + // Exchange code for token + form := url.Values{} + form.Set("client_id", cfg.ClientID) + form.Set("client_secret", cfg.ClientSecret) + form.Set("grant_type", "authorization_code") + form.Set("code", code) + form.Set("redirect_uri", cfg.RedirectURI) + + req, _ := http.NewRequestWithContext(r.Context(), http.MethodPost, "https://discord.com/api/oauth2/token", strings.NewReader(form.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + res, err := http.DefaultClient.Do(req) + if err != nil { + http.Error(w, "token exchange failed", 502) + return + } + defer res.Body.Close() + if res.StatusCode/100 != 2 { + b, _ := io.ReadAll(res.Body) + http.Error(w, "discord token error: "+string(b), 502) + return + } + var tok struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + Scope string `json:"scope"` + ExpiresIn int64 `json:"expires_in"` + } + if err := json.NewDecoder(res.Body).Decode(&tok); err != nil { + http.Error(w, "token decode failed", 502) + return + } + + // Fetch user id (identify scope) + ureq, _ := http.NewRequestWithContext(r.Context(), http.MethodGet, "https://discord.com/api/users/@me", nil) + ureq.Header.Set("Authorization", tok.TokenType+" "+tok.AccessToken) + ures, err := http.DefaultClient.Do(ureq) + if err != nil { + http.Error(w, "user fetch failed", 502) + return + } + defer ures.Body.Close() + if ures.StatusCode/100 != 2 { + b, _ := io.ReadAll(ures.Body) + http.Error(w, "discord user error: "+string(b), 502) + return + } + var user struct { + ID string `json:"id"` + Username string `json:"username"` + } + if err := json.NewDecoder(ures.Body).Decode(&user); err != nil { + http.Error(w, "user decode failed", 502) + return + } + + // Mint self-signed bearer with Discord snowflake as subject + bearer, err := s.makeToken("discord:"+user.ID, time.Hour*8) + if err != nil { + http.Error(w, "signing error", 500) + return + } + + // Redirect to frontend callback with bearer in fragment (not query) + target := cfg.RedirectURI + u, _ := url.Parse(target) + u.Fragment = "bearer=" + url.QueryEscape(bearer) + "&next=/" + http.Redirect(w, r, u.String(), http.StatusFound) +} + +// simple in-memory state store +func (s *Server) newState(ttl time.Duration) string { + s.stateMu.Lock() + defer s.stateMu.Unlock() + b := make([]byte, 12) + now := time.Now().UnixNano() + copy(b, []byte(fmt.Sprintf("%x", now))) + val := base64.RawURLEncoding.EncodeToString(b) + s.states[val] = time.Now().Add(ttl) + return val +} + +func (s *Server) consumeState(v string) bool { + s.stateMu.Lock() + defer s.stateMu.Unlock() + exp, ok := s.states[v] + if !ok { + return false + } + delete(s.states, v) + return time.Now().Before(exp) +} + +// ----------------------------- +// Utilities +// ----------------------------- + +func isReasonableTZ(tz string) bool { + if !strings.Contains(tz, "/") || len(tz) > 64 { + return false + } + for _, r := range tz { + if !(r == '/' || r == '_' || r == '-' || (r >= 'A' && r <= 'Z') || (r >= 
'a' && r <= 'z')) { + return false + } + } + return true +} + +// ----------------------------- +// Optional: graceful shutdown +// ----------------------------- + +func (s *Server) Shutdown(ctx context.Context) error { + s.sseMu.Lock() + s.sseClosed = true + for ch := range s.sseSubs { + close(ch) + } + s.sseSubs = make(map[chan []byte]struct{}) + s.sseMu.Unlock() + return nil +} + +// ----------------------------- +// Helpers for static serving (optional use) +// ----------------------------- + +func fileExists(p string) bool { + st, err := os.Stat(p) + return err == nil && !st.IsDir() +} + +func joinClean(dir, p string) (string, bool) { + fp := path.Clean("/" + p) + full := path.Clean(dir + fp) + if !strings.HasPrefix(full, path.Clean(dir)) { + return "", false + } + return full, true } diff --git a/internal/index/index.go b/internal/index/index.go index f087722..f745380 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -1,123 +1,108 @@ package index import ( - "bufio" - "encoding/json" - "os" - "path/filepath" "sort" "sync" "time" ) -type opType string - -const ( - OpPut opType = "put" - OpDel opType = "del" -) - -type record struct { - Op opType `json:"op"` - Hash string `json:"hash"` - Bytes int64 `json:"bytes,omitempty"` - StoredAt time.Time `json:"stored_at,omitempty"` - Private bool `json:"private,omitempty"` -} - +// Entry is the API/JSON shape the server returns. +// StoredAt is RFC3339/RFC3339Nano in UTC. type Entry struct { - Hash string `json:"hash"` - Bytes int64 `json:"bytes"` - StoredAt time.Time `json:"stored_at"` - Private bool `json:"private"` + Hash string `json:"hash"` + Bytes int64 `json:"bytes"` + StoredAt string `json:"stored_at"` // RFC3339( Nano ) string + Private bool `json:"private"` + CreatorTZ string `json:"creator_tz,omitempty"` // IANA TZ like "America/New_York" } +// internal record with real time.Time for sorting/comparison. +type rec struct { + Hash string + Bytes int64 + StoredAt time.Time + Private bool + CreatorTZ string +} + +// Index is an in-memory index keyed by hash. type Index struct { - path string - mu sync.Mutex + mu sync.RWMutex + hash map[string]rec } -func New(baseDir string) *Index { - return &Index{path: filepath.Join(baseDir, "index.jsonl")} +// New creates an empty Index. +func New() *Index { + return &Index{ + hash: make(map[string]rec), + } } -func (i *Index) AppendPut(e Entry) error { - i.mu.Lock() - defer i.mu.Unlock() - return appendRec(i.path, record{ - Op: OpPut, - Hash: e.Hash, - Bytes: e.Bytes, - StoredAt: e.StoredAt, - Private: e.Private, +// Put inserts or replaces an entry. +// e.StoredAt may be RFC3339( Nano ); if empty/invalid we use time.Now().UTC(). +func (ix *Index) Put(e Entry) error { + ix.mu.Lock() + defer ix.mu.Unlock() + + t := parseWhen(e.StoredAt) + if t.IsZero() { + t = time.Now().UTC() + } + + ix.hash[e.Hash] = rec{ + Hash: e.Hash, + Bytes: e.Bytes, + StoredAt: t, + Private: e.Private, + CreatorTZ: e.CreatorTZ, + } + return nil +} + +// Delete removes an entry by hash (no error if absent). +func (ix *Index) Delete(hash string) error { + ix.mu.Lock() + defer ix.mu.Unlock() + delete(ix.hash, hash) + return nil +} + +// List returns entries sorted by StoredAt descending. 
+func (ix *Index) List() ([]Entry, error) { + ix.mu.RLock() + defer ix.mu.RUnlock() + + tmp := make([]rec, 0, len(ix.hash)) + for _, r := range ix.hash { + tmp = append(tmp, r) + } + sort.Slice(tmp, func(i, j int) bool { + return tmp[i].StoredAt.After(tmp[j].StoredAt) }) -} -func (i *Index) AppendDelete(hash string) error { - i.mu.Lock() - defer i.mu.Unlock() - return appendRec(i.path, record{Op: OpDel, Hash: hash}) -} - -func appendRec(path string, r record) error { - if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644) - if err != nil { - return err - } - defer f.Close() - enc := json.NewEncoder(f) - return enc.Encode(r) -} - -func (i *Index) Snapshot() ([]Entry, error) { - i.mu.Lock() - defer i.mu.Unlock() - - f, err := os.Open(i.path) - if os.IsNotExist(err) { - return nil, nil - } - if err != nil { - return nil, err - } - defer f.Close() - - sc := bufio.NewScanner(f) - sc.Buffer(make([]byte, 0, 64*1024), 4*1024*1024) - - type state struct { - Entry Entry - Deleted bool - } - m := make(map[string]state) - for sc.Scan() { - var rec record - if err := json.Unmarshal(sc.Bytes(), &rec); err != nil { - continue - } - switch rec.Op { - case OpPut: - m[rec.Hash] = state{Entry: Entry{ - Hash: rec.Hash, Bytes: rec.Bytes, StoredAt: rec.StoredAt, Private: rec.Private, - }} - case OpDel: - s := m[rec.Hash] - s.Deleted = true - m[rec.Hash] = s + out := make([]Entry, len(tmp)) + for i, r := range tmp { + out[i] = Entry{ + Hash: r.Hash, + Bytes: r.Bytes, + StoredAt: r.StoredAt.UTC().Format(time.RFC3339Nano), + Private: r.Private, + CreatorTZ: r.CreatorTZ, } } - if err := sc.Err(); err != nil { - return nil, err - } - var out []Entry - for _, s := range m { - if !s.Deleted && s.Entry.Hash != "" { - out = append(out, s.Entry) - } - } - sort.Slice(out, func(i, j int) bool { return out[i].StoredAt.After(out[j].StoredAt) }) return out, nil } + +// parseWhen tries RFC3339Nano then RFC3339; returns zero time on failure. +func parseWhen(s string) time.Time { + if s == "" { + return time.Time{} + } + if t, err := time.Parse(time.RFC3339Nano, s); err == nil { + return t + } + if t, err := time.Parse(time.RFC3339, s); err == nil { + return t + } + return time.Time{} +} diff --git a/internal/storage/fs.go b/internal/storage/fs.go new file mode 100644 index 0000000..064f3d7 --- /dev/null +++ b/internal/storage/fs.go @@ -0,0 +1,314 @@ +package storage + +import ( + "errors" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + "time" +) + +// FSStore stores blobs on the local filesystem under root/objects/... +// It supports both a flat layout (objects/) and a nested layout +// (objects// or objects//). +type FSStore struct { + root string + objects string +} + +// NewFS returns a file-backed blob store rooted at dir. +func NewFS(dir string) (*FSStore, error) { + if dir == "" { + return nil, errors.New("empty storage dir") + } + o := filepath.Join(dir, "objects") + if err := os.MkdirAll(o, 0o755); err != nil { + return nil, err + } + return &FSStore{root: dir, objects: o}, nil +} + +// pathFlat returns the flat path objects/. +func (s *FSStore) pathFlat(hash string) (string, error) { + if hash == "" { + return "", errors.New("empty hash") + } + return filepath.Join(s.objects, hash), nil +} + +// isHexHash does a quick check for lowercase hex of length 64. 
+func isHexHash(name string) bool { + if len(name) != 64 { + return false + } + for i := 0; i < 64; i++ { + c := name[i] + if !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')) { + return false + } + } + return true +} + +// findBlobPath tries common layouts before falling back to a recursive search. +// +// Supported fast paths (in order): +// 1. objects/ (flat file) +// 2. objects//blob|data|content (common names) +// 3. objects// (folder-per-post; pick that file) +// 4. objects// (two-level prefix sharding) +// +// If still not found, it walks recursively under objects/ to locate either: +// - a file named exactly , or +// - any file under a directory named (choose the most recently modified). +func (s *FSStore) findBlobPath(hash string) (string, error) { + if hash == "" { + return "", errors.New("empty hash") + } + + // 1) flat file + if p, _ := s.pathFlat(hash); fileExists(p) { + return p, nil + } + + // 2) objects//{blob,data,content} + dir := filepath.Join(s.objects, hash) + for _, cand := range []string{"blob", "data", "content"} { + p := filepath.Join(dir, cand) + if fileExists(p) { + return p, nil + } + } + + // 3) objects// + if st, err := os.Stat(dir); err == nil && st.IsDir() { + ents, err := os.ReadDir(dir) + if err == nil { + var picked string + var pickedMod time.Time + for _, de := range ents { + if de.IsDir() { + continue + } + p := filepath.Join(dir, de.Name()) + fi, err := os.Stat(p) + if err != nil || !fi.Mode().IsRegular() { + continue + } + // Pick newest file if multiple. + if picked == "" || fi.ModTime().After(pickedMod) { + picked = p + pickedMod = fi.ModTime() + } + } + if picked != "" { + return picked, nil + } + } + } + + // 4) two-level prefix: objects/aa/ + if len(hash) >= 2 { + p := filepath.Join(s.objects, hash[:2], hash) + if fileExists(p) { + return p, nil + } + } + + // Fallback: recursive search + var best string + var bestMod time.Time + + err := filepath.WalkDir(s.objects, func(p string, d fs.DirEntry, err error) error { + if err != nil { + // ignore per-entry errors + return nil + } + if d.IsDir() { + return nil + } + base := filepath.Base(p) + // Exact filename == hash + if base == hash { + best = p + // exact match is good enough; stop here + return fs.SkipDir + } + // If parent dir name is hash, consider it + parent := filepath.Base(filepath.Dir(p)) + if parent == hash { + if fi, err := os.Stat(p); err == nil && fi.Mode().IsRegular() { + if best == "" || fi.ModTime().After(bestMod) { + best = p + bestMod = fi.ModTime() + } + } + } + return nil + }) + if err == nil && best != "" { + return best, nil + } + + return "", os.ErrNotExist +} + +// fileExists true if path exists and is a regular file. +func fileExists(p string) bool { + fi, err := os.Stat(p) + return err == nil && fi.Mode().IsRegular() +} + +// Put writes/overwrites the blob at the content hash into the flat path. +// (Nested layouts remain supported for reads/reindex, but new writes are flat.) +func (s *FSStore) Put(hash string, r io.Reader) error { + p, err := s.pathFlat(hash) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(p), 0o755); err != nil { + return err + } + tmp := p + ".tmp" + f, err := os.Create(tmp) + if err != nil { + return err + } + _, werr := io.Copy(f, r) + cerr := f.Close() + if werr != nil { + _ = os.Remove(tmp) + return werr + } + if cerr != nil { + _ = os.Remove(tmp) + return cerr + } + return os.Rename(tmp, p) +} + +// Get opens the blob for reading and returns its size if known. 
+
+// Get opens the blob for reading and returns its size if known.
+func (s *FSStore) Get(hash string) (io.ReadCloser, int64, error) {
+	p, err := s.findBlobPath(hash)
+	if err != nil {
+		return nil, 0, err
+	}
+	f, err := os.Open(p)
+	if err != nil {
+		return nil, 0, err
+	}
+	st, err := f.Stat()
+	if err != nil {
+		return f, 0, nil
+	}
+	return f, st.Size(), nil
+}
+
+// Delete removes the blob. It is not an error if it doesn't exist.
+// It tries the flat path, then common nested paths, then falls back to
+// removing whatever findBlobPath locates.
+func (s *FSStore) Delete(hash string) error {
+	// Try flat.
+	if p, _ := s.pathFlat(hash); fileExists(p) {
+		if err := os.Remove(p); err != nil && !errors.Is(err, os.ErrNotExist) {
+			return err
+		}
+		return nil
+	}
+	// Try common nested names.
+	dir := filepath.Join(s.objects, hash)
+	for _, cand := range []string{"blob", "data", "content"} {
+		p := filepath.Join(dir, cand)
+		if fileExists(p) {
+			if err := os.Remove(p); err != nil && !errors.Is(err, os.ErrNotExist) {
+				return err
+			}
+			return nil
+		}
+	}
+	if len(hash) >= 2 {
+		p := filepath.Join(s.objects, hash[:2], hash)
+		if fileExists(p) {
+			if err := os.Remove(p); err != nil && !errors.Is(err, os.ErrNotExist) {
+				return err
+			}
+			return nil
+		}
+	}
+	// Fallback: whatever findBlobPath locates.
+	if p, err := s.findBlobPath(hash); err == nil {
+		if err := os.Remove(p); err != nil && !errors.Is(err, os.ErrNotExist) {
+			return err
+		}
+		return nil
+	}
+	// Nothing found: treat as success (idempotent delete).
+	return nil
+}
+
+// Walk calls fn(hash, size, modTime) for each blob file found.
+// It recognizes blobs when either:
+//   - the file name is a 64-char hex hash, or
+//   - the parent directory name is that hash (folder-per-post).
+//
+// If multiple files map to the same hash (e.g., a directory contains many
+// files), the newest file's size/modTime is reported.
+func (s *FSStore) Walk(fn func(hash string, size int64, mod time.Time) error) error {
+	type rec struct {
+		size int64
+		mod  time.Time
+	}
+
+	agg := make(map[string]rec)
+
+	err := filepath.WalkDir(s.objects, func(p string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return nil // skip unreadable entries
+		}
+		if d.IsDir() {
+			return nil
+		}
+		// Only consider regular files.
+		fi, err := os.Stat(p)
+		if err != nil || !fi.Mode().IsRegular() {
+			return nil
+		}
+		base := filepath.Base(p)
+
+		// Case 1: filename equals the (lowercase) hash. This also covers the
+		// prefix-sharded layouts, where the file is named after its hash.
+		if isHexHash(base) {
+			if r, ok := agg[base]; !ok || fi.ModTime().After(r.mod) {
+				agg[base] = rec{size: fi.Size(), mod: fi.ModTime()}
+			}
+			return nil
+		}
+
+		// Case 2: parent dir is the hash (folder-per-post).
+		parent := filepath.Base(filepath.Dir(p))
+		if isHexHash(parent) {
+			if r, ok := agg[parent]; !ok || fi.ModTime().After(r.mod) {
+				agg[parent] = rec{size: fi.Size(), mod: fi.ModTime()}
+			}
+			return nil
+		}
+
+		// Case 3: defensive: accept upper/mixed-case hex file names and
+		// normalize them to lowercase before aggregating.
+		if h := strings.ToLower(base); len(h) == 64 && isHexHash(h) {
+			if r, ok := agg[h]; !ok || fi.ModTime().After(r.mod) {
+				agg[h] = rec{size: fi.Size(), mod: fi.ModTime()}
+			}
+			return nil
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	for h, r := range agg {
+		if err := fn(h, r.size, r.mod); err != nil {
+			return err
+		}
+	}
+	return nil
+}
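Not included in the diff, but a sketch of how the layout fallback above could be exercised: Put writes one blob at the flat path, a second blob is planted by hand at the legacy aa/bb/<hash> path seen in testdata/objects, and Get is expected to resolve both. The test name and file placement are assumptions.

package storage

import (
	"bytes"
	"io"
	"os"
	"path/filepath"
	"strings"
	"testing"
)

func TestGetAcrossLayouts(t *testing.T) {
	dir := t.TempDir()
	s, err := NewFS(dir)
	if err != nil {
		t.Fatal(err)
	}

	// Flat layout via the store's own writer.
	flat := strings.Repeat("a", 64)
	if err := s.Put(flat, bytes.NewReader([]byte("flat blob"))); err != nil {
		t.Fatal(err)
	}

	// Legacy layout from the old FSStore: objects/<xx>/<yy>/<hash>.
	legacy := strings.Repeat("b", 64)
	legacyDir := filepath.Join(dir, "objects", legacy[:2], legacy[2:4])
	if err := os.MkdirAll(legacyDir, 0o755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(legacyDir, legacy), []byte("sharded blob"), 0o644); err != nil {
		t.Fatal(err)
	}

	for _, h := range []string{flat, legacy} {
		rc, _, err := s.Get(h)
		if err != nil {
			t.Fatalf("Get(%s): %v", h[:8], err)
		}
		if _, err := io.ReadAll(rc); err != nil {
			t.Fatal(err)
		}
		rc.Close()
	}
}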
diff --git a/internal/storage/fsstore.go b/internal/storage/fsstore.go
deleted file mode 100644
index c376086..0000000
--- a/internal/storage/fsstore.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package storage
-
-import (
-	"crypto/sha256"
-	"encoding/hex"
-	"errors"
-	"io"
-	"os"
-	"path/filepath"
-)
-
-type FSStore struct {
-	root       string
-	maxObjectB int64
-}
-
-func NewFSStore(root string, maxKB int) (*FSStore, error) {
-	if root == "" {
-		root = "./data/objects"
-	}
-	if err := os.MkdirAll(root, 0o755); err != nil {
-		return nil, err
-	}
-	return &FSStore{root: root, maxObjectB: int64(maxKB) * 1024}, nil
-}
-
-func (s *FSStore) Put(r io.Reader) (string, int64, error) {
-	h := sha256.New()
-	tmp := filepath.Join(s.root, ".tmp")
-	_ = os.MkdirAll(tmp, 0o755)
-	f, err := os.CreateTemp(tmp, "obj-*")
-	if err != nil {
-		return "", 0, err
-	}
-	defer f.Close()
-
-	var n int64
-	buf := make([]byte, 32*1024)
-	for {
-		m, er := r.Read(buf)
-		if m > 0 {
-			n += int64(m)
-			if s.maxObjectB > 0 && n > s.maxObjectB {
-				return "", 0, errors.New("object too large")
-			}
-			_, _ = h.Write(buf[:m])
-			if _, werr := f.Write(buf[:m]); werr != nil {
-				return "", 0, werr
-			}
-		}
-		if er == io.EOF {
-			break
-		}
-		if er != nil {
-			return "", 0, er
-		}
-	}
-	sum := hex.EncodeToString(h.Sum(nil))
-	dst := filepath.Join(s.root, sum[:2], sum[2:4], sum)
-	if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
-		return "", 0, err
-	}
-	if err := os.Rename(f.Name(), dst); err != nil {
-		return "", 0, err
-	}
-	return sum, n, nil
-}
-
-func (s *FSStore) pathFor(hash string) string {
-	return filepath.Join(s.root, hash[:2], hash[2:4], hash)
-}
-
-func (s *FSStore) Get(hash string) (string, error) {
-	if len(hash) < 4 {
-		return "", os.ErrNotExist
-	}
-	p := s.pathFor(hash)
-	if _, err := os.Stat(p); err != nil {
-		return "", err
-	}
-	return p, nil
-}
-
-func (s *FSStore) Delete(hash string) error {
-	if len(hash) < 4 {
-		return os.ErrNotExist
-	}
-	p := s.pathFor(hash)
-	if err := os.Remove(p); err != nil {
-		return err
-	}
-	_ = os.Remove(filepath.Dir(p))
-	_ = os.Remove(filepath.Dir(filepath.Dir(p)))
-	return nil
-}
diff --git a/testdata/index.jsonl b/testdata/index.jsonl
new file mode 100644
index 0000000..36e83d9
--- /dev/null
+++ b/testdata/index.jsonl
@@ -0,0 +1,4 @@
+{"op":"put","hash":"a008a13ade86edbd77f5c0fcfcf35bd295c93069be42fdbd46bc65b392ddf5fb","bytes":110,"stored_at":"2025-08-22T03:00:00Z"}
+{"op":"put","hash":"9628e2adcd7a5e820fbdbe075027ac0ad78ef1a7a501971c2048bc5e5436b891","bytes":105,"stored_at":"2025-08-22T03:00:00Z","private":true}
+{"op":"put","hash":"6a166437b9988bd11e911375f3ca1b4cd10b7db9a32812409c6d79a0753dd973","bytes":98,"stored_at":"2025-08-22T03:00:00Z"}
+{"op":"put","hash":"f452402fadb6608bd6f9b613a1d58234e2135f045ea29262574e3e4b1e5f7292","bytes":46,"stored_at":"2025-08-22T03:00:00Z"}
diff --git a/testdata/objects/5dcfd2e73a6c3a17b0efb103f1a3b891fc06b0189a0c5d5916435d9e5d74e963 b/testdata/objects/5dcfd2e73a6c3a17b0efb103f1a3b891fc06b0189a0c5d5916435d9e5d74e963
new file mode 100644
index 0000000..84a07ee
--- /dev/null
+++ b/testdata/objects/5dcfd2e73a6c3a17b0efb103f1a3b891fc06b0189a0c5d5916435d9e5d74e963
@@ -0,0 +1 @@
+{"title":"Timezone Publish","body":"You can now include your timezone on all of your posts. This is completely optional but lets others see when you posted"}
\ No newline at end of file
diff --git a/testdata/objects/6a/16/6a166437b9988bd11e911375f3ca1b4cd10b7db9a32812409c6d79a0753dd973 b/testdata/objects/6a/16/6a166437b9988bd11e911375f3ca1b4cd10b7db9a32812409c6d79a0753dd973
new file mode 100644
index 0000000..72276b2
--- /dev/null
+++ b/testdata/objects/6a/16/6a166437b9988bd11e911375f3ca1b4cd10b7db9a32812409c6d79a0753dd973
@@ -0,0 +1 @@
+{"title":"Yarn is Testing!","body":"Hello, my name is Yarn. And I like to test. Test test 1 2 3."}
\ No newline at end of file
diff --git a/testdata/objects/96/28/9628e2adcd7a5e820fbdbe075027ac0ad78ef1a7a501971c2048bc5e5436b891 b/testdata/objects/96/28/9628e2adcd7a5e820fbdbe075027ac0ad78ef1a7a501971c2048bc5e5436b891
new file mode 100644
index 0000000..5d4d193
--- /dev/null
+++ b/testdata/objects/96/28/9628e2adcd7a5e820fbdbe075027ac0ad78ef1a7a501971c2048bc5e5436b891
@@ -0,0 +1 @@
+d+V+%!ݚO2ޒ$)zFî)4O:z*Ыe*5)#V H!iS$edx]$t6۩H
\ No newline at end of file
diff --git a/testdata/objects/a0/08/a008a13ade86edbd77f5c0fcfcf35bd295c93069be42fdbd46bc65b392ddf5fb b/testdata/objects/a0/08/a008a13ade86edbd77f5c0fcfcf35bd295c93069be42fdbd46bc65b392ddf5fb
new file mode 100644
index 0000000..78ef8b5
--- /dev/null
+++ b/testdata/objects/a0/08/a008a13ade86edbd77f5c0fcfcf35bd295c93069be42fdbd46bc65b392ddf5fb
@@ -0,0 +1 @@
+{"title":"Public Test","body":"Hello Everyone,\n\nWelcome to GreenCoast, a BlueSky Replacement\n\nMystiatech"}
\ No newline at end of file
diff --git a/testdata/objects/f4/52/f452402fadb6608bd6f9b613a1d58234e2135f045ea29262574e3e4b1e5f7292 b/testdata/objects/f4/52/f452402fadb6608bd6f9b613a1d58234e2135f045ea29262574e3e4b1e5f7292
new file mode 100644
index 0000000..691634c
--- /dev/null
+++ b/testdata/objects/f4/52/f452402fadb6608bd6f9b613a1d58234e2135f045ea29262574e3e4b1e5f7292
@@ -0,0 +1 @@
+{"title":"Test post","body":"Does this work?"}
\ No newline at end of file
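A possible companion check, also not part of this diff: run Walk over the shipped testdata store and confirm it reports every object above, whether stored flat under objects/ or under the legacy two-level sharding. The "../../testdata" relative path assumes the storage package lives at internal/storage; adjust if the layout differs.

package storage

import (
	"testing"
	"time"
)

func TestWalkFindsTestdataObjects(t *testing.T) {
	s, err := NewFS("../../testdata")
	if err != nil {
		t.Fatal(err)
	}

	// Collect every hash Walk reports, regardless of layout.
	seen := map[string]int64{}
	err = s.Walk(func(hash string, size int64, mod time.Time) error {
		seen[hash] = size
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Hashes taken from testdata/index.jsonl plus the flat "Timezone Publish" object.
	want := []string{
		"a008a13ade86edbd77f5c0fcfcf35bd295c93069be42fdbd46bc65b392ddf5fb",
		"9628e2adcd7a5e820fbdbe075027ac0ad78ef1a7a501971c2048bc5e5436b891",
		"6a166437b9988bd11e911375f3ca1b4cd10b7db9a32812409c6d79a0753dd973",
		"f452402fadb6608bd6f9b613a1d58234e2135f045ea29262574e3e4b1e5f7292",
		"5dcfd2e73a6c3a17b0efb103f1a3b891fc06b0189a0c5d5916435d9e5d74e963",
	}
	for _, h := range want {
		if _, ok := seen[h]; !ok {
			t.Errorf("Walk did not report %s", h[:8])
		}
	}
}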