Updated the README

Added new security layers
This commit is contained in:
2025-08-22 12:39:51 -04:00
parent fb7428064f
commit 720c7e0b52
7 changed files with 695 additions and 552 deletions

View File

@@ -3,43 +3,41 @@ package api
import (
	"bytes"
	"context"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"math/big"
	"mime"
	"net/http"
	"net/url"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"greencoast/internal/auth"
	"greencoast/internal/index"
)
// BlobStore is the minimal content-addressed storage interface the API
// needs. Blobs are keyed by their hex-encoded SHA-256 hash (computed by
// the PUT handler before calling Put).
type BlobStore interface {
	// Put stores the blob read from r under the given hash key.
	Put(hash string, r io.Reader) error
	// Get opens the blob for reading and reports its size in bytes.
	Get(hash string) (io.ReadCloser, int64, error)
	// Delete removes the blob for hash.
	Delete(hash string) error
}
// blobWalker is an optional capability interface for stores that can
// enumerate their blobs; the admin reindex endpoint type-asserts for it.
type blobWalker interface {
	// Walk invokes fn for each stored blob with its hash, size, and
	// modification time.
	Walk(func(hash string, size int64, mod time.Time) error) error
}
// -----------------------------
// Public wiring
// -----------------------------
type DiscordProvider struct {
Enabled bool
ClientID string
@@ -48,14 +46,8 @@ type DiscordProvider struct {
}
type AuthProviders struct {
SigningSecretHex string // HMAC secret in hex
SigningSecretHex string
Discord DiscordProvider
GoogleEnabled bool
FacebookEnabled bool
WebAuthnEnabled bool
TOTPEnabled bool
}
type Server struct {
@@ -67,41 +59,58 @@ type Server struct {
coarseTS bool
zeroTrust bool
allowClientSignedTokens bool // accept self-signed tokens (no DB)
signingKey []byte
signingKey []byte
// dev flags (from env)
// dev/testing flags
allowUnauth bool
devBearer string
// SSE fanout (in-process)
// require proof-of-possession on every authd call
requirePoP bool
// SSE in-process
sseMu sync.Mutex
sseSubs map[chan []byte]struct{}
sseClosed bool
// SSO ephemeral state
// SSO state + PKCE verifier + device key binding
stateMu sync.Mutex
states map[string]time.Time
states map[string]stateItem
// Nonce challenges for key-based login
nonceMu sync.Mutex
nonceExpiry map[string]time.Time
// PoP replay cache
replayMu sync.Mutex
replays map[string]time.Time
}
// stateItem records one in-flight SSO login attempt, keyed in
// Server.states by the opaque OAuth "state" parameter.
type stateItem struct {
	Exp        time.Time // moment after which this state is no longer acceptable
	Verifier   string    // PKCE code_verifier, sent during the token exchange
	DeviceKey  string    // "p256:<b64raw>" or "ed25519:<b64raw>"; bound into the minted token as CNF
	ReturnNext string    // optional return path — not consumed in this view; confirm caller
}
// New constructs the API server and registers routes.
func New(store BlobStore, idx *index.Index, coarseTS bool, zeroTrust bool, providers AuthProviders) *Server {
key, _ := hex.DecodeString(strings.TrimSpace(providers.SigningSecretHex))
s := &Server{
mux: http.NewServeMux(),
store: store,
idx: idx,
coarseTS: coarseTS,
zeroTrust: zeroTrust,
allowClientSignedTokens: true,
signingKey: key,
allowUnauth: os.Getenv("GC_DEV_ALLOW_UNAUTH") == "true",
devBearer: os.Getenv("GC_DEV_BEARER"),
sseSubs: make(map[chan []byte]struct{}),
states: make(map[string]time.Time),
mux: http.NewServeMux(),
store: store,
idx: idx,
coarseTS: coarseTS,
zeroTrust: zeroTrust,
signingKey: key,
allowUnauth: os.Getenv("GC_DEV_ALLOW_UNAUTH") == "true",
devBearer: os.Getenv("GC_DEV_BEARER"),
requirePoP: strings.ToLower(os.Getenv("GC_REQUIRE_POP")) != "false", // default true
sseSubs: make(map[chan []byte]struct{}),
states: make(map[string]stateItem),
nonceExpiry: make(map[string]time.Time),
replays: make(map[string]time.Time),
}
// MIME safety (minimal base images can be sparse)
_ = mime.AddExtensionType(".js", "application/javascript; charset=utf-8")
_ = mime.AddExtensionType(".css", "text/css; charset=utf-8")
_ = mime.AddExtensionType(".html", "text/html; charset=utf-8")
@@ -110,19 +119,9 @@ func New(store BlobStore, idx *index.Index, coarseTS bool, zeroTrust bool, provi
// Core
s.mux.HandleFunc("/healthz", s.handleHealthz)
// Objects
s.mux.Handle("/v1/object", s.withCORS(http.HandlerFunc(s.handlePutObject)))
s.mux.Handle("/v1/object/", s.withCORS(http.HandlerFunc(s.handleObjectByHash)))
// Index + SSE
s.mux.Handle("/v1/index", s.withCORS(http.HandlerFunc(s.handleIndex)))
s.mux.Handle("/v1/index/stream", s.withCORS(http.HandlerFunc(s.handleIndexSSE)))
// GDPR+policy endpoint (minimal; no PII)
s.mux.Handle("/v1/gdpr/policy", s.withCORS(http.HandlerFunc(s.handleGDPRPolicy)))
// Admin: reindex from disk if store supports Walk
s.mux.Handle("/v1/admin/reindex", s.withCORS(http.HandlerFunc(s.handleAdminReindex)))
// Auth (public-key)
s.mux.Handle("/v1/auth/key/challenge", s.withCORS(http.HandlerFunc(s.handleKeyChallenge)))
s.mux.Handle("/v1/auth/key/verify", s.withCORS(http.HandlerFunc(s.handleKeyVerify)))
// Discord SSO
s.mux.Handle("/v1/auth/discord/start", s.withCORS(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -132,10 +131,22 @@ func New(store BlobStore, idx *index.Index, coarseTS bool, zeroTrust bool, provi
s.handleDiscordCallback(w, r, providers.Discord)
}))
// Objects
s.mux.Handle("/v1/object", s.withCORS(http.HandlerFunc(s.handlePutObject)))
s.mux.Handle("/v1/object/", s.withCORS(http.HandlerFunc(s.handleObjectByHash)))
// Index + SSE
s.mux.Handle("/v1/index", s.withCORS(http.HandlerFunc(s.handleIndex)))
s.mux.Handle("/v1/index/stream", s.withCORS(http.HandlerFunc(s.handleIndexSSE)))
// GDPR/policy
s.mux.Handle("/v1/gdpr/policy", s.withCORS(http.HandlerFunc(s.handleGDPRPolicy)))
// Admin: reindex
s.mux.Handle("/v1/admin/reindex", s.withCORS(http.HandlerFunc(s.handleAdminReindex)))
return s
}
// ListenHTTP serves the API on addr.
func (s *Server) ListenHTTP(addr string) error {
log.Printf("http listening on %s", addr)
server := &http.Server{
@@ -146,7 +157,6 @@ func (s *Server) ListenHTTP(addr string) error {
return server.ListenAndServe()
}
// ListenHTTPS serves TLS directly.
func (s *Server) ListenHTTPS(addr, certFile, keyFile string) error {
log.Printf("https listening on %s", addr)
server := &http.Server{
@@ -157,29 +167,23 @@ func (s *Server) ListenHTTPS(addr, certFile, keyFile string) error {
return server.ListenAndServeTLS(certFile, keyFile)
}
// -----------------------------
// Middleware / headers
// -----------------------------
// secureHeaders applies the baseline privacy/security response headers to
// every API response.
func (s *Server) secureHeaders(w http.ResponseWriter) {
	pairs := [...][2]string{
		{"Referrer-Policy", "no-referrer"},
		{"Cross-Origin-Opener-Policy", "same-origin"},
		{"Cross-Origin-Resource-Policy", "same-site"},
		{"Permissions-Policy", "camera=(), microphone=(), geolocation=(), interest-cohort=(), browsing-topics=()"},
		{"X-Frame-Options", "DENY"},
		{"X-Content-Type-Options", "nosniff"},
		// HSTS: browsers ignore it over plain HTTP, so always sending it is safe.
		{"Strict-Transport-Security", "max-age=15552000; includeSubDomains; preload"},
	}
	hdr := w.Header()
	for _, kv := range pairs {
		hdr.Set(kv[0], kv[1])
	}
}
func (s *Server) withCORS(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
s.secureHeaders(w)
// Strong CSP for static will be set in static server; API allows connect from client origin
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, PUT, DELETE, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, X-GC-Private, X-GC-3P-Assent, X-GC-TZ")
w.Header().Set("Access-Control-Allow-Methods", "GET, PUT, DELETE, OPTIONS, POST")
w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, X-GC-Private, X-GC-3P-Assent, X-GC-TZ, X-GC-Key, X-GC-TS, X-GC-Proof")
if r.Method == http.MethodOptions {
w.WriteHeader(http.StatusNoContent)
return
@@ -188,9 +192,7 @@ func (s *Server) withCORS(next http.Handler) http.Handler {
})
}
// -----------------------------
// Health & policy
// -----------------------------
// ---------- Health & policy ----------
func (s *Server) handleHealthz(w http.ResponseWriter, r *http.Request) {
s.secureHeaders(w)
@@ -202,119 +204,252 @@ func (s *Server) handleGDPRPolicy(w http.ResponseWriter, r *http.Request) {
s.secureHeaders(w)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
type policy struct {
StoresPII bool `json:"stores_pii"`
CollectIP bool `json:"collect_ip"`
CollectUA bool `json:"collect_user_agent"`
Timestamps string `json:"timestamps"`
ZeroTrust bool `json:"zero_trust"`
StoresPII bool `json:"stores_pii"`
CollectIP bool `json:"collect_ip"`
CollectUA bool `json:"collect_user_agent"`
Timestamps string `json:"timestamps"`
ZeroTrust bool `json:"zero_trust"`
Accounts string `json:"accounts"`
ProofOfPoss bool `json:"proof_of_possession"`
}
resp := policy{
StoresPII: false,
CollectIP: false,
CollectUA: false,
Timestamps: map[bool]string{true: "coarse_utc", false: "utc"}[s.coarseTS],
ZeroTrust: s.zeroTrust,
StoresPII: false,
CollectIP: false,
CollectUA: false,
Timestamps: map[bool]string{true: "coarse_utc", false: "utc"}[s.coarseTS],
ZeroTrust: s.zeroTrust,
Accounts: "public-key only",
ProofOfPoss: s.requirePoP,
}
_ = json.NewEncoder(w).Encode(resp)
}
// -----------------------------
// Auth helpers
// -----------------------------
// ---------- Auth helpers ----------
func (s *Server) requireAuth(w http.ResponseWriter, r *http.Request) bool {
// Developer bypass
// authCtx is the resolved identity of an authenticated request.
type authCtx struct {
	sub string // subject: "dev" (bypass), "acc_…", or "discord:<snowflake>"
	cnf string // bound device key, "p256:<b64raw>" or "ed25519:<b64raw>"; empty if unbound
}
// parseAuth resolves the caller's identity from the Authorization header.
// On failure it writes a 401 response and returns (nil, false); on success
// the returned authCtx carries the subject and any bound device key.
// Accepted credentials, in order: dev bypass flag, dev bearer token, and
// gc2 HMAC tokens (when a signing key is configured).
func (s *Server) parseAuth(w http.ResponseWriter, r *http.Request) (*authCtx, bool) {
	// Dev bypass: every request is treated as the "dev" subject.
	if s.allowUnauth {
		return &authCtx{sub: "dev"}, true
	}
	header := r.Header.Get("Authorization")
	// Dev bearer: fixed token for local testing.
	if s.devBearer != "" && header == "Bearer "+s.devBearer {
		return &authCtx{sub: "dev"}, true
	}
	// gc2 HMAC token minted by this server.
	if strings.HasPrefix(header, "Bearer gc2.") && len(s.signingKey) > 0 {
		claims, err := auth.VerifyGC2(s.signingKey, strings.TrimPrefix(header, "Bearer "), time.Now())
		if err == nil {
			return &authCtx{sub: claims.Sub, cnf: claims.CNF}, true
		}
	}
	http.Error(w, "unauthorized", http.StatusUnauthorized)
	return nil, false
}
func (s *Server) verifyPoP(w http.ResponseWriter, r *http.Request, ac *authCtx, body []byte) bool {
if !s.requirePoP {
return true
}
// Optional dev bearer
if s.devBearer != "" {
h := r.Header.Get("Authorization")
if h == "Bearer "+s.devBearer {
return true
}
pubHdr := r.Header.Get("X-GC-Key")
ts := r.Header.Get("X-GC-TS")
proof := r.Header.Get("X-GC-Proof")
if pubHdr == "" || ts == "" || proof == "" {
http.Error(w, "missing proof", http.StatusUnauthorized)
return false
}
// timestamp window
sec, _ := strconv.ParseInt(ts, 10, 64)
d := time.Since(time.Unix(sec, 0))
if d < -5*time.Minute || d > 5*time.Minute {
http.Error(w, "stale proof", http.StatusUnauthorized)
return false
}
// cnf must match
if ac.cnf == "" || ac.cnf != pubHdr {
http.Error(w, "key mismatch", http.StatusUnauthorized)
return false
}
// build message
sum := sha256.Sum256(body)
msg := strings.ToUpper(r.Method) + "\n" + r.URL.String() + "\n" + ts + "\n" + hex.EncodeToString(sum[:])
// Accept self-signed HMAC tokens if configured
if s.allowClientSignedTokens && len(s.signingKey) > 0 {
h := r.Header.Get("Authorization")
if strings.HasPrefix(h, "Bearer ") {
tok := strings.TrimSpace(strings.TrimPrefix(h, "Bearer "))
if s.verifyToken(tok) == nil {
return true
// verify signature
ok := false
switch {
case strings.HasPrefix(pubHdr, "ed25519:"):
raw, err := base64.RawURLEncoding.DecodeString(strings.TrimPrefix(pubHdr, "ed25519:"))
if err == nil {
sig, err := base64.RawURLEncoding.DecodeString(proof)
if err == nil && len(raw) == ed25519.PublicKeySize {
ok = ed25519.Verify(ed25519.PublicKey(raw), []byte(msg), sig)
}
}
case strings.HasPrefix(pubHdr, "p256:"):
raw, err := base64.RawURLEncoding.DecodeString(strings.TrimPrefix(pubHdr, "p256:"))
if err == nil && len(raw) == 65 && raw[0] == 0x04 {
x := new(big.Int).SetBytes(raw[1:33])
y := new(big.Int).SetBytes(raw[33:65])
pk := ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
der, err := base64.RawURLEncoding.DecodeString(proof)
if err == nil {
ok = ecdsa.VerifyASN1(&pk, []byte(msg), der)
}
}
}
http.Error(w, "unauthorized", http.StatusUnauthorized)
return false
if !ok {
http.Error(w, "bad proof", http.StatusUnauthorized)
return false
}
// replay cache
h := sha256.Sum256([]byte(proof + "|" + ts))
key := base64.RawURLEncoding.EncodeToString(h[:])
s.replayMu.Lock()
defer s.replayMu.Unlock()
if exp, exists := s.replays[key]; exists && time.Now().Before(exp) {
http.Error(w, "replay", http.StatusUnauthorized)
return false
}
s.replays[key] = time.Now().Add(10 * time.Minute)
return true
}
func (s *Server) makeToken(subject string, ttl time.Duration) (string, error) {
if len(s.signingKey) == 0 {
return "", errors.New("signing key not set")
// ---------- Public-key auth: challenge/verify ----------
func (s *Server) handleKeyChallenge(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}
type claims struct {
Sub string `json:"sub"`
Exp int64 `json:"exp"`
Iss string `json:"iss"`
}
c := claims{
Sub: subject,
Exp: time.Now().Add(ttl).Unix(),
Iss: "greencoast",
}
body, _ := json.Marshal(c)
mac := hmac.New(sha256.New, s.signingKey)
mac.Write(body)
sig := mac.Sum(nil)
return "gc1." + base64.RawURLEncoding.EncodeToString(body) + "." + base64.RawURLEncoding.EncodeToString(sig), nil
nonce := s.randToken(16)
exp := time.Now().Add(10 * time.Minute)
s.nonceMu.Lock()
s.nonceExpiry[nonce] = exp
s.nonceMu.Unlock()
_ = json.NewEncoder(w).Encode(map[string]any{"nonce": nonce, "exp": exp.Unix()})
}
func (s *Server) verifyToken(tok string) error {
if !strings.HasPrefix(tok, "gc1.") {
return errors.New("bad prefix")
// keyVerifyReq is the JSON request body of POST /v1/auth/key/verify.
type keyVerifyReq struct {
	Nonce string `json:"nonce"` // challenge previously issued by /v1/auth/key/challenge
	Alg   string `json:"alg"`   // "p256" or "ed25519"
	Pub   string `json:"pub"`   // base64url(raw) public key (p256: 65-byte uncompressed point; ed25519: 32 bytes)
	Sig   string `json:"sig"`   // base64url signature over "key-verify\n"+nonce (p256 signatures are ASN.1/DER)
}
func (s *Server) handleKeyVerify(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}
parts := strings.Split(tok, ".")
if len(parts) != 3 {
return errors.New("bad parts")
var req keyVerifyReq
if err := json.NewDecoder(r.Body).Decode(&req); err != nil || req.Nonce == "" || req.Alg == "" || req.Pub == "" || req.Sig == "" {
http.Error(w, "bad request", http.StatusBadRequest)
return
}
body, err := base64.RawURLEncoding.DecodeString(parts[1])
// check nonce
s.nonceMu.Lock()
exp, ok := s.nonceExpiry[req.Nonce]
if ok {
delete(s.nonceExpiry, req.Nonce)
}
s.nonceMu.Unlock()
if !ok || time.Now().After(exp) {
http.Error(w, "nonce invalid", http.StatusUnauthorized)
return
}
msg := "key-verify\n" + req.Nonce
pubRaw, err := base64.RawURLEncoding.DecodeString(req.Pub)
if err != nil {
return err
http.Error(w, "bad pub", http.StatusBadRequest)
return
}
want, err := base64.RawURLEncoding.DecodeString(parts[2])
sigRaw, err := base64.RawURLEncoding.DecodeString(req.Sig)
if err != nil {
return err
http.Error(w, "bad sig", http.StatusBadRequest)
return
}
mac := hmac.New(sha256.New, s.signingKey)
mac.Write(body)
if !hmac.Equal(want, mac.Sum(nil)) {
return errors.New("bad sig")
var cnf string
switch strings.ToLower(req.Alg) {
case "ed25519":
if len(pubRaw) != ed25519.PublicKeySize || len(sigRaw) != ed25519.SignatureSize {
http.Error(w, "bad key", http.StatusBadRequest)
return
}
if !ed25519.Verify(ed25519.PublicKey(pubRaw), []byte(msg), sigRaw) {
http.Error(w, "verify failed", http.StatusUnauthorized)
return
}
cnf = "ed25519:" + req.Pub
case "p256":
if len(pubRaw) != 65 || pubRaw[0] != 0x04 {
http.Error(w, "bad key", http.StatusBadRequest)
return
}
x := new(big.Int).SetBytes(pubRaw[1:33])
y := new(big.Int).SetBytes(pubRaw[33:65])
pk := ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
// sigRaw assumed DER (WebCrypto)
if !ecdsa.VerifyASN1(&pk, []byte(msg), sigRaw) {
http.Error(w, "verify failed", http.StatusUnauthorized)
return
}
cnf = "p256:" + req.Pub
default:
http.Error(w, "unsupported alg", http.StatusBadRequest)
return
}
var c struct {
Sub string `json:"sub"`
Exp int64 `json:"exp"`
sub := auth.AccountIDFromPub(pubRaw)
ttl := 8 * time.Hour
now := time.Now()
bearer, err := auth.MintGC2(s.signingKey, auth.Claims{
Sub: sub, Exp: now.Add(ttl).Unix(), Nbf: now.Add(-60 * time.Second).Unix(),
Iss: "greencoast", Aud: "api", CNF: cnf,
})
if err != nil {
http.Error(w, "sign error", http.StatusInternalServerError)
return
}
if err := json.Unmarshal(body, &c); err != nil {
return err
}
if time.Now().Unix() > c.Exp {
return errors.New("expired")
}
return nil
_ = json.NewEncoder(w).Encode(map[string]any{
"bearer": bearer,
"sub": sub,
"exp": now.Add(ttl).Unix(),
})
}
// -----------------------------
// Objects & Index
// -----------------------------
// ---------- Objects & Index ----------
func (s *Server) handlePutObject(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPut {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}
if !s.requireAuth(w, r) {
// Limit body to 10 MiB by default
const maxBlob = int64(10 << 20)
r.Body = http.MaxBytesReader(w, r.Body, maxBlob)
// Read body first to support PoP over body hash
var buf bytes.Buffer
n, err := io.Copy(&buf, r.Body)
if err != nil {
http.Error(w, "read error", 500)
return
}
ac, ok := s.parseAuth(w, r)
if !ok {
return
}
if !s.verifyPoP(w, r, ac, buf.Bytes()) {
return
}
@@ -324,23 +459,14 @@ func (s *Server) handlePutObject(w http.ResponseWriter, r *http.Request) {
creatorTZ = ""
}
// Write to store; compute hash while streaming
var buf bytes.Buffer
n, err := io.Copy(&buf, r.Body)
if err != nil {
http.Error(w, "read error", 500)
return
}
sum := sha256.Sum256(buf.Bytes())
hash := hex.EncodeToString(sum[:])
// Persist
if err := s.store.Put(hash, bytes.NewReader(buf.Bytes())); err != nil {
http.Error(w, "store error", 500)
return
}
// Index
when := time.Now().UTC()
if s.coarseTS {
when = when.Truncate(time.Minute)
@@ -356,14 +482,13 @@ func (s *Server) handlePutObject(w http.ResponseWriter, r *http.Request) {
http.Error(w, "index error", 500)
return
}
s.sseBroadcast(map[string]interface{}{"event": "put", "data": entry})
s.sseBroadcast(map[string]any{"event": "put", "data": entry})
w.Header().Set("Content-Type", "application/json; charset=utf-8")
_ = json.NewEncoder(w).Encode(entry)
}
func (s *Server) handleObjectByHash(w http.ResponseWriter, r *http.Request) {
// path: /v1/object/{hash}
parts := strings.Split(strings.TrimPrefix(r.URL.Path, "/v1/object/"), "/")
if len(parts) == 0 || parts[0] == "" {
http.NotFound(w, r)
@@ -373,7 +498,11 @@ func (s *Server) handleObjectByHash(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet:
if !s.requireAuth(w, r) {
ac, ok := s.parseAuth(w, r)
if !ok {
return
}
if !s.verifyPoP(w, r, ac, nil) {
return
}
rc, n, err := s.store.Get(hash)
@@ -389,16 +518,19 @@ func (s *Server) handleObjectByHash(w http.ResponseWriter, r *http.Request) {
_, _ = io.Copy(w, rc)
case http.MethodDelete:
if !s.requireAuth(w, r) {
ac, ok := s.parseAuth(w, r)
if !ok {
return
}
if !s.verifyPoP(w, r, ac, nil) {
return
}
if err := s.store.Delete(hash); err != nil {
http.Error(w, "delete error", 500)
return
}
// prune index if present
_ = s.idx.Delete(hash)
s.sseBroadcast(map[string]interface{}{"event": "delete", "data": map[string]string{"hash": hash}})
s.sseBroadcast(map[string]any{"event": "delete", "data": map[string]string{"hash": hash}})
w.WriteHeader(http.StatusNoContent)
default:
@@ -411,7 +543,11 @@ func (s *Server) handleIndex(w http.ResponseWriter, r *http.Request) {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}
if !s.requireAuth(w, r) {
ac, ok := s.parseAuth(w, r)
if !ok {
return
}
if !s.verifyPoP(w, r, ac, nil) {
return
}
items, err := s.idx.List()
@@ -423,13 +559,16 @@ func (s *Server) handleIndex(w http.ResponseWriter, r *http.Request) {
_ = json.NewEncoder(w).Encode(items)
}
// Simple in-process SSE fanout.
func (s *Server) handleIndexSSE(w http.ResponseWriter, r *http.Request) {
if !s.requireAuth(w, r) {
ac, ok := s.parseAuth(w, r)
if !ok {
return
}
flusher, ok := w.(http.Flusher)
if !ok {
if !s.verifyPoP(w, r, ac, nil) {
return
}
flusher, ok2 := w.(http.Flusher)
if !ok2 {
http.Error(w, "stream unsupported", http.StatusInternalServerError)
return
}
@@ -438,8 +577,6 @@ func (s *Server) handleIndexSSE(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Connection", "keep-alive")
ch := make(chan []byte, 8)
// subscribe
s.sseMu.Lock()
if s.sseClosed {
s.sseMu.Unlock()
@@ -449,11 +586,9 @@ func (s *Server) handleIndexSSE(w http.ResponseWriter, r *http.Request) {
s.sseSubs[ch] = struct{}{}
s.sseMu.Unlock()
// Send a hello/heartbeat
fmt.Fprintf(w, "data: %s\n\n", `{"event":"hello","data":"ok"}`)
flusher.Flush()
// pump
ctx := r.Context()
t := time.NewTicker(25 * time.Second)
defer t.Stop()
@@ -492,24 +627,25 @@ func (s *Server) sseBroadcast(v interface{}) {
s.sseMu.Unlock()
}
// -----------------------------
// Admin: reindex from disk
// -----------------------------
// ---------- Admin: reindex ----------
func (s *Server) handleAdminReindex(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}
if !s.requireAuth(w, r) {
ac, ok := s.parseAuth(w, r)
if !ok {
return
}
walker, ok := s.store.(blobWalker)
if !ok {
if !s.verifyPoP(w, r, ac, nil) {
return
}
walker, ok2 := s.store.(blobWalker)
if !ok2 {
http.Error(w, "store does not support walk", http.StatusNotImplemented)
return
}
count := 0
err := walker.Walk(func(hash string, size int64, mod time.Time) error {
count++
@@ -532,29 +668,45 @@ func (s *Server) handleAdminReindex(w http.ResponseWriter, r *http.Request) {
})
}
// -----------------------------
// Discord SSO (server-side code flow)
// -----------------------------
// ---------- Discord SSO with PKCE + device key binding ----------
func (s *Server) handleDiscordStart(w http.ResponseWriter, r *http.Request, cfg DiscordProvider) {
if !cfg.Enabled || cfg.ClientID == "" || cfg.ClientSecret == "" || cfg.RedirectURI == "" {
http.Error(w, "discord sso disabled", http.StatusBadRequest)
return
}
// Require explicit 3P assent (UI shows disclaimer)
if r.Header.Get("X-GC-3P-Assent") != "1" {
http.Error(w, "third-party provider not assented", http.StatusForbidden)
return
}
deviceKey := strings.TrimSpace(r.Header.Get("X-GC-Key"))
if deviceKey == "" {
http.Error(w, "device key required", http.StatusBadRequest)
return
}
// PKCE
verifier := s.randToken(32)
chalSum := sha256.Sum256([]byte(verifier))
challenge := base64.RawURLEncoding.EncodeToString(chalSum[:])
state := s.randToken(16)
s.stateMu.Lock()
s.states[state] = stateItem{
Exp: time.Now().Add(10 * time.Minute),
Verifier: verifier,
DeviceKey: deviceKey,
}
s.stateMu.Unlock()
state := s.newState(5 * time.Minute)
v := url.Values{}
v.Set("response_type", "code")
v.Set("client_id", cfg.ClientID)
v.Set("redirect_uri", cfg.RedirectURI)
v.Set("scope", "identify")
v.Set("prompt", "consent")
v.Set("state", state)
v.Set("code_challenge", challenge)
v.Set("code_challenge_method", "S256")
authURL := (&url.URL{
Scheme: "https",
Host: "discord.com",
@@ -571,22 +723,31 @@ func (s *Server) handleDiscordCallback(w http.ResponseWriter, r *http.Request, c
http.Error(w, "disabled", http.StatusBadRequest)
return
}
q := r.URL.Query()
code := q.Get("code")
state := q.Get("state")
if code == "" || state == "" || !s.consumeState(state) {
if code == "" || state == "" {
http.Error(w, "invalid state/code", http.StatusBadRequest)
return
}
// Exchange code for token
s.stateMu.Lock()
item, ok := s.states[state]
if ok && time.Now().Before(item.Exp) {
delete(s.states, state)
}
s.stateMu.Unlock()
if !ok {
http.Error(w, "state expired", http.StatusBadRequest)
return
}
// Exchange code for token (with verifier)
form := url.Values{}
form.Set("client_id", cfg.ClientID)
form.Set("client_secret", cfg.ClientSecret)
form.Set("grant_type", "authorization_code")
form.Set("code", code)
form.Set("redirect_uri", cfg.RedirectURI)
form.Set("code_verifier", item.Verifier)
req, _ := http.NewRequestWithContext(r.Context(), http.MethodPost, "https://discord.com/api/oauth2/token", strings.NewReader(form.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
@@ -604,15 +765,13 @@ func (s *Server) handleDiscordCallback(w http.ResponseWriter, r *http.Request, c
var tok struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
Scope string `json:"scope"`
ExpiresIn int64 `json:"expires_in"`
}
if err := json.NewDecoder(res.Body).Decode(&tok); err != nil {
http.Error(w, "token decode failed", 502)
return
}
// Fetch user id (identify scope)
// Fetch user id
ureq, _ := http.NewRequestWithContext(r.Context(), http.MethodGet, "https://discord.com/api/users/@me", nil)
ureq.Header.Set("Authorization", tok.TokenType+" "+tok.AccessToken)
ures, err := http.DefaultClient.Do(ureq)
@@ -627,54 +786,31 @@ func (s *Server) handleDiscordCallback(w http.ResponseWriter, r *http.Request, c
return
}
var user struct {
ID string `json:"id"`
Username string `json:"username"`
ID string `json:"id"`
}
if err := json.NewDecoder(ures.Body).Decode(&user); err != nil {
http.Error(w, "user decode failed", 502)
return
}
// Mint self-signed bearer with Discord snowflake as subject
bearer, err := s.makeToken("discord:"+user.ID, time.Hour*8)
// Bind token to device key from /start
ttl := 8 * time.Hour
now := time.Now()
sub := "discord:" + user.ID
bearer, err := auth.MintGC2(s.signingKey, auth.Claims{
Sub: sub, Exp: now.Add(ttl).Unix(), Nbf: now.Add(-60 * time.Second).Unix(),
Iss: "greencoast", Aud: "api", CNF: item.DeviceKey,
})
if err != nil {
http.Error(w, "signing error", 500)
http.Error(w, "sign error", 500)
return
}
// Redirect to frontend callback with bearer in fragment (not query)
target := cfg.RedirectURI
u, _ := url.Parse(target)
u, _ := url.Parse(cfg.RedirectURI)
u.Fragment = "bearer=" + url.QueryEscape(bearer) + "&next=/"
http.Redirect(w, r, u.String(), http.StatusFound)
}
// simple in-memory state store
// newState creates a single-use OAuth state token, records it with the
// given TTL, and returns it.
//
// Fix: the token was previously derived from the wall clock
// (time.Now().UnixNano()), which an attacker can predict; OAuth state
// exists to prevent CSRF and must be unguessable, so it is now drawn
// from crypto/rand.
func (s *Server) newState(ttl time.Duration) string {
	b := make([]byte, 12)
	if _, err := rand.Read(b); err != nil {
		// A failing platform CSPRNG is unrecoverable; issuing a
		// guessable state would defeat the CSRF protection entirely.
		panic(err)
	}
	val := base64.RawURLEncoding.EncodeToString(b)
	s.stateMu.Lock()
	s.states[val] = time.Now().Add(ttl)
	s.stateMu.Unlock()
	return val
}
// consumeState atomically removes state token v and reports whether it
// existed and had not yet expired. States are single-use: a second call
// with the same value always returns false.
func (s *Server) consumeState(v string) bool {
	s.stateMu.Lock()
	defer s.stateMu.Unlock()
	if exp, found := s.states[v]; found {
		delete(s.states, v)
		return time.Now().Before(exp)
	}
	return false
}
// -----------------------------
// Utilities
// -----------------------------
// ---------- Utilities, shutdown ----------
func isReasonableTZ(tz string) bool {
if !strings.Contains(tz, "/") || len(tz) > 64 {
@@ -688,10 +824,6 @@ func isReasonableTZ(tz string) bool {
return true
}
// -----------------------------
// Optional: graceful shutdown
// -----------------------------
func (s *Server) Shutdown(ctx context.Context) error {
s.sseMu.Lock()
s.sseClosed = true
@@ -703,20 +835,12 @@ func (s *Server) Shutdown(ctx context.Context) error {
return nil
}
// -----------------------------
// Helpers for static serving (optional use)
// -----------------------------
func fileExists(p string) bool {
st, err := os.Stat(p)
return err == nil && !st.IsDir()
}
func joinClean(dir, p string) (string, bool) {
fp := path.Clean("/" + p)
full := path.Clean(dir + fp)
if !strings.HasPrefix(full, path.Clean(dir)) {
return "", false
}
return full, true
// randToken returns a URL-safe token carrying n bytes of CSPRNG entropy,
// used for OAuth state values and key-login nonces.
//
// Fix: the previous implementation hashed the wall clock plus map sizes
// (its own comment conceded crypto/rand would be better), making nonces
// and SSO state predictable to an attacker; it also sliced sum[:n] from a
// 32-byte digest, so any n > 32 would panic. This version uses crypto/rand
// and supports any n while producing tokens of the same length for the
// existing call sites (n = 16, 32).
func (s *Server) randToken(n int) string {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		// A failing platform CSPRNG is unrecoverable; predictable
		// nonces/state would break login and SSO CSRF protection.
		panic(err)
	}
	return base64.RawURLEncoding.EncodeToString(b)
}

78
internal/auth/gc2.go Normal file
View File

@@ -0,0 +1,78 @@
package auth
import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"strings"
"time"
)
type Claims struct {
Sub string `json:"sub"` // account ID (acc_…)
Exp int64 `json:"exp"` // unix seconds
Nbf int64 `json:"nbf,omitempty"` // not before
Iss string `json:"iss,omitempty"` // greencoast
Aud string `json:"aud,omitempty"` // api
Jti string `json:"jti,omitempty"` // token id (optional)
CNF string `json:"cnf,omitempty"` // key binding: "p256:<b64raw>" or "ed25519:<b64raw>"
}
func MintGC2(signKey []byte, c Claims) (string, error) {
if len(signKey) == 0 {
return "", errors.New("sign key missing")
}
if c.Sub == "" || c.Exp == 0 {
return "", errors.New("claims incomplete")
}
body, _ := json.Marshal(c)
mac := hmac.New(sha256.New, signKey)
mac.Write(body)
sig := mac.Sum(nil)
return "gc2." + base64.RawURLEncoding.EncodeToString(body) + "." + base64.RawURLEncoding.EncodeToString(sig), nil
}
func VerifyGC2(signKey []byte, tok string, now time.Time) (Claims, error) {
var zero Claims
if !strings.HasPrefix(tok, "gc2.") {
return zero, errors.New("bad prefix")
}
parts := strings.Split(tok, ".")
if len(parts) != 3 {
return zero, errors.New("bad parts")
}
body, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return zero, err
}
want, err := base64.RawURLEncoding.DecodeString(parts[2])
if err != nil {
return zero, err
}
mac := hmac.New(sha256.New, signKey)
mac.Write(body)
if !hmac.Equal(want, mac.Sum(nil)) {
return zero, errors.New("bad sig")
}
var c Claims
if err := json.Unmarshal(body, &c); err != nil {
return zero, err
}
t := now.Unix()
if c.Nbf != 0 && t < c.Nbf {
return zero, errors.New("nbf")
}
if t > c.Exp {
return zero, errors.New("expired")
}
return c, nil
}
// AccountIDFromPub derives a stable, pseudonymous account ID from a raw
// public key: "acc_" followed by the first 16 bytes (32 hex characters)
// of sha256(pub).
func AccountIDFromPub(raw []byte) string {
	digest := sha256.Sum256(raw)
	var sb strings.Builder
	sb.Grow(4 + 32)
	sb.WriteString("acc_")
	sb.WriteString(hex.EncodeToString(digest[:16]))
	return sb.String()
}

View File

@@ -6,17 +6,14 @@ import (
"time"
)
// Entry is the API/JSON shape the server returns.
// StoredAt is RFC3339/RFC3339Nano in UTC.
type Entry struct {
Hash string `json:"hash"`
Bytes int64 `json:"bytes"`
StoredAt string `json:"stored_at"` // RFC3339( Nano ) string
StoredAt string `json:"stored_at"`
Private bool `json:"private"`
CreatorTZ string `json:"creator_tz,omitempty"` // IANA TZ like "America/New_York"
CreatorTZ string `json:"creator_tz,omitempty"`
}
// internal record with real time.Time for sorting/comparison.
type rec struct {
Hash string
Bytes int64
@@ -25,30 +22,20 @@ type rec struct {
CreatorTZ string
}
// Index is an in-memory index keyed by hash.
// Index is an in-memory index of stored entries keyed by blob hash.
// All methods lock mu, so an Index is safe for concurrent use.
type Index struct {
	mu   sync.RWMutex   // guards hash
	hash map[string]rec // hash -> record
}
// New creates an empty Index.
func New() *Index {
return &Index{
hash: make(map[string]rec),
}
}
func New() *Index { return &Index{hash: make(map[string]rec)} }
// Put inserts or replaces an entry.
// e.StoredAt may be RFC3339( Nano ); if empty/invalid we use time.Now().UTC().
func (ix *Index) Put(e Entry) error {
ix.mu.Lock()
defer ix.mu.Unlock()
t := parseWhen(e.StoredAt)
if t.IsZero() {
t = time.Now().UTC()
}
ix.hash[e.Hash] = rec{
Hash: e.Hash,
Bytes: e.Bytes,
@@ -59,7 +46,6 @@ func (ix *Index) Put(e Entry) error {
return nil
}
// Delete removes an entry by hash (no error if absent).
func (ix *Index) Delete(hash string) error {
ix.mu.Lock()
defer ix.mu.Unlock()
@@ -67,19 +53,14 @@ func (ix *Index) Delete(hash string) error {
return nil
}
// List returns entries sorted by StoredAt descending.
func (ix *Index) List() ([]Entry, error) {
ix.mu.RLock()
defer ix.mu.RUnlock()
tmp := make([]rec, 0, len(ix.hash))
for _, r := range ix.hash {
tmp = append(tmp, r)
}
sort.Slice(tmp, func(i, j int) bool {
return tmp[i].StoredAt.After(tmp[j].StoredAt)
})
sort.Slice(tmp, func(i, j int) bool { return tmp[i].StoredAt.After(tmp[j].StoredAt) })
out := make([]Entry, len(tmp))
for i, r := range tmp {
out[i] = Entry{
@@ -93,7 +74,6 @@ func (ix *Index) List() ([]Entry, error) {
return out, nil
}
// parseWhen tries RFC3339Nano then RFC3339; returns zero time on failure.
func parseWhen(s string) time.Time {
if s == "" {
return time.Time{}

View File

@@ -10,15 +10,11 @@ import (
"time"
)
// FSStore stores blobs on the local filesystem under root/objects/...
// It supports both a flat layout (objects/<hash>) and nested layouts
// (objects/<hash>/<file> or objects/<prefix>/<hash>); see findBlobPath.
type FSStore struct {
	root    string // storage root directory
	objects string // objects directory under root, where blobs live
}
// NewFS returns a file-backed blob store rooted at dir.
func NewFS(dir string) (*FSStore, error) {
if dir == "" {
return nil, errors.New("empty storage dir")
@@ -30,7 +26,6 @@ func NewFS(dir string) (*FSStore, error) {
return &FSStore{root: dir, objects: o}, nil
}
// pathFlat returns the flat path objects/<hash>.
func (s *FSStore) pathFlat(hash string) (string, error) {
if hash == "" {
return "", errors.New("empty hash")
@@ -38,7 +33,6 @@ func (s *FSStore) pathFlat(hash string) (string, error) {
return filepath.Join(s.objects, hash), nil
}
// isHexHash does a quick check for lowercase hex of length 64.
func isHexHash(name string) bool {
if len(name) != 64 {
return false
@@ -52,27 +46,14 @@ func isHexHash(name string) bool {
return true
}
// findBlobPath tries common layouts before falling back to a recursive search.
//
// Supported fast paths (in order):
// 1. objects/<hash> (flat file)
// 2. objects/<hash>/blob|data|content (common names)
// 3. objects/<hash>/<single file> (folder-per-post; pick that file)
// 4. objects/<hash[0:2]>/<hash> (two-level prefix sharding)
//
// If still not found, it walks recursively under objects/ to locate either:
// - a file named exactly <hash>, or
// - any file under a directory named <hash> (choose the most recently modified).
func (s *FSStore) findBlobPath(hash string) (string, error) {
if hash == "" {
return "", errors.New("empty hash")
}
// 1) flat file
// 1) flat
if p, _ := s.pathFlat(hash); fileExists(p) {
return p, nil
}
// 2) objects/<hash>/{blob,data,content}
dir := filepath.Join(s.objects, hash)
for _, cand := range []string{"blob", "data", "content"} {
@@ -81,88 +62,67 @@ func (s *FSStore) findBlobPath(hash string) (string, error) {
return p, nil
}
}
// 3) objects/<hash>/<single file>
if st, err := os.Stat(dir); err == nil && st.IsDir() {
ents, err := os.ReadDir(dir)
if err == nil {
var picked string
var pickedMod time.Time
for _, de := range ents {
if de.IsDir() {
continue
}
p := filepath.Join(dir, de.Name())
fi, err := os.Stat(p)
if err != nil || !fi.Mode().IsRegular() {
continue
}
// Pick newest file if multiple.
if picked == "" || fi.ModTime().After(pickedMod) {
picked = p
pickedMod = fi.ModTime()
}
ents, _ := os.ReadDir(dir)
var picked string
var pickedMod time.Time
for _, de := range ents {
if de.IsDir() {
continue
}
if picked != "" {
return picked, nil
p := filepath.Join(dir, de.Name())
fi, err := os.Stat(p)
if err == nil && fi.Mode().IsRegular() {
if picked == "" || fi.ModTime().After(pickedMod) {
picked, pickedMod = p, fi.ModTime()
}
}
}
if picked != "" {
return picked, nil
}
}
// 4) two-level prefix: objects/aa/<hash>
// 4) two-level prefix objects/aa/<hash>
if len(hash) >= 2 {
p := filepath.Join(s.objects, hash[:2], hash)
if fileExists(p) {
return p, nil
}
}
// Fallback: recursive search
// 5) recursive search
var best string
var bestMod time.Time
err := filepath.WalkDir(s.objects, func(p string, d fs.DirEntry, err error) error {
if err != nil {
// ignore per-entry errors
return nil
}
if d.IsDir() {
_ = filepath.WalkDir(s.objects, func(p string, d fs.DirEntry, err error) error {
if err != nil || d.IsDir() {
return nil
}
base := filepath.Base(p)
// Exact filename == hash
if base == hash {
best = p
// exact match is good enough; stop here
return fs.SkipDir
}
// If parent dir name is hash, consider it
parent := filepath.Base(filepath.Dir(p))
if parent == hash {
if fi, err := os.Stat(p); err == nil && fi.Mode().IsRegular() {
if best == "" || fi.ModTime().After(bestMod) {
best = p
bestMod = fi.ModTime()
best, bestMod = p, fi.ModTime()
}
}
}
return nil
})
if err == nil && best != "" {
if best != "" {
return best, nil
}
return "", os.ErrNotExist
}
// fileExists true if path exists and is a regular file.
func fileExists(p string) bool {
fi, err := os.Stat(p)
return err == nil && fi.Mode().IsRegular()
}
// Put writes/overwrites the blob at the content hash into the flat path.
// (Nested layouts remain supported for reads/reindex, but new writes are flat.)
func (s *FSStore) Put(hash string, r io.Reader) error {
p, err := s.pathFlat(hash)
if err != nil {
@@ -189,7 +149,6 @@ func (s *FSStore) Put(hash string, r io.Reader) error {
return os.Rename(tmp, p)
}
// Get opens the blob for reading and returns its size if known.
func (s *FSStore) Get(hash string) (io.ReadCloser, int64, error) {
p, err := s.findBlobPath(hash)
if err != nil {
@@ -206,17 +165,12 @@ func (s *FSStore) Get(hash string) (io.ReadCloser, int64, error) {
return f, st.Size(), nil
}
// Delete removes the blob. It is not an error if it doesn't exist.
// It tries the flat path, common nested paths, then falls back to remove
// any file found via findBlobPath.
func (s *FSStore) Delete(hash string) error {
// Try flat
if p, _ := s.pathFlat(hash); fileExists(p) {
if err := os.Remove(p); err == nil || errors.Is(err, os.ErrNotExist) {
return nil
}
}
// Try common nested
dir := filepath.Join(s.objects, hash)
for _, cand := range []string{"blob", "data", "content"} {
p := filepath.Join(dir, cand)
@@ -234,77 +188,49 @@ func (s *FSStore) Delete(hash string) error {
}
}
}
// Fallback: whatever findBlobPath locates
if p, err := s.findBlobPath(hash); err == nil {
if err := os.Remove(p); err == nil || errors.Is(err, os.ErrNotExist) {
return nil
}
}
// If we couldn't find anything, treat as success (idempotent delete)
return nil
}
// Walk calls fn(hash, size, modTime) for each blob file found.
// It recognizes blobs when either:
// - the file name is a 64-char hex hash, or
// - the parent directory name is that hash (folder-per-post).
//
// If multiple files map to the same hash (e.g., dir contains many files),
// the newest file's size/modTime is reported.
func (s *FSStore) Walk(fn func(hash string, size int64, mod time.Time) error) error {
type rec struct {
size int64
mod time.Time
}
agg := make(map[string]rec)
err := filepath.WalkDir(s.objects, func(p string, d fs.DirEntry, err error) error {
if err != nil {
return nil // skip unreadable entries
}
if d.IsDir() {
_ = filepath.WalkDir(s.objects, func(p string, d fs.DirEntry, err error) error {
if err != nil || d.IsDir() {
return nil
}
// Only consider regular files
fi, err := os.Stat(p)
if err != nil || !fi.Mode().IsRegular() {
return nil
}
base := filepath.Base(p)
// Case 1: filename equals hash
if isHexHash(base) {
if r, ok := agg[base]; !ok || fi.ModTime().After(r.mod) {
agg[base] = rec{size: fi.Size(), mod: fi.ModTime()}
agg[base] = rec{fi.Size(), fi.ModTime()}
}
return nil
}
// Case 2: parent dir is the hash
parent := filepath.Base(filepath.Dir(p))
if isHexHash(parent) {
if r, ok := agg[parent]; !ok || fi.ModTime().After(r.mod) {
agg[parent] = rec{size: fi.Size(), mod: fi.ModTime()}
agg[parent] = rec{fi.Size(), fi.ModTime()}
}
return nil
}
// Case 3: two-level prefix layout e.g. objects/aa/<hash>
// If parent is a 2-char dir and grandparent is objects/, base might be hash.
if len(base) == 64 && isHexHash(strings.ToLower(base)) {
// already handled as Case 1, but keep as safety if different casing sneaks in
if r, ok := agg[base]; !ok || fi.ModTime().After(r.mod) {
agg[base] = rec{size: fi.Size(), mod: fi.ModTime()}
agg[base] = rec{fi.Size(), fi.ModTime()}
}
return nil
}
return nil
})
if err != nil {
return err
}
for h, r := range agg {
if err := fn(h, r.size, r.mod); err != nil {
return err