Fixed the Discord SSO somewhat
Fixed FS system Added TZ options
This commit is contained in:
@@ -31,7 +31,7 @@ FROM gcr.io/distroless/base-debian12:nonroot
|
|||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
COPY --from=build /out/greencoast-shard /app/greencoast-shard
|
COPY --from=build /out/greencoast-shard /app/greencoast-shard
|
||||||
COPY configs/shard.sample.yaml /app/shard.yaml
|
COPY configs/shard.sample.yaml /app/shard.yaml
|
||||||
COPY client /app/client
|
COPY client/ /opt/greencoast/client/
|
||||||
VOLUME ["/var/lib/greencoast"]
|
VOLUME ["/var/lib/greencoast"]
|
||||||
EXPOSE 8080 8081 8443 9443
|
EXPOSE 8080 8081 8443 9443
|
||||||
USER nonroot:nonroot
|
USER nonroot:nonroot
|
||||||
|
@@ -2,18 +2,15 @@ import { encryptString, decryptToString, toBlob } from "./crypto.js";
|
|||||||
|
|
||||||
// ---- Helpers ----
|
// ---- Helpers ----
|
||||||
function defaultApiBase() {
|
function defaultApiBase() {
|
||||||
// 1) URL query override: …/index.html?api=https://api.domain
|
|
||||||
try {
|
try {
|
||||||
const qs = new URLSearchParams(window.location.search);
|
const qs = new URLSearchParams(window.location.search);
|
||||||
const qApi = qs.get("api");
|
const qApi = qs.get("api");
|
||||||
if (qApi) return qApi.replace(/\/+$/, "");
|
if (qApi) return qApi.replace(/\/+$/, "");
|
||||||
} catch {}
|
} catch {}
|
||||||
|
|
||||||
// 2) Meta override in index.html: <meta name="gc-api-base" content="https://api.domain">
|
|
||||||
const m = document.querySelector('meta[name="gc-api-base"]');
|
const m = document.querySelector('meta[name="gc-api-base"]');
|
||||||
if (m && m.content) return m.content.replace(/\/+$/, "");
|
if (m && m.content) return m.content.replace(/\/+$/, "");
|
||||||
|
|
||||||
// 3) Heuristic from frontend origin
|
|
||||||
try {
|
try {
|
||||||
const u = new URL(window.location.href);
|
const u = new URL(window.location.href);
|
||||||
const proto = u.protocol;
|
const proto = u.protocol;
|
||||||
@@ -33,6 +30,8 @@ function defaultApiBase() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const LOCAL_TZ = Intl.DateTimeFormat().resolvedOptions().timeZone || "UTC";
|
||||||
|
|
||||||
// ---- DOM refs ----
|
// ---- DOM refs ----
|
||||||
const els = {
|
const els = {
|
||||||
shardUrl: document.getElementById("shardUrl"),
|
shardUrl: document.getElementById("shardUrl"),
|
||||||
@@ -47,13 +46,12 @@ const els = {
|
|||||||
publishStatus: document.getElementById("publishStatus"),
|
publishStatus: document.getElementById("publishStatus"),
|
||||||
posts: document.getElementById("posts"),
|
posts: document.getElementById("posts"),
|
||||||
discordStart: document.getElementById("discordStart"),
|
discordStart: document.getElementById("discordStart"),
|
||||||
|
shareTZ: document.getElementById("shareTZ"),
|
||||||
};
|
};
|
||||||
|
|
||||||
// ---- Config + state ----
|
// ---- Config + state ----
|
||||||
const LS_KEY = "gc_client_config_v1";
|
const LS_KEY = "gc_client_config_v1";
|
||||||
const POSTS_KEY = "gc_posts_index_v1";
|
const POSTS_KEY = "gc_posts_index_v1";
|
||||||
|
|
||||||
// IMPORTANT: define before sse() is ever called
|
|
||||||
let sseCtrl = null;
|
let sseCtrl = null;
|
||||||
|
|
||||||
// ---- Boot ----
|
// ---- Boot ----
|
||||||
@@ -63,20 +61,18 @@ checkHealth();
|
|||||||
syncIndex();
|
syncIndex();
|
||||||
sse();
|
sse();
|
||||||
|
|
||||||
|
// ---- Storage helpers ----
|
||||||
els.saveConn.onclick = async () => {
|
|
||||||
const c = { url: norm(els.shardUrl.value), bearer: els.bearer.value.trim(), passphrase: els.passphrase.value };
|
|
||||||
saveConfig(c); await checkHealth(); await syncIndex(); sse(true);
|
|
||||||
};
|
|
||||||
|
|
||||||
els.publish.onclick = publish;
|
|
||||||
els.discordStart.onclick = discordStart;
|
|
||||||
|
|
||||||
function loadConfig(){ try { return JSON.parse(localStorage.getItem(LS_KEY)) ?? {}; } catch { return {}; } }
|
function loadConfig(){ try { return JSON.parse(localStorage.getItem(LS_KEY)) ?? {}; } catch { return {}; } }
|
||||||
function saveConfig(c){ localStorage.setItem(LS_KEY, JSON.stringify(c)); Object.assign(cfg, c); }
|
function saveConfig(c){ localStorage.setItem(LS_KEY, JSON.stringify(c)); Object.assign(cfg, c); }
|
||||||
function getPosts(){ try { return JSON.parse(localStorage.getItem(POSTS_KEY)) ?? []; } catch { return []; } }
|
function getPosts(){ try { return JSON.parse(localStorage.getItem(POSTS_KEY)) ?? []; } catch { return []; } }
|
||||||
function setPosts(v){ localStorage.setItem(POSTS_KEY, JSON.stringify(v)); renderPosts(); }
|
function setPosts(v){ localStorage.setItem(POSTS_KEY, JSON.stringify(v)); renderPosts(); }
|
||||||
function norm(u){ return (u||"").replace(/\/+$/,""); }
|
function norm(u){ return (u||"").replace(/\/+$/,""); }
|
||||||
|
function fmtWhen(ts, tz) {
|
||||||
|
try {
|
||||||
|
return new Intl.DateTimeFormat(undefined, { dateStyle:"medium", timeStyle:"short", timeZone: tz }).format(new Date(ts));
|
||||||
|
} catch { return ts; }
|
||||||
|
}
|
||||||
|
|
||||||
function applyConfig() {
|
function applyConfig() {
|
||||||
if (!cfg.url) {
|
if (!cfg.url) {
|
||||||
const detected = defaultApiBase();
|
const detected = defaultApiBase();
|
||||||
@@ -88,11 +84,23 @@ function applyConfig() {
|
|||||||
els.passphrase.value = cfg.passphrase ?? "";
|
els.passphrase.value = cfg.passphrase ?? "";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
els.saveConn.onclick = async () => {
|
||||||
|
const c = { url: norm(els.shardUrl.value), bearer: els.bearer.value.trim(), passphrase: els.passphrase.value };
|
||||||
|
saveConfig(c); await checkHealth(); await syncIndex(); sse(true);
|
||||||
|
};
|
||||||
|
|
||||||
|
els.publish.onclick = publish;
|
||||||
|
els.discordStart.onclick = discordStart;
|
||||||
|
|
||||||
async function checkHealth() {
|
async function checkHealth() {
|
||||||
if (!cfg.url) return; els.health.textContent = "Checking…";
|
if (!cfg.url) { els.health.textContent = "No API base set"; return; }
|
||||||
try { const r = await fetch(cfg.url + "/healthz"); els.health.textContent = r.ok ? "Connected ✔" : `Error: ${r.status}`; }
|
els.health.textContent = "Checking…";
|
||||||
catch { els.health.textContent = "Not reachable"; }
|
try {
|
||||||
|
const r = await fetch(cfg.url + "/healthz", { mode:"cors" });
|
||||||
|
els.health.textContent = r.ok ? "Connected ✔" : `Error: ${r.status}`;
|
||||||
|
} catch (e) {
|
||||||
|
els.health.textContent = "Not reachable";
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async function publish() {
|
async function publish() {
|
||||||
@@ -104,15 +112,19 @@ async function publish() {
|
|||||||
if (!cfg.passphrase) return msg("Set a passphrase for private posts.", true);
|
if (!cfg.passphrase) return msg("Set a passphrase for private posts.", true);
|
||||||
const payload = await encryptString(JSON.stringify({ title, body }), cfg.passphrase);
|
const payload = await encryptString(JSON.stringify({ title, body }), cfg.passphrase);
|
||||||
blob = toBlob(payload); enc=true;
|
blob = toBlob(payload); enc=true;
|
||||||
} else { blob = toBlob(JSON.stringify({ title, body })); }
|
} else {
|
||||||
|
blob = toBlob(JSON.stringify({ title, body }));
|
||||||
|
}
|
||||||
const headers = { "Content-Type":"application/octet-stream" };
|
const headers = { "Content-Type":"application/octet-stream" };
|
||||||
if (cfg.bearer) headers["Authorization"] = "Bearer " + cfg.bearer;
|
if (cfg.bearer) headers["Authorization"] = "Bearer " + cfg.bearer;
|
||||||
if (enc) headers["X-GC-Private"] = "1";
|
if (enc) headers["X-GC-Private"] = "1";
|
||||||
|
if (els.shareTZ && els.shareTZ.checked && LOCAL_TZ) headers["X-GC-TZ"] = LOCAL_TZ; // NEW
|
||||||
|
|
||||||
const r = await fetch(cfg.url + "/v1/object", { method:"PUT", headers, body: blob });
|
const r = await fetch(cfg.url + "/v1/object", { method:"PUT", headers, body: blob });
|
||||||
if (!r.ok) throw new Error(await r.text());
|
if (!r.ok) throw new Error(await r.text());
|
||||||
const j = await r.json();
|
const j = await r.json();
|
||||||
const posts = getPosts();
|
const posts = getPosts();
|
||||||
posts.unshift({ hash:j.hash, title: title || "(untitled)", bytes:j.bytes, ts:j.stored_at, enc });
|
posts.unshift({ hash:j.hash, title: title || "(untitled)", bytes:j.bytes, ts:j.stored_at, enc, creator_tz: j.creator_tz || "" });
|
||||||
setPosts(posts);
|
setPosts(posts);
|
||||||
els.body.value = ""; msg(`Published ${enc?"private":"public"} post. Hash: ${j.hash}`);
|
els.body.value = ""; msg(`Published ${enc?"private":"public"} post. Hash: ${j.hash}`);
|
||||||
} catch(e){ msg("Publish failed: " + (e?.message||e), true); }
|
} catch(e){ msg("Publish failed: " + (e?.message||e), true); }
|
||||||
@@ -127,13 +139,13 @@ async function syncIndex() {
|
|||||||
const r = await fetch(cfg.url + "/v1/index", { headers });
|
const r = await fetch(cfg.url + "/v1/index", { headers });
|
||||||
if (!r.ok) throw new Error("index fetch failed");
|
if (!r.ok) throw new Error("index fetch failed");
|
||||||
const entries = await r.json();
|
const entries = await r.json();
|
||||||
setPosts(entries.map(e => ({ hash:e.hash, title:"(title unknown — fetch)", bytes:e.bytes, ts:e.stored_at, enc:e.private })));
|
setPosts(entries.map(e => ({ hash:e.hash, title:"(title unknown — fetch)", bytes:e.bytes, ts:e.stored_at, enc:e.private, creator_tz: e.creator_tz || "" })));
|
||||||
} catch(e){ console.warn("index sync failed", e); }
|
} catch(e){ console.warn("index sync failed", e); }
|
||||||
}
|
}
|
||||||
|
|
||||||
function sse(){
|
function sse(forceRestart=false){
|
||||||
if (!cfg.url) return;
|
if (!cfg.url) return;
|
||||||
if (sseCtrl) { sseCtrl.abort(); sseCtrl = undefined; }
|
if (sseCtrl) { sseCtrl.abort(); sseCtrl = null; }
|
||||||
sseCtrl = new AbortController();
|
sseCtrl = new AbortController();
|
||||||
const url = cfg.url + "/v1/index/stream";
|
const url = cfg.url + "/v1/index/stream";
|
||||||
const headers = {}; if (cfg.bearer) headers["Authorization"] = "Bearer " + cfg.bearer;
|
const headers = {}; if (cfg.bearer) headers["Authorization"] = "Bearer " + cfg.bearer;
|
||||||
@@ -154,7 +166,7 @@ function sse(){
|
|||||||
const e = ev.data;
|
const e = ev.data;
|
||||||
const posts = getPosts();
|
const posts = getPosts();
|
||||||
if (!posts.find(p => p.hash === e.hash)) {
|
if (!posts.find(p => p.hash === e.hash)) {
|
||||||
posts.unshift({ hash:e.hash, title:"(title unknown — fetch)", bytes:e.bytes, ts:e.stored_at, enc:e.private });
|
posts.unshift({ hash:e.hash, title:"(title unknown — fetch)", bytes:e.bytes, ts:e.stored_at, enc:e.private, creator_tz: e.creator_tz || "" });
|
||||||
setPosts(posts);
|
setPosts(posts);
|
||||||
}
|
}
|
||||||
} else if (ev.event === "delete") {
|
} else if (ev.event === "delete") {
|
||||||
@@ -204,17 +216,14 @@ async function delServer(p) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async function discordStart() {
|
async function discordStart() {
|
||||||
// Last-resort auto-fill if user didn’t hit Save
|
|
||||||
if (!cfg.url) {
|
if (!cfg.url) {
|
||||||
const derived = defaultApiBase();
|
const derived = defaultApiBase();
|
||||||
if (derived) {
|
if (derived) {
|
||||||
cfg.url = derived;
|
cfg.url = derived; try { localStorage.setItem(LS_KEY, JSON.stringify(cfg)); } catch {}
|
||||||
try { localStorage.setItem(LS_KEY, JSON.stringify(cfg)); } catch {}
|
|
||||||
els.shardUrl.value = derived;
|
els.shardUrl.value = derived;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (!cfg.url) { alert("Set shard URL first."); return; }
|
if (!cfg.url) { alert("Set shard URL first."); return; }
|
||||||
|
|
||||||
const r = await fetch(cfg.url + "/v1/auth/discord/start", { headers: { "X-GC-3P-Assent":"1" }});
|
const r = await fetch(cfg.url + "/v1/auth/discord/start", { headers: { "X-GC-3P-Assent":"1" }});
|
||||||
if (!r.ok) { alert("Discord SSO not available"); return; }
|
if (!r.ok) { alert("Discord SSO not available"); return; }
|
||||||
const j = await r.json();
|
const j = await r.json();
|
||||||
@@ -224,10 +233,17 @@ async function discordStart() {
|
|||||||
function renderPosts() {
|
function renderPosts() {
|
||||||
const posts = getPosts(); els.posts.innerHTML = "";
|
const posts = getPosts(); els.posts.innerHTML = "";
|
||||||
for (const p of posts) {
|
for (const p of posts) {
|
||||||
|
const localStr = fmtWhen(p.ts, LOCAL_TZ) + ` (${LOCAL_TZ})`;
|
||||||
|
let creatorStr = "";
|
||||||
|
if (p.creator_tz && p.creator_tz !== LOCAL_TZ) {
|
||||||
|
creatorStr = ` · creator: ${fmtWhen(p.ts, p.creator_tz)} (${p.creator_tz})`;
|
||||||
|
}
|
||||||
const div = document.createElement("div"); div.className = "post";
|
const div = document.createElement("div"); div.className = "post";
|
||||||
const badge = p.enc ? `<span class="badge">private</span>` : `<span class="badge">public</span>`;
|
const badge = p.enc ? `<span class="badge">private</span>` : `<span class="badge">public</span>`;
|
||||||
div.innerHTML = `
|
div.innerHTML = `
|
||||||
<div class="meta"><code>${p.hash.slice(0,10)}…</code> · ${p.bytes} bytes · ${p.ts} ${badge}</div>
|
<div class="meta">
|
||||||
|
<code>${p.hash.slice(0,10)}…</code> · ${p.bytes} bytes · ${localStr}${creatorStr} ${badge}
|
||||||
|
</div>
|
||||||
<div class="actions">
|
<div class="actions">
|
||||||
<button data-act="view">View</button>
|
<button data-act="view">View</button>
|
||||||
<button data-act="save">Save blob</button>
|
<button data-act="save">Save blob</button>
|
||||||
|
@@ -4,9 +4,8 @@
|
|||||||
<meta charset="utf-8"/>
|
<meta charset="utf-8"/>
|
||||||
<title>GreenCoast — Client</title>
|
<title>GreenCoast — Client</title>
|
||||||
<meta name="viewport" content="width=device-width,initial-scale=1"/>
|
<meta name="viewport" content="width=device-width,initial-scale=1"/>
|
||||||
<meta name="gc-api-base" content="https://api.greencoast.fullmooncyberworks.com">
|
<!-- Force API base for Cloudflare tunneled API -->
|
||||||
<!-- Optional: hard override API base during testing -->
|
<meta name="gc-api-base" content="https://api-gc.fullmooncyberworks.com">
|
||||||
<!-- <meta name="gc-api-base" content="http://127.0.0.1:9080"> -->
|
|
||||||
<link rel="stylesheet" href="./styles.css"/>
|
<link rel="stylesheet" href="./styles.css"/>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
@@ -15,13 +14,9 @@
|
|||||||
|
|
||||||
<section class="card">
|
<section class="card">
|
||||||
<h2>Connect</h2>
|
<h2>Connect</h2>
|
||||||
<div class="row">
|
|
||||||
<label>Detected API</label>
|
|
||||||
<input id="detectedApi" readonly />
|
|
||||||
</div>
|
|
||||||
<div class="row">
|
<div class="row">
|
||||||
<label>Shard URL</label>
|
<label>Shard URL</label>
|
||||||
<input id="shardUrl" placeholder="http://localhost:8080" />
|
<input id="shardUrl" placeholder="https://api-gc.fullmooncyberworks.com" />
|
||||||
</div>
|
</div>
|
||||||
<div class="row">
|
<div class="row">
|
||||||
<label>Bearer (optional)</label>
|
<label>Bearer (optional)</label>
|
||||||
@@ -61,6 +56,9 @@
|
|||||||
<label>Body</label>
|
<label>Body</label>
|
||||||
<textarea id="body" rows="6" placeholder="Write your post..."></textarea>
|
<textarea id="body" rows="6" placeholder="Write your post..."></textarea>
|
||||||
</div>
|
</div>
|
||||||
|
<div class="row">
|
||||||
|
<label><input type="checkbox" id="shareTZ" checked> Include my time zone on this post</label>
|
||||||
|
</div>
|
||||||
<button id="publish">Publish</button>
|
<button id="publish">Publish</button>
|
||||||
<div id="publishStatus" class="muted"></div>
|
<div id="publishStatus" class="muted"></div>
|
||||||
</section>
|
</section>
|
||||||
@@ -72,16 +70,5 @@
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
<script type="module" src="./app.js"></script>
|
<script type="module" src="./app.js"></script>
|
||||||
<script>
|
|
||||||
// Show what the client detected for the API base, to confirm routing
|
|
||||||
(function(){
|
|
||||||
try {
|
|
||||||
if (typeof defaultApiBase === "function") {
|
|
||||||
const el = document.getElementById("detectedApi");
|
|
||||||
if (el) el.value = defaultApiBase();
|
|
||||||
}
|
|
||||||
} catch {}
|
|
||||||
})();
|
|
||||||
</script>
|
|
||||||
</body>
|
</body>
|
||||||
</html>
|
</html>
|
||||||
|
@@ -1,89 +1,154 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"flag"
|
|
||||||
"log"
|
"log"
|
||||||
"path/filepath"
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
"greencoast/internal/api"
|
"greencoast/internal/api"
|
||||||
"greencoast/internal/config"
|
|
||||||
"greencoast/internal/federation"
|
|
||||||
"greencoast/internal/index"
|
"greencoast/internal/index"
|
||||||
"greencoast/internal/storage"
|
"greencoast/internal/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func getenvBool(key string, def bool) bool {
|
||||||
cfgPath := flag.String("config", "shard.yaml", "path to config")
|
v := os.Getenv(key)
|
||||||
flag.Parse()
|
if v == "" {
|
||||||
|
return def
|
||||||
cfg, err := config.Load(*cfgPath)
|
}
|
||||||
|
b, err := strconv.ParseBool(v)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("config error: %v", err)
|
return def
|
||||||
|
}
|
||||||
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
store, err := storage.NewFSStore(cfg.Storage.Path, cfg.Storage.MaxObjectKB)
|
func staticHeaders(next http.Handler) http.Handler {
|
||||||
if err != nil {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
log.Fatalf("storage error: %v", err)
|
// Same security posture as API
|
||||||
}
|
w.Header().Set("Referrer-Policy", "no-referrer")
|
||||||
|
w.Header().Set("Cross-Origin-Opener-Policy", "same-origin")
|
||||||
|
w.Header().Set("Cross-Origin-Resource-Policy", "same-site")
|
||||||
|
w.Header().Set("Permissions-Policy", "camera=(), microphone=(), geolocation=(), interest-cohort=(), browsing-topics=()")
|
||||||
|
w.Header().Set("X-Frame-Options", "DENY")
|
||||||
|
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||||
|
w.Header().Set("Strict-Transport-Security", "max-age=15552000; includeSubDomains; preload")
|
||||||
|
|
||||||
dataRoot := filepath.Dir(cfg.Storage.Path)
|
// Basic CORS for client assets
|
||||||
idx := index.New(dataRoot)
|
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||||
|
if r.Method == http.MethodOptions {
|
||||||
srv := api.New(
|
w.Header().Set("Access-Control-Allow-Methods", "GET, OPTIONS")
|
||||||
store, idx,
|
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
|
||||||
cfg.Privacy.RetainTimestamps == "coarse",
|
w.WriteHeader(http.StatusNoContent)
|
||||||
cfg.Security.ZeroTrust,
|
|
||||||
api.AuthProviders{
|
|
||||||
SigningSecretHex: cfg.Auth.SigningSecret,
|
|
||||||
Discord: api.DiscordProvider{
|
|
||||||
Enabled: cfg.Auth.SSO.Discord.Enabled,
|
|
||||||
ClientID: cfg.Auth.SSO.Discord.ClientID,
|
|
||||||
ClientSecret: cfg.Auth.SSO.Discord.ClientSecret,
|
|
||||||
RedirectURI: cfg.Auth.SSO.Discord.RedirectURI,
|
|
||||||
},
|
|
||||||
GoogleEnabled: cfg.Auth.SSO.Google.Enabled,
|
|
||||||
FacebookEnabled: cfg.Auth.SSO.Facebook.Enabled,
|
|
||||||
WebAuthnEnabled: cfg.Auth.TwoFactor.WebAuthnEnabled,
|
|
||||||
TOTPEnabled: cfg.Auth.TwoFactor.TOTPEnabled,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
// Optional: also mount static under API mux (subpath) if you later want that.
|
|
||||||
// srv.MountStatic(cfg.UI.Path, "/app")
|
|
||||||
|
|
||||||
// Start federation mTLS (if enabled)
|
|
||||||
if cfg.Federation.MTLSEnable {
|
|
||||||
tlsCfg, err := federation.ServerTLSConfig(
|
|
||||||
cfg.Federation.CertFile,
|
|
||||||
cfg.Federation.KeyFile,
|
|
||||||
cfg.Federation.ClientCAFile,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("federation tls config error: %v", err)
|
|
||||||
}
|
|
||||||
go func() {
|
|
||||||
if err := srv.ListenMTLS(cfg.Federation.Listen, tlsCfg); err != nil {
|
|
||||||
log.Fatalf("federation mTLS listener error: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start FRONTEND listener (separate port) if enabled
|
|
||||||
if cfg.UI.Enable && cfg.UI.FrontendHTTP != "" {
|
|
||||||
go func() {
|
|
||||||
if err := srv.ListenFrontendHTTP(cfg.UI.FrontendHTTP, cfg.UI.Path, cfg.UI.BaseURL); err != nil {
|
|
||||||
log.Fatalf("frontend listener error: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Choose ONE foreground listener for API: HTTPS if enabled, else HTTP.
|
|
||||||
if cfg.TLS.Enable && cfg.Listen.HTTPS != "" {
|
|
||||||
log.Fatal(srv.ListenHTTPS(cfg.Listen.HTTPS, cfg.TLS.CertFile, cfg.TLS.KeyFile))
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if cfg.Listen.HTTP == "" {
|
next.ServeHTTP(w, r)
|
||||||
log.Fatal("no API listeners configured (set listen.http or listen.https)")
|
})
|
||||||
}
|
}
|
||||||
log.Fatal(srv.ListenHTTP(cfg.Listen.HTTP))
|
|
||||||
|
func main() {
|
||||||
|
// ---- Config via env ----
|
||||||
|
httpAddr := os.Getenv("GC_HTTP_ADDR")
|
||||||
|
if httpAddr == "" {
|
||||||
|
httpAddr = ":9080" // API
|
||||||
|
}
|
||||||
|
|
||||||
|
// Optional TLS for API
|
||||||
|
httpsAddr := os.Getenv("GC_HTTPS_ADDR") // leave empty for HTTP
|
||||||
|
certFile := os.Getenv("GC_TLS_CERT")
|
||||||
|
keyFile := os.Getenv("GC_TLS_KEY")
|
||||||
|
|
||||||
|
dataDir := os.Getenv("GC_DATA_DIR")
|
||||||
|
if dataDir == "" {
|
||||||
|
dataDir = "/var/lib/greencoast"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Static dir + port (frontend)
|
||||||
|
staticDir := os.Getenv("GC_STATIC_DIR")
|
||||||
|
if staticDir == "" {
|
||||||
|
staticDir = "/opt/greencoast/client"
|
||||||
|
}
|
||||||
|
staticAddr := os.Getenv("GC_STATIC_ADDR")
|
||||||
|
if staticAddr == "" {
|
||||||
|
staticAddr = ":9082"
|
||||||
|
}
|
||||||
|
|
||||||
|
coarseTS := getenvBool("GC_COARSE_TS", false)
|
||||||
|
zeroTrust := getenvBool("GC_ZERO_TRUST", true)
|
||||||
|
signingSecretHex := os.Getenv("GC_SIGNING_SECRET_HEX")
|
||||||
|
|
||||||
|
// Discord SSO
|
||||||
|
discID := os.Getenv("GC_DISCORD_CLIENT_ID")
|
||||||
|
discSecret := os.Getenv("GC_DISCORD_CLIENT_SECRET")
|
||||||
|
discRedirect := os.Getenv("GC_DISCORD_REDIRECT_URI")
|
||||||
|
|
||||||
|
// ---- Storage ----
|
||||||
|
store, err := storage.NewFS(dataDir)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("storage init: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- Index ----
|
||||||
|
ix := index.New()
|
||||||
|
|
||||||
|
// Optional: auto-reindex from disk on boot
|
||||||
|
if w, ok := any(store).(interface {
|
||||||
|
Walk(func(hash string, size int64, mod time.Time) error) error
|
||||||
|
}); ok {
|
||||||
|
if err := w.Walk(func(hash string, size int64, mod time.Time) error {
|
||||||
|
return ix.Put(index.Entry{
|
||||||
|
Hash: hash,
|
||||||
|
Bytes: size,
|
||||||
|
StoredAt: mod.UTC().Format(time.RFC3339Nano),
|
||||||
|
Private: false,
|
||||||
|
})
|
||||||
|
}); err != nil {
|
||||||
|
log.Printf("reindex on boot: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- Auth/Providers ----
|
||||||
|
ap := api.AuthProviders{
|
||||||
|
SigningSecretHex: signingSecretHex,
|
||||||
|
Discord: api.DiscordProvider{
|
||||||
|
Enabled: discID != "" && discSecret != "" && discRedirect != "",
|
||||||
|
ClientID: discID,
|
||||||
|
ClientSecret: discSecret,
|
||||||
|
RedirectURI: discRedirect,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- API server (9080/HTTPS optional) ----
|
||||||
|
srv := api.New(store, ix, coarseTS, zeroTrust, ap)
|
||||||
|
|
||||||
|
// Serve the static client in a goroutine on 9082
|
||||||
|
go func() {
|
||||||
|
if st, err := os.Stat(staticDir); err != nil || !st.IsDir() {
|
||||||
|
log.Printf("WARN: GC_STATIC_DIR %q not found or not a dir; client may 404", staticDir)
|
||||||
|
}
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
mux.Handle("/", http.FileServer(http.Dir(staticDir)))
|
||||||
|
log.Printf("static listening on %s (dir=%s)", staticAddr, staticDir)
|
||||||
|
if err := http.ListenAndServe(staticAddr, staticHeaders(mux)); err != nil {
|
||||||
|
log.Fatalf("static server: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Prefer HTTPS if configured
|
||||||
|
if httpsAddr != "" && certFile != "" && keyFile != "" {
|
||||||
|
log.Printf("starting HTTPS API on %s", httpsAddr)
|
||||||
|
if err := srv.ListenHTTPS(httpsAddr, certFile, keyFile); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise HTTP
|
||||||
|
log.Printf("starting HTTP API on %s", httpAddr)
|
||||||
|
if err := srv.ListenHTTP(httpAddr); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_ = time.Second
|
||||||
}
|
}
|
||||||
|
@@ -45,8 +45,8 @@ auth:
|
|||||||
sso:
|
sso:
|
||||||
discord:
|
discord:
|
||||||
enabled: true
|
enabled: true
|
||||||
client_id: "REPLACE"
|
client_id: "1408292766319906946"
|
||||||
client_secret: "REPLACE"
|
client_secret: "zJ6GnUUykHbMFbWsPPneNxNK-PtOXYg1"
|
||||||
# must exactly match your Discord app's allowed redirect
|
# must exactly match your Discord app's allowed redirect
|
||||||
redirect_uri: "https://greencoast.fullmooncyberworks.com/auth-callback.html"
|
redirect_uri: "https://greencoast.fullmooncyberworks.com/auth-callback.html"
|
||||||
google:
|
google:
|
||||||
|
@@ -11,7 +11,7 @@ services:
|
|||||||
- "9080:9080" # API
|
- "9080:9080" # API
|
||||||
- "9082:9082" # Frontend
|
- "9082:9082" # Frontend
|
||||||
environment:
|
environment:
|
||||||
- GC_DEV_ALLOW_UNAUTH=false
|
- GC_DEV_ALLOW_UNAUTH=true
|
||||||
volumes:
|
volumes:
|
||||||
- ./testdata:/var/lib/greencoast
|
- ./testdata:/var/lib/greencoast
|
||||||
- ./configs/shard.test.yaml:/app/shard.yaml:ro
|
- ./configs/shard.test.yaml:/app/shard.yaml:ro
|
||||||
|
1130
internal/api/http.go
1130
internal/api/http.go
File diff suppressed because it is too large
Load Diff
@@ -1,123 +1,108 @@
|
|||||||
package index
|
package index
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"encoding/json"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
type opType string
|
// Entry is the API/JSON shape the server returns.
|
||||||
|
// StoredAt is RFC3339/RFC3339Nano in UTC.
|
||||||
const (
|
|
||||||
OpPut opType = "put"
|
|
||||||
OpDel opType = "del"
|
|
||||||
)
|
|
||||||
|
|
||||||
type record struct {
|
|
||||||
Op opType `json:"op"`
|
|
||||||
Hash string `json:"hash"`
|
|
||||||
Bytes int64 `json:"bytes,omitempty"`
|
|
||||||
StoredAt time.Time `json:"stored_at,omitempty"`
|
|
||||||
Private bool `json:"private,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Entry struct {
|
type Entry struct {
|
||||||
Hash string `json:"hash"`
|
Hash string `json:"hash"`
|
||||||
Bytes int64 `json:"bytes"`
|
Bytes int64 `json:"bytes"`
|
||||||
StoredAt time.Time `json:"stored_at"`
|
StoredAt string `json:"stored_at"` // RFC3339( Nano ) string
|
||||||
Private bool `json:"private"`
|
Private bool `json:"private"`
|
||||||
|
CreatorTZ string `json:"creator_tz,omitempty"` // IANA TZ like "America/New_York"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// internal record with real time.Time for sorting/comparison.
|
||||||
|
type rec struct {
|
||||||
|
Hash string
|
||||||
|
Bytes int64
|
||||||
|
StoredAt time.Time
|
||||||
|
Private bool
|
||||||
|
CreatorTZ string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index is an in-memory index keyed by hash.
|
||||||
type Index struct {
|
type Index struct {
|
||||||
path string
|
mu sync.RWMutex
|
||||||
mu sync.Mutex
|
hash map[string]rec
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(baseDir string) *Index {
|
// New creates an empty Index.
|
||||||
return &Index{path: filepath.Join(baseDir, "index.jsonl")}
|
func New() *Index {
|
||||||
|
return &Index{
|
||||||
|
hash: make(map[string]rec),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *Index) AppendPut(e Entry) error {
|
// Put inserts or replaces an entry.
|
||||||
i.mu.Lock()
|
// e.StoredAt may be RFC3339( Nano ); if empty/invalid we use time.Now().UTC().
|
||||||
defer i.mu.Unlock()
|
func (ix *Index) Put(e Entry) error {
|
||||||
return appendRec(i.path, record{
|
ix.mu.Lock()
|
||||||
Op: OpPut,
|
defer ix.mu.Unlock()
|
||||||
|
|
||||||
|
t := parseWhen(e.StoredAt)
|
||||||
|
if t.IsZero() {
|
||||||
|
t = time.Now().UTC()
|
||||||
|
}
|
||||||
|
|
||||||
|
ix.hash[e.Hash] = rec{
|
||||||
Hash: e.Hash,
|
Hash: e.Hash,
|
||||||
Bytes: e.Bytes,
|
Bytes: e.Bytes,
|
||||||
StoredAt: e.StoredAt,
|
StoredAt: t,
|
||||||
Private: e.Private,
|
Private: e.Private,
|
||||||
|
CreatorTZ: e.CreatorTZ,
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes an entry by hash (no error if absent).
|
||||||
|
func (ix *Index) Delete(hash string) error {
|
||||||
|
ix.mu.Lock()
|
||||||
|
defer ix.mu.Unlock()
|
||||||
|
delete(ix.hash, hash)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List returns entries sorted by StoredAt descending.
|
||||||
|
func (ix *Index) List() ([]Entry, error) {
|
||||||
|
ix.mu.RLock()
|
||||||
|
defer ix.mu.RUnlock()
|
||||||
|
|
||||||
|
tmp := make([]rec, 0, len(ix.hash))
|
||||||
|
for _, r := range ix.hash {
|
||||||
|
tmp = append(tmp, r)
|
||||||
|
}
|
||||||
|
sort.Slice(tmp, func(i, j int) bool {
|
||||||
|
return tmp[i].StoredAt.After(tmp[j].StoredAt)
|
||||||
})
|
})
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Index) AppendDelete(hash string) error {
|
out := make([]Entry, len(tmp))
|
||||||
i.mu.Lock()
|
for i, r := range tmp {
|
||||||
defer i.mu.Unlock()
|
out[i] = Entry{
|
||||||
return appendRec(i.path, record{Op: OpDel, Hash: hash})
|
Hash: r.Hash,
|
||||||
}
|
Bytes: r.Bytes,
|
||||||
|
StoredAt: r.StoredAt.UTC().Format(time.RFC3339Nano),
|
||||||
func appendRec(path string, r record) error {
|
Private: r.Private,
|
||||||
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
|
CreatorTZ: r.CreatorTZ,
|
||||||
return err
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
enc := json.NewEncoder(f)
|
|
||||||
return enc.Encode(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Index) Snapshot() ([]Entry, error) {
|
|
||||||
i.mu.Lock()
|
|
||||||
defer i.mu.Unlock()
|
|
||||||
|
|
||||||
f, err := os.Open(i.path)
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
sc := bufio.NewScanner(f)
|
|
||||||
sc.Buffer(make([]byte, 0, 64*1024), 4*1024*1024)
|
|
||||||
|
|
||||||
type state struct {
|
|
||||||
Entry Entry
|
|
||||||
Deleted bool
|
|
||||||
}
|
|
||||||
m := make(map[string]state)
|
|
||||||
for sc.Scan() {
|
|
||||||
var rec record
|
|
||||||
if err := json.Unmarshal(sc.Bytes(), &rec); err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch rec.Op {
|
|
||||||
case OpPut:
|
|
||||||
m[rec.Hash] = state{Entry: Entry{
|
|
||||||
Hash: rec.Hash, Bytes: rec.Bytes, StoredAt: rec.StoredAt, Private: rec.Private,
|
|
||||||
}}
|
|
||||||
case OpDel:
|
|
||||||
s := m[rec.Hash]
|
|
||||||
s.Deleted = true
|
|
||||||
m[rec.Hash] = s
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := sc.Err(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var out []Entry
|
|
||||||
for _, s := range m {
|
|
||||||
if !s.Deleted && s.Entry.Hash != "" {
|
|
||||||
out = append(out, s.Entry)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Slice(out, func(i, j int) bool { return out[i].StoredAt.After(out[j].StoredAt) })
|
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// parseWhen parses s as RFC3339Nano first, then RFC3339; the zero
// time.Time is returned for an empty string or unparseable input.
func parseWhen(s string) time.Time {
	if s == "" {
		return time.Time{}
	}
	for _, layout := range []string{time.RFC3339Nano, time.RFC3339} {
		if t, err := time.Parse(layout, s); err == nil {
			return t
		}
	}
	return time.Time{}
}
|
||||||
|
314
internal/storage/fs.go
Normal file
314
internal/storage/fs.go
Normal file
@@ -0,0 +1,314 @@
|
|||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FSStore stores blobs on the local filesystem under root/objects/...
// It supports both a flat layout (objects/<hash>) and a nested layout
// (objects/<hash>/<file> or objects/<prefix>/<hash>).
type FSStore struct {
	root    string // storage root directory handed to NewFS
	objects string // root/objects — every supported blob layout lives under here
}
|
||||||
|
|
||||||
|
// NewFS returns a file-backed blob store rooted at dir.
|
||||||
|
func NewFS(dir string) (*FSStore, error) {
|
||||||
|
if dir == "" {
|
||||||
|
return nil, errors.New("empty storage dir")
|
||||||
|
}
|
||||||
|
o := filepath.Join(dir, "objects")
|
||||||
|
if err := os.MkdirAll(o, 0o755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &FSStore{root: dir, objects: o}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// pathFlat returns the flat path objects/<hash>.
|
||||||
|
func (s *FSStore) pathFlat(hash string) (string, error) {
|
||||||
|
if hash == "" {
|
||||||
|
return "", errors.New("empty hash")
|
||||||
|
}
|
||||||
|
return filepath.Join(s.objects, hash), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isHexHash reports whether name is exactly 64 lowercase hex characters
// (the textual form of a SHA-256 digest).
func isHexHash(name string) bool {
	if len(name) != 64 {
		return false
	}
	for _, c := range []byte(name) {
		switch {
		case c >= '0' && c <= '9':
		case c >= 'a' && c <= 'f':
		default:
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// findBlobPath tries common layouts before falling back to a recursive search.
|
||||||
|
//
|
||||||
|
// Supported fast paths (in order):
|
||||||
|
// 1. objects/<hash> (flat file)
|
||||||
|
// 2. objects/<hash>/blob|data|content (common names)
|
||||||
|
// 3. objects/<hash>/<single file> (folder-per-post; pick that file)
|
||||||
|
// 4. objects/<hash[0:2]>/<hash> (two-level prefix sharding)
|
||||||
|
//
|
||||||
|
// If still not found, it walks recursively under objects/ to locate either:
|
||||||
|
// - a file named exactly <hash>, or
|
||||||
|
// - any file under a directory named <hash> (choose the most recently modified).
|
||||||
|
func (s *FSStore) findBlobPath(hash string) (string, error) {
|
||||||
|
if hash == "" {
|
||||||
|
return "", errors.New("empty hash")
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1) flat file
|
||||||
|
if p, _ := s.pathFlat(hash); fileExists(p) {
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2) objects/<hash>/{blob,data,content}
|
||||||
|
dir := filepath.Join(s.objects, hash)
|
||||||
|
for _, cand := range []string{"blob", "data", "content"} {
|
||||||
|
p := filepath.Join(dir, cand)
|
||||||
|
if fileExists(p) {
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3) objects/<hash>/<single file>
|
||||||
|
if st, err := os.Stat(dir); err == nil && st.IsDir() {
|
||||||
|
ents, err := os.ReadDir(dir)
|
||||||
|
if err == nil {
|
||||||
|
var picked string
|
||||||
|
var pickedMod time.Time
|
||||||
|
for _, de := range ents {
|
||||||
|
if de.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := filepath.Join(dir, de.Name())
|
||||||
|
fi, err := os.Stat(p)
|
||||||
|
if err != nil || !fi.Mode().IsRegular() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Pick newest file if multiple.
|
||||||
|
if picked == "" || fi.ModTime().After(pickedMod) {
|
||||||
|
picked = p
|
||||||
|
pickedMod = fi.ModTime()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if picked != "" {
|
||||||
|
return picked, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4) two-level prefix: objects/aa/<hash>
|
||||||
|
if len(hash) >= 2 {
|
||||||
|
p := filepath.Join(s.objects, hash[:2], hash)
|
||||||
|
if fileExists(p) {
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback: recursive search
|
||||||
|
var best string
|
||||||
|
var bestMod time.Time
|
||||||
|
|
||||||
|
err := filepath.WalkDir(s.objects, func(p string, d fs.DirEntry, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
// ignore per-entry errors
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if d.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
base := filepath.Base(p)
|
||||||
|
// Exact filename == hash
|
||||||
|
if base == hash {
|
||||||
|
best = p
|
||||||
|
// exact match is good enough; stop here
|
||||||
|
return fs.SkipDir
|
||||||
|
}
|
||||||
|
// If parent dir name is hash, consider it
|
||||||
|
parent := filepath.Base(filepath.Dir(p))
|
||||||
|
if parent == hash {
|
||||||
|
if fi, err := os.Stat(p); err == nil && fi.Mode().IsRegular() {
|
||||||
|
if best == "" || fi.ModTime().After(bestMod) {
|
||||||
|
best = p
|
||||||
|
bestMod = fi.ModTime()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err == nil && best != "" {
|
||||||
|
return best, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", os.ErrNotExist
|
||||||
|
}
|
||||||
|
|
||||||
|
// fileExists true if path exists and is a regular file.
|
||||||
|
func fileExists(p string) bool {
|
||||||
|
fi, err := os.Stat(p)
|
||||||
|
return err == nil && fi.Mode().IsRegular()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put writes/overwrites the blob at the content hash into the flat path.
|
||||||
|
// (Nested layouts remain supported for reads/reindex, but new writes are flat.)
|
||||||
|
func (s *FSStore) Put(hash string, r io.Reader) error {
|
||||||
|
p, err := s.pathFlat(hash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := os.MkdirAll(filepath.Dir(p), 0o755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tmp := p + ".tmp"
|
||||||
|
f, err := os.Create(tmp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, werr := io.Copy(f, r)
|
||||||
|
cerr := f.Close()
|
||||||
|
if werr != nil {
|
||||||
|
_ = os.Remove(tmp)
|
||||||
|
return werr
|
||||||
|
}
|
||||||
|
if cerr != nil {
|
||||||
|
_ = os.Remove(tmp)
|
||||||
|
return cerr
|
||||||
|
}
|
||||||
|
return os.Rename(tmp, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get opens the blob for reading and returns its size if known.
|
||||||
|
func (s *FSStore) Get(hash string) (io.ReadCloser, int64, error) {
|
||||||
|
p, err := s.findBlobPath(hash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
f, err := os.Open(p)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
st, err := f.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return f, 0, nil
|
||||||
|
}
|
||||||
|
return f, st.Size(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes the blob. It is not an error if it doesn't exist.
|
||||||
|
// It tries the flat path, common nested paths, then falls back to remove
|
||||||
|
// any file found via findBlobPath.
|
||||||
|
func (s *FSStore) Delete(hash string) error {
|
||||||
|
// Try flat
|
||||||
|
if p, _ := s.pathFlat(hash); fileExists(p) {
|
||||||
|
if err := os.Remove(p); err == nil || errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Try common nested
|
||||||
|
dir := filepath.Join(s.objects, hash)
|
||||||
|
for _, cand := range []string{"blob", "data", "content"} {
|
||||||
|
p := filepath.Join(dir, cand)
|
||||||
|
if fileExists(p) {
|
||||||
|
if err := os.Remove(p); err == nil || errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(hash) >= 2 {
|
||||||
|
p := filepath.Join(s.objects, hash[:2], hash)
|
||||||
|
if fileExists(p) {
|
||||||
|
if err := os.Remove(p); err == nil || errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Fallback: whatever findBlobPath locates
|
||||||
|
if p, err := s.findBlobPath(hash); err == nil {
|
||||||
|
if err := os.Remove(p); err == nil || errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// If we couldn't find anything, treat as success (idempotent delete)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk calls fn(hash, size, modTime) for each blob file found.
|
||||||
|
// It recognizes blobs when either:
|
||||||
|
// - the file name is a 64-char hex hash, or
|
||||||
|
// - the parent directory name is that hash (folder-per-post).
|
||||||
|
//
|
||||||
|
// If multiple files map to the same hash (e.g., dir contains many files),
|
||||||
|
// the newest file's size/modTime is reported.
|
||||||
|
func (s *FSStore) Walk(fn func(hash string, size int64, mod time.Time) error) error {
|
||||||
|
type rec struct {
|
||||||
|
size int64
|
||||||
|
mod time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
agg := make(map[string]rec)
|
||||||
|
|
||||||
|
err := filepath.WalkDir(s.objects, func(p string, d fs.DirEntry, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return nil // skip unreadable entries
|
||||||
|
}
|
||||||
|
if d.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Only consider regular files
|
||||||
|
fi, err := os.Stat(p)
|
||||||
|
if err != nil || !fi.Mode().IsRegular() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
base := filepath.Base(p)
|
||||||
|
|
||||||
|
// Case 1: filename equals hash
|
||||||
|
if isHexHash(base) {
|
||||||
|
if r, ok := agg[base]; !ok || fi.ModTime().After(r.mod) {
|
||||||
|
agg[base] = rec{size: fi.Size(), mod: fi.ModTime()}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Case 2: parent dir is the hash
|
||||||
|
parent := filepath.Base(filepath.Dir(p))
|
||||||
|
if isHexHash(parent) {
|
||||||
|
if r, ok := agg[parent]; !ok || fi.ModTime().After(r.mod) {
|
||||||
|
agg[parent] = rec{size: fi.Size(), mod: fi.ModTime()}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Case 3: two-level prefix layout e.g. objects/aa/<hash>
|
||||||
|
// If parent is a 2-char dir and grandparent is objects/, base might be hash.
|
||||||
|
if len(base) == 64 && isHexHash(strings.ToLower(base)) {
|
||||||
|
// already handled as Case 1, but keep as safety if different casing sneaks in
|
||||||
|
if r, ok := agg[base]; !ok || fi.ModTime().After(r.mod) {
|
||||||
|
agg[base] = rec{size: fi.Size(), mod: fi.ModTime()}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for h, r := range agg {
|
||||||
|
if err := fn(h, r.size, r.mod); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
@@ -1,95 +0,0 @@
|
|||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FSStore is a content-addressed blob store on the local filesystem.
// Objects are written under root/<h[0:2]>/<h[2:4]>/<hash> (two-level
// sharding by digest prefix).
type FSStore struct {
	root       string // base directory for objects
	maxObjectB int64  // max object size in bytes; <= 0 disables the limit
}
|
|
||||||
|
|
||||||
func NewFSStore(root string, maxKB int) (*FSStore, error) {
|
|
||||||
if root == "" {
|
|
||||||
root = "./data/objects"
|
|
||||||
}
|
|
||||||
if err := os.MkdirAll(root, 0o755); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &FSStore{root: root, maxObjectB: int64(maxKB) * 1024}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *FSStore) Put(r io.Reader) (string, int64, error) {
|
|
||||||
h := sha256.New()
|
|
||||||
tmp := filepath.Join(s.root, ".tmp")
|
|
||||||
_ = os.MkdirAll(tmp, 0o755)
|
|
||||||
f, err := os.CreateTemp(tmp, "obj-*")
|
|
||||||
if err != nil {
|
|
||||||
return "", 0, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
var n int64
|
|
||||||
buf := make([]byte, 32*1024)
|
|
||||||
for {
|
|
||||||
m, er := r.Read(buf)
|
|
||||||
if m > 0 {
|
|
||||||
n += int64(m)
|
|
||||||
if s.maxObjectB > 0 && n > s.maxObjectB {
|
|
||||||
return "", 0, errors.New("object too large")
|
|
||||||
}
|
|
||||||
_, _ = h.Write(buf[:m])
|
|
||||||
if _, werr := f.Write(buf[:m]); werr != nil {
|
|
||||||
return "", 0, werr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if er == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if er != nil {
|
|
||||||
return "", 0, er
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sum := hex.EncodeToString(h.Sum(nil))
|
|
||||||
dst := filepath.Join(s.root, sum[:2], sum[2:4], sum)
|
|
||||||
if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
|
|
||||||
return "", 0, err
|
|
||||||
}
|
|
||||||
if err := os.Rename(f.Name(), dst); err != nil {
|
|
||||||
return "", 0, err
|
|
||||||
}
|
|
||||||
return sum, n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *FSStore) pathFor(hash string) string {
|
|
||||||
return filepath.Join(s.root, hash[:2], hash[2:4], hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *FSStore) Get(hash string) (string, error) {
|
|
||||||
if len(hash) < 4 {
|
|
||||||
return "", os.ErrNotExist
|
|
||||||
}
|
|
||||||
p := s.pathFor(hash)
|
|
||||||
if _, err := os.Stat(p); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return p, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *FSStore) Delete(hash string) error {
|
|
||||||
if len(hash) < 4 {
|
|
||||||
return os.ErrNotExist
|
|
||||||
}
|
|
||||||
p := s.pathFor(hash)
|
|
||||||
if err := os.Remove(p); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_ = os.Remove(filepath.Dir(p))
|
|
||||||
_ = os.Remove(filepath.Dir(filepath.Dir(p)))
|
|
||||||
return nil
|
|
||||||
}
|
|
4
testdata/index.jsonl
vendored
Normal file
4
testdata/index.jsonl
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
{"op":"put","hash":"a008a13ade86edbd77f5c0fcfcf35bd295c93069be42fdbd46bc65b392ddf5fb","bytes":110,"stored_at":"2025-08-22T03:00:00Z"}
|
||||||
|
{"op":"put","hash":"9628e2adcd7a5e820fbdbe075027ac0ad78ef1a7a501971c2048bc5e5436b891","bytes":105,"stored_at":"2025-08-22T03:00:00Z","private":true}
|
||||||
|
{"op":"put","hash":"6a166437b9988bd11e911375f3ca1b4cd10b7db9a32812409c6d79a0753dd973","bytes":98,"stored_at":"2025-08-22T03:00:00Z"}
|
||||||
|
{"op":"put","hash":"f452402fadb6608bd6f9b613a1d58234e2135f045ea29262574e3e4b1e5f7292","bytes":46,"stored_at":"2025-08-22T03:00:00Z"}
|
1
testdata/objects/5dcfd2e73a6c3a17b0efb103f1a3b891fc06b0189a0c5d5916435d9e5d74e963
vendored
Normal file
1
testdata/objects/5dcfd2e73a6c3a17b0efb103f1a3b891fc06b0189a0c5d5916435d9e5d74e963
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"title":"Timezone Publish","body":"You can now include your timezone on all of your posts. This is completely optional but lets others see when you posted"}
|
1
testdata/objects/6a/16/6a166437b9988bd11e911375f3ca1b4cd10b7db9a32812409c6d79a0753dd973
vendored
Normal file
1
testdata/objects/6a/16/6a166437b9988bd11e911375f3ca1b4cd10b7db9a32812409c6d79a0753dd973
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"title":"Yarn is Testing!","body":"Hello, my name is Yarn. And I like to test. Test test 1 2 3."}
|
1
testdata/objects/96/28/9628e2adcd7a5e820fbdbe075027ac0ad78ef1a7a501971c2048bc5e5436b891
vendored
Normal file
1
testdata/objects/96/28/9628e2adcd7a5e820fbdbe075027ac0ad78ef1a7a501971c2048bc5e5436b891
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<01><><EFBFBD>d<EFBFBD>+V<><56><EFBFBD>+<2B>%!ݚ<>O<EFBFBD><4F>2ޒ$)<07><>zF<7A>î<EFBFBD>)4<><34><EFBFBD>O:z<><7A>*<2A>Ыe<D0AB><65>*5<><04>)<29><>#<23>V<EFBFBD><0B>H<EFBFBD><48>!i<><69><EFBFBD>S$e<><65><EFBFBD>dx<64>]<5D><>$<24><1F>t<EFBFBD><74>6۩<><DBA9>H<EFBFBD><48>
|
1
testdata/objects/a0/08/a008a13ade86edbd77f5c0fcfcf35bd295c93069be42fdbd46bc65b392ddf5fb
vendored
Normal file
1
testdata/objects/a0/08/a008a13ade86edbd77f5c0fcfcf35bd295c93069be42fdbd46bc65b392ddf5fb
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"title":"Public Test","body":"Hello Everyone,\n\nWelcome to GreenCoast, a BlueSky Replacement\n\nMystiatech"}
|
1
testdata/objects/f4/52/f452402fadb6608bd6f9b613a1d58234e2135f045ea29262574e3e4b1e5f7292
vendored
Normal file
1
testdata/objects/f4/52/f452402fadb6608bd6f9b613a1d58234e2135f045ea29262574e3e4b1e5f7292
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"title":"Test post","body":"Does this work?"}
|
Reference in New Issue
Block a user