feat: customer detail page, size snapshots table, Spaces provisioning, Redis status cache
Some checks failed
Build & Release / build (push) Has been cancelled

This commit is contained in:
Ryan Moon
2026-04-03 20:07:18 -05:00
parent bc9d7b464c
commit b11b51aa1e
11 changed files with 832 additions and 273 deletions

View File

@@ -27,6 +27,18 @@ export async function migrate() {
)
`;
// idempotent column additions for existing deployments
await db`
CREATE TABLE IF NOT EXISTS customer_size_snapshots (
id SERIAL PRIMARY KEY,
slug TEXT NOT NULL REFERENCES customers(slug) ON DELETE CASCADE,
recorded_at DATE NOT NULL DEFAULT CURRENT_DATE,
db_size_bytes BIGINT,
spaces_size_bytes BIGINT,
spaces_object_count INT,
UNIQUE (slug, recorded_at)
)
`;
await db`CREATE INDEX IF NOT EXISTS customer_size_snapshots_slug_date ON customer_size_snapshots (slug, recorded_at DESC)`;
await db`ALTER TABLE users ADD COLUMN IF NOT EXISTS updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()`;
await db`
ALTER TABLE customers
@@ -35,6 +47,7 @@ export async function migrate() {
ADD COLUMN IF NOT EXISTS modules TEXT[] NOT NULL DEFAULT '{}',
ADD COLUMN IF NOT EXISTS start_date DATE NOT NULL DEFAULT CURRENT_DATE,
ADD COLUMN IF NOT EXISTS expiration_date DATE,
ADD COLUMN IF NOT EXISTS updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
ADD COLUMN IF NOT EXISTS updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
ADD COLUMN IF NOT EXISTS spaces_key TEXT NOT NULL DEFAULT ''
`;
}

View File

@@ -13,6 +13,7 @@ import { config } from "./lib/config";
import { migrate } from "./db/manager";
import { authRoutes } from "./routes/auth";
import { customerRoutes } from "./routes/customers";
import { startSizeCollector } from "./services/sizeCollector";
const app = Fastify({ logger: true });
@@ -42,6 +43,7 @@ app.register(customerRoutes, {
} as any);
await migrate();
startSizeCollector(app.log);
app.listen({ port: config.port, host: "0.0.0.0" }, (err) => {
if (err) {

33
src/lib/cache.ts Normal file
View File

@@ -0,0 +1,33 @@
import Redis from "ioredis";
import { config } from "./config";
// Manager uses the managed Valkey with "mgr:" prefix so it doesn't collide with customer keys
// Shared Redis (Valkey) client for the manager service.
// lazyConnect: defer the TCP connect until the first command so startup never
//   blocks on Redis availability.
// enableReadyCheck: skip the initial INFO round-trip — NOTE(review): presumably
//   needed for the managed Valkey's restricted command set; confirm.
export const cache = new Redis(config.managedValkeyUrl, {
  keyPrefix: "mgr:", // ioredis prepends this to every key, isolating manager keys from customer data
  lazyConnect: true,
  enableReadyCheck: false,
});
// Lifetime of a cached customer status entry before /overview re-fetches live data.
const STATUS_TTL = 120; // seconds
/**
 * Look up the cached live-status entry for a customer.
 * Returns the stored `{ data, cachedAt }` envelope, or null on a cache miss
 * or any Redis/parse failure (the cache is strictly best-effort).
 */
export async function getCachedStatus(slug: string): Promise<{ data: any; cachedAt: string } | null> {
  try {
    const payload = await cache.get(`status:${slug}`);
    return payload ? JSON.parse(payload) : null;
  } catch {
    // Treat connection or deserialization errors exactly like a miss.
    return null;
  }
}
/**
 * Store a status payload wrapped with a server-side timestamp; the entry
 * expires after STATUS_TTL seconds. Failures are swallowed — callers simply
 * re-fetch live data on the next request.
 */
export async function setCachedStatus(slug: string, data: any): Promise<void> {
  try {
    const envelope = JSON.stringify({ data, cachedAt: new Date().toISOString() });
    await cache.set(`status:${slug}`, envelope, "EX", STATUS_TTL);
  } catch {
    // best-effort cache write
  }
}
/**
 * Drop a customer's cached status entry. Best-effort: if the delete fails,
 * the stale entry still expires on its own via the TTL.
 */
export async function invalidateCachedStatus(slug: string): Promise<void> {
  try {
    await cache.del(`status:${slug}`);
  } catch {
    // ignore — TTL is the fallback invalidation mechanism
  }
}

View File

@@ -8,6 +8,8 @@ export const config = {
doadminDbUrl: process.env.DOADMIN_DATABASE_URL!,
jwtSecret: process.env.JWT_SECRET!,
managedValkeyUrl: process.env.MANAGED_VALKEY_URL!,
spacesBucket: process.env.SPACES_BUCKET ?? "lunarfront-data",
spacesRegion: process.env.SPACES_REGION ?? "nyc3",
};
for (const [key, val] of Object.entries(config)) {

View File

@@ -10,7 +10,7 @@ function token() {
return readFileSync(SA_TOKEN_PATH, "utf-8").trim();
}
async function k8sFetch(path: string, options: RequestInit = {}, allowStatuses: number[] = []) {
export async function k8sFetch(path: string, options: RequestInit = {}, allowStatuses: number[] = []) {
const res = await fetch(`${K8S_API}${path}`, {
...options,
headers: {

View File

@@ -1,13 +1,16 @@
import type { FastifyInstance } from "fastify";
import { z } from "zod";
import crypto from "crypto";
import postgres from "postgres";
import { createDatabase, createDatabaseUser, deleteDatabase, deleteDatabaseUser } from "../services/do";
import { addCustomerToPool, removeCustomerFromPool } from "../services/pgbouncer";
import { addCustomerChart, removeCustomerChart } from "../services/git";
import { setupCustomerDatabase, teardownCustomerDatabase } from "../services/db";
import { createNamespace, deleteNamespace, createSecret, createDockerRegistrySecret } from "../lib/k8s";
import { createNamespace, deleteNamespace, createSecret, createDockerRegistrySecret, patchSecret, getSecret, k8sFetch } from "../lib/k8s";
import { createSpacesKey, deleteSpacesKey, deleteSpacesObjects, getSpacesUsage } from "../services/spaces";
import { db } from "../db/manager";
import { config } from "../lib/config";
import { getCachedStatus, setCachedStatus } from "../lib/cache";
const MODULES = ["pos", "inventory", "rentals", "scheduling", "repairs", "accounting"] as const;
const PGBOUNCER_HOST = "pgbouncer.pgbouncer.svc";
@@ -71,6 +74,7 @@ export async function customerRoutes(app: FastifyInstance) {
pool: "pending",
namespace: "pending",
secrets: "pending",
storage: "pending",
chart: "pending",
};
@@ -122,6 +126,20 @@ export async function customerRoutes(app: FastifyInstance) {
await setStep("namespace", "done");
await setStep("secrets", "done");
const { accessKey: spacesKey, secretKey: spacesSecret } = await createSpacesKey(
`customer-${slug}`,
config.spacesBucket,
);
await patchSecret(namespace, "lunarfront-secrets", {
"spaces-key": spacesKey,
"spaces-secret": spacesSecret,
"spaces-bucket": config.spacesBucket,
"spaces-endpoint": `https://${config.spacesRegion}.digitaloceanspaces.com`,
"spaces-prefix": `${slug}/`,
});
await db`UPDATE customers SET spaces_key = ${spacesKey}, updated_at = NOW() WHERE slug = ${slug}`;
await setStep("storage", "done");
addCustomerChart(slug, body.appVersion);
await setStep("chart", "done");
@@ -144,9 +162,20 @@ export async function customerRoutes(app: FastifyInstance) {
app.log.info({ slug }, "deprovisioning customer");
const [customer] = await db`SELECT spaces_key FROM customers WHERE slug = ${slug}`;
removeCustomerChart(slug);
await removeCustomerFromPool(slug);
await teardownCustomerDatabase(slug, slug);
if (customer?.spaces_key) {
try {
const secrets = await getSecret(namespace, "lunarfront-secrets");
await deleteSpacesObjects(customer.spaces_key, secrets["spaces-secret"], config.spacesBucket, config.spacesRegion, `${slug}/`);
} catch {}
try { await deleteSpacesKey(customer.spaces_key); } catch {}
}
await Promise.all([
deleteDatabase(slug),
deleteDatabaseUser(slug),
@@ -159,6 +188,63 @@ export async function customerRoutes(app: FastifyInstance) {
return reply.code(204).send();
});
// Live overview: cached status (pods + ArgoCD) + latest size snapshot + 30d history
// GET /customers/:slug/overview — one payload for the customer detail page:
// the DB record, live pod/ArgoCD status (Redis-cached), and 30 days of size
// snapshots. `?refresh=1` bypasses the cache and forces a live read.
app.get("/customers/:slug/overview", async (req, reply) => {
  const { slug } = req.params as { slug: string };
  const namespace = `customer-${slug}`;
  const { refresh } = req.query as { refresh?: string };
  const [customer] = await db`SELECT * FROM customers WHERE slug = ${slug}`;
  if (!customer) return reply.code(404).send({ message: "Not found" });
  // ── Status (Redis cache, 2min TTL) ────────────────────────────────────────
  let statusEntry = refresh ? null : await getCachedStatus(slug);
  if (!statusEntry) {
    // Fetch pods and the ArgoCD Application in parallel; allSettled so one
    // failing API doesn't lose the other's data.
    const [podsResult, argoResult] = await Promise.allSettled([
      k8sFetch(`/api/v1/namespaces/${namespace}/pods`).then(r => r.json()),
      k8sFetch(`/apis/argoproj.io/v1alpha1/namespaces/argocd/applications/customer-${slug}`).then(r => r.json()),
    ]);
    const podsRaw = podsResult.status === "fulfilled" ? podsResult.value : null;
    const argoRaw = argoResult.status === "fulfilled" ? argoResult.value : null;
    // NOTE(review): a pod with no containerStatuses yet (e.g. Pending) reports
    // ready: true here because .every() on an empty array is true — confirm
    // that is the intended UI behavior.
    const pods = (podsRaw?.items ?? []).map((pod: any) => ({
      name: pod.metadata.name,
      ready: (pod.status.containerStatuses ?? []).every((c: any) => c.ready),
      readyCount: (pod.status.containerStatuses ?? []).filter((c: any) => c.ready).length,
      totalCount: (pod.status.containerStatuses ?? []).length,
      status: pod.status.phase ?? "Unknown",
      restarts: (pod.status.containerStatuses ?? []).reduce((s: number, c: any) => s + (c.restartCount ?? 0), 0),
      startedAt: pod.status.startTime ?? null,
    }));
    // argocd is null when the Application fetch failed or returned no status.
    const argocd = argoRaw?.status ? {
      syncStatus: argoRaw.status.sync?.status ?? "Unknown",
      healthStatus: argoRaw.status.health?.status ?? "Unknown",
      conditions: (argoRaw.status.conditions ?? []).map((c: any) => ({ type: c.type, message: c.message })),
    } : null;
    const liveStatus = { pods, argocd };
    // Best-effort cache write, then serve the just-fetched data with a fresh timestamp.
    await setCachedStatus(slug, liveStatus);
    statusEntry = { data: liveStatus, cachedAt: new Date().toISOString() };
  }
  // ── Size history (last 30 days) ───────────────────────────────────────────
  // Most-recent-first; one row per day per customer (enforced by the table's unique key).
  const sizeHistory = await db`
    SELECT recorded_at, db_size_bytes, spaces_size_bytes, spaces_object_count
    FROM customer_size_snapshots
    WHERE slug = ${slug}
    ORDER BY recorded_at DESC
    LIMIT 30
  `;
  return reply.send({
    customer,
    // cachedAt tells the UI how stale the status is.
    status: { ...statusEntry.data, cachedAt: statusEntry.cachedAt },
    sizeHistory,
  });
});
// Remove only the manager DB record without touching infrastructure —
// useful for cleaning up failed partial deployments
app.delete("/customers/:slug/record", async (req, reply) => {

View File

@@ -0,0 +1,87 @@
import postgres from "postgres";
import { db } from "../db/manager";
import { config } from "../lib/config";
import { getSecret } from "../lib/k8s";
import { getSpacesUsage } from "./spaces";
/**
 * One snapshot pass: for every provisioned customer, measure the customer
 * database size and the Spaces usage under the customer's prefix, then upsert
 * today's row in customer_size_snapshots. Each metric is best-effort — a
 * failure leaves that column NULL rather than skipping the customer's row.
 */
async function collectSizes() {
  const customers = await db`SELECT slug, spaces_key FROM customers WHERE status = 'provisioned'`;
  if (customers.length === 0) return;
  for (const customer of customers) {
    const { slug } = customer;
    try {
      // ── DB size ───────────────────────────────────────────────────────────
      let dbSizeBytes: number | null = null;
      try {
        // Reuse the doadmin credentials but point the connection at the
        // customer's database by swapping the db-name path segment of the URL.
        // NOTE(review): the regex assumes no "/" appears in the credentials
        // portion of the URL — confirm for generated passwords.
        const sql = postgres(config.doadminDbUrl.replace(/\/([^/?]+)(\?|$)/, `/${slug}$2`), { max: 1 });
        try {
          // pg_database_size returns bigint; cast to text and convert in JS.
          const [row] = await sql<[{ bytes: string }]>`SELECT pg_database_size(${slug}::text)::text AS bytes`;
          dbSizeBytes = Number(row.bytes);
        } finally {
          await sql.end(); // always close the single-connection pool
        }
      } catch {}
      // ── Spaces size ───────────────────────────────────────────────────────
      let spacesSizeBytes: number | null = null;
      let spacesObjectCount: number | null = null;
      if (customer.spaces_key) {
        try {
          // The per-customer Spaces secret lives in the customer namespace's
          // k8s secret; pair it with the access key stored in the manager DB.
          const namespace = `customer-${slug}`;
          const secrets = await getSecret(namespace, "lunarfront-secrets");
          const result = await getSpacesUsage(
            customer.spaces_key,
            secrets["spaces-secret"],
            config.spacesBucket,
            config.spacesRegion,
            `${slug}/`,
          );
          spacesSizeBytes = result.sizeBytes;
          spacesObjectCount = result.objectCount;
        } catch {}
      }
      // Upsert today's snapshot (one row per day per customer)
      await db`
        INSERT INTO customer_size_snapshots (slug, recorded_at, db_size_bytes, spaces_size_bytes, spaces_object_count)
        VALUES (${slug}, CURRENT_DATE, ${dbSizeBytes}, ${spacesSizeBytes}, ${spacesObjectCount})
        ON CONFLICT (slug, recorded_at) DO UPDATE SET
          db_size_bytes = EXCLUDED.db_size_bytes,
          spaces_size_bytes = EXCLUDED.spaces_size_bytes,
          spaces_object_count = EXCLUDED.spaces_object_count
      `;
    } catch (err) {
      // A per-customer failure must not stop the sweep for remaining customers.
      console.error(`[sizeCollector] failed for ${slug}:`, err);
    }
  }
}
/**
 * Milliseconds from now until the next 00:00 or 12:00 UTC boundary.
 * Always returns a value in (0, 12h].
 */
function msUntilNext12h(): number {
  const nowMs = Date.now();
  const target = new Date(nowMs);
  if (target.getUTCHours() < 12) {
    // Morning half of the UTC day — next boundary is noon today.
    target.setUTCHours(12, 0, 0, 0);
  } else {
    // Afternoon half — next boundary is midnight at the start of tomorrow.
    target.setUTCDate(target.getUTCDate() + 1);
    target.setUTCHours(0, 0, 0, 0);
  }
  return target.getTime() - nowMs;
}
/**
 * Start the periodic size-snapshot job: one run immediately on startup, then
 * a run at every 00:00 / 12:00 UTC boundary. Collection errors are swallowed
 * here (collectSizes logs per-customer failures itself).
 */
export function startSizeCollector(log: { info: (msg: string) => void }) {
  const run = (message: string) =>
    collectSizes().then(() => log.info(message)).catch(() => {});
  // Immediate run so a fresh deployment has data right away.
  void run("Initial size snapshot collected");
  // Re-arm a one-shot timer after each run rather than using setInterval, so
  // each delay is recomputed against the actual wall clock.
  const arm = () => {
    setTimeout(() => {
      void run("Size snapshot collected");
      arm();
    }, msUntilNext12h());
  };
  arm();
}

219
src/services/spaces.ts Normal file
View File

@@ -0,0 +1,219 @@
import crypto from "crypto";
import { config } from "../lib/config";
const DO_API = "https://api.digitalocean.com/v2";
// ── DO Spaces key management ─────────────────────────────────────────────────
/**
 * Create a DigitalOcean Spaces access key granted read/write on one bucket.
 * @param name   Display name for the key (e.g. "customer-<slug>").
 * @param bucket Bucket the key is scoped to.
 * @returns The new key pair as { accessKey, secretKey }.
 * @throws Error when the DO API responds with a non-2xx status.
 */
export async function createSpacesKey(
  name: string,
  bucket: string,
): Promise<{ accessKey: string; secretKey: string }> {
  const payload = {
    name,
    grants: [{ bucket, permission: "readwrite" }],
  };
  const res = await fetch(`${DO_API}/spaces/keys`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${config.doToken}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify(payload),
  });
  if (!res.ok) {
    const body = await res.text();
    throw new Error(`DO API POST /v2/spaces/keys → ${res.status}: ${body}`);
  }
  // NOTE(review): assumes the DO response shape { key: { access_key, secret_key } } — confirm against API docs.
  const data = (await res.json()) as { key: { access_key: string; secret_key: string } };
  return { accessKey: data.key.access_key, secretKey: data.key.secret_key };
}
/**
 * Delete a DO Spaces access key. A 404 (key already gone) is treated as
 * success so deprovisioning stays idempotent.
 * @throws Error on any other non-2xx DO API response.
 */
export async function deleteSpacesKey(accessKey: string): Promise<void> {
  const res = await fetch(`${DO_API}/spaces/keys/${accessKey}`, {
    method: "DELETE",
    headers: {
      Authorization: `Bearer ${config.doToken}`,
      "Content-Type": "application/json",
    },
  });
  if (!res.ok && res.status !== 404) {
    const body = await res.text();
    // Fix: the message previously fused the key and status ("…${accessKey}${res.status}…");
    // use the same "… → <status>: <body>" format as every other error in this file.
    throw new Error(`DO API DELETE /v2/spaces/keys/${accessKey} → ${res.status}: ${body}`);
  }
}
// ── AWS4 signing helpers ──────────────────────────────────────────────────────
/** Hex-encoded SHA-256 digest, as SigV4 requires for payload/request hashing. */
function sha256hex(data: string | Buffer): string {
  const hash = crypto.createHash("sha256");
  hash.update(data);
  return hash.digest("hex");
}
/** Raw HMAC-SHA256 digest — building block for the SigV4 signing-key chain. */
function hmac(key: string | Buffer, data: string): Buffer {
  const mac = crypto.createHmac("sha256", key);
  mac.update(data);
  return mac.digest();
}
/**
 * Build AWS Signature V4 headers for an S3-compatible (DO Spaces) request.
 * Returns the caller's headers plus host, x-amz-date, x-amz-content-sha256
 * and the computed Authorization header. Pure apart from reading the clock.
 * NOTE(review): header names are sorted BEFORE being lowercased, so callers
 * must pass already-lowercase header names (current callers do) — confirm if
 * new call sites are added. Likewise, query keys are sorted with
 * localeCompare, which matches SigV4's byte-order sort only for the ASCII
 * keys used here.
 */
function awsSign(
  method: string,
  url: URL,
  headers: Record<string, string>,
  body: string,
  accessKey: string,
  secretKey: string,
  region: string,
): Record<string, string> {
  const service = "s3";
  const now = new Date();
  // YYYYMMDD for the credential scope…
  const dateStamp = now.toISOString().slice(0, 10).replace(/-/g, "");
  // …and YYYYMMDDTHHMMSSZ for x-amz-date (strip separators and milliseconds).
  const amzDate = now.toISOString().replace(/[:-]/g, "").replace(/\.\d{3}/, "");
  const payloadHash = sha256hex(body);
  const allHeaders: Record<string, string> = {
    ...headers,
    host: url.host,
    "x-amz-date": amzDate,
    "x-amz-content-sha256": payloadHash,
  };
  // Canonical headers (sorted lowercase)
  const sortedKeys = Object.keys(allHeaders).sort();
  const canonicalHeaders = sortedKeys.map(k => `${k.toLowerCase()}:${allHeaders[k].trim()}`).join("\n") + "\n";
  const signedHeaders = sortedKeys.map(k => k.toLowerCase()).join(";");
  const canonicalUri = url.pathname || "/";
  // Query params sorted by key, percent-encoded key and value.
  const canonicalQueryString = [...url.searchParams.entries()]
    .sort(([a], [b]) => a.localeCompare(b))
    .map(([k, v]) => `${encodeURIComponent(k)}=${encodeURIComponent(v)}`)
    .join("&");
  const canonicalRequest = [method, canonicalUri, canonicalQueryString, canonicalHeaders, signedHeaders, payloadHash].join("\n");
  const credentialScope = `${dateStamp}/${region}/${service}/aws4_request`;
  const stringToSign = ["AWS4-HMAC-SHA256", amzDate, credentialScope, sha256hex(canonicalRequest)].join("\n");
  // Derive the signing key via the SigV4 HMAC chain: date → region → service → "aws4_request".
  const signingKey = hmac(
    hmac(hmac(hmac(`AWS4${secretKey}`, dateStamp), region), service),
    "aws4_request",
  );
  const signature = crypto.createHmac("sha256", signingKey).update(stringToSign).digest("hex");
  const authHeader = `AWS4-HMAC-SHA256 Credential=${accessKey}/${credentialScope}, SignedHeaders=${signedHeaders}, Signature=${signature}`;
  return {
    ...allHeaders,
    Authorization: authHeader,
  };
}
// ── Spaces usage ──────────────────────────────────────────────────────────────
/**
 * Total object count and byte size under `prefix` in a Spaces bucket.
 * Follows ListObjectsV2 continuation tokens, so prefixes with more than 1000
 * objects are fully counted (the previous single-request version silently
 * truncated at the 1000-key page limit).
 * @throws Error when any list request returns a non-2xx status.
 */
export async function getSpacesUsage(
  accessKey: string,
  secretKey: string,
  bucket: string,
  region: string,
  prefix: string,
): Promise<{ objectCount: number; sizeBytes: number; sizePretty: string }> {
  const endpoint = `https://${bucket}.${region}.digitaloceanspaces.com/`;
  let objectCount = 0;
  let sizeBytes = 0;
  let continuationToken: string | undefined;
  do {
    const url = new URL(endpoint);
    url.searchParams.set("list-type", "2");
    url.searchParams.set("prefix", prefix);
    url.searchParams.set("max-keys", "1000");
    if (continuationToken) url.searchParams.set("continuation-token", continuationToken);
    const signedHeaders = awsSign("GET", url, {}, "", accessKey, secretKey, region);
    const res = await fetch(url.toString(), { headers: signedHeaders });
    if (!res.ok) {
      const body = await res.text();
      throw new Error(`Spaces ListObjectsV2 → ${res.status}: ${body}`);
    }
    const xml = await res.text();
    // Lightweight XML scrape: <Size>/<Key> appear once per object in this response.
    for (const m of xml.matchAll(/<Size>(\d+)<\/Size>/g)) {
      sizeBytes += Number(m[1]);
    }
    objectCount += (xml.match(/<Key>/g) ?? []).length;
    // NOTE(review): the token is taken verbatim from the XML; assumes it holds
    // no entity-escaped characters (S3-style tokens are opaque base64) — confirm.
    continuationToken = xml.match(/<NextContinuationToken>([^<]+)<\/NextContinuationToken>/)?.[1];
  } while (continuationToken);
  return { objectCount, sizeBytes, sizePretty: formatBytes(sizeBytes) };
}
// ── Spaces object deletion ────────────────────────────────────────────────────
export async function deleteSpacesObjects(
accessKey: string,
secretKey: string,
bucket: string,
region: string,
prefix: string,
): Promise<void> {
// List all objects under prefix
const endpoint = `https://${bucket}.${region}.digitaloceanspaces.com/`;
const listUrl = new URL(endpoint);
listUrl.searchParams.set("list-type", "2");
listUrl.searchParams.set("prefix", prefix);
listUrl.searchParams.set("max-keys", "1000");
const listHeaders = awsSign("GET", listUrl, {}, "", accessKey, secretKey, region);
const listRes = await fetch(listUrl.toString(), { headers: listHeaders });
if (!listRes.ok) {
const body = await listRes.text();
throw new Error(`Spaces ListObjectsV2 → ${listRes.status}: ${body}`);
}
const xml = await listRes.text();
const keyMatches = [...xml.matchAll(/<Key>([^<]+)<\/Key>/g)];
if (keyMatches.length === 0) return;
const keys = keyMatches.map(m => m[1]);
// Delete in batches of 1000
const batchSize = 1000;
for (let i = 0; i < keys.length; i += batchSize) {
const batch = keys.slice(i, i + batchSize);
const deleteXml =
`<?xml version="1.0" encoding="UTF-8"?><Delete>` +
batch.map(k => `<Object><Key>${escapeXml(k)}</Key></Object>`).join("") +
`</Delete>`;
const deleteUrl = new URL(endpoint);
deleteUrl.searchParams.set("delete", "");
const contentMd5 = crypto.createHash("md5").update(deleteXml).digest("base64");
const deleteHeaders = awsSign(
"POST",
deleteUrl,
{ "content-md5": contentMd5, "content-type": "application/xml" },
deleteXml,
accessKey,
secretKey,
region,
);
const deleteRes = await fetch(deleteUrl.toString(), {
method: "POST",
headers: deleteHeaders,
body: deleteXml,
});
if (!deleteRes.ok) {
const body = await deleteRes.text();
throw new Error(`Spaces DeleteObjects → ${deleteRes.status}: ${body}`);
}
}
}
// ── Helpers ───────────────────────────────────────────────────────────────────
/** Human-readable byte count: plain B, KB/MB with one decimal, GB with two. */
function formatBytes(bytes: number): string {
  const KB = 1024;
  const MB = KB * 1024;
  const GB = MB * 1024;
  if (bytes >= GB) return `${(bytes / GB).toFixed(2)} GB`;
  if (bytes >= MB) return `${(bytes / MB).toFixed(1)} MB`;
  if (bytes >= KB) return `${(bytes / KB).toFixed(1)} KB`;
  return `${bytes} B`;
}
/** Escape the three characters that may not appear raw in XML element text. */
function escapeXml(str: string): string {
  const entities: Record<string, string> = { "&": "&amp;", "<": "&lt;", ">": "&gt;" };
  return str.replace(/[&<>]/g, ch => entities[ch]);
}