Implement file storage layer with local provider, upload/download API, tests

- StorageProvider interface with LocalProvider (S3 placeholder)
- File table with entity_type/entity_id references, content type, path
- POST /v1/files (multipart upload), GET /v1/files (list by entity),
  GET /v1/files/:id (metadata), GET /v1/files/serve/* (content),
  DELETE /v1/files/:id
- member_identifier drops base64 columns, uses file_id FKs
- File validation: type whitelist, size limits, per-entity max
- Fastify storage plugin injects provider into app
- 6 API tests for upload, list, get, delete, validation
- Test runner kills stale port before starting backend
This commit is contained in:
Ryan Moon
2026-03-28 15:29:06 -05:00
parent de4d2e0a32
commit 760e995ae3
19 changed files with 615 additions and 6 deletions

View File

@@ -0,0 +1,25 @@
-- File storage table: metadata for uploaded files, keyed by owning entity.
-- Every statement below is idempotent (IF NOT EXISTS / IF EXISTS) so the
-- migration can be re-run safely against a partially migrated database —
-- previously the unguarded index/ADD COLUMN statements would fail on re-run
-- even though CREATE TABLE was guarded.
CREATE TABLE IF NOT EXISTS "file" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid(),
"company_id" uuid NOT NULL REFERENCES "company"("id"),
"path" varchar(1000) NOT NULL,
"filename" varchar(255) NOT NULL,
"content_type" varchar(100) NOT NULL,
"size_bytes" integer NOT NULL,
"entity_type" varchar(100) NOT NULL,
"entity_id" uuid NOT NULL,
"category" varchar(100) NOT NULL,
"uploaded_by" uuid,
"created_at" timestamp with time zone NOT NULL DEFAULT now()
);
-- One physical path per company; lookup index for listing files by entity.
CREATE UNIQUE INDEX IF NOT EXISTS "file_company_path" ON "file" ("company_id", "path");
CREATE INDEX IF NOT EXISTS "file_entity" ON "file" ("company_id", "entity_type", "entity_id");
-- Update member_identifier: replace base64 columns with file references
ALTER TABLE "member_identifier" DROP COLUMN IF EXISTS "image_front";
ALTER TABLE "member_identifier" DROP COLUMN IF EXISTS "image_back";
ALTER TABLE "member_identifier" DROP COLUMN IF EXISTS "image_front_url";
ALTER TABLE "member_identifier" DROP COLUMN IF EXISTS "image_back_url";
ALTER TABLE "member_identifier" ADD COLUMN IF NOT EXISTS "image_front_file_id" uuid REFERENCES "file"("id");
ALTER TABLE "member_identifier" ADD COLUMN IF NOT EXISTS "image_back_file_id" uuid REFERENCES "file"("id");

View File

@@ -85,6 +85,13 @@
"when": 1774710000000,
"tag": "0011_member_address",
"breakpoints": true
},
{
"idx": 12,
"version": "7",
"when": 1774720000000,
"tag": "0012_file_storage",
"breakpoints": true
}
]
}

View File

@@ -82,8 +82,8 @@ export const memberIdentifiers = pgTable('member_identifier', {
issuingAuthority: varchar('issuing_authority', { length: 255 }),
issuedDate: date('issued_date'),
expiresAt: date('expires_at'),
imageFront: text('image_front'),
imageBack: text('image_back'),
imageFrontFileId: uuid('image_front_file_id'),
imageBackFileId: uuid('image_back_file_id'),
notes: text('notes'),
isPrimary: boolean('is_primary').notNull().default(false),
createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),

View File

@@ -0,0 +1,21 @@
import { pgTable, uuid, varchar, integer, timestamp } from 'drizzle-orm/pg-core'
import { companies } from './stores.js'
// Metadata for stored files. The file content itself lives with the storage
// provider (local disk / S3 placeholder per the commit description — confirm);
// this table records where it is and which entity owns it.
export const files = pgTable('file', {
  id: uuid('id').primaryKey().defaultRandom(),
  // Tenant scoping: every file belongs to exactly one company.
  companyId: uuid('company_id')
    .notNull()
    .references(() => companies.id),
  // Provider-relative storage path; unique per company (see the
  // file_company_path unique index in the migration).
  path: varchar('path', { length: 1000 }).notNull(),
  // Original filename as supplied at upload time.
  filename: varchar('filename', { length: 255 }).notNull(),
  contentType: varchar('content_type', { length: 100 }).notNull(),
  // integer caps this at ~2 GB per file — NOTE(review): confirm the upload
  // size limits make that unreachable.
  sizeBytes: integer('size_bytes').notNull(),
  // Polymorphic owner reference: (entity_type, entity_id) names the owning
  // row, so no FK constraint is possible here.
  entityType: varchar('entity_type', { length: 100 }).notNull(),
  entityId: uuid('entity_id').notNull(),
  // Free-form grouping label within an entity (e.g. front/back of an ID —
  // presumably; verify against the upload API).
  category: varchar('category', { length: 100 }).notNull(),
  // Uploader's user id; nullable and intentionally without an FK.
  uploadedBy: uuid('uploaded_by'),
  createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
})
// Row shapes derived from the table definition for select/insert call sites.
export type FileRecord = typeof files.$inferSelect
export type FileRecordInsert = typeof files.$inferInsert