From 80368a811bcd1ebe4be9b241f6adf32ae85688b7 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 12 Mar 2026 07:02:05 +0800 Subject: [PATCH 01/15] sql script and related changes --- .../graphile-settings/src/upload-resolver.ts | 203 +++++- graphql/server/src/middleware/upload.ts | 12 +- migrations/object_store.sql | 607 ++++++++++++++++++ pnpm-lock.yaml | 423 ++++++++++++ uploads/s3-streamer/package.json | 1 + uploads/s3-streamer/src/index.ts | 1 + uploads/s3-streamer/src/storage-provider.ts | 198 ++++++ 7 files changed, 1417 insertions(+), 28 deletions(-) create mode 100644 migrations/object_store.sql create mode 100644 uploads/s3-streamer/src/storage-provider.ts diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index 90df46a61..c744ee11f 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -7,6 +7,16 @@ * Lazily initializes the S3 streamer on first upload to avoid requiring * env vars at module load time. 
* + * V2 mode (UPLOAD_V2_ENABLED=true): + * - Key format: {database_id}/{bucket_key}/{uuid}_origin + * - INSERT into object_store_public.files after S3 upload + * - Returns { key, url, mime, filename } for image/upload types + * + * Legacy mode (UPLOAD_V2_ENABLED=false, default): + * - Key format: {random24hex}-{sanitized-filename} + * - No files table INSERT + * - Returns { url, mime, filename } for image/upload types + * * ENV VARS: * BUCKET_PROVIDER - 'minio' | 's3' (default: 'minio') * BUCKET_NAME - bucket name (default: 'test-bucket') @@ -14,13 +24,17 @@ * AWS_ACCESS_KEY - access key (default: 'minioadmin') * AWS_SECRET_KEY - secret key (default: 'minioadmin') * MINIO_ENDPOINT - MinIO endpoint (default: 'http://localhost:9000') + * UPLOAD_V2_ENABLED - enable v2 upload with files index (default: 'false') */ import Streamer from '@constructive-io/s3-streamer'; +import { S3StorageProvider } from '@constructive-io/s3-streamer'; +import type { StorageProvider } from '@constructive-io/s3-streamer'; import uploadNames from '@constructive-io/upload-names'; import { getEnvOptions } from '@constructive-io/graphql-env'; import { Logger } from '@pgpmjs/logger'; -import { randomBytes } from 'crypto'; +import { randomBytes, randomUUID } from 'crypto'; +import { Pool } from 'pg'; import type { Readable } from 'stream'; import type { FileUpload, @@ -32,73 +46,178 @@ const log = new Logger('upload-resolver'); const DEFAULT_IMAGE_MIME_TYPES = ['image/jpeg', 'image/png', 'image/svg+xml']; let streamer: Streamer | null = null; +let storageProvider: StorageProvider | null = null; let bucketName: string; +let pgPool: Pool | null = null; -function getStreamer(): Streamer { - if (streamer) return streamer; +const isV2Enabled = (): boolean => + process.env.UPLOAD_V2_ENABLED === 'true' || process.env.UPLOAD_V2_ENABLED === '1'; +function getCdnConfig() { const opts = getEnvOptions(); const cdn = opts.cdn || {}; + return { + provider: (cdn.provider || 'minio') as 'minio' | 's3', + 
bucketName: cdn.bucketName || 'test-bucket', + awsRegion: cdn.awsRegion || 'us-east-1', + awsAccessKey: cdn.awsAccessKey || 'minioadmin', + awsSecretKey: cdn.awsSecretKey || 'minioadmin', + minioEndpoint: cdn.minioEndpoint || 'http://localhost:9000', + }; +} - const provider = cdn.provider || 'minio'; - bucketName = cdn.bucketName || 'test-bucket'; - const awsRegion = cdn.awsRegion || 'us-east-1'; - const awsAccessKey = cdn.awsAccessKey || 'minioadmin'; - const awsSecretKey = cdn.awsSecretKey || 'minioadmin'; - const minioEndpoint = cdn.minioEndpoint || 'http://localhost:9000'; +function getStreamer(): Streamer { + if (streamer) return streamer; + + const cdn = getCdnConfig(); + bucketName = cdn.bucketName; if (process.env.NODE_ENV === 'production') { - if (!cdn.awsAccessKey || !cdn.awsSecretKey) { + if (cdn.awsAccessKey === 'minioadmin' || cdn.awsSecretKey === 'minioadmin') { log.warn('[upload-resolver] WARNING: Using default credentials in production.'); } } log.info( - `[upload-resolver] Initializing: provider=${provider} bucket=${bucketName}`, + `[upload-resolver] Initializing: provider=${cdn.provider} bucket=${bucketName}`, ); streamer = new Streamer({ defaultBucket: bucketName, - awsRegion, - awsSecretKey, - awsAccessKey, - minioEndpoint, - provider, + awsRegion: cdn.awsRegion, + awsSecretKey: cdn.awsSecretKey, + awsAccessKey: cdn.awsAccessKey, + minioEndpoint: cdn.minioEndpoint, + provider: cdn.provider, }); return streamer; } +function getStorageProvider(): StorageProvider { + if (storageProvider) return storageProvider; + + const cdn = getCdnConfig(); + bucketName = cdn.bucketName; + + storageProvider = new S3StorageProvider({ + bucket: cdn.bucketName, + awsRegion: cdn.awsRegion, + awsAccessKey: cdn.awsAccessKey, + awsSecretKey: cdn.awsSecretKey, + minioEndpoint: cdn.minioEndpoint, + provider: cdn.provider, + }); + + return storageProvider; +} + +function getPgPool(): Pool { + if (pgPool) return pgPool; + pgPool = new Pool({ + host: process.env.PGHOST || 
'localhost', + port: Number(process.env.PGPORT || 5432), + database: process.env.PGDATABASE || 'constructive', + user: process.env.PGUSER || 'postgres', + password: process.env.PGPASSWORD || 'password', + max: 3, + }); + return pgPool; +} + /** - * Generates a randomized storage key from a filename. - * Format: {random10chars}-{sanitized-filename} + * Generates a randomized storage key from a filename (legacy format). + * Format: {random24hex}-{sanitized-filename} */ -function generateKey(filename: string): string { +function generateLegacyKey(filename: string): string { const rand = randomBytes(12).toString('hex'); return `${rand}-${uploadNames(filename)}`; } +/** + * Generates a v2 storage key. + * Format: {database_id}/{bucket_key}/{uuid}_origin + */ +function generateV2Key(databaseId: string, bucketKey: string): { key: string; fileId: string } { + const fileId = randomUUID(); + return { key: `${databaseId}/${bucketKey}/${fileId}_origin`, fileId }; +} + +/** + * INSERTs a row into object_store_public.files. + * Fires the AFTER INSERT trigger which enqueues a process-image job. + */ +async function insertFileRecord( + fileId: string, + databaseId: string, + bucketKey: string, + key: string, + etag: string, + createdBy: string | null, +): Promise { + const pool = getPgPool(); + await pool.query( + `INSERT INTO object_store_public.files + (id, database_id, bucket_key, key, etag, created_by) + VALUES ($1, $2, $3, $4, $5, $6)`, + [fileId, Number(databaseId), bucketKey, key, etag, createdBy], + ); +} + +/** + * Extracts databaseId and userId from the GraphQL context. + * In PostGraphile, context contains the Express request. 
+ */ +function extractContextInfo(context: any): { databaseId: string | null; userId: string | null } { + // PostGraphile v5 stores the request on context + const req = context?.req || context?.request; + const databaseId = req?.api?.databaseId || req?.databaseId || null; + const userId = req?.token?.user_id || null; + return { databaseId, userId }; +} + /** * Streams a file to S3/MinIO storage and returns the URL and metadata. * * Reusable by both the GraphQL upload resolver and REST /upload endpoint. + * + * When UPLOAD_V2_ENABLED, uses the new key format and INSERTs a files row. */ export async function streamToStorage( readStream: Readable, filename: string, -): Promise<{ url: string; filename: string; mime: string }> { + opts?: { databaseId?: string; userId?: string; bucketKey?: string }, +): Promise<{ url: string; filename: string; mime: string; key?: string }> { + if (isV2Enabled() && opts?.databaseId) { + const storage = getStorageProvider(); + const bucketKey = opts.bucketKey || 'default'; + const { key, fileId } = generateV2Key(opts.databaseId, bucketKey); + + const s3 = getStreamer(); + const detected = await s3.detectContentType({ readStream, filename }); + const contentType = detected.contentType; + + const result = await storage.upload(key, detected.stream, { contentType }); + + await insertFileRecord(fileId, opts.databaseId, bucketKey, key, result.etag, opts.userId || null); + + const url = await storage.presignGet(key, 3600); + return { key, url, filename, mime: contentType }; + } + + // Legacy path const s3 = getStreamer(); - const key = generateKey(filename); - const result = await s3.upload({ + const key = generateLegacyKey(filename); + const uploadResult = await s3.upload({ readStream, filename, key, bucket: bucketName, }); return { - url: result.upload.Location, + url: uploadResult.upload.Location, filename, - mime: result.contentType, + mime: uploadResult.contentType, }; } @@ -106,7 +225,7 @@ export async function streamToStorage( * Upload 
resolver that streams files to S3/MinIO. * * Returns different shapes based on the column's type hint: - * - 'image' / 'upload' → { filename, mime, url } (for jsonb domain columns) + * - 'image' / 'upload' → { key, url, mime, filename } (v2) or { url, mime, filename } (legacy) * - 'attachment' / default → url string (for text domain columns) * * MIME validation happens before persistence: content type is detected from @@ -121,7 +240,6 @@ async function uploadResolver( const { tags, type } = info.uploadPlugin; const s3 = getStreamer(); const { filename } = upload; - const key = generateKey(filename); // MIME type validation from smart tags const typ = type || tags?.type; @@ -147,6 +265,39 @@ async function uploadResolver( throw new Error('UPLOAD_MIMETYPE'); } + // V2 path: new key format + files table INSERT + if (isV2Enabled()) { + const { databaseId, userId } = extractContextInfo(_context); + + if (databaseId) { + const storage = getStorageProvider(); + const bucketKey = 'default'; + const { key, fileId } = generateV2Key(databaseId, bucketKey); + + const result = await storage.upload(key, detected.stream, { + contentType: detectedContentType, + }); + + await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, userId); + + const url = await storage.presignGet(key, 3600); + + switch (typ) { + case 'image': + case 'upload': + return { key, filename, mime: detectedContentType, url }; + case 'attachment': + default: + return url; + } + } + + log.warn('[upload-resolver] V2 enabled but no databaseId in context, falling back to legacy'); + } + + // Legacy path + const key = generateLegacyKey(filename); + const result = await s3.uploadWithContentType({ readStream: detected.stream, contentType: detectedContentType, diff --git a/graphql/server/src/middleware/upload.ts b/graphql/server/src/middleware/upload.ts index 89c513115..71ab851b1 100644 --- a/graphql/server/src/middleware/upload.ts +++ b/graphql/server/src/middleware/upload.ts @@ -266,8 +266,11 @@ export 
const createUploadAuthenticateMiddleware = ( * Accepts a single file via multipart/form-data, streams it to S3/MinIO, * and returns file metadata. The frontend uses this in a two-step flow: * - * 1. POST /upload -> { url, filename, mime, size } + * 1. POST /upload -> { key?, url, filename, mime, size } * 2. GraphQL mutation -> patch row with the returned metadata + * + * When UPLOAD_V2_ENABLED=true, passes databaseId and userId to streamToStorage + * so it can use the new key format and INSERT into object_store_public.files. */ export const uploadRoute: RequestHandler[] = [ parseFileWithErrors, @@ -287,13 +290,18 @@ export const uploadRoute: RequestHandler[] = [ try { const readStream = fs.createReadStream(req.file.path); - const result = await streamToStorage(readStream, req.file.originalname); + const result = await streamToStorage(readStream, req.file.originalname, { + databaseId: req.api?.databaseId, + userId: req.token.user_id, + bucketKey: 'default', + }); uploadLog.debug( `[upload] Uploaded file for user=${req.token.user_id} filename=${req.file.originalname} mime=${result.mime} size=${req.file.size}`, ); res.json({ + ...(result.key ? 
{ key: result.key } : {}), url: result.url, filename: result.filename, mime: result.mime, diff --git a/migrations/object_store.sql b/migrations/object_store.sql new file mode 100644 index 000000000..b370ffdca --- /dev/null +++ b/migrations/object_store.sql @@ -0,0 +1,607 @@ +-- ============================================================================= +-- Constructive Upload System -- object_store_public schema +-- ============================================================================= +-- Run: psql -h localhost -U postgres -d constructive < migrations/object_store.sql +-- ============================================================================= + +BEGIN; + +-- Ensure required roles exist (idempotent for dev environments) +DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'authenticated') THEN + CREATE ROLE authenticated NOLOGIN; + END IF; + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'service_role') THEN + CREATE ROLE service_role NOLOGIN; + END IF; +END $$; + +-- Ensure app_jobs schema + stub add_job exist (required by trigger functions). +-- In production, app_jobs is deployed by the database-jobs pgpm module. +-- This stub is a no-op that prevents trigger creation from failing in dev. +CREATE SCHEMA IF NOT EXISTS app_jobs; + +CREATE OR REPLACE FUNCTION app_jobs.add_job( + identifier text, + payload json DEFAULT '{}'::json, + queue_name text DEFAULT NULL, + run_at timestamptz DEFAULT NULL, + max_attempts integer DEFAULT NULL, + job_key text DEFAULT NULL, + priority integer DEFAULT NULL, + flags text[] DEFAULT NULL +) RETURNS void AS $$ +BEGIN + -- Stub: in production this is provided by database-jobs pgpm module. + -- In dev, jobs are enqueued but not processed unless the job worker is running. 
+ RAISE NOTICE 'app_jobs.add_job stub called: % %', identifier, payload; +END; +$$ LANGUAGE plpgsql; + +-- Ensure schema exists +CREATE SCHEMA IF NOT EXISTS object_store_public; + +-- --------------------------------------------------------------------------- +-- 1. Status ENUM +-- --------------------------------------------------------------------------- + +CREATE TYPE object_store_public.file_status AS ENUM ( + 'pending', + 'processing', + 'ready', + 'error', + 'deleting' +); + +COMMENT ON TYPE object_store_public.file_status IS + 'Lifecycle states for managed files. Transitions: pending->{processing,error}, processing->{ready,error,deleting}, ready->deleting, error->{deleting,pending(retry)}.'; + +-- --------------------------------------------------------------------------- +-- 2. Files Table +-- --------------------------------------------------------------------------- + +CREATE TABLE object_store_public.files ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + database_id integer NOT NULL, + bucket_key text NOT NULL DEFAULT 'default', + key text NOT NULL, + status object_store_public.file_status NOT NULL DEFAULT 'pending', + status_reason text, + etag text, + source_table text, + source_column text, + source_id uuid, + processing_started_at timestamptz, + created_by uuid, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + + CONSTRAINT files_pkey PRIMARY KEY (id, database_id), + CONSTRAINT files_key_unique UNIQUE (key, database_id), + CONSTRAINT files_key_not_empty CHECK (key <> ''), + CONSTRAINT files_key_max_length CHECK (length(key) <= 1024), + CONSTRAINT files_bucket_key_format CHECK (bucket_key ~ '^[a-z][a-z0-9_-]*$'), + CONSTRAINT files_source_table_format CHECK ( + source_table IS NULL OR source_table ~ '^[a-z_]+\.[a-z_]+$' + ), + CONSTRAINT files_source_complete CHECK ( + (source_table IS NULL AND source_column IS NULL AND source_id IS NULL) + OR (source_table IS NOT NULL AND source_column IS NOT NULL 
AND source_id IS NOT NULL) + ) +); + +COMMENT ON TABLE object_store_public.files IS + 'Operational index for S3 objects. Each row = one physical S3 object (including generated versions). NOT a source of truth for file metadata -- domain tables own that.'; +COMMENT ON COLUMN object_store_public.files.key IS + 'Full S3 object key. Format: {database_id}/{bucket_key}/{uuid}_{version_name}. Origin files use _origin suffix.'; +COMMENT ON COLUMN object_store_public.files.etag IS + 'S3 ETag for reconciliation and cache validation.'; +COMMENT ON COLUMN object_store_public.files.status_reason IS + 'Human-readable reason for current status (error details, deletion reason).'; +COMMENT ON COLUMN object_store_public.files.processing_started_at IS + 'Timestamp when processing began. Used to detect stuck jobs (alert at 15 min).'; +COMMENT ON COLUMN object_store_public.files.source_table IS + 'Schema-qualified table name referencing this file (e.g. constructive_users_public.users). NULL until the domain trigger populates it. Free text -- no FK possible.'; +COMMENT ON COLUMN object_store_public.files.source_column IS + 'Column name on the source table (e.g. profile_picture). NULL until domain trigger populates it.'; +COMMENT ON COLUMN object_store_public.files.source_id IS + 'Primary key of the row in the source table. NULL until domain trigger populates it.'; + +-- --------------------------------------------------------------------------- +-- 3. 
Buckets Table +-- --------------------------------------------------------------------------- + +CREATE TABLE object_store_public.buckets ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + database_id integer NOT NULL, + key text NOT NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT false, + config jsonb NOT NULL DEFAULT '{}'::jsonb, + created_by uuid, + updated_by uuid, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + + CONSTRAINT buckets_pkey PRIMARY KEY (id, database_id), + CONSTRAINT buckets_key_unique UNIQUE (key, database_id), + CONSTRAINT buckets_key_format CHECK (key ~ '^[a-z][a-z0-9_-]*$') +); + +COMMENT ON TABLE object_store_public.buckets IS + 'Logical bucket configuration per tenant. The bucket key maps to the S3 key prefix segment. is_public controls RLS policy for anonymous reads.'; + +-- --------------------------------------------------------------------------- +-- 4. Indexes +-- --------------------------------------------------------------------------- + +-- Tenant queries +CREATE INDEX files_database_id_idx + ON object_store_public.files (database_id); + +-- Bucket + tenant queries +CREATE INDEX files_bucket_database_id_idx + ON object_store_public.files (bucket_key, database_id); + +-- "My uploads" queries +CREATE INDEX files_created_by_database_id_created_at_idx + ON object_store_public.files (created_by, database_id, created_at DESC); + +-- Back-reference lookups (cleanup worker, attachment queries) +CREATE INDEX files_source_ref_idx + ON object_store_public.files (source_table, source_column, source_id); + +-- Pending file reaper (hourly cron) +CREATE INDEX files_pending_created_at_idx + ON object_store_public.files (created_at) + WHERE status = 'pending'; + +-- Stuck processing detection +CREATE INDEX files_processing_idx + ON object_store_public.files (processing_started_at) + WHERE status = 'processing'; + +-- Deletion job queue +CREATE INDEX files_deleting_idx + ON 
object_store_public.files (updated_at) + WHERE status = 'deleting'; + +-- Time-range scans on large tables +CREATE INDEX files_created_at_brin_idx + ON object_store_public.files USING brin (created_at); + +-- --------------------------------------------------------------------------- +-- 5. Triggers +-- --------------------------------------------------------------------------- + +-- 5a. AFTER INSERT -- enqueue process-image job +-- NOTE: Version rows are inserted with status = 'ready', which intentionally +-- bypasses this trigger (condition: NEW.status = 'pending'). Only origin +-- uploads (status = 'pending') need processing. + +CREATE OR REPLACE FUNCTION object_store_public.files_after_insert_queue_processing() +RETURNS trigger AS $$ +BEGIN + PERFORM app_jobs.add_job( + 'process-image', + json_build_object( + 'file_id', NEW.id, + 'database_id', NEW.database_id + ), + job_key := 'file:' || NEW.id::text + ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_insert_queue_processing + AFTER INSERT ON object_store_public.files + FOR EACH ROW + WHEN (NEW.status = 'pending') + EXECUTE FUNCTION object_store_public.files_after_insert_queue_processing(); + +COMMENT ON TRIGGER files_after_insert_queue_processing ON object_store_public.files IS + 'Enqueues process-image job for new origin uploads. Version rows inserted as ready intentionally bypass this trigger -- they do not need processing.'; + +-- 5b. 
BEFORE UPDATE -- timestamp + state machine + +CREATE OR REPLACE FUNCTION object_store_public.files_before_update_timestamp() +RETURNS trigger AS $$ +BEGIN + -- Always update timestamp + NEW.updated_at := now(); + + -- State machine validation (only when status changes) + IF OLD.status IS DISTINCT FROM NEW.status THEN + IF NOT ( + (OLD.status = 'pending' AND NEW.status IN ('processing', 'error')) + OR (OLD.status = 'processing' AND NEW.status IN ('ready', 'error', 'deleting')) + OR (OLD.status = 'ready' AND NEW.status = 'deleting') + OR (OLD.status = 'error' AND NEW.status IN ('deleting', 'pending')) + ) THEN + RAISE EXCEPTION 'Invalid status transition from % to %', OLD.status, NEW.status; + END IF; + + -- Track processing start/end + IF NEW.status = 'processing' THEN + NEW.processing_started_at := now(); + ELSIF OLD.status = 'processing' AND NEW.status <> 'processing' THEN + NEW.processing_started_at := NULL; + END IF; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_before_update_timestamp + BEFORE UPDATE ON object_store_public.files + FOR EACH ROW + EXECUTE FUNCTION object_store_public.files_before_update_timestamp(); + +COMMENT ON TRIGGER files_before_update_timestamp ON object_store_public.files IS + 'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.'; + +-- 5c. 
AFTER UPDATE -- enqueue delete_s3_object job + +CREATE OR REPLACE FUNCTION object_store_public.files_after_update_queue_deletion() +RETURNS trigger AS $$ +BEGIN + PERFORM app_jobs.add_job( + 'delete_s3_object', + json_build_object( + 'file_id', NEW.id, + 'database_id', NEW.database_id, + 'key', NEW.key + ), + job_key := 'delete:' || NEW.id::text + ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_update_queue_deletion + AFTER UPDATE ON object_store_public.files + FOR EACH ROW + WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting') + EXECUTE FUNCTION object_store_public.files_after_update_queue_deletion(); + +COMMENT ON TRIGGER files_after_update_queue_deletion ON object_store_public.files IS + 'Enqueues delete_s3_object job when a file transitions to deleting status. Each version row gets its own deletion job.'; + +-- 5d. AFTER UPDATE -- re-enqueue process-image on error->pending retry + +CREATE OR REPLACE FUNCTION object_store_public.files_after_update_queue_retry() +RETURNS trigger AS $$ +BEGIN + PERFORM app_jobs.add_job( + 'process-image', + json_build_object( + 'file_id', NEW.id, + 'database_id', NEW.database_id + ), + job_key := 'file:' || NEW.id::text + ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_update_queue_retry + AFTER UPDATE ON object_store_public.files + FOR EACH ROW + WHEN (OLD.status = 'error' AND NEW.status = 'pending') + EXECUTE FUNCTION object_store_public.files_after_update_queue_retry(); + +COMMENT ON TRIGGER files_after_update_queue_retry ON object_store_public.files IS + 'Re-enqueues process-image job when a file is retried (error->pending). Without this trigger, the retry would change status but never re-enqueue the processing job.'; + +-- --------------------------------------------------------------------------- +-- 6. 
RLS Policies & Grants +-- --------------------------------------------------------------------------- + +ALTER TABLE object_store_public.files ENABLE ROW LEVEL SECURITY; +ALTER TABLE object_store_public.files FORCE ROW LEVEL SECURITY; + +-- Policy 1: Tenant isolation (all operations, all authenticated roles) +CREATE POLICY files_tenant_isolation ON object_store_public.files + FOR ALL + USING (database_id = current_setting('app.database_id')::integer) + WITH CHECK (database_id = current_setting('app.database_id')::integer); + +-- Policy 2: Creator-only for non-ready files (SELECT) +CREATE POLICY files_visibility ON object_store_public.files + FOR SELECT + USING ( + status = 'ready' + OR created_by = current_setting('app.user_id')::uuid + ); + +-- Policy 3: Public bucket read (SELECT, for anonymous access) +CREATE POLICY files_public_bucket_read ON object_store_public.files + FOR SELECT + USING ( + EXISTS ( + SELECT 1 FROM object_store_public.buckets b + WHERE b.key = bucket_key + AND b.database_id = files.database_id + AND b.is_public = true + ) + AND status = 'ready' + ); + +-- Policy 4: Admin override (all operations) +CREATE POLICY files_admin_override ON object_store_public.files + FOR ALL + USING (current_setting('app.role', true) = 'administrator') + WITH CHECK (current_setting('app.role', true) = 'administrator'); + +-- Grants +GRANT SELECT, INSERT, UPDATE ON object_store_public.files TO authenticated; +GRANT SELECT, INSERT, UPDATE, DELETE ON object_store_public.files TO service_role; + +COMMENT ON POLICY files_tenant_isolation ON object_store_public.files IS + 'Every query is scoped to the current tenant via app.database_id session variable.'; +COMMENT ON POLICY files_visibility ON object_store_public.files IS + 'Users see all ready files in their tenant. 
Non-ready files visible only to the uploader.'; +COMMENT ON POLICY files_public_bucket_read ON object_store_public.files IS + 'Allows unauthenticated reads on ready files in public buckets.'; +COMMENT ON POLICY files_admin_override ON object_store_public.files IS + 'Administrators can see and modify all files in the tenant regardless of status or creator.'; + +-- --------------------------------------------------------------------------- +-- 7. Domain Table Triggers +-- --------------------------------------------------------------------------- + +-- 7a. Generic trigger function: back-reference population +-- +-- When a domain table's image/upload/attachment column is updated with an S3 key, +-- find the files row by key and populate source_table, source_column, source_id. +-- Also finds version rows by key prefix and populates the same back-reference. +-- +-- Parameters (passed via TG_ARGV): +-- TG_ARGV[0] = column name (e.g. 'profile_picture') +-- TG_ARGV[1] = schema-qualified table name (e.g. 
'constructive_users_public.users') + +CREATE OR REPLACE FUNCTION object_store_public.populate_file_back_reference() +RETURNS trigger AS $$ +DECLARE + col_name text := TG_ARGV[0]; + table_name text := TG_ARGV[1]; + new_val jsonb; + old_val jsonb; + new_key text; + old_key text; + base_key text; + db_id integer; +BEGIN + -- Get the database_id from session context + db_id := current_setting('app.database_id')::integer; + + -- Extract the jsonb value from the specified column (dynamic) + EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO new_val USING NEW; + EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO old_val USING OLD; + + -- Extract the key from the new and old values + new_key := new_val ->> 'key'; + old_key := old_val ->> 'key'; + + -- If no key change, nothing to do + IF new_key IS NOT DISTINCT FROM old_key THEN + RETURN NEW; + END IF; + + -- Handle file replacement: mark old files as deleting + IF old_key IS NOT NULL AND old_key <> '' THEN + -- Derive base key for the old file (strip version suffix) + base_key := regexp_replace(old_key, '_[^_]+$', ''); + + -- Mark old origin + all versions as deleting + UPDATE object_store_public.files + SET status = 'deleting', status_reason = 'replaced by new file' + WHERE database_id = db_id + AND (key = old_key OR key LIKE base_key || '_%') + AND status NOT IN ('deleting'); + END IF; + + -- Populate back-reference on new file (origin + versions) + IF new_key IS NOT NULL AND new_key <> '' THEN + -- Derive base key for the new file + base_key := regexp_replace(new_key, '_[^_]+$', ''); + + -- Set back-reference on origin + all version rows + UPDATE object_store_public.files + SET source_table = table_name, + source_column = col_name, + source_id = NEW.id + WHERE database_id = db_id + AND (key = new_key OR key LIKE base_key || '_%'); + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +COMMENT ON FUNCTION object_store_public.populate_file_back_reference() IS + 'Generic trigger function for domain tables. 
Populates source_table/source_column/source_id on files rows when image/upload/attachment columns are updated. Handles file replacement by marking old files as deleting.'; + +-- 7b. Generic trigger function: source row deletion +-- +-- When a domain row is deleted, mark all associated files as deleting. + +CREATE OR REPLACE FUNCTION object_store_public.mark_files_deleting_on_source_delete() +RETURNS trigger AS $$ +DECLARE + col_name text := TG_ARGV[0]; + table_name text := TG_ARGV[1]; + db_id integer; +BEGIN + db_id := current_setting('app.database_id')::integer; + + -- Mark all files for this source row + column as deleting + UPDATE object_store_public.files + SET status = 'deleting', status_reason = 'source row deleted' + WHERE database_id = db_id + AND source_table = table_name + AND source_column = col_name + AND source_id = OLD.id + AND status NOT IN ('deleting'); + + RETURN OLD; +END; +$$ LANGUAGE plpgsql; + +COMMENT ON FUNCTION object_store_public.mark_files_deleting_on_source_delete() IS + 'Generic trigger function for domain tables. Marks all associated files as deleting when a domain row is deleted.'; + +-- 7c. CREATE TRIGGER statements for all 6 tables, 9 columns +-- +-- Each domain column gets two triggers: +-- - AFTER UPDATE: back-reference population + file replacement +-- - BEFORE DELETE: mark files deleting on source row deletion +-- +-- These are wrapped in a DO block so they gracefully skip tables that +-- don't exist yet (e.g. in fresh dev environments). In production, +-- domain tables will exist before this migration runs. 
+ +DO $domain_triggers$ +DECLARE + _tbl text; +BEGIN + -- constructive_users_public.users.profile_picture + SELECT 'constructive_users_public.users' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'constructive_users_public' AND table_name = 'users'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER users_profile_picture_file_ref + AFTER UPDATE OF profile_picture ON constructive_users_public.users + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''profile_picture'', ''constructive_users_public.users'')'; + EXECUTE 'CREATE TRIGGER users_profile_picture_file_delete + BEFORE DELETE ON constructive_users_public.users + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''profile_picture'', ''constructive_users_public.users'')'; + RAISE NOTICE 'Created triggers for constructive_users_public.users.profile_picture'; + ELSE + RAISE NOTICE 'Skipped triggers for constructive_users_public.users (table not found)'; + END IF; + + -- constructive_status_public.app_levels.image + SELECT 'constructive_status_public.app_levels' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'constructive_status_public' AND table_name = 'app_levels'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER app_levels_image_file_ref + AFTER UPDATE OF image ON constructive_status_public.app_levels + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''image'', ''constructive_status_public.app_levels'')'; + EXECUTE 'CREATE TRIGGER app_levels_image_file_delete + BEFORE DELETE ON constructive_status_public.app_levels + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''image'', ''constructive_status_public.app_levels'')'; + RAISE NOTICE 'Created triggers for constructive_status_public.app_levels.image'; + ELSE + RAISE NOTICE 'Skipped triggers for constructive_status_public.app_levels (table not found)'; + END IF; + + -- services_public.sites 
(og_image, apple_touch_icon, logo, favicon) + SELECT 'services_public.sites' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'services_public' AND table_name = 'sites'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER sites_og_image_file_ref + AFTER UPDATE OF og_image ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''og_image'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_og_image_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.sites'')'; + + EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_ref + AFTER UPDATE OF apple_touch_icon ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''apple_touch_icon'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''apple_touch_icon'', ''services_public.sites'')'; + + EXECUTE 'CREATE TRIGGER sites_logo_file_ref + AFTER UPDATE OF logo ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''logo'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_logo_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''logo'', ''services_public.sites'')'; + + EXECUTE 'CREATE TRIGGER sites_favicon_file_ref + AFTER UPDATE OF favicon ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''favicon'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_favicon_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION 
object_store_public.mark_files_deleting_on_source_delete(''favicon'', ''services_public.sites'')'; + RAISE NOTICE 'Created triggers for services_public.sites (og_image, apple_touch_icon, logo, favicon)'; + ELSE + RAISE NOTICE 'Skipped triggers for services_public.sites (table not found)'; + END IF; + + -- services_public.apps.app_image + SELECT 'services_public.apps' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'services_public' AND table_name = 'apps'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER apps_app_image_file_ref + AFTER UPDATE OF app_image ON services_public.apps + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''app_image'', ''services_public.apps'')'; + EXECUTE 'CREATE TRIGGER apps_app_image_file_delete + BEFORE DELETE ON services_public.apps + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''app_image'', ''services_public.apps'')'; + RAISE NOTICE 'Created triggers for services_public.apps.app_image'; + ELSE + RAISE NOTICE 'Skipped triggers for services_public.apps (table not found)'; + END IF; + + -- services_public.site_metadata.og_image + SELECT 'services_public.site_metadata' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'services_public' AND table_name = 'site_metadata'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_ref + AFTER UPDATE OF og_image ON services_public.site_metadata + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''og_image'', ''services_public.site_metadata'')'; + EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_delete + BEFORE DELETE ON services_public.site_metadata + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.site_metadata'')'; + RAISE NOTICE 'Created triggers for services_public.site_metadata.og_image'; + ELSE + RAISE NOTICE 'Skipped triggers for services_public.site_metadata (table 
not found)'; + END IF; + + -- db_migrate.migrate_files.upload + SELECT 'db_migrate.migrate_files' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'db_migrate' AND table_name = 'migrate_files'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER migrate_files_upload_file_ref + AFTER UPDATE OF upload ON db_migrate.migrate_files + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''upload'', ''db_migrate.migrate_files'')'; + EXECUTE 'CREATE TRIGGER migrate_files_upload_file_delete + BEFORE DELETE ON db_migrate.migrate_files + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''upload'', ''db_migrate.migrate_files'')'; + RAISE NOTICE 'Created triggers for db_migrate.migrate_files.upload'; + ELSE + RAISE NOTICE 'Skipped triggers for db_migrate.migrate_files (table not found)'; + END IF; +END +$domain_triggers$; + +COMMIT; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index efe3dee61..6ea1ca196 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -2628,6 +2628,9 @@ importers: '@aws-sdk/lib-storage': specifier: ^3.1001.0 version: 3.1001.0(@aws-sdk/client-s3@3.1001.0) + '@aws-sdk/s3-request-presigner': + specifier: ^3.1001.0 + version: 3.1007.0 '@constructive-io/content-type-stream': specifier: workspace:^ version: link:../content-type-stream/dist @@ -2749,6 +2752,10 @@ packages: resolution: {integrity: sha512-Nasoyb5K4jfvncTKQyA13q55xHoz9as01NVYP05B0Kzux/X5UhMn3qXsZDyWOSXkfSCAIrMBKmVVWbI0vUapdQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/core@3.973.19': + resolution: {integrity: sha512-56KePyOcZnKTWCd89oJS1G6j3HZ9Kc+bh/8+EbvtaCCXdP6T7O7NzCiPuHRhFLWnzXIaXX3CxAz0nI5My9spHQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/crc64-nvme@3.972.3': resolution: {integrity: sha512-UExeK+EFiq5LAcbHm96CQLSia+5pvpUVSAsVApscBzayb7/6dJBJKwV4/onsk4VbWSmqxDMcfuTD+pC4RxgZHg==} engines: {node: '>=20.0.0'} @@ -2823,6 +2830,10 @@ packages: resolution: {integrity: 
sha512-U4K1rqyJYvT/zgTI3+rN+MToa51dFnnq1VSsVJuJWPNEKcEnuZVqf7yTpkJJMkYixVW5TTi1dgupd+nmJ0JyWw==} engines: {node: '>=20.0.0'} + '@aws-sdk/middleware-sdk-s3@3.972.19': + resolution: {integrity: sha512-/CtOHHVFg4ZuN6CnLnYkrqWgVEnbOBC4kNiKa+4fldJ9cioDt3dD/f5vpq0cWLOXwmGL2zgVrVxNhjxWpxNMkg==} + engines: {node: '>=20.0.0'} + '@aws-sdk/middleware-ssec@3.972.6': resolution: {integrity: sha512-acvMUX9jF4I2Ew+Z/EA6gfaFaz9ehci5wxBmXCZeulLuv8m+iGf6pY9uKz8TPjg39bdAz3hxoE0eLP8Qz+IYlA==} engines: {node: '>=20.0.0'} @@ -2839,10 +2850,18 @@ packages: resolution: {integrity: sha512-Aa5PusHLXAqLTX1UKDvI3pHQJtIsF7Q+3turCHqfz/1F61/zDMWfbTC8evjhrrYVAtz9Vsv3SJ/waSUeu7B6gw==} engines: {node: '>=20.0.0'} + '@aws-sdk/s3-request-presigner@3.1007.0': + resolution: {integrity: sha512-TZmNzomZxwmIlyi+h8i0j561j4ryDNazUnoEszJTYOuk57RA7NUKQzNvRYUoKOChbFfvDzTy6PR5SRXfu0vaVw==} + engines: {node: '>=20.0.0'} + '@aws-sdk/signature-v4-multi-region@3.996.4': resolution: {integrity: sha512-MGa8ro0onekYIiesHX60LwKdkxK3Kd61p7TTbLwZemBqlnD9OLrk9sXZdFOIxXanJ+3AaJnV/jiX866eD/4PDg==} engines: {node: '>=20.0.0'} + '@aws-sdk/signature-v4-multi-region@3.996.7': + resolution: {integrity: sha512-mYhh7FY+7OOqjkYkd6+6GgJOsXK1xBWmuR+c5mxJPj2kr5TBNeZq+nUvE9kANWAux5UxDVrNOSiEM/wlHzC3Lg==} + engines: {node: '>=20.0.0'} + '@aws-sdk/token-providers@3.1001.0': resolution: {integrity: sha512-09XAq/uIYgeZhohuGRrR/R+ek3+ljFNdzWCXdqb9rlIERDjSfNiLjTtpHgSK1xTPmC5G4yWoEAyMfTXiggS6wA==} engines: {node: '>=20.0.0'} @@ -2851,14 +2870,26 @@ packages: resolution: {integrity: sha512-RW60aH26Bsc016Y9B98hC0Plx6fK5P2v/iQYwMzrSjiDh1qRMUCP6KrXHYEHe3uFvKiOC93Z9zk4BJsUi6Tj1Q==} engines: {node: '>=20.0.0'} + '@aws-sdk/types@3.973.5': + resolution: {integrity: sha512-hl7BGwDCWsjH8NkZfx+HgS7H2LyM2lTMAI7ba9c8O0KqdBLTdNJivsHpqjg9rNlAlPyREb6DeDRXUl0s8uFdmQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/util-arn-parser@3.972.2': resolution: {integrity: sha512-VkykWbqMjlSgBFDyrY3nOSqupMc6ivXuGmvci6Q3NnLq5kC+mKQe2QBZ4nrWRE/jqOxeFP2uYzLtwncYYcvQDg==} 
engines: {node: '>=20.0.0'} + '@aws-sdk/util-arn-parser@3.972.3': + resolution: {integrity: sha512-HzSD8PMFrvgi2Kserxuff5VitNq2sgf3w9qxmskKDiDTThWfVteJxuCS9JXiPIPtmCrp+7N9asfIaVhBFORllA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/util-endpoints@3.996.3': resolution: {integrity: sha512-yWIQSNiCjykLL+ezN5A+DfBb1gfXTytBxm57e64lYmwxDHNmInYHRJYYRAGWG1o77vKEiWaw4ui28e3yb1k5aQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/util-format-url@3.972.7': + resolution: {integrity: sha512-V+PbnWfUl93GuFwsOHsAq7hY/fnm9kElRqR8IexIJr5Rvif9e614X5sGSyz3mVSf1YAZ+VTy63W1/pGdA55zyA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/util-locate-window@3.965.4': resolution: {integrity: sha512-H1onv5SkgPBK2P6JR2MjGgbOnttoNzSPIRoeZTNPZYyaplwGg50zS3amXvXqF0/qfXpWEC9rLWU564QTB9bSog==} engines: {node: '>=20.0.0'} @@ -2875,6 +2906,10 @@ packages: aws-crt: optional: true + '@aws-sdk/xml-builder@3.972.10': + resolution: {integrity: sha512-OnejAIVD+CxzyAUrVic7lG+3QRltyja9LoNqCE/1YVs8ichoTbJlVSaZ9iSMcnHLyzrSNtvaOGjSDRP+d/ouFA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/xml-builder@3.972.9': resolution: {integrity: sha512-ItnlMgSqkPrUfJs7EsvU/01zw5UeIb2tNPhD09LBLHbg+g+HDiKibSLwpkuz/ZIlz4F2IMn+5XgE4AK/pfPuog==} engines: {node: '>=20.0.0'} @@ -3906,24 +3941,28 @@ packages: engines: {node: '>= 10'} cpu: [arm64] os: [linux] + libc: [glibc] '@nx/nx-linux-arm64-musl@20.8.3': resolution: {integrity: sha512-LTTGzI8YVPlF1v0YlVf+exM+1q7rpsiUbjTTHJcfHFRU5t4BsiZD54K19Y1UBg1XFx5cwhEaIomSmJ88RwPPVQ==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] + libc: [musl] '@nx/nx-linux-x64-gnu@20.8.3': resolution: {integrity: sha512-SlA4GtXvQbSzSIWLgiIiLBOjdINPOUR/im+TUbaEMZ8wiGrOY8cnk0PVt95TIQJVBeXBCeb5HnoY0lHJpMOODg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] + libc: [glibc] '@nx/nx-linux-x64-musl@20.8.3': resolution: {integrity: sha512-MNzkEwPktp5SQH9dJDH2wP9hgG9LsBDhKJXJfKw6sUI/6qz5+/aAjFziKy+zBnhU4AO1yXt5qEWzR8lDcIriVQ==} engines: {node: '>= 10'} cpu: [x64] os: [linux] + libc: [musl] '@nx/nx-win32-arm64-msvc@20.8.3': 
resolution: {integrity: sha512-qUV7CyXKwRCM/lkvyS6Xa1MqgAuK5da6w27RAehh7LATBUKn1I4/M7DGn6L7ERCxpZuh1TrDz9pUzEy0R+Ekkg==} @@ -4042,48 +4081,56 @@ packages: engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] + libc: [glibc] '@oxfmt/binding-linux-arm64-musl@0.36.0': resolution: {integrity: sha512-SPGLJkOIHSIC6ABUQ5V8NqJpvYhMJueJv26NYqfCnwi/Mn6A61amkpJJ9Suy0Nmvs+OWESJpcebrBUbXPGZyQQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] + libc: [musl] '@oxfmt/binding-linux-ppc64-gnu@0.36.0': resolution: {integrity: sha512-3EuoyB8x9x8ysYJjbEO/M9fkSk72zQKnXCvpZMDHXlnY36/1qMp55Nm0PrCwjGO/1pen5hdOVkz9WmP3nAp2IQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] + libc: [glibc] '@oxfmt/binding-linux-riscv64-gnu@0.36.0': resolution: {integrity: sha512-MpY3itLwpGh8dnywtrZtaZ604T1m715SydCKy0+qTxetv+IHzuA+aO/AGzrlzUNYZZmtWtmDBrChZGibvZxbRQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] + libc: [glibc] '@oxfmt/binding-linux-riscv64-musl@0.36.0': resolution: {integrity: sha512-mmDhe4Vtx+XwQPRPn/V25+APnkApYgZ23q+6GVsNYY98pf3aU0aI3Me96pbRs/AfJ1jIiGC+/6q71FEu8dHcHw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] + libc: [musl] '@oxfmt/binding-linux-s390x-gnu@0.36.0': resolution: {integrity: sha512-AYXhU+DmNWLSnvVwkHM92fuYhogtVHab7UQrPNaDf1sxadugg9gWVmcgJDlIwxJdpk5CVW/TFvwUKwI432zhhA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] + libc: [glibc] '@oxfmt/binding-linux-x64-gnu@0.36.0': resolution: {integrity: sha512-H16QhhQ3usoakMleiAAQ2mg0NsBDAdyE9agUgfC8IHHh3jZEbr0rIKwjEqwbOHK5M0EmfhJmr+aGO/MgZPsneA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] + libc: [glibc] '@oxfmt/binding-linux-x64-musl@0.36.0': resolution: {integrity: sha512-EFFGkixA39BcmHiCe2ECdrq02D6FCve5ka6ObbvrheXl4V+R0U/E+/uLyVx1X65LW8TA8QQHdnbdDallRekohw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] + libc: [musl] '@oxfmt/binding-openharmony-arm64@0.36.0': resolution: {integrity: 
sha512-zr/t369wZWFOj1qf06Z5gGNjFymfUNDrxKMmr7FKiDRVI1sNsdKRCuRL4XVjtcptKQ+ao3FfxLN1vrynivmCYg==} @@ -4599,66 +4646,79 @@ packages: resolution: {integrity: sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==} cpu: [arm] os: [linux] + libc: [glibc] '@rollup/rollup-linux-arm-musleabihf@4.57.1': resolution: {integrity: sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==} cpu: [arm] os: [linux] + libc: [musl] '@rollup/rollup-linux-arm64-gnu@4.57.1': resolution: {integrity: sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==} cpu: [arm64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-arm64-musl@4.57.1': resolution: {integrity: sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==} cpu: [arm64] os: [linux] + libc: [musl] '@rollup/rollup-linux-loong64-gnu@4.57.1': resolution: {integrity: sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==} cpu: [loong64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-loong64-musl@4.57.1': resolution: {integrity: sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==} cpu: [loong64] os: [linux] + libc: [musl] '@rollup/rollup-linux-ppc64-gnu@4.57.1': resolution: {integrity: sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==} cpu: [ppc64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-ppc64-musl@4.57.1': resolution: {integrity: sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==} cpu: [ppc64] os: [linux] + libc: [musl] '@rollup/rollup-linux-riscv64-gnu@4.57.1': resolution: {integrity: sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==} cpu: [riscv64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-riscv64-musl@4.57.1': resolution: {integrity: 
sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==} cpu: [riscv64] os: [linux] + libc: [musl] '@rollup/rollup-linux-s390x-gnu@4.57.1': resolution: {integrity: sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==} cpu: [s390x] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-gnu@4.57.1': resolution: {integrity: sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==} cpu: [x64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-musl@4.57.1': resolution: {integrity: sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==} cpu: [x64] os: [linux] + libc: [musl] '@rollup/rollup-openbsd-x64@4.57.1': resolution: {integrity: sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==} @@ -4730,6 +4790,10 @@ packages: resolution: {integrity: sha512-qocxM/X4XGATqQtUkbE9SPUB6wekBi+FyJOMbPj0AhvyvFGYEmOlz6VB22iMePCQsFmMIvFSeViDvA7mZJG47g==} engines: {node: '>=18.0.0'} + '@smithy/abort-controller@4.2.11': + resolution: {integrity: sha512-Hj4WoYWMJnSpM6/kchsm4bUNTL9XiSyhvoMb2KIq4VJzyDt7JpGHUZHkVNPZVC7YE1tf8tPeVauxpFBKGW4/KQ==} + engines: {node: '>=18.0.0'} + '@smithy/chunked-blob-reader-native@4.2.2': resolution: {integrity: sha512-QzzYIlf4yg0w5TQaC9VId3B3ugSk1MI/wb7tgcHtd7CBV9gNRKZrhc2EPSxSZuDy10zUZ0lomNMgkc6/VVe8xg==} engines: {node: '>=18.0.0'} @@ -4746,6 +4810,10 @@ packages: resolution: {integrity: sha512-/+ldRdtiO5Cb26afAZOG1FZM0x7D4AYdjpyOv2OScJw+4C7X+OLdRnNKF5UyUE0VpPgSKr3rnF/kvprRA4h2kg==} engines: {node: '>=18.0.0'} + '@smithy/core@3.23.9': + resolution: {integrity: sha512-1Vcut4LEL9HZsdpI0vFiRYIsaoPwZLjAxnVQDUMQK8beMS+EYPLDQCXtbzfxmM5GzSgjfe2Q9M7WaXwIMQllyQ==} + engines: {node: '>=18.0.0'} + '@smithy/credential-provider-imds@4.2.10': resolution: {integrity: sha512-3bsMLJJLTZGZqVGGeBVFfLzuRulVsGTj12BzRKODTHqUABpIr0jMN1vN3+u6r2OfyhAQ2pXaMZWX/swBK5I6PQ==} engines: 
{node: '>=18.0.0'} @@ -4774,6 +4842,10 @@ packages: resolution: {integrity: sha512-muS5tFw+A/uo+U+yig06vk1776UFM+aAp9hFM8efI4ZcHhTcgv6NTeK4x7ltHeMPBwnhEjcf0MULTyxNkSNxDw==} engines: {node: '>=18.0.0'} + '@smithy/fetch-http-handler@5.3.13': + resolution: {integrity: sha512-U2Hcfl2s3XaYjikN9cT4mPu8ybDbImV3baXR0PkVlC0TTx808bRP3FaPGAzPtB8OByI+JqJ1kyS+7GEgae7+qQ==} + engines: {node: '>=18.0.0'} + '@smithy/hash-blob-browser@4.2.11': resolution: {integrity: sha512-DrcAx3PM6AEbWZxsKl6CWAGnVwiz28Wp1ZhNu+Hi4uI/6C1PIZBIaPM2VoqBDAsOWbM6ZVzOEQMxFLLdmb4eBQ==} engines: {node: '>=18.0.0'} @@ -4798,6 +4870,10 @@ packages: resolution: {integrity: sha512-Yfu664Qbf1B4IYIsYgKoABt010daZjkaCRvdU/sPnZG6TtHOB0md0RjNdLGzxe5UIdn9js4ftPICzmkRa9RJ4Q==} engines: {node: '>=18.0.0'} + '@smithy/is-array-buffer@4.2.2': + resolution: {integrity: sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow==} + engines: {node: '>=18.0.0'} + '@smithy/md5-js@4.2.10': resolution: {integrity: sha512-Op+Dh6dPLWTjWITChFayDllIaCXRofOed8ecpggTC5fkh8yXes0vAEX7gRUfjGK+TlyxoCAA05gHbZW/zB9JwQ==} engines: {node: '>=18.0.0'} @@ -4810,6 +4886,10 @@ packages: resolution: {integrity: sha512-CoVGZaqIC0tEjz0ga3ciwCMA5fd/4lIOwO2wx0fH+cTi1zxSFZnMJbIiIF9G1d4vRSDyTupDrpS3FKBBJGkRZg==} engines: {node: '>=18.0.0'} + '@smithy/middleware-endpoint@4.4.23': + resolution: {integrity: sha512-UEFIejZy54T1EJn2aWJ45voB7RP2T+IRzUqocIdM6GFFa5ClZncakYJfcYnoXt3UsQrZZ9ZRauGm77l9UCbBLw==} + engines: {node: '>=18.0.0'} + '@smithy/middleware-retry@4.4.38': resolution: {integrity: sha512-WdHvdhjE6Fj78vxFwDKFDwlqGOGRUWrwGeuENUbTVE46Su9mnQM+dXHtbnCaQvwuSYrRsjpe8zUsFpwUp/azlA==} engines: {node: '>=18.0.0'} @@ -4818,34 +4898,66 @@ packages: resolution: {integrity: sha512-STQdONGPwbbC7cusL60s7vOa6He6A9w2jWhoapL0mgVjmR19pr26slV+yoSP76SIssMTX/95e5nOZ6UQv6jolg==} engines: {node: '>=18.0.0'} + '@smithy/middleware-serde@4.2.12': + resolution: {integrity: 
sha512-W9g1bOLui7Xn5FABRVS0o3rXL0gfN37d/8I/W7i0N7oxjx9QecUmXEMSUMADTODwdtka9cN43t5BI2CodLJpng==} + engines: {node: '>=18.0.0'} + '@smithy/middleware-stack@4.2.10': resolution: {integrity: sha512-pmts/WovNcE/tlyHa8z/groPeOtqtEpp61q3W0nW1nDJuMq/x+hWa/OVQBtgU0tBqupeXq0VBOLA4UZwE8I0YA==} engines: {node: '>=18.0.0'} + '@smithy/middleware-stack@4.2.11': + resolution: {integrity: sha512-s+eenEPW6RgliDk2IhjD2hWOxIx1NKrOHxEwNUaUXxYBxIyCcDfNULZ2Mu15E3kwcJWBedTET/kEASPV1A1Akg==} + engines: {node: '>=18.0.0'} + '@smithy/node-config-provider@4.3.10': resolution: {integrity: sha512-UALRbJtVX34AdP2VECKVlnNgidLHA2A7YgcJzwSBg1hzmnO/bZBHl/LDQQyYifzUwp1UOODnl9JJ3KNawpUJ9w==} engines: {node: '>=18.0.0'} + '@smithy/node-config-provider@4.3.11': + resolution: {integrity: sha512-xD17eE7kaLgBBGf5CZQ58hh2YmwK1Z0O8YhffwB/De2jsL0U3JklmhVYJ9Uf37OtUDLF2gsW40Xwwag9U869Gg==} + engines: {node: '>=18.0.0'} + '@smithy/node-http-handler@4.4.13': resolution: {integrity: sha512-o8CP8w6tlUA0lk+Qfwm6Ed0jCWk3bEY6iBOJjdBaowbXKCSClk8zIHQvUL6RUZMvuNafF27cbRCMYqw6O1v4aA==} engines: {node: '>=18.0.0'} + '@smithy/node-http-handler@4.4.14': + resolution: {integrity: sha512-DamSqaU8nuk0xTJDrYnRzZndHwwRnyj/n/+RqGGCcBKB4qrQem0mSDiWdupaNWdwxzyMU91qxDmHOCazfhtO3A==} + engines: {node: '>=18.0.0'} + '@smithy/property-provider@4.2.10': resolution: {integrity: sha512-5jm60P0CU7tom0eNrZ7YrkgBaoLFXzmqB0wVS+4uK8PPGmosSrLNf6rRd50UBvukztawZ7zyA8TxlrKpF5z9jw==} engines: {node: '>=18.0.0'} + '@smithy/property-provider@4.2.11': + resolution: {integrity: sha512-14T1V64o6/ndyrnl1ze1ZhyLzIeYNN47oF/QU6P5m82AEtyOkMJTb0gO1dPubYjyyKuPD6OSVMPDKe+zioOnCg==} + engines: {node: '>=18.0.0'} + '@smithy/protocol-http@5.3.10': resolution: {integrity: sha512-2NzVWpYY0tRdfeCJLsgrR89KE3NTWT2wGulhNUxYlRmtRmPwLQwKzhrfVaiNlA9ZpJvbW7cjTVChYKgnkqXj1A==} engines: {node: '>=18.0.0'} + '@smithy/protocol-http@5.3.11': + resolution: {integrity: sha512-hI+barOVDJBkNt4y0L2mu3Ugc0w7+BpJ2CZuLwXtSltGAAwCb3IvnalGlbDV/UCS6a9ZuT3+exd1WxNdLb5IlQ==} + engines: 
{node: '>=18.0.0'} + '@smithy/querystring-builder@4.2.10': resolution: {integrity: sha512-HeN7kEvuzO2DmAzLukE9UryiUvejD3tMp9a1D1NJETerIfKobBUCLfviP6QEk500166eD2IATaXM59qgUI+YDA==} engines: {node: '>=18.0.0'} + '@smithy/querystring-builder@4.2.11': + resolution: {integrity: sha512-7spdikrYiljpket6u0up2Ck2mxhy7dZ0+TDd+S53Dg2DHd6wg+YNJrTCHiLdgZmEXZKI7LJZcwL3721ZRDFiqA==} + engines: {node: '>=18.0.0'} + '@smithy/querystring-parser@4.2.10': resolution: {integrity: sha512-4Mh18J26+ao1oX5wXJfWlTT+Q1OpDR8ssiC9PDOuEgVBGloqg18Fw7h5Ct8DyT9NBYwJgtJ2nLjKKFU6RP1G1Q==} engines: {node: '>=18.0.0'} + '@smithy/querystring-parser@4.2.11': + resolution: {integrity: sha512-nE3IRNjDltvGcoThD2abTozI1dkSy8aX+a2N1Rs55en5UsdyyIXgGEmevUL3okZFoJC77JgRGe99xYohhsjivQ==} + engines: {node: '>=18.0.0'} + '@smithy/service-error-classification@4.2.10': resolution: {integrity: sha512-0R/+/Il5y8nB/By90o8hy/bWVYptbIfvoTYad0igYQO5RefhNCDmNzqxaMx7K1t/QWo0d6UynqpqN5cCQt1MCg==} engines: {node: '>=18.0.0'} @@ -4854,14 +4966,26 @@ packages: resolution: {integrity: sha512-pHgASxl50rrtOztgQCPmOXFjRW+mCd7ALr/3uXNzRrRoGV5G2+78GOsQ3HlQuBVHCh9o6xqMNvlIKZjWn4Euug==} engines: {node: '>=18.0.0'} + '@smithy/shared-ini-file-loader@4.4.6': + resolution: {integrity: sha512-IB/M5I8G0EeXZTHsAxpx51tMQ5R719F3aq+fjEB6VtNcCHDc0ajFDIGDZw+FW9GxtEkgTduiPpjveJdA/CX7sw==} + engines: {node: '>=18.0.0'} + '@smithy/signature-v4@5.3.10': resolution: {integrity: sha512-Wab3wW8468WqTKIxI+aZe3JYO52/RYT/8sDOdzkUhjnLakLe9qoQqIcfih/qxcF4qWEFoWBszY0mj5uxffaVXA==} engines: {node: '>=18.0.0'} + '@smithy/signature-v4@5.3.11': + resolution: {integrity: sha512-V1L6N9aKOBAN4wEHLyqjLBnAz13mtILU0SeDrjOaIZEeN6IFa6DxwRt1NNpOdmSpQUfkBj0qeD3m6P77uzMhgQ==} + engines: {node: '>=18.0.0'} + '@smithy/smithy-client@4.12.1': resolution: {integrity: sha512-Xf9UFHlAihewfkmLNZ6I/Ek6kcYBKoU3cbRS9Z4q++9GWoW0YFbAHs7wMbuXm+nGuKHZ5OKheZMuDdaWPv8DJw==} engines: {node: '>=18.0.0'} + '@smithy/smithy-client@4.12.3': + resolution: {integrity: 
sha512-7k4UxjSpHmPN2AxVhvIazRSzFQjWnud3sOsXcFStzagww17j1cFQYqTSiQ8xuYK3vKLR1Ni8FzuT3VlKr3xCNw==} + engines: {node: '>=18.0.0'} + '@smithy/types@4.13.0': resolution: {integrity: sha512-COuLsZILbbQsdrwKQpkkpyep7lCsByxwj7m0Mg5v66/ZTyenlfBc40/QFQ5chO0YN/PNEH1Bi3fGtfXPnYNeDw==} engines: {node: '>=18.0.0'} @@ -4870,14 +4994,26 @@ packages: resolution: {integrity: sha512-uypjF7fCDsRk26u3qHmFI/ePL7bxxB9vKkE+2WKEciHhz+4QtbzWiHRVNRJwU3cKhrYDYQE3b0MRFtqfLYdA4A==} engines: {node: '>=18.0.0'} + '@smithy/url-parser@4.2.11': + resolution: {integrity: sha512-oTAGGHo8ZYc5VZsBREzuf5lf2pAurJQsccMusVZ85wDkX66ojEc/XauiGjzCj50A61ObFTPe6d7Pyt6UBYaing==} + engines: {node: '>=18.0.0'} + '@smithy/util-base64@4.3.1': resolution: {integrity: sha512-BKGuawX4Doq/bI/uEmg+Zyc36rJKWuin3py89PquXBIBqmbnJwBBsmKhdHfNEp0+A4TDgLmT/3MSKZ1SxHcR6w==} engines: {node: '>=18.0.0'} + '@smithy/util-base64@4.3.2': + resolution: {integrity: sha512-XRH6b0H/5A3SgblmMa5ErXQ2XKhfbQB+Fm/oyLZ2O2kCUrwgg55bU0RekmzAhuwOjA9qdN5VU2BprOvGGUkOOQ==} + engines: {node: '>=18.0.0'} + '@smithy/util-body-length-browser@4.2.1': resolution: {integrity: sha512-SiJeLiozrAoCrgDBUgsVbmqHmMgg/2bA15AzcbcW+zan7SuyAVHN4xTSbq0GlebAIwlcaX32xacnrG488/J/6g==} engines: {node: '>=18.0.0'} + '@smithy/util-body-length-browser@4.2.2': + resolution: {integrity: sha512-JKCrLNOup3OOgmzeaKQwi4ZCTWlYR5H4Gm1r2uTMVBXoemo1UEghk5vtMi1xSu2ymgKVGW631e2fp9/R610ZjQ==} + engines: {node: '>=18.0.0'} + '@smithy/util-body-length-node@4.2.2': resolution: {integrity: sha512-4rHqBvxtJEBvsZcFQSPQqXP2b/yy/YlB66KlcEgcH2WNoOKCKB03DSLzXmOsXjbl8dJ4OEYTn31knhdznwk7zw==} engines: {node: '>=18.0.0'} @@ -4890,10 +5026,18 @@ packages: resolution: {integrity: sha512-/swhmt1qTiVkaejlmMPPDgZhEaWb/HWMGRBheaxwuVkusp/z+ErJyQxO6kaXumOciZSWlmq6Z5mNylCd33X7Ig==} engines: {node: '>=18.0.0'} + '@smithy/util-buffer-from@4.2.2': + resolution: {integrity: sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q==} + engines: {node: '>=18.0.0'} + 
'@smithy/util-config-provider@4.2.1': resolution: {integrity: sha512-462id/00U8JWFw6qBuTSWfN5TxOHvDu4WliI97qOIOnuC/g+NDAknTU8eoGXEPlLkRVgWEr03jJBLV4o2FL8+A==} engines: {node: '>=18.0.0'} + '@smithy/util-config-provider@4.2.2': + resolution: {integrity: sha512-dWU03V3XUprJwaUIFVv4iOnS1FC9HnMHDfUrlNDSh4315v0cWyaIErP8KiqGVbf5z+JupoVpNM7ZB3jFiTejvQ==} + engines: {node: '>=18.0.0'} + '@smithy/util-defaults-mode-browser@4.3.37': resolution: {integrity: sha512-JlPZhV1kQCGNJgofRTU6E8kHrjCKsb6cps8gco8QDVaFl7biFYzHg0p1x89ytIWyVyCkY3nOpO8tJPM47Vqlww==} engines: {node: '>=18.0.0'} @@ -4910,10 +5054,18 @@ packages: resolution: {integrity: sha512-c1hHtkgAWmE35/50gmdKajgGAKV3ePJ7t6UtEmpfCWJmQE9BQAQPz0URUVI89eSkcDqCtzqllxzG28IQoZPvwA==} engines: {node: '>=18.0.0'} + '@smithy/util-hex-encoding@4.2.2': + resolution: {integrity: sha512-Qcz3W5vuHK4sLQdyT93k/rfrUwdJ8/HZ+nMUOyGdpeGA1Wxt65zYwi3oEl9kOM+RswvYq90fzkNDahPS8K0OIg==} + engines: {node: '>=18.0.0'} + '@smithy/util-middleware@4.2.10': resolution: {integrity: sha512-LxaQIWLp4y0r72eA8mwPNQ9va4h5KeLM0I3M/HV9klmFaY2kN766wf5vsTzmaOpNNb7GgXAd9a25P3h8T49PSA==} engines: {node: '>=18.0.0'} + '@smithy/util-middleware@4.2.11': + resolution: {integrity: sha512-r3dtF9F+TpSZUxpOVVtPfk09Rlo4lT6ORBqEvX3IBT6SkQAdDSVKR5GcfmZbtl7WKhKnmb3wbDTQ6ibR2XHClw==} + engines: {node: '>=18.0.0'} + '@smithy/util-retry@4.2.10': resolution: {integrity: sha512-HrBzistfpyE5uqTwiyLsFHscgnwB0kgv8vySp7q5kZ0Eltn/tjosaSGGDj/jJ9ys7pWzIP/icE2d+7vMKXLv7A==} engines: {node: '>=18.0.0'} @@ -4922,10 +5074,18 @@ packages: resolution: {integrity: sha512-c7awZV6cxY0czgDDSr+Bz0XfRtg8AwW2BWhrHhLJISrpmwv8QzA2qzTllWyMVNdy1+UJr9vCm29hzuh3l8TTFw==} engines: {node: '>=18.0.0'} + '@smithy/util-stream@4.5.17': + resolution: {integrity: sha512-793BYZ4h2JAQkNHcEnyFxDTcZbm9bVybD0UV/LEWmZ5bkTms7JqjfrLMi2Qy0E5WFcCzLwCAPgcvcvxoeALbAQ==} + engines: {node: '>=18.0.0'} + '@smithy/util-uri-escape@4.2.1': resolution: {integrity: 
sha512-YmiUDn2eo2IOiWYYvGQkgX5ZkBSiTQu4FlDo5jNPpAxng2t6Sjb6WutnZV9l6VR4eJul1ABmCrnWBC9hKHQa6Q==} engines: {node: '>=18.0.0'} + '@smithy/util-uri-escape@4.2.2': + resolution: {integrity: sha512-2kAStBlvq+lTXHyAZYfJRb/DfS3rsinLiwb+69SstC9Vb0s9vNWkRwpnj918Pfi85mzi42sOqdV72OLxWAISnw==} + engines: {node: '>=18.0.0'} + '@smithy/util-utf8@2.3.0': resolution: {integrity: sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A==} engines: {node: '>=14.0.0'} @@ -4934,6 +5094,10 @@ packages: resolution: {integrity: sha512-DSIwNaWtmzrNQHv8g7DBGR9mulSit65KSj5ymGEIAknmIN8IpbZefEep10LaMG/P/xquwbmJ1h9ectz8z6mV6g==} engines: {node: '>=18.0.0'} + '@smithy/util-utf8@4.2.2': + resolution: {integrity: sha512-75MeYpjdWRe8M5E3AW0O4Cx3UadweS+cwdXjwYGBW5h/gxxnbeZ877sLPX/ZJA9GVTlL/qG0dXP29JWFCD1Ayw==} + engines: {node: '>=18.0.0'} + '@smithy/util-waiter@4.2.10': resolution: {integrity: sha512-4eTWph/Lkg1wZEDAyObwme0kmhEb7J/JjibY2znJdrYRgKbKqB7YoEhhJVJ4R1g/SYih4zuwX7LpJaM8RsnTVg==} engines: {node: '>=18.0.0'} @@ -4942,6 +5106,10 @@ packages: resolution: {integrity: sha512-dSfDCeihDmZlV2oyr0yWPTUfh07suS+R5OB+FZGiv/hHyK3hrFBW5rR1UYjfa57vBsrP9lciFkRPzebaV1Qujw==} engines: {node: '>=18.0.0'} + '@smithy/uuid@1.1.2': + resolution: {integrity: sha512-O/IEdcCUKkubz60tFbGA7ceITTAJsty+lBjNoorP4Z6XRqaFb/OjQjZODophEcuq68nKm6/0r+6/lLQ+XVpk8g==} + engines: {node: '>=18.0.0'} + '@styled-system/background@5.1.2': resolution: {integrity: sha512-jtwH2C/U6ssuGSvwTN3ri/IyjdHb8W9X/g8Y0JLcrH02G+BW3OS8kZdHphF1/YyRklnrKrBT2ngwGUK6aqqV3A==} @@ -5322,41 +5490,49 @@ packages: resolution: {integrity: sha512-34gw7PjDGB9JgePJEmhEqBhWvCiiWCuXsL9hYphDF7crW7UgI05gyBAi6MF58uGcMOiOqSJ2ybEeCvHcq0BCmQ==} cpu: [arm64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-arm64-musl@1.11.1': resolution: {integrity: sha512-RyMIx6Uf53hhOtJDIamSbTskA99sPHS96wxVE/bJtePJJtpdKGXO1wY90oRdXuYOGOTuqjT8ACccMc4K6QmT3w==} cpu: [arm64] os: [linux] + libc: [musl] 
'@unrs/resolver-binding-linux-ppc64-gnu@1.11.1': resolution: {integrity: sha512-D8Vae74A4/a+mZH0FbOkFJL9DSK2R6TFPC9M+jCWYia/q2einCubX10pecpDiTmkJVUH+y8K3BZClycD8nCShA==} cpu: [ppc64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-riscv64-gnu@1.11.1': resolution: {integrity: sha512-frxL4OrzOWVVsOc96+V3aqTIQl1O2TjgExV4EKgRY09AJ9leZpEg8Ak9phadbuX0BA4k8U5qtvMSQQGGmaJqcQ==} cpu: [riscv64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-riscv64-musl@1.11.1': resolution: {integrity: sha512-mJ5vuDaIZ+l/acv01sHoXfpnyrNKOk/3aDoEdLO/Xtn9HuZlDD6jKxHlkN8ZhWyLJsRBxfv9GYM2utQ1SChKew==} cpu: [riscv64] os: [linux] + libc: [musl] '@unrs/resolver-binding-linux-s390x-gnu@1.11.1': resolution: {integrity: sha512-kELo8ebBVtb9sA7rMe1Cph4QHreByhaZ2QEADd9NzIQsYNQpt9UkM9iqr2lhGr5afh885d/cB5QeTXSbZHTYPg==} cpu: [s390x] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-x64-gnu@1.11.1': resolution: {integrity: sha512-C3ZAHugKgovV5YvAMsxhq0gtXuwESUKc5MhEtjBpLoHPLYM+iuwSj3lflFwK3DPm68660rZ7G8BMcwSro7hD5w==} cpu: [x64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-x64-musl@1.11.1': resolution: {integrity: sha512-rV0YSoyhK2nZ4vEswT/QwqzqQXw5I6CjoaYMOX0TqBlWhojUf8P94mvI7nuJTeaCkkds3QE4+zS8Ko+GdXuZtA==} cpu: [x64] os: [linux] + libc: [musl] '@unrs/resolver-binding-wasm32-wasi@1.11.1': resolution: {integrity: sha512-5u4RkfxJm+Ng7IWgkzi3qrFOvLvQYnPBmjmZQ8+szTK/b31fQCnleNl1GgEt7nIsZRIf5PLhPwT0WM+q45x/UQ==} @@ -9873,6 +10049,22 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 + '@aws-sdk/core@3.973.19': + dependencies: + '@aws-sdk/types': 3.973.5 + '@aws-sdk/xml-builder': 3.972.10 + '@smithy/core': 3.23.9 + '@smithy/node-config-provider': 4.3.11 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@aws-sdk/crc64-nvme@3.972.3': 
dependencies: '@smithy/types': 4.13.0 @@ -10071,6 +10263,23 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 + '@aws-sdk/middleware-sdk-s3@3.972.19': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-arn-parser': 3.972.3 + '@smithy/core': 3.23.9 + '@smithy/node-config-provider': 4.3.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-config-provider': 4.2.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-stream': 4.5.17 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@aws-sdk/middleware-ssec@3.972.6': dependencies: '@aws-sdk/types': 3.973.4 @@ -10138,6 +10347,17 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/s3-request-presigner@3.1007.0': + dependencies: + '@aws-sdk/signature-v4-multi-region': 3.996.7 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-format-url': 3.972.7 + '@smithy/middleware-endpoint': 4.4.23 + '@smithy/protocol-http': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/signature-v4-multi-region@3.996.4': dependencies: '@aws-sdk/middleware-sdk-s3': 3.972.16 @@ -10147,6 +10367,15 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/signature-v4-multi-region@3.996.7': + dependencies: + '@aws-sdk/middleware-sdk-s3': 3.972.19 + '@aws-sdk/types': 3.973.5 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/token-providers@3.1001.0': dependencies: '@aws-sdk/core': 3.973.16 @@ -10164,10 +10393,19 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/types@3.973.5': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/util-arn-parser@3.972.2': dependencies: tslib: 2.8.1 + '@aws-sdk/util-arn-parser@3.972.3': + dependencies: + tslib: 2.8.1 + '@aws-sdk/util-endpoints@3.996.3': dependencies: '@aws-sdk/types': 3.973.4 @@ -10176,6 +10414,13 @@ snapshots: 
'@smithy/util-endpoints': 3.3.1 tslib: 2.8.1 + '@aws-sdk/util-format-url@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/querystring-builder': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/util-locate-window@3.965.4': dependencies: tslib: 2.8.1 @@ -10195,6 +10440,12 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/xml-builder@3.972.10': + dependencies: + '@smithy/types': 4.13.0 + fast-xml-parser: 5.4.1 + tslib: 2.8.1 + '@aws-sdk/xml-builder@3.972.9': dependencies: '@smithy/types': 4.13.0 @@ -12212,6 +12463,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/abort-controller@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/chunked-blob-reader-native@4.2.2': dependencies: '@smithy/util-base64': 4.3.1 @@ -12243,6 +12499,19 @@ snapshots: '@smithy/uuid': 1.1.1 tslib: 2.8.1 + '@smithy/core@3.23.9': + dependencies: + '@smithy/middleware-serde': 4.2.12 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-stream': 4.5.17 + '@smithy/util-utf8': 4.2.2 + '@smithy/uuid': 1.1.2 + tslib: 2.8.1 + '@smithy/credential-provider-imds@4.2.10': dependencies: '@smithy/node-config-provider': 4.3.10 @@ -12289,6 +12558,14 @@ snapshots: '@smithy/util-base64': 4.3.1 tslib: 2.8.1 + '@smithy/fetch-http-handler@5.3.13': + dependencies: + '@smithy/protocol-http': 5.3.11 + '@smithy/querystring-builder': 4.2.11 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + tslib: 2.8.1 + '@smithy/hash-blob-browser@4.2.11': dependencies: '@smithy/chunked-blob-reader': 5.2.1 @@ -12322,6 +12599,10 @@ snapshots: dependencies: tslib: 2.8.1 + '@smithy/is-array-buffer@4.2.2': + dependencies: + tslib: 2.8.1 + '@smithy/md5-js@4.2.10': dependencies: '@smithy/types': 4.13.0 @@ -12345,6 +12626,17 @@ snapshots: '@smithy/util-middleware': 4.2.10 tslib: 2.8.1 + '@smithy/middleware-endpoint@4.4.23': + 
dependencies: + '@smithy/core': 3.23.9 + '@smithy/middleware-serde': 4.2.12 + '@smithy/node-config-provider': 4.3.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + '@smithy/url-parser': 4.2.11 + '@smithy/util-middleware': 4.2.11 + tslib: 2.8.1 + '@smithy/middleware-retry@4.4.38': dependencies: '@smithy/node-config-provider': 4.3.10 @@ -12363,11 +12655,22 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/middleware-serde@4.2.12': + dependencies: + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/middleware-stack@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/middleware-stack@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/node-config-provider@4.3.10': dependencies: '@smithy/property-provider': 4.2.10 @@ -12375,6 +12678,13 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/node-config-provider@4.3.11': + dependencies: + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/node-http-handler@4.4.13': dependencies: '@smithy/abort-controller': 4.2.10 @@ -12383,27 +12693,56 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/node-http-handler@4.4.14': + dependencies: + '@smithy/abort-controller': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/querystring-builder': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/property-provider@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/property-provider@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/protocol-http@5.3.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/protocol-http@5.3.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/querystring-builder@4.2.10': dependencies: '@smithy/types': 4.13.0 '@smithy/util-uri-escape': 4.2.1 tslib: 2.8.1 + '@smithy/querystring-builder@4.2.11': + dependencies: + 
'@smithy/types': 4.13.0 + '@smithy/util-uri-escape': 4.2.2 + tslib: 2.8.1 + '@smithy/querystring-parser@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/querystring-parser@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/service-error-classification@4.2.10': dependencies: '@smithy/types': 4.13.0 @@ -12413,6 +12752,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/shared-ini-file-loader@4.4.6': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/signature-v4@5.3.10': dependencies: '@smithy/is-array-buffer': 4.2.1 @@ -12424,6 +12768,17 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 + '@smithy/signature-v4@5.3.11': + dependencies: + '@smithy/is-array-buffer': 4.2.2 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-hex-encoding': 4.2.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-uri-escape': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@smithy/smithy-client@4.12.1': dependencies: '@smithy/core': 3.23.7 @@ -12434,6 +12789,16 @@ snapshots: '@smithy/util-stream': 4.5.16 tslib: 2.8.1 + '@smithy/smithy-client@4.12.3': + dependencies: + '@smithy/core': 3.23.9 + '@smithy/middleware-endpoint': 4.4.23 + '@smithy/middleware-stack': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-stream': 4.5.17 + tslib: 2.8.1 + '@smithy/types@4.13.0': dependencies: tslib: 2.8.1 @@ -12444,16 +12809,32 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/url-parser@4.2.11': + dependencies: + '@smithy/querystring-parser': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/util-base64@4.3.1': dependencies: '@smithy/util-buffer-from': 4.2.1 '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 + '@smithy/util-base64@4.3.2': + dependencies: + '@smithy/util-buffer-from': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@smithy/util-body-length-browser@4.2.1': dependencies: tslib: 2.8.1 + '@smithy/util-body-length-browser@4.2.2': + 
dependencies: + tslib: 2.8.1 + '@smithy/util-body-length-node@4.2.2': dependencies: tslib: 2.8.1 @@ -12468,10 +12849,19 @@ snapshots: '@smithy/is-array-buffer': 4.2.1 tslib: 2.8.1 + '@smithy/util-buffer-from@4.2.2': + dependencies: + '@smithy/is-array-buffer': 4.2.2 + tslib: 2.8.1 + '@smithy/util-config-provider@4.2.1': dependencies: tslib: 2.8.1 + '@smithy/util-config-provider@4.2.2': + dependencies: + tslib: 2.8.1 + '@smithy/util-defaults-mode-browser@4.3.37': dependencies: '@smithy/property-provider': 4.2.10 @@ -12499,11 +12889,20 @@ snapshots: dependencies: tslib: 2.8.1 + '@smithy/util-hex-encoding@4.2.2': + dependencies: + tslib: 2.8.1 + '@smithy/util-middleware@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/util-middleware@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/util-retry@4.2.10': dependencies: '@smithy/service-error-classification': 4.2.10 @@ -12521,10 +12920,25 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 + '@smithy/util-stream@4.5.17': + dependencies: + '@smithy/fetch-http-handler': 5.3.13 + '@smithy/node-http-handler': 4.4.14 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-buffer-from': 4.2.2 + '@smithy/util-hex-encoding': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@smithy/util-uri-escape@4.2.1': dependencies: tslib: 2.8.1 + '@smithy/util-uri-escape@4.2.2': + dependencies: + tslib: 2.8.1 + '@smithy/util-utf8@2.3.0': dependencies: '@smithy/util-buffer-from': 2.2.0 @@ -12535,6 +12949,11 @@ snapshots: '@smithy/util-buffer-from': 4.2.1 tslib: 2.8.1 + '@smithy/util-utf8@4.2.2': + dependencies: + '@smithy/util-buffer-from': 4.2.2 + tslib: 2.8.1 + '@smithy/util-waiter@4.2.10': dependencies: '@smithy/abort-controller': 4.2.10 @@ -12545,6 +12964,10 @@ snapshots: dependencies: tslib: 2.8.1 + '@smithy/uuid@1.1.2': + dependencies: + tslib: 2.8.1 + '@styled-system/background@5.1.2': dependencies: '@styled-system/core': 5.1.2 diff --git 
a/uploads/s3-streamer/package.json b/uploads/s3-streamer/package.json index dbe965c73..520faf5f1 100644 --- a/uploads/s3-streamer/package.json +++ b/uploads/s3-streamer/package.json @@ -37,6 +37,7 @@ "dependencies": { "@aws-sdk/client-s3": "^3.1001.0", "@aws-sdk/lib-storage": "^3.1001.0", + "@aws-sdk/s3-request-presigner": "^3.1001.0", "@constructive-io/content-type-stream": "workspace:^", "@pgpmjs/types": "workspace:^" }, diff --git a/uploads/s3-streamer/src/index.ts b/uploads/s3-streamer/src/index.ts index 74c9f3389..90cf965e1 100644 --- a/uploads/s3-streamer/src/index.ts +++ b/uploads/s3-streamer/src/index.ts @@ -2,6 +2,7 @@ import getClient from './s3'; import Streamer from './streamer'; export * from './utils'; +export * from './storage-provider'; export { getClient }; export { Streamer }; diff --git a/uploads/s3-streamer/src/storage-provider.ts b/uploads/s3-streamer/src/storage-provider.ts new file mode 100644 index 000000000..336bc4e41 --- /dev/null +++ b/uploads/s3-streamer/src/storage-provider.ts @@ -0,0 +1,198 @@ +/** + * StorageProvider interface and S3 implementation. + * + * The StorageProvider interface abstracts storage operations so that + * future implementations (GCS, Azure, local filesystem) can be swapped + * in without changing consumers. + * + * S3StorageProvider is the only implementation for now. It is + * MinIO-compatible (forcePathStyle: true, configurable endpoint). 
+ */ + +import { + DeleteObjectCommand, + DeleteObjectsCommand, + GetObjectCommand, + HeadObjectCommand, + ListObjectsV2Command, + PutObjectCommand, + S3Client, +} from '@aws-sdk/client-s3'; +import { getSignedUrl } from '@aws-sdk/s3-request-presigner'; +import { Upload } from '@aws-sdk/lib-storage'; +import type { Readable } from 'stream'; + +import getS3 from './s3'; + +// -- Interfaces -- + +export interface UploadOpts { + contentType: string; + size?: number; + metadata?: Record; +} + +export interface StorageUploadResult { + etag: string; + versionId?: string; +} + +export interface ObjectMeta { + key: string; + size: number; + etag: string; + lastModified: Date; + contentType?: string; +} + +export interface StorageProvider { + upload(key: string, stream: Readable, opts: UploadOpts): Promise; + download(key: string): Promise; + delete(key: string): Promise; + deleteMany(keys: string[]): Promise; + head(key: string): Promise; + presignGet(key: string, expiresIn: number): Promise; + presignPut(key: string, expiresIn: number, contentType: string): Promise; + listPrefix(prefix: string): AsyncIterable; +} + +// -- S3 Implementation -- + +export interface S3StorageProviderOptions { + bucket: string; + awsRegion: string; + awsAccessKey: string; + awsSecretKey: string; + minioEndpoint?: string; + provider?: 'minio' | 's3'; +} + +export class S3StorageProvider implements StorageProvider { + private client: S3Client; + private bucket: string; + + constructor(opts: S3StorageProviderOptions) { + this.bucket = opts.bucket; + this.client = getS3({ + awsRegion: opts.awsRegion, + awsAccessKey: opts.awsAccessKey, + awsSecretKey: opts.awsSecretKey, + minioEndpoint: opts.minioEndpoint, + provider: opts.provider, + }); + } + + async upload(key: string, stream: Readable, opts: UploadOpts): Promise { + const upload = new Upload({ + client: this.client, + params: { + Bucket: this.bucket, + Key: key, + Body: stream, + ContentType: opts.contentType, + ...(opts.metadata ? 
{ Metadata: opts.metadata } : {}), + }, + }); + + const result = await upload.done(); + return { + etag: result.ETag?.replace(/"/g, '') || '', + versionId: result.VersionId, + }; + } + + async download(key: string): Promise { + const result = await this.client.send( + new GetObjectCommand({ Bucket: this.bucket, Key: key }) + ); + return result.Body as Readable; + } + + async delete(key: string): Promise { + await this.client.send( + new DeleteObjectCommand({ Bucket: this.bucket, Key: key }) + ); + } + + async deleteMany(keys: string[]): Promise { + if (keys.length === 0) return; + + // DeleteObjectsCommand supports max 1000 keys per request + for (let i = 0; i < keys.length; i += 1000) { + const batch = keys.slice(i, i + 1000); + await this.client.send( + new DeleteObjectsCommand({ + Bucket: this.bucket, + Delete: { + Objects: batch.map((key) => ({ Key: key })), + Quiet: true, + }, + }) + ); + } + } + + async head(key: string): Promise { + const result = await this.client.send( + new HeadObjectCommand({ Bucket: this.bucket, Key: key }) + ); + return { + key, + size: result.ContentLength || 0, + etag: result.ETag?.replace(/"/g, '') || '', + lastModified: result.LastModified || new Date(), + contentType: result.ContentType, + }; + } + + async presignGet(key: string, expiresIn: number): Promise { + return getSignedUrl( + this.client as any, + new GetObjectCommand({ Bucket: this.bucket, Key: key }), + { expiresIn } + ); + } + + async presignPut(key: string, expiresIn: number, contentType: string): Promise { + return getSignedUrl( + this.client as any, + new PutObjectCommand({ + Bucket: this.bucket, + Key: key, + ContentType: contentType, + }), + { expiresIn } + ); + } + + async *listPrefix(prefix: string): AsyncIterable { + let continuationToken: string | undefined; + + do { + const result = await this.client.send( + new ListObjectsV2Command({ + Bucket: this.bucket, + Prefix: prefix, + ContinuationToken: continuationToken, + }) + ); + + for (const obj of 
result.Contents || []) { + yield { + key: obj.Key || '', + size: obj.Size || 0, + etag: obj.ETag?.replace(/"/g, '') || '', + lastModified: obj.LastModified || new Date(), + }; + } + + continuationToken = result.IsTruncated + ? result.NextContinuationToken + : undefined; + } while (continuationToken); + } + + destroy(): void { + this.client.destroy(); + } +} From a24f5a14bc4665abc5fed200c224d964240ebd3d Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 12 Mar 2026 09:41:45 +0800 Subject: [PATCH 02/15] e2e tests --- .../__tests__/object-store-lifecycle.test.ts | 875 ++++++++++++++++++ migrations/__tests__/object-store-rls.test.ts | 733 +++++++++++++++ migrations/jest.config.js | 18 + migrations/object_store.sql | 42 +- migrations/package.json | 12 + pnpm-lock.yaml | 6 + pnpm-workspace.yaml | 1 + 7 files changed, 1682 insertions(+), 5 deletions(-) create mode 100644 migrations/__tests__/object-store-lifecycle.test.ts create mode 100644 migrations/__tests__/object-store-rls.test.ts create mode 100644 migrations/jest.config.js create mode 100644 migrations/package.json diff --git a/migrations/__tests__/object-store-lifecycle.test.ts b/migrations/__tests__/object-store-lifecycle.test.ts new file mode 100644 index 000000000..be2b7ac80 --- /dev/null +++ b/migrations/__tests__/object-store-lifecycle.test.ts @@ -0,0 +1,875 @@ +jest.setTimeout(60000); + +import { resolve } from 'path'; + +import { getConnections, PgTestClient, seed } from 'pgsql-test'; + +const MIGRATION_PATH = resolve(__dirname, '../object_store.sql'); + +const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; + +let pg: PgTestClient; +let teardown: () => Promise; + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +async function switchRole( + role: string, + context: Record = {} +) { + await pg.query(`SET LOCAL ROLE ${role}`); + for (const [key, value] of Object.entries(context)) { + 
await pg.query('SELECT set_config($1, $2, true)', [key, value]); + } +} + +/** Read all recorded jobs from the job_log table */ +async function getJobLog() { + const result = await pg.query( + 'SELECT identifier, payload, job_key FROM _test_job_log ORDER BY logged_at' + ); + return result.rows; +} + +async function clearJobLog() { + await pg.query('DELETE FROM _test_job_log'); +} + +// --------------------------------------------------------------------------- +// Setup +// --------------------------------------------------------------------------- + +beforeAll(async () => { + ({ pg, teardown } = await getConnections( + {}, + [seed.sqlfile([MIGRATION_PATH])] + )); + + // Ensure anonymous role exists + await pg.query(` + DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'anonymous') THEN + CREATE ROLE anonymous NOLOGIN; + END IF; + END $$ + `); + + // Grants needed for isolated test (normally from pgpm extension deploy) + await pg.query('GRANT USAGE ON SCHEMA object_store_public TO authenticated'); + await pg.query('GRANT USAGE ON SCHEMA object_store_public TO service_role'); + await pg.query('GRANT SELECT ON object_store_public.buckets TO authenticated'); + await pg.query('GRANT SELECT ON object_store_public.buckets TO service_role'); + + // Replace the app_jobs.add_job stub with one that records calls + await pg.query(` + CREATE TABLE _test_job_log ( + logged_at timestamptz NOT NULL DEFAULT now(), + identifier text NOT NULL, + payload json, + job_key text + ) + `); + + await pg.query(` + CREATE OR REPLACE FUNCTION app_jobs.add_job( + identifier text, + payload json DEFAULT '{}'::json, + queue_name text DEFAULT NULL, + run_at timestamptz DEFAULT NULL, + max_attempts integer DEFAULT NULL, + job_key text DEFAULT NULL, + priority integer DEFAULT NULL, + flags text[] DEFAULT NULL + ) RETURNS void AS $$ + BEGIN + INSERT INTO _test_job_log (identifier, payload, job_key) + VALUES (identifier, payload, job_key); + END; + $$ LANGUAGE plpgsql + `); + + // 
Grant app_jobs access to roles that trigger job-enqueuing functions. + // In production, the database-jobs pgpm module handles these grants. + await pg.query('GRANT USAGE ON SCHEMA app_jobs TO authenticated, service_role'); + await pg.query('GRANT EXECUTE ON FUNCTION app_jobs.add_job(text, json, text, timestamptz, integer, text, integer, text[]) TO authenticated, service_role'); + await pg.query('GRANT INSERT ON _test_job_log TO authenticated, service_role'); + + // Seed a default bucket + await pg.query(` + INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config) + VALUES (1, 'default', 'Default Bucket', false, '{}') + `); +}); + +afterAll(async () => { + await teardown(); +}); + +// ========================================================================== +// E2E-01: Full Upload Lifecycle (happy path) +// ========================================================================== + +describe('E2E-01: Upload Lifecycle -- happy path', () => { + const ORIGIN_ID = '10000000-0000-0000-0000-000000000001'; + const ORIGIN_KEY = '1/default/abc123_origin'; + const VERSION_THUMB_KEY = '1/default/abc123_thumb'; + const VERSION_LARGE_KEY = '1/default/abc123_large'; + + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('step 1: user uploads file → status=pending, process-image job queued', async () => { + // Authenticated user inserts a file (simulates upload endpoint) + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag) + VALUES ($1, 1, $2, 'default', $3, 'etag-origin') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + + // Verify file exists with pending status + const file = await pg.query( + 'SELECT * FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(file.rowCount).toBe(1); + 
expect(file.rows[0].status).toBe('pending'); + expect(file.rows[0].created_by).toBe(USER_A); + + // Verify process-image job was queued (read job log as superuser) + await pg.query('RESET ROLE'); + const jobs = await getJobLog(); + expect(jobs).toEqual([ + expect.objectContaining({ + identifier: 'process-image', + job_key: `file:${ORIGIN_ID}`, + }), + ]); + const payload = jobs[0].payload; + expect(payload.file_id).toBe(ORIGIN_ID); + expect(payload.database_id).toBe(1); + }); + + it('step 2: service_role transitions pending → processing', async () => { + // Insert as superuser first + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'pending') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + await clearJobLog(); + + // Service role picks up the job + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, + [ORIGIN_ID] + ); + + // Verify processing_started_at is set + await pg.query('RESET ROLE'); + const file = await pg.query( + 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(file.rows[0].status).toBe('processing'); + expect(file.rows[0].processing_started_at).not.toBeNull(); + }); + + it('step 3: service_role inserts version rows (status=ready, bypasses job trigger)', async () => { + // Setup: origin in processing state + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'processing') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + await clearJobLog(); + + // Service role creates version rows with status='ready' + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await 
pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag, status) + VALUES + (1, $1, 'default', $2, 'etag-thumb', 'ready'), + (1, $3, 'default', $2, 'etag-large', 'ready') + `, [VERSION_THUMB_KEY, USER_A, VERSION_LARGE_KEY]); + + // Version rows with status='ready' should NOT trigger process-image + await pg.query('RESET ROLE'); + const jobs = await getJobLog(); + expect(jobs).toHaveLength(0); + + // Verify all three rows exist + const files = await pg.query( + `SELECT key, status FROM object_store_public.files WHERE database_id = 1 ORDER BY key` + ); + expect(files.rowCount).toBe(3); + expect(files.rows.map((r: any) => ({ key: r.key, status: r.status }))).toEqual([ + { key: VERSION_LARGE_KEY, status: 'ready' }, + { key: ORIGIN_KEY, status: 'processing' }, + { key: VERSION_THUMB_KEY, status: 'ready' }, + ]); + }); + + it('step 4: service_role transitions origin processing → ready', async () => { + // Setup: origin in processing state + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'processing') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, + [ORIGIN_ID] + ); + + // Verify status and processing_started_at cleared + await pg.query('RESET ROLE'); + const file = await pg.query( + 'SELECT status, processing_started_at, updated_at, created_at FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(file.rows[0].status).toBe('ready'); + expect(file.rows[0].processing_started_at).toBeNull(); + // updated_at should be refreshed + expect(new Date(file.rows[0].updated_at).getTime()) + .toBeGreaterThanOrEqual(new Date(file.rows[0].created_at).getTime()); + }); + + it('step 5: user sees origin 
+ versions after processing completes', async () => { + // Setup: origin ready + 2 version rows ready + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES + ($1, 1, $2, 'default', $4, 'etag-origin', 'ready'), + (gen_random_uuid(), 1, $3, 'default', $4, 'etag-thumb', 'ready'), + (gen_random_uuid(), 1, $5, 'default', $4, 'etag-large', 'ready') + `, [ORIGIN_ID, ORIGIN_KEY, VERSION_THUMB_KEY, USER_A, VERSION_LARGE_KEY]); + + // Authenticated user queries + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const files = await pg.query( + `SELECT key, status FROM object_store_public.files WHERE key LIKE '1/default/abc123%' ORDER BY key` + ); + expect(files.rowCount).toBe(3); + expect(files.rows.every((r: any) => r.status === 'ready')).toBe(true); + }); +}); + +// ========================================================================== +// E2E-02: Error + Retry Path +// ========================================================================== + +describe('E2E-02: Error + Retry Path', () => { + const ORIGIN_ID = '20000000-0000-0000-0000-000000000001'; + const ORIGIN_KEY = '1/default/err123_origin'; + + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('processing → error stores status_reason', async () => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', 'processing') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files + SET status = 'error', status_reason = 'sharp: unsupported image format' + WHERE id = $1 AND database_id = 1`, + [ORIGIN_ID] + ); + + await pg.query('RESET ROLE'); + const file = await 
pg.query( + 'SELECT status, status_reason, processing_started_at FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(file.rows[0].status).toBe('error'); + expect(file.rows[0].status_reason).toBe('sharp: unsupported image format'); + // processing_started_at cleared on exit from processing + expect(file.rows[0].processing_started_at).toBeNull(); + }); + + it('error → pending (retry) re-queues process-image job', async () => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', 'error') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + await clearJobLog(); + + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'pending' WHERE id = $1 AND database_id = 1`, + [ORIGIN_ID] + ); + + // Verify retry job queued + await pg.query('RESET ROLE'); + const jobs = await getJobLog(); + expect(jobs).toEqual([ + expect.objectContaining({ + identifier: 'process-image', + job_key: `file:${ORIGIN_ID}`, + }), + ]); + }); + + it('full retry cycle: pending → processing → error → pending → processing → ready', async () => { + // Step 1: upload (pending) + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag) + VALUES ($1, 1, $2, 'default', $3, 'etag') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + + await clearJobLog(); + + // Step 2: processing + await pg.query( + `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1`, + [ORIGIN_ID] + ); + let file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + expect(file.rows[0].status).toBe('processing'); + expect(file.rows[0].processing_started_at).not.toBeNull(); + + // Step 3: error + await pg.query( + `UPDATE object_store_public.files SET status = 'error', status_reason = 'timeout' WHERE id = $1`, + 
[ORIGIN_ID] + ); + file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + expect(file.rows[0].status).toBe('error'); + expect(file.rows[0].processing_started_at).toBeNull(); + + // Step 4: retry (error → pending) — should re-queue job + await pg.query( + `UPDATE object_store_public.files SET status = 'pending' WHERE id = $1`, + [ORIGIN_ID] + ); + let jobs = await getJobLog(); + expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('process-image'); + + // Step 5: processing again + await clearJobLog(); + await pg.query( + `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1`, + [ORIGIN_ID] + ); + + // Step 6: ready + await pg.query( + `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1`, + [ORIGIN_ID] + ); + file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + expect(file.rows[0].status).toBe('ready'); + expect(file.rows[0].processing_started_at).toBeNull(); + }); +}); + +// ========================================================================== +// E2E-03: Deletion Flow +// ========================================================================== + +describe('E2E-03: Deletion Flow', () => { + const ORIGIN_ID = '30000000-0000-0000-0000-000000000001'; + const ORIGIN_KEY = '1/default/del123_origin'; + const VERSION_KEY = '1/default/del123_thumb'; + + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('ready → deleting queues delete_s3_object job', async () => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', 'ready') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + await clearJobLog(); + + await pg.query( + `UPDATE object_store_public.files SET status = 'deleting' WHERE id = $1`, + [ORIGIN_ID] + ); + + const jobs = await getJobLog(); + 
expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('delete_s3_object'); + expect(jobs[0].job_key).toBe(`delete:${ORIGIN_ID}`); + expect(jobs[0].payload.key).toBe(ORIGIN_KEY); + }); + + it('deleting origin + version rows each queue separate jobs', async () => { + const VERSION_ID = '30000000-0000-0000-0000-000000000002'; + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES + ($1, 1, $2, 'default', $4, 'etag-origin', 'ready'), + ($3, 1, $5, 'default', $4, 'etag-thumb', 'ready') + `, [ORIGIN_ID, ORIGIN_KEY, VERSION_ID, USER_A, VERSION_KEY]); + await clearJobLog(); + + // Delete both + await pg.query( + `UPDATE object_store_public.files SET status = 'deleting' + WHERE database_id = 1 AND key LIKE '1/default/del123%'` + ); + + const jobs = await getJobLog(); + expect(jobs).toHaveLength(2); + const keys = jobs.map((j: any) => j.payload.key).sort(); + expect(keys).toEqual([ORIGIN_KEY, VERSION_KEY]); + }); + + it('error → deleting is valid (skip processing on permanent failure)', async () => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', 'error') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + await clearJobLog(); + + await pg.query( + `UPDATE object_store_public.files SET status = 'deleting', status_reason = 'user cancelled' + WHERE id = $1`, + [ORIGIN_ID] + ); + + const file = await pg.query('SELECT status, status_reason FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + expect(file.rows[0].status).toBe('deleting'); + expect(file.rows[0].status_reason).toBe('user cancelled'); + + const jobs = await getJobLog(); + expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('delete_s3_object'); + }); + + it('service_role can hard-DELETE after marking as deleting', async () => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, 
bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', 'deleting') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query( + 'DELETE FROM object_store_public.files WHERE id = $1 AND database_id = 1', + [ORIGIN_ID] + ); + expect(result.rowCount).toBe(1); + + // Verify gone + await pg.query('RESET ROLE'); + const check = await pg.query( + 'SELECT * FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(check.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// E2E-04: State Machine Validation +// ========================================================================== + +describe('E2E-04: State Machine Validation', () => { + const ORIGIN_ID = '40000000-0000-0000-0000-000000000001'; + const ORIGIN_KEY = '1/default/sm123_origin'; + + beforeEach(async () => { + await pg.beforeEach(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + const invalidTransitions = [ + ['pending', 'ready'], + ['pending', 'deleting'], + ['processing', 'pending'], + ['ready', 'pending'], + ['ready', 'processing'], + ['ready', 'error'], + ['error', 'processing'], + ['error', 'ready'], + ]; + + it.each(invalidTransitions)( + 'rejects %s → %s', + async (from, to) => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', $4) + `, [ORIGIN_ID, ORIGIN_KEY, USER_A, from]); + + await expect( + pg.query( + `UPDATE object_store_public.files SET status = $1 WHERE id = $2`, + [to, ORIGIN_ID] + ) + ).rejects.toThrow(/Invalid status transition/); + } + ); + + const validTransitions = [ + ['pending', 'processing'], + ['pending', 'error'], + ['processing', 'ready'], + ['processing', 'error'], + ['processing', 'deleting'], + ['ready', 'deleting'], + ['error', 
'deleting'], + ['error', 'pending'], + ]; + + it.each(validTransitions)( + 'allows %s → %s', + async (from, to) => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', $4) + `, [ORIGIN_ID, ORIGIN_KEY, USER_A, from]); + + await pg.query( + `UPDATE object_store_public.files SET status = $1 WHERE id = $2`, + [to, ORIGIN_ID] + ); + + const file = await pg.query( + 'SELECT status FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(file.rows[0].status).toBe(to); + } + ); +}); + +// ========================================================================== +// E2E-05: Constraints +// ========================================================================== + +describe('E2E-05: Constraints', () => { + beforeEach(async () => { + await pg.beforeEach(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('rejects empty key', async () => { + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '', 'default', 'x') + `) + ).rejects.toThrow(/files_key_not_empty/); + }); + + it('rejects key exceeding 1024 chars', async () => { + const longKey = '1/default/' + 'a'.repeat(1020); + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, $1, 'default', 'x') + `, [longKey]) + ).rejects.toThrow(/files_key_max_length/); + }); + + it('rejects invalid bucket_key format', async () => { + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '1/BAD/test_origin', 'BAD-BUCKET', 'x') + `) + ).rejects.toThrow(/files_bucket_key_format/); + }); + + it('rejects partial source reference (source_table without source_column)', async () => { + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag, source_table) + 
VALUES (1, '1/default/partial_origin', 'default', 'x', 'some_schema.some_table') + `) + ).rejects.toThrow(/files_source_complete/); + }); + + it('accepts complete source reference', async () => { + const result = await pg.query(` + INSERT INTO object_store_public.files + (database_id, key, bucket_key, etag, source_table, source_column, source_id) + VALUES (1, '1/default/ref_origin', 'default', 'x', + 'some_schema.some_table', 'image', gen_random_uuid()) + RETURNING source_table, source_column, source_id + `); + expect(result.rowCount).toBe(1); + expect(result.rows[0].source_table).toBe('some_schema.some_table'); + }); + + it('enforces unique key per tenant', async () => { + await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '1/default/dup_origin', 'default', 'e1') + `); + + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '1/default/dup_origin', 'default', 'e2') + `) + ).rejects.toThrow(/files_key_unique/); + }); + + it('allows same key in different tenants', async () => { + await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '1/default/shared_origin', 'default', 'e1') + `); + + const result = await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (2, '1/default/shared_origin', 'default', 'e2') + RETURNING * + `); + expect(result.rowCount).toBe(1); + }); +}); + +// ========================================================================== +// E2E-06: Full E2E -- upload through versions processed (under RLS) +// ========================================================================== + +describe('E2E-06: Full lifecycle under RLS', () => { + const ORIGIN_KEY = '1/default/full_e2e_origin'; + const THUMB_KEY = '1/default/full_e2e_thumb'; + const LARGE_KEY = '1/default/full_e2e_large'; + + beforeEach(async () => { + await pg.beforeEach(); + 
await pg.query(` + INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config) + VALUES (1, 'default', 'Default Bucket', false, '{}') + ON CONFLICT DO NOTHING + `); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('complete upload → process → versions → ready → visible → delete', async () => { + // --------------------------------------------------------------- + // 1. User uploads an image (INSERT as authenticated) + // --------------------------------------------------------------- + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag) + VALUES (1, $1, 'default', $2, 'etag-origin') + `, [ORIGIN_KEY, USER_A]); + + // Verify: user sees their pending file + let myFiles = await pg.query( + `SELECT key, status FROM object_store_public.files WHERE key = $1`, + [ORIGIN_KEY] + ); + expect(myFiles.rowCount).toBe(1); + expect(myFiles.rows[0].status).toBe('pending'); + + // Verify: process-image job queued + await pg.query('RESET ROLE'); + let jobs = await getJobLog(); + expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('process-image'); + + // Get the origin ID for later + const originRow = await pg.query( + `SELECT id FROM object_store_public.files WHERE key = $1 AND database_id = 1`, + [ORIGIN_KEY] + ); + const originId = originRow.rows[0].id; + + // --------------------------------------------------------------- + // 2. 
Job worker picks up → pending → processing (as service_role) + // --------------------------------------------------------------- + await clearJobLog(); + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, + [originId] + ); + + await pg.query('RESET ROLE'); + let origin = await pg.query( + 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + [originId] + ); + expect(origin.rows[0].status).toBe('processing'); + expect(origin.rows[0].processing_started_at).not.toBeNull(); + + // --------------------------------------------------------------- + // 3. Processor creates version rows (thumb + large) + // --------------------------------------------------------------- + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag, status) + VALUES + (1, $1, 'default', $2, 'etag-thumb', 'ready'), + (1, $3, 'default', $2, 'etag-large', 'ready') + `, [THUMB_KEY, USER_A, LARGE_KEY]); + + // No additional jobs should be queued (version rows are ready, not pending) + await pg.query('RESET ROLE'); + jobs = await getJobLog(); + expect(jobs).toHaveLength(0); + + // --------------------------------------------------------------- + // 4. 
Processor marks origin as ready + // --------------------------------------------------------------- + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, + [originId] + ); + + await pg.query('RESET ROLE'); + origin = await pg.query( + 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + [originId] + ); + expect(origin.rows[0].status).toBe('ready'); + expect(origin.rows[0].processing_started_at).toBeNull(); + + // --------------------------------------------------------------- + // 5. User can see all 3 files (origin + 2 versions) + // --------------------------------------------------------------- + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const allFiles = await pg.query( + `SELECT key, status FROM object_store_public.files + WHERE key LIKE '1/default/full_e2e%' + ORDER BY key` + ); + expect(allFiles.rowCount).toBe(3); + expect(allFiles.rows).toEqual([ + { key: LARGE_KEY, status: 'ready' }, + { key: ORIGIN_KEY, status: 'ready' }, + { key: THUMB_KEY, status: 'ready' }, + ]); + + // --------------------------------------------------------------- + // 6. 
Deletion: mark all as deleting (as service_role) + // --------------------------------------------------------------- + await pg.query('RESET ROLE'); + await clearJobLog(); + + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'deleting' + WHERE key LIKE '1/default/full_e2e%' AND database_id = 1` + ); + + // All 3 deletion jobs queued + await pg.query('RESET ROLE'); + jobs = await getJobLog(); + expect(jobs).toHaveLength(3); + expect(jobs.every((j: any) => j.identifier === 'delete_s3_object')).toBe(true); + const deletedKeys = jobs.map((j: any) => j.payload.key).sort(); + expect(deletedKeys).toEqual([LARGE_KEY, ORIGIN_KEY, THUMB_KEY]); + + // --------------------------------------------------------------- + // 7. Cleanup worker hard-deletes rows + // --------------------------------------------------------------- + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const deleted = await pg.query( + `DELETE FROM object_store_public.files + WHERE key LIKE '1/default/full_e2e%' AND database_id = 1` + ); + expect(deleted.rowCount).toBe(3); + + // Verify: no files remain + await pg.query('RESET ROLE'); + const remaining = await pg.query( + `SELECT * FROM object_store_public.files WHERE key LIKE '1/default/full_e2e%'` + ); + expect(remaining.rowCount).toBe(0); + }); +}); diff --git a/migrations/__tests__/object-store-rls.test.ts b/migrations/__tests__/object-store-rls.test.ts new file mode 100644 index 000000000..d6b2d9a85 --- /dev/null +++ b/migrations/__tests__/object-store-rls.test.ts @@ -0,0 +1,733 @@ +jest.setTimeout(60000); + +import { resolve } from 'path'; + +import { getConnections, PgTestClient, seed } from 'pgsql-test'; + +const MIGRATION_PATH = resolve(__dirname, '../object_store.sql'); + +const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; +const USER_B = 
'bbbbbbbb-0000-0000-0000-000000000002';
+const USER_C = 'cccccccc-0000-0000-0000-000000000003';
+
+let pg: PgTestClient;
+let teardown: () => Promise<void>;
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+async function switchRole(
+  role: string,
+  context: Record<string, string> = {}
+) {
+  await pg.query(`SET LOCAL ROLE ${role}`);
+  for (const [key, value] of Object.entries(context)) {
+    await pg.query('SELECT set_config($1, $2, true)', [key, value]);
+  }
+}
+
+async function insertBuckets() {
+  await pg.query(`
+    INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config)
+    VALUES
+      (1, 'default', 'Default Bucket', false, '{}'),
+      (1, 'public-assets', 'Public Assets', true, '{}'),
+      (2, 'default', 'Default Bucket (Tenant 2)', false, '{}')
+  `);
+}
+
+async function insertFixtures() {
+  await pg.query(`
+    INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag)
+    VALUES
+      ('11111111-0000-0000-0000-000000000001', 1, 'default', '1/default/aaa_origin', 'ready', $1, 'etag1'),
+      ('11111111-0000-0000-0000-000000000002', 1, 'default', '1/default/bbb_origin', 'pending', $1, 'etag2'),
+      ('11111111-0000-0000-0000-000000000003', 1, 'default', '1/default/ccc_origin', 'processing', $1, 'etag3'),
+      ('11111111-0000-0000-0000-000000000004', 1, 'default', '1/default/ddd_origin', 'error', $1, 'etag4')
+  `, [USER_A]);
+
+  await pg.query(`
+    INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag)
+    VALUES
+      ('22222222-0000-0000-0000-000000000001', 1, 'default', '1/default/eee_origin', 'ready', $1, 'etag5'),
+      ('22222222-0000-0000-0000-000000000002', 1, 'default', '1/default/fff_origin', 'pending', $1, 'etag6')
+  `, [USER_B]);
+
+  await pg.query(`
+    INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag)
+    VALUES
+      
('33333333-0000-0000-0000-000000000001', 1, 'public-assets', '1/public-assets/ggg_origin', 'ready', $1, 'etag7'), + ('33333333-0000-0000-0000-000000000002', 1, 'public-assets', '1/public-assets/hhh_origin', 'pending', $1, 'etag8') + `, [USER_A]); + + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) + VALUES + ('44444444-0000-0000-0000-000000000001', 2, 'default', '2/default/iii_origin', 'ready', $1, 'etag9') + `, [USER_C]); +} + +// --------------------------------------------------------------------------- +// Setup +// --------------------------------------------------------------------------- + +beforeAll(async () => { + ({ pg, teardown } = await getConnections( + {}, + [seed.sqlfile([MIGRATION_PATH])] + )); + + // Ensure anonymous role exists (cluster-wide, idempotent) + await pg.query(` + DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'anonymous') THEN + CREATE ROLE anonymous NOLOGIN; + END IF; + END $$ + `); + + // The migration assumes object_store_public schema USAGE is already granted + // (from the original object-store pgpm extension). In isolation, grant explicitly. + await pg.query('GRANT USAGE ON SCHEMA object_store_public TO authenticated'); + await pg.query('GRANT USAGE ON SCHEMA object_store_public TO service_role'); + await pg.query('GRANT USAGE ON SCHEMA object_store_public TO anonymous'); + + // Grant SELECT on buckets to roles that need it for the public_bucket_read policy subquery. + // Without this, the EXISTS subquery in files_public_bucket_read fails with + // "permission denied for table buckets". 
+ await pg.query('GRANT SELECT ON object_store_public.buckets TO authenticated'); + await pg.query('GRANT SELECT ON object_store_public.buckets TO service_role'); + await pg.query('GRANT SELECT ON object_store_public.buckets TO anonymous'); +}); + +afterAll(async () => { + await teardown(); +}); + +// ========================================================================== +// RLS-07: Superuser Bypass (negative control -- run first) +// ========================================================================== + +describe('RLS-07: Superuser Bypass', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-07a: superuser sees all tenants', async () => { + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rowCount).toBe(9); + }); + + it('RLS-07b: superuser can INSERT into any tenant', async () => { + const result = await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (999, '999/default/su_origin', 'default', 'su-etag') + RETURNING id + `); + expect(result.rowCount).toBe(1); + }); + + it('RLS-07c: superuser can DELETE any row', async () => { + const result = await pg.query( + 'DELETE FROM object_store_public.files WHERE database_id = 2' + ); + expect(result.rowCount).toBeGreaterThan(0); + }); +}); + +// ========================================================================== +// RLS-01: Tenant Isolation (authenticated) +// ========================================================================== + +describe('RLS-01: Tenant Isolation', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + // RLS-01d runs FIRST so app.database_id has never been set in this session. 
+ // current_setting('app.database_id') without missing_ok raises "unrecognized". + it('RLS-01d: missing app.database_id raises error', async () => { + await switchRole('authenticated'); + + await expect( + pg.query('SELECT * FROM object_store_public.files') + ).rejects.toThrow(/app\.database_id|invalid input syntax for type integer/); + }); + + it('RLS-01a: SELECT scoped to own tenant', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); + expect(result.rows.find((r: any) => r.database_id === 2)).toBeUndefined(); + }); + + it('RLS-01b: INSERT rejected for wrong tenant', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, bucket_key, key, created_by, etag) + VALUES (2, 'default', '2/default/bad_origin', $1, 'bad-etag') + `, [USER_A]) + ).rejects.toThrow(/row-level security/i); + }); + + it('RLS-01c: UPDATE rejected for wrong tenant (0 rows)', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status_reason = 'test' + WHERE id = '44444444-0000-0000-0000-000000000001' AND database_id = 2 + `); + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// RLS-02: Visibility (authenticated) +// ========================================================================== + +describe('RLS-02: Visibility', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-02a: User A sees own files in all 
statuses', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query( + 'SELECT * FROM object_store_public.files WHERE created_by = $1', + [USER_A] + ); + expect(result.rowCount).toBe(6); + }); + + it('RLS-02b: User A sees other users\' ready files only', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query( + 'SELECT * FROM object_store_public.files WHERE created_by = $1', + [USER_B] + ); + expect(result.rowCount).toBe(1); + expect(result.rows[0].status).toBe('ready'); + }); + + it('RLS-02c: User B sees own pending files', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_B, + }); + + const result = await pg.query( + 'SELECT * FROM object_store_public.files WHERE created_by = $1', + [USER_B] + ); + expect(result.rowCount).toBe(2); + }); + + it('RLS-02d: User B cannot see User A\'s non-ready files', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_B, + }); + + const result = await pg.query( + `SELECT * FROM object_store_public.files + WHERE created_by = $1 AND status != 'ready'`, + [USER_A] + ); + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// RLS-03: INSERT/UPDATE Permissions (authenticated) +// ========================================================================== + +describe('RLS-03: INSERT/UPDATE Permissions', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-03a: INSERT succeeds with correct tenant', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + INSERT INTO 
object_store_public.files (database_id, bucket_key, key, created_by, etag) + VALUES (1, 'default', '1/default/new_origin', $1, 'newtag') + RETURNING * + `, [USER_A]); + expect(result.rowCount).toBe(1); + expect(result.rows[0].status).toBe('pending'); + }); + + it('RLS-03b: UPDATE own file succeeds', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status_reason = 'user note' + WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 + `); + expect(result.rowCount).toBe(1); + }); + + it('RLS-03c: DELETE denied (no DELETE grant)', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + await expect( + pg.query(` + DELETE FROM object_store_public.files + WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 + `) + ).rejects.toThrow(/permission denied/i); + }); + + it('RLS-03d: UPDATE invisible file (other user\'s pending) -- 0 rows', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status_reason = 'hacked' + WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 + `); + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// RLS-04: Anonymous -- No Access +// ========================================================================== + +describe('RLS-04: Anonymous -- No Access', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-04a: SELECT denied', async () => { + await switchRole('anonymous', { 'app.database_id': '1' }); + + await expect( + pg.query('SELECT * FROM object_store_public.files') + 
).rejects.toThrow(/permission denied/i); + }); + + it('RLS-04b: INSERT denied', async () => { + await switchRole('anonymous', { 'app.database_id': '1' }); + + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '1/default/anon_origin', 'default', 'x') + `) + ).rejects.toThrow(/permission denied/i); + }); + + it('RLS-04c: public bucket policy works with temporary GRANT', async () => { + // Temporarily grant SELECT to anonymous (rolled back in afterEach) + await pg.query('GRANT SELECT ON object_store_public.files TO anonymous'); + + await switchRole('anonymous', { 'app.database_id': '1' }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + + // Anonymous only has files_public_bucket_read (files_visibility is TO authenticated). + // Should see only public-assets bucket + ready status. + expect(result.rows.length).toBe(1); + expect(result.rows[0].id).toBe('33333333-0000-0000-0000-000000000001'); + expect(result.rows[0].bucket_key).toBe('public-assets'); + expect(result.rows[0].status).toBe('ready'); + }); +}); + +// ========================================================================== +// RLS-05: Administrator Override +// ========================================================================== + +describe('RLS-05: Administrator Override', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-05a: admin sees all files in tenant regardless of status/creator', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rowCount).toBe(8); + expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); + }); + + it('RLS-05b: admin sees other users\' pending/error files', async () => { + 
await switchRole('authenticated', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query(` + SELECT * FROM object_store_public.files + WHERE status IN ('pending', 'error') + `); + expect(result.rowCount).toBe(4); + }); + + it('RLS-05c: admin can UPDATE any file in tenant', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status_reason = 'admin override' + WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 + `); + expect(result.rowCount).toBe(1); + }); + + it('RLS-05d: admin still cannot access other tenants', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query( + 'SELECT * FROM object_store_public.files WHERE database_id = 2' + ); + expect(result.rowCount).toBe(0); + }); + + it('RLS-05e: admin DELETE still denied (no DELETE grant on authenticated)', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await expect( + pg.query(` + DELETE FROM object_store_public.files + WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 + `) + ).rejects.toThrow(/permission denied/i); + }); +}); + +// ========================================================================== +// RLS-06: service_role -- Full Access Including DELETE +// ========================================================================== + +describe('RLS-06: service_role', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-06a: service_role sees all files in tenant (with admin override)', async () => { + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + 
}); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rowCount).toBeGreaterThanOrEqual(8); + expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); + }); + + it('RLS-06b: service_role with app.role=administrator sees all', async () => { + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rowCount).toBe(8); + }); + + it('RLS-06c: service_role without app.role sees only ready (visibility gap)', async () => { + await switchRole('service_role', { + 'app.database_id': '1', + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + // Without app.role and without app.user_id, visibility policy reduces to + // status = 'ready' (NULLIF on empty user_id → NULL → created_by check is NULL). + // Expect ready files in tenant 1: 111...01, 222...01, 333...01 = 3 + expect(result.rowCount).toBe(3); + expect(result.rows.every((r: any) => r.status === 'ready')).toBe(true); + }); + + it('RLS-06d: service_role can DELETE files', async () => { + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query(` + DELETE FROM object_store_public.files + WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 + `); + expect(result.rowCount).toBe(1); + }); + + it('RLS-06e: service_role cannot DELETE cross-tenant', async () => { + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query(` + DELETE FROM object_store_public.files + WHERE id = '44444444-0000-0000-0000-000000000001' AND database_id = 2 + `); + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// RLS-08: Buckets Table Access +// 
========================================================================== + +describe('RLS-08: Buckets Table Access', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-08a: authenticated role can read buckets (GRANT added for policy subquery)', async () => { + await switchRole('authenticated', { 'app.database_id': '1' }); + + const result = await pg.query('SELECT * FROM object_store_public.buckets'); + expect(result.rowCount).toBeGreaterThan(0); + }); + + it('RLS-08b: service_role can read buckets (GRANT added for policy subquery)', async () => { + await switchRole('service_role', { 'app.database_id': '1' }); + + const result = await pg.query('SELECT * FROM object_store_public.buckets'); + expect(result.rowCount).toBeGreaterThan(0); + }); +}); + +// ========================================================================== +// RLS-09: Edge Cases +// ========================================================================== + +describe('RLS-09: Edge Cases', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-09a: app.database_id type mismatch', async () => { + await switchRole('authenticated', { + 'app.database_id': 'not-a-number', + 'app.user_id': USER_A, + }); + + await expect( + pg.query('SELECT * FROM object_store_public.files') + ).rejects.toThrow(/invalid input syntax for type integer/); + }); + + it('RLS-09b: app.user_id type mismatch', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': 'not-a-uuid', + }); + + await expect( + pg.query('SELECT * FROM object_store_public.files') + ).rejects.toThrow(/invalid input syntax for type uuid/); + }); + + it('RLS-09c: empty tenant (no files for database_id=999)', async () => { + await switchRole('authenticated', { + 'app.database_id': 
'999', + 'app.user_id': USER_A, + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rowCount).toBe(0); + }); + + it('RLS-09d: INSERT with mismatched created_by (spoofing)', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + // created_by is NOT enforced by RLS -- application layer must set it correctly. + // Note: RETURNING * would fail here because SELECT policies block reading + // the row back (created_by=USER_B != app.user_id=USER_A and status='pending'). + const result = await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag) + VALUES (1, '1/default/spoof_origin', 'default', $1, 'x') + `, [USER_B]); + expect(result.rowCount).toBe(1); + + // Verify the spoofed created_by was persisted by reading as superuser + await pg.query('RESET ROLE'); + const verify = await pg.query( + `SELECT created_by FROM object_store_public.files WHERE key = '1/default/spoof_origin'` + ); + expect(verify.rows[0].created_by).toBe(USER_B); + }); + + it('RLS-09e: multiple policies combine with OR for SELECT', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + 'app.role': 'authenticated', + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + // Policies: RESTRICTIVE(tenant_isolation) AND PERMISSIVE(visibility OR public_bucket_read OR admin_override) + // User A sees: own files (all 6) + User B's ready file (1) = 7 + // (User B's pending file is invisible; admin_override is false) + expect(result.rowCount).toBe(7); + }); +}); + +// ========================================================================== +// RLS-10: State Machine with RLS +// ========================================================================== + +describe('RLS-10: State Machine with RLS', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + 
await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-10a: authenticated user can transition own file pending->processing', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status = 'processing' + WHERE id = '11111111-0000-0000-0000-000000000002' AND database_id = 1 + RETURNING * + `); + expect(result.rowCount).toBe(1); + expect(result.rows[0].processing_started_at).not.toBeNull(); + }); + + it('RLS-10b: authenticated user cannot transition other\'s pending file (invisible)', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status = 'processing' + WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 + `); + expect(result.rowCount).toBe(0); + }); + + it('RLS-10c: invalid transition still raises under RLS', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + await expect( + pg.query(` + UPDATE object_store_public.files + SET status = 'deleting' + WHERE id = '11111111-0000-0000-0000-000000000002' AND database_id = 1 + `) + ).rejects.toThrow(/Invalid status transition from pending to deleting/); + }); +}); diff --git a/migrations/jest.config.js b/migrations/jest.config.js new file mode 100644 index 000000000..230290906 --- /dev/null +++ b/migrations/jest.config.js @@ -0,0 +1,18 @@ +/** @type {import('ts-jest').JestConfigWithTsJest} */ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + transform: { + '^.+\\.tsx?$': [ + 'ts-jest', + { + babelConfig: false, + tsconfig: '../tsconfig.json', + }, + ], + }, + transformIgnorePatterns: [`/node_modules/*`], + testRegex: '(/__tests__/.*|(\\.|/)(test|spec))\\.(jsx?|tsx?)$', + moduleFileExtensions: ['ts', 'tsx', 'js', 
'jsx', 'json', 'node'], + modulePathIgnorePatterns: ['dist/*'], +}; diff --git a/migrations/object_store.sql b/migrations/object_store.sql index b370ffdca..658b7169e 100644 --- a/migrations/object_store.sql +++ b/migrations/object_store.sql @@ -302,21 +302,28 @@ COMMENT ON TRIGGER files_after_update_queue_retry ON object_store_public.files I ALTER TABLE object_store_public.files ENABLE ROW LEVEL SECURITY; ALTER TABLE object_store_public.files FORCE ROW LEVEL SECURITY; --- Policy 1: Tenant isolation (all operations, all authenticated roles) +-- Policy 1: Tenant isolation (RESTRICTIVE -- always ANDed with all other policies) +-- Without this being RESTRICTIVE, permissive policies would OR together and +-- allow cross-tenant access (e.g. a ready file in tenant 2 visible via files_visibility). CREATE POLICY files_tenant_isolation ON object_store_public.files + AS RESTRICTIVE FOR ALL USING (database_id = current_setting('app.database_id')::integer) WITH CHECK (database_id = current_setting('app.database_id')::integer); --- Policy 2: Creator-only for non-ready files (SELECT) +-- Policy 2: Visibility for SELECT (authenticated + service_role only) +-- Non-ready files visible only to the uploader. Uses NULLIF for safe uuid handling +-- when app.user_id is missing or empty (returns NULL instead of cast error). +-- Scoped to authenticated/service_role so anonymous only gets public_bucket_read. 
CREATE POLICY files_visibility ON object_store_public.files FOR SELECT + TO authenticated, service_role USING ( status = 'ready' - OR created_by = current_setting('app.user_id')::uuid + OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid ); --- Policy 3: Public bucket read (SELECT, for anonymous access) +-- Policy 3: Public bucket read for SELECT (all roles including anonymous) CREATE POLICY files_public_bucket_read ON object_store_public.files FOR SELECT USING ( @@ -329,12 +336,37 @@ CREATE POLICY files_public_bucket_read ON object_store_public.files AND status = 'ready' ); --- Policy 4: Admin override (all operations) +-- Policy 4: Admin override (all operations, authenticated + service_role) CREATE POLICY files_admin_override ON object_store_public.files FOR ALL + TO authenticated, service_role USING (current_setting('app.role', true) = 'administrator') WITH CHECK (current_setting('app.role', true) = 'administrator'); +-- Policy 5: INSERT access (permissive base so non-admin users can insert) +CREATE POLICY files_insert_access ON object_store_public.files + FOR INSERT + TO authenticated, service_role + WITH CHECK (true); + +-- Policy 6: UPDATE access (replicates visibility for row targeting) +-- Non-admin users can only update rows they can see (ready or own). +-- Admin override policy covers admin UPDATE access separately. 
+CREATE POLICY files_update_access ON object_store_public.files + FOR UPDATE + TO authenticated, service_role + USING ( + status = 'ready' + OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid + ) + WITH CHECK (true); + +-- Policy 7: DELETE access (service_role only, grants already restrict authenticated) +CREATE POLICY files_delete_access ON object_store_public.files + FOR DELETE + TO service_role + USING (true); + -- Grants GRANT SELECT, INSERT, UPDATE ON object_store_public.files TO authenticated; GRANT SELECT, INSERT, UPDATE, DELETE ON object_store_public.files TO service_role; diff --git a/migrations/package.json b/migrations/package.json new file mode 100644 index 000000000..a5e831e03 --- /dev/null +++ b/migrations/package.json @@ -0,0 +1,12 @@ +{ + "name": "@constructive/migrations", + "version": "0.0.1", + "private": true, + "scripts": { + "test": "jest", + "test:watch": "jest --watch" + }, + "devDependencies": { + "pgsql-test": "workspace:^" + } +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 6ea1ca196..a915c7516 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1647,6 +1647,12 @@ importers: version: link:../../postgres/pgsql-test/dist publishDirectory: dist + migrations: + devDependencies: + pgsql-test: + specifier: workspace:^ + version: link:../postgres/pgsql-test/dist + packages/12factor-env: dependencies: envalid: diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index fcc12b909..0c0b8d497 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -9,3 +9,4 @@ packages: - 'graphile/*' - 'jobs/*' - 'functions/*' + - 'migrations' From 6d845c6f46b3fd5aa009a29276760570ec7b5cd3 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 12 Mar 2026 09:46:55 +0800 Subject: [PATCH 03/15] fixed tests --- migrations/__tests__/object-store-rls.test.ts | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/migrations/__tests__/object-store-rls.test.ts b/migrations/__tests__/object-store-rls.test.ts index 
d6b2d9a85..2c8c7ab86 100644 --- a/migrations/__tests__/object-store-rls.test.ts +++ b/migrations/__tests__/object-store-rls.test.ts @@ -174,6 +174,8 @@ describe('RLS-01: Tenant Isolation', () => { }); const result = await pg.query('SELECT * FROM object_store_public.files'); + // Must return rows (prevents vacuous pass on empty result from Array.every) + expect(result.rowCount).toBeGreaterThan(0); expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); expect(result.rows.find((r: any) => r.database_id === 2)).toBeUndefined(); }); @@ -503,7 +505,7 @@ describe('RLS-06: service_role', () => { }); const result = await pg.query('SELECT * FROM object_store_public.files'); - expect(result.rowCount).toBeGreaterThanOrEqual(8); + expect(result.rowCount).toBe(8); expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); }); @@ -575,14 +577,15 @@ describe('RLS-08: Buckets Table Access', () => { await switchRole('authenticated', { 'app.database_id': '1' }); const result = await pg.query('SELECT * FROM object_store_public.buckets'); - expect(result.rowCount).toBeGreaterThan(0); + // Buckets has no RLS -- all 3 seeded buckets visible (2 tenant 1 + 1 tenant 2) + expect(result.rowCount).toBe(3); }); it('RLS-08b: service_role can read buckets (GRANT added for policy subquery)', async () => { await switchRole('service_role', { 'app.database_id': '1' }); const result = await pg.query('SELECT * FROM object_store_public.buckets'); - expect(result.rowCount).toBeGreaterThan(0); + expect(result.rowCount).toBe(3); }); }); From f82bd177f757641e5a82f365b2df13a48796bdc6 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 12 Mar 2026 11:48:20 +0800 Subject: [PATCH 04/15] added tests and debug --- .../__tests__/upload-resolver.e2e.test.ts | 240 ++++++++++++++++++ .../graphile-settings/src/upload-resolver.ts | 20 ++ 2 files changed, 260 insertions(+) create mode 100644 graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts diff --git 
a/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts new file mode 100644 index 000000000..5965f7460 --- /dev/null +++ b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts @@ -0,0 +1,240 @@ +import { S3StorageProvider } from '@constructive-io/s3-streamer'; +import { Client as PgClient } from 'pg'; +import { Readable } from 'stream'; + +jest.setTimeout(60000); + +const SCHEMA = 'object_store_public'; +const TABLE = 'files'; +const BUCKET = 'test-bucket'; +const USER_ID = 'aaaaaaaa-0000-0000-0000-000000000001'; +const MINIMAL_PNG = Buffer.from( + 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMCAO6xM4cAAAAASUVORK5CYII=', + 'base64', +); + +type UploadResolverModule = typeof import('../src/upload-resolver'); + +function makePg(): PgClient { + return new PgClient({ + host: 'localhost', + port: 5432, + user: 'postgres', + password: 'password', + database: 'constructive', + }); +} + +function makeStorage(): S3StorageProvider { + return new S3StorageProvider({ + bucket: BUCKET, + awsRegion: 'us-east-1', + awsAccessKey: 'minioadmin', + awsSecretKey: 'minioadmin', + minioEndpoint: 'http://localhost:9000', + provider: 'minio', + }); +} + +async function setupObjectStoreSchema(pg: PgClient): Promise { + await pg.query('CREATE EXTENSION IF NOT EXISTS pgcrypto'); + await pg.query(`CREATE SCHEMA IF NOT EXISTS ${SCHEMA}`); + await pg.query(` + DO $$ BEGIN + CREATE TYPE ${SCHEMA}.file_status AS ENUM ( + 'pending', 'processing', 'ready', 'error', 'deleting' + ); + EXCEPTION WHEN duplicate_object THEN NULL; + END $$ + `); + await pg.query(` + CREATE TABLE IF NOT EXISTS ${SCHEMA}.${TABLE} ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + database_id integer NOT NULL, + bucket_key text NOT NULL DEFAULT 'default', + key text NOT NULL, + status ${SCHEMA}.file_status NOT NULL DEFAULT 'pending', + status_reason text, + etag text, + source_table text, + source_column text, + 
source_id uuid, + processing_started_at timestamptz, + created_by uuid, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + CONSTRAINT graphile_settings_object_store_files_pkey PRIMARY KEY (id, database_id) + ) + `); +} + +async function cleanupObjectStoreRows(pg: PgClient): Promise { + await pg.query(`DELETE FROM ${SCHEMA}.${TABLE}`); +} + +async function objectExists(storage: S3StorageProvider, key: string): Promise { + try { + await storage.head(key); + return true; + } catch { + return false; + } +} + +async function loadUploadResolverModule(): Promise { + jest.resetModules(); + return import('../src/upload-resolver'); +} + +function makeUpload(filename: string, body: Buffer) { + return { + filename, + createReadStream: () => Readable.from(body), + }; +} + +describe('upload-resolver e2e', () => { + let pg: PgClient; + let storage: S3StorageProvider; + let uploadResolverModule: UploadResolverModule | null = null; + const originalEnv = { ...process.env }; + const uploadedKeys = new Set(); + + beforeAll(async () => { + process.env.UPLOAD_V2_ENABLED = 'true'; + process.env.BUCKET_PROVIDER = 'minio'; + process.env.BUCKET_NAME = BUCKET; + process.env.AWS_REGION = 'us-east-1'; + process.env.AWS_ACCESS_KEY = 'minioadmin'; + process.env.AWS_SECRET_KEY = 'minioadmin'; + process.env.MINIO_ENDPOINT = 'http://localhost:9000'; + process.env.PGHOST = 'localhost'; + process.env.PGPORT = '5432'; + process.env.PGUSER = 'postgres'; + process.env.PGPASSWORD = 'password'; + process.env.PGDATABASE = 'constructive'; + + pg = makePg(); + await pg.connect(); + storage = makeStorage(); + await setupObjectStoreSchema(pg); + }); + + afterEach(async () => { + if (uploadResolverModule) { + await uploadResolverModule.__resetUploadResolverForTests(); + uploadResolverModule = null; + } + + for (const key of uploadedKeys) { + try { + await storage.delete(key); + } catch { + // ignore cleanup failures for already-deleted objects + } + } + 
uploadedKeys.clear(); + + await cleanupObjectStoreRows(pg); + }); + + afterAll(async () => { + process.env = originalEnv; + await pg.end(); + storage.destroy(); + }); + + it('streams a REST upload to storage and inserts a pending files row', async () => { + uploadResolverModule = await loadUploadResolverModule(); + + const result = await uploadResolverModule.streamToStorage( + Readable.from(MINIMAL_PNG), + 'avatar.png', + { + databaseId: '1', + userId: USER_ID, + bucketKey: 'default', + } + ); + + expect(result.mime).toBe('image/png'); + expect(result.filename).toBe('avatar.png'); + expect(result.key).toMatch(/^1\/default\/[0-9a-f-]+_origin$/); + + uploadedKeys.add(result.key as string); + expect(await objectExists(storage, result.key as string)).toBe(true); + + const dbResult = await pg.query( + `SELECT database_id, bucket_key, key, status, created_by, etag + FROM ${SCHEMA}.${TABLE} + WHERE key = $1`, + [result.key] + ); + + expect(dbResult.rowCount).toBe(1); + expect(dbResult.rows[0]).toEqual( + expect.objectContaining({ + database_id: 1, + bucket_key: 'default', + key: result.key, + status: 'pending', + created_by: USER_ID, + }) + ); + expect(dbResult.rows[0].etag).toEqual(expect.any(String)); + expect(dbResult.rows[0].etag.length).toBeGreaterThan(0); + }); + + it('handles inline image uploads and inserts the same pending files row shape', async () => { + uploadResolverModule = await loadUploadResolverModule(); + + const imageUploadDefinition = uploadResolverModule.constructiveUploadFieldDefinitions.find( + (definition) => 'name' in definition && definition.name === 'image' + ); + + if (!imageUploadDefinition) { + throw new Error('Missing image upload definition'); + } + + const result = await imageUploadDefinition.resolve( + makeUpload('inline.png', MINIMAL_PNG) as any, + {}, + { + req: { + api: { databaseId: '1' }, + token: { user_id: USER_ID }, + }, + }, + { uploadPlugin: { tags: {}, type: 'image' } } as any + ); + + expect(result).toEqual( + 
expect.objectContaining({ + filename: 'inline.png', + mime: 'image/png', + key: expect.stringMatching(/^1\/default\/[0-9a-f-]+_origin$/), + url: expect.any(String), + }) + ); + + const key = (result as { key: string }).key; + uploadedKeys.add(key); + expect(await objectExists(storage, key)).toBe(true); + + const dbResult = await pg.query( + `SELECT database_id, bucket_key, key, status, created_by + FROM ${SCHEMA}.${TABLE} + WHERE key = $1`, + [key] + ); + + expect(dbResult.rowCount).toBe(1); + expect(dbResult.rows[0]).toEqual({ + database_id: 1, + bucket_key: 'default', + key, + status: 'pending', + created_by: USER_ID, + }); + }); +}); diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index c744ee11f..fb9754565 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -221,6 +221,26 @@ export async function streamToStorage( }; } +export async function __resetUploadResolverForTests(): Promise { + if (streamer && typeof (streamer as { destroy?: () => void }).destroy === 'function') { + streamer.destroy(); + } + streamer = null; + + if ( + storageProvider + && typeof (storageProvider as StorageProvider & { destroy?: () => void }).destroy === 'function' + ) { + (storageProvider as StorageProvider & { destroy: () => void }).destroy(); + } + storageProvider = null; + + if (pgPool) { + await pgPool.end(); + } + pgPool = null; +} + /** * Upload resolver that streams files to S3/MinIO. 
* From ce9b8c4b770aa7566a180b8b161e0adb7b3b5718 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Fri, 13 Mar 2026 16:31:17 +0800 Subject: [PATCH 05/15] change schema names --- .../__tests__/upload-resolver.e2e.test.ts | 12 +- .../graphile-settings/src/upload-resolver.ts | 6 +- graphql/server/src/middleware/upload.ts | 2 +- .../__tests__/object-store-lifecycle.test.ts | 140 ++-- migrations/__tests__/object-store-rls.test.ts | 106 +-- migrations/files_store.sql | 639 ++++++++++++++++++ migrations/object_store.sql | 164 ++--- 7 files changed, 854 insertions(+), 215 deletions(-) create mode 100644 migrations/files_store.sql diff --git a/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts index 5965f7460..a06dd603e 100644 --- a/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts +++ b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts @@ -4,7 +4,7 @@ import { Readable } from 'stream'; jest.setTimeout(60000); -const SCHEMA = 'object_store_public'; +const SCHEMA = 'files_store_public'; const TABLE = 'files'; const BUCKET = 'test-bucket'; const USER_ID = 'aaaaaaaa-0000-0000-0000-000000000001'; @@ -36,7 +36,7 @@ function makeStorage(): S3StorageProvider { }); } -async function setupObjectStoreSchema(pg: PgClient): Promise { +async function setupFilesStoreSchema(pg: PgClient): Promise { await pg.query('CREATE EXTENSION IF NOT EXISTS pgcrypto'); await pg.query(`CREATE SCHEMA IF NOT EXISTS ${SCHEMA}`); await pg.query(` @@ -63,12 +63,12 @@ async function setupObjectStoreSchema(pg: PgClient): Promise { created_by uuid, created_at timestamptz NOT NULL DEFAULT now(), updated_at timestamptz NOT NULL DEFAULT now(), - CONSTRAINT graphile_settings_object_store_files_pkey PRIMARY KEY (id, database_id) + CONSTRAINT graphile_settings_files_store_files_pkey PRIMARY KEY (id, database_id) ) `); } -async function cleanupObjectStoreRows(pg: PgClient): Promise { +async function 
cleanupFilesStoreRows(pg: PgClient): Promise { await pg.query(`DELETE FROM ${SCHEMA}.${TABLE}`); } @@ -117,7 +117,7 @@ describe('upload-resolver e2e', () => { pg = makePg(); await pg.connect(); storage = makeStorage(); - await setupObjectStoreSchema(pg); + await setupFilesStoreSchema(pg); }); afterEach(async () => { @@ -135,7 +135,7 @@ describe('upload-resolver e2e', () => { } uploadedKeys.clear(); - await cleanupObjectStoreRows(pg); + await cleanupFilesStoreRows(pg); }); afterAll(async () => { diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index fb9754565..2e5d08647 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -9,7 +9,7 @@ * * V2 mode (UPLOAD_V2_ENABLED=true): * - Key format: {database_id}/{bucket_key}/{uuid}_origin - * - INSERT into object_store_public.files after S3 upload + * - INSERT into files_store_public.files after S3 upload * - Returns { key, url, mime, filename } for image/upload types * * Legacy mode (UPLOAD_V2_ENABLED=false, default): @@ -144,7 +144,7 @@ function generateV2Key(databaseId: string, bucketKey: string): { key: string; fi } /** - * INSERTs a row into object_store_public.files. + * INSERTs a row into files_store_public.files. * Fires the AFTER INSERT trigger which enqueues a process-image job. 
*/ async function insertFileRecord( @@ -157,7 +157,7 @@ async function insertFileRecord( ): Promise { const pool = getPgPool(); await pool.query( - `INSERT INTO object_store_public.files + `INSERT INTO files_store_public.files (id, database_id, bucket_key, key, etag, created_by) VALUES ($1, $2, $3, $4, $5, $6)`, [fileId, Number(databaseId), bucketKey, key, etag, createdBy], diff --git a/graphql/server/src/middleware/upload.ts b/graphql/server/src/middleware/upload.ts index 71ab851b1..0d34775c4 100644 --- a/graphql/server/src/middleware/upload.ts +++ b/graphql/server/src/middleware/upload.ts @@ -270,7 +270,7 @@ export const createUploadAuthenticateMiddleware = ( * 2. GraphQL mutation -> patch row with the returned metadata * * When UPLOAD_V2_ENABLED=true, passes databaseId and userId to streamToStorage - * so it can use the new key format and INSERT into object_store_public.files. + * so it can use the new key format and INSERT into files_store_public.files. */ export const uploadRoute: RequestHandler[] = [ parseFileWithErrors, diff --git a/migrations/__tests__/object-store-lifecycle.test.ts b/migrations/__tests__/object-store-lifecycle.test.ts index be2b7ac80..e18cf0aad 100644 --- a/migrations/__tests__/object-store-lifecycle.test.ts +++ b/migrations/__tests__/object-store-lifecycle.test.ts @@ -4,7 +4,7 @@ import { resolve } from 'path'; import { getConnections, PgTestClient, seed } from 'pgsql-test'; -const MIGRATION_PATH = resolve(__dirname, '../object_store.sql'); +const MIGRATION_PATH = resolve(__dirname, '../files_store.sql'); const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; @@ -57,10 +57,10 @@ beforeAll(async () => { `); // Grants needed for isolated test (normally from pgpm extension deploy) - await pg.query('GRANT USAGE ON SCHEMA object_store_public TO authenticated'); - await pg.query('GRANT USAGE ON SCHEMA object_store_public TO service_role'); - await pg.query('GRANT SELECT ON object_store_public.buckets TO authenticated'); - await pg.query('GRANT 
SELECT ON object_store_public.buckets TO service_role'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO authenticated'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO service_role'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO authenticated'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO service_role'); // Replace the app_jobs.add_job stub with one that records calls await pg.query(` @@ -98,7 +98,7 @@ beforeAll(async () => { // Seed a default bucket await pg.query(` - INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config) + INSERT INTO files_store_public.buckets (database_id, key, name, is_public, config) VALUES (1, 'default', 'Default Bucket', false, '{}') `); }); @@ -134,13 +134,13 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { }); await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag) VALUES ($1, 1, $2, 'default', $3, 'etag-origin') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); // Verify file exists with pending status const file = await pg.query( - 'SELECT * FROM object_store_public.files WHERE id = $1', + 'SELECT * FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(file.rowCount).toBe(1); @@ -164,7 +164,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { it('step 2: service_role transitions pending → processing', async () => { // Insert as superuser first await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'pending') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); await clearJobLog(); @@ -176,14 +176,14 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { }); await pg.query( 
- `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, + `UPDATE files_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, [ORIGIN_ID] ); // Verify processing_started_at is set await pg.query('RESET ROLE'); const file = await pg.query( - 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + 'SELECT status, processing_started_at FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(file.rows[0].status).toBe('processing'); @@ -193,7 +193,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { it('step 3: service_role inserts version rows (status=ready, bypasses job trigger)', async () => { // Setup: origin in processing state await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'processing') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); await clearJobLog(); @@ -205,7 +205,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { }); await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (database_id, key, bucket_key, created_by, etag, status) VALUES (1, $1, 'default', $2, 'etag-thumb', 'ready'), (1, $3, 'default', $2, 'etag-large', 'ready') @@ -218,7 +218,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { // Verify all three rows exist const files = await pg.query( - `SELECT key, status FROM object_store_public.files WHERE database_id = 1 ORDER BY key` + `SELECT key, status FROM files_store_public.files WHERE database_id = 1 ORDER BY key` ); expect(files.rowCount).toBe(3); expect(files.rows.map((r: any) => ({ key: r.key, status: r.status }))).toEqual([ @@ -231,7 +231,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () 
=> { it('step 4: service_role transitions origin processing → ready', async () => { // Setup: origin in processing state await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'processing') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); @@ -241,14 +241,14 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { }); await pg.query( - `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, + `UPDATE files_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, [ORIGIN_ID] ); // Verify status and processing_started_at cleared await pg.query('RESET ROLE'); const file = await pg.query( - 'SELECT status, processing_started_at, updated_at, created_at FROM object_store_public.files WHERE id = $1', + 'SELECT status, processing_started_at, updated_at, created_at FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(file.rows[0].status).toBe('ready'); @@ -261,7 +261,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { it('step 5: user sees origin + versions after processing completes', async () => { // Setup: origin ready + 2 version rows ready await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $4, 'etag-origin', 'ready'), (gen_random_uuid(), 1, $3, 'default', $4, 'etag-thumb', 'ready'), @@ -275,7 +275,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { }); const files = await pg.query( - `SELECT key, status FROM object_store_public.files WHERE key LIKE '1/default/abc123%' ORDER BY key` + `SELECT key, status FROM files_store_public.files WHERE key LIKE '1/default/abc123%' ORDER BY key` ); 
expect(files.rowCount).toBe(3); expect(files.rows.every((r: any) => r.status === 'ready')).toBe(true); @@ -301,7 +301,7 @@ describe('E2E-02: Error + Retry Path', () => { it('processing → error stores status_reason', async () => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'processing') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); @@ -311,7 +311,7 @@ describe('E2E-02: Error + Retry Path', () => { }); await pg.query( - `UPDATE object_store_public.files + `UPDATE files_store_public.files SET status = 'error', status_reason = 'sharp: unsupported image format' WHERE id = $1 AND database_id = 1`, [ORIGIN_ID] @@ -319,7 +319,7 @@ describe('E2E-02: Error + Retry Path', () => { await pg.query('RESET ROLE'); const file = await pg.query( - 'SELECT status, status_reason, processing_started_at FROM object_store_public.files WHERE id = $1', + 'SELECT status, status_reason, processing_started_at FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(file.rows[0].status).toBe('error'); @@ -330,7 +330,7 @@ describe('E2E-02: Error + Retry Path', () => { it('error → pending (retry) re-queues process-image job', async () => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'error') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); await clearJobLog(); @@ -341,7 +341,7 @@ describe('E2E-02: Error + Retry Path', () => { }); await pg.query( - `UPDATE object_store_public.files SET status = 'pending' WHERE id = $1 AND database_id = 1`, + `UPDATE files_store_public.files SET status = 'pending' WHERE id = $1 AND database_id = 1`, [ORIGIN_ID] ); @@ -359,7 +359,7 @@ 
describe('E2E-02: Error + Retry Path', () => { it('full retry cycle: pending → processing → error → pending → processing → ready', async () => { // Step 1: upload (pending) await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag) VALUES ($1, 1, $2, 'default', $3, 'etag') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); @@ -367,25 +367,25 @@ describe('E2E-02: Error + Retry Path', () => { // Step 2: processing await pg.query( - `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'processing' WHERE id = $1`, [ORIGIN_ID] ); - let file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + let file = await pg.query('SELECT * FROM files_store_public.files WHERE id = $1', [ORIGIN_ID]); expect(file.rows[0].status).toBe('processing'); expect(file.rows[0].processing_started_at).not.toBeNull(); // Step 3: error await pg.query( - `UPDATE object_store_public.files SET status = 'error', status_reason = 'timeout' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'error', status_reason = 'timeout' WHERE id = $1`, [ORIGIN_ID] ); - file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + file = await pg.query('SELECT * FROM files_store_public.files WHERE id = $1', [ORIGIN_ID]); expect(file.rows[0].status).toBe('error'); expect(file.rows[0].processing_started_at).toBeNull(); // Step 4: retry (error → pending) — should re-queue job await pg.query( - `UPDATE object_store_public.files SET status = 'pending' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'pending' WHERE id = $1`, [ORIGIN_ID] ); let jobs = await getJobLog(); @@ -395,16 +395,16 @@ describe('E2E-02: Error + Retry Path', () => { // Step 5: processing again await clearJobLog(); await pg.query( - `UPDATE 
object_store_public.files SET status = 'processing' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'processing' WHERE id = $1`, [ORIGIN_ID] ); // Step 6: ready await pg.query( - `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'ready' WHERE id = $1`, [ORIGIN_ID] ); - file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + file = await pg.query('SELECT * FROM files_store_public.files WHERE id = $1', [ORIGIN_ID]); expect(file.rows[0].status).toBe('ready'); expect(file.rows[0].processing_started_at).toBeNull(); }); @@ -430,13 +430,13 @@ describe('E2E-03: Deletion Flow', () => { it('ready → deleting queues delete_s3_object job', async () => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'ready') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); await clearJobLog(); await pg.query( - `UPDATE object_store_public.files SET status = 'deleting' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'deleting' WHERE id = $1`, [ORIGIN_ID] ); @@ -450,7 +450,7 @@ describe('E2E-03: Deletion Flow', () => { it('deleting origin + version rows each queue separate jobs', async () => { const VERSION_ID = '30000000-0000-0000-0000-000000000002'; await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $4, 'etag-origin', 'ready'), ($3, 1, $5, 'default', $4, 'etag-thumb', 'ready') @@ -459,7 +459,7 @@ describe('E2E-03: Deletion Flow', () => { // Delete both await pg.query( - `UPDATE object_store_public.files SET status = 'deleting' + `UPDATE files_store_public.files SET 
status = 'deleting' WHERE database_id = 1 AND key LIKE '1/default/del123%'` ); @@ -471,18 +471,18 @@ describe('E2E-03: Deletion Flow', () => { it('error → deleting is valid (skip processing on permanent failure)', async () => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'error') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); await clearJobLog(); await pg.query( - `UPDATE object_store_public.files SET status = 'deleting', status_reason = 'user cancelled' + `UPDATE files_store_public.files SET status = 'deleting', status_reason = 'user cancelled' WHERE id = $1`, [ORIGIN_ID] ); - const file = await pg.query('SELECT status, status_reason FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + const file = await pg.query('SELECT status, status_reason FROM files_store_public.files WHERE id = $1', [ORIGIN_ID]); expect(file.rows[0].status).toBe('deleting'); expect(file.rows[0].status_reason).toBe('user cancelled'); @@ -493,7 +493,7 @@ describe('E2E-03: Deletion Flow', () => { it('service_role can hard-DELETE after marking as deleting', async () => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'deleting') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); @@ -503,7 +503,7 @@ describe('E2E-03: Deletion Flow', () => { }); const result = await pg.query( - 'DELETE FROM object_store_public.files WHERE id = $1 AND database_id = 1', + 'DELETE FROM files_store_public.files WHERE id = $1 AND database_id = 1', [ORIGIN_ID] ); expect(result.rowCount).toBe(1); @@ -511,7 +511,7 @@ describe('E2E-03: Deletion Flow', () => { // Verify gone await pg.query('RESET ROLE'); const check = 
await pg.query( - 'SELECT * FROM object_store_public.files WHERE id = $1', + 'SELECT * FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(check.rowCount).toBe(0); @@ -549,13 +549,13 @@ describe('E2E-04: State Machine Validation', () => { 'rejects %s → %s', async (from, to) => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', $4) `, [ORIGIN_ID, ORIGIN_KEY, USER_A, from]); await expect( pg.query( - `UPDATE object_store_public.files SET status = $1 WHERE id = $2`, + `UPDATE files_store_public.files SET status = $1 WHERE id = $2`, [to, ORIGIN_ID] ) ).rejects.toThrow(/Invalid status transition/); @@ -577,17 +577,17 @@ describe('E2E-04: State Machine Validation', () => { 'allows %s → %s', async (from, to) => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', $4) `, [ORIGIN_ID, ORIGIN_KEY, USER_A, from]); await pg.query( - `UPDATE object_store_public.files SET status = $1 WHERE id = $2`, + `UPDATE files_store_public.files SET status = $1 WHERE id = $2`, [to, ORIGIN_ID] ); const file = await pg.query( - 'SELECT status FROM object_store_public.files WHERE id = $1', + 'SELECT status FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(file.rows[0].status).toBe(to); @@ -611,7 +611,7 @@ describe('E2E-05: Constraints', () => { it('rejects empty key', async () => { await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '', 'default', 'x') `) ).rejects.toThrow(/files_key_not_empty/); @@ -621,7 +621,7 
@@ describe('E2E-05: Constraints', () => { const longKey = '1/default/' + 'a'.repeat(1020); await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, $1, 'default', 'x') `, [longKey]) ).rejects.toThrow(/files_key_max_length/); @@ -630,7 +630,7 @@ describe('E2E-05: Constraints', () => { it('rejects invalid bucket_key format', async () => { await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '1/BAD/test_origin', 'BAD-BUCKET', 'x') `) ).rejects.toThrow(/files_bucket_key_format/); @@ -639,7 +639,7 @@ describe('E2E-05: Constraints', () => { it('rejects partial source reference (source_table without source_column)', async () => { await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag, source_table) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag, source_table) VALUES (1, '1/default/partial_origin', 'default', 'x', 'some_schema.some_table') `) ).rejects.toThrow(/files_source_complete/); @@ -647,7 +647,7 @@ describe('E2E-05: Constraints', () => { it('accepts complete source reference', async () => { const result = await pg.query(` - INSERT INTO object_store_public.files + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag, source_table, source_column, source_id) VALUES (1, '1/default/ref_origin', 'default', 'x', 'some_schema.some_table', 'image', gen_random_uuid()) @@ -659,13 +659,13 @@ describe('E2E-05: Constraints', () => { it('enforces unique key per tenant', async () => { await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '1/default/dup_origin', 'default', 'e1') `); await expect( 
pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '1/default/dup_origin', 'default', 'e2') `) ).rejects.toThrow(/files_key_unique/); @@ -673,12 +673,12 @@ describe('E2E-05: Constraints', () => { it('allows same key in different tenants', async () => { await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '1/default/shared_origin', 'default', 'e1') `); const result = await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (2, '1/default/shared_origin', 'default', 'e2') RETURNING * `); @@ -698,7 +698,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { beforeEach(async () => { await pg.beforeEach(); await pg.query(` - INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config) + INSERT INTO files_store_public.buckets (database_id, key, name, is_public, config) VALUES (1, 'default', 'Default Bucket', false, '{}') ON CONFLICT DO NOTHING `); @@ -719,13 +719,13 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, created_by, etag) VALUES (1, $1, 'default', $2, 'etag-origin') `, [ORIGIN_KEY, USER_A]); // Verify: user sees their pending file let myFiles = await pg.query( - `SELECT key, status FROM object_store_public.files WHERE key = $1`, + `SELECT key, status FROM files_store_public.files WHERE key = $1`, [ORIGIN_KEY] ); expect(myFiles.rowCount).toBe(1); @@ -739,7 +739,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { // Get the origin ID for later const originRow = await pg.query( - 
`SELECT id FROM object_store_public.files WHERE key = $1 AND database_id = 1`, + `SELECT id FROM files_store_public.files WHERE key = $1 AND database_id = 1`, [ORIGIN_KEY] ); const originId = originRow.rows[0].id; @@ -754,13 +754,13 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); await pg.query( - `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, + `UPDATE files_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, [originId] ); await pg.query('RESET ROLE'); let origin = await pg.query( - 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + 'SELECT status, processing_started_at FROM files_store_public.files WHERE id = $1', [originId] ); expect(origin.rows[0].status).toBe('processing'); @@ -775,7 +775,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (database_id, key, bucket_key, created_by, etag, status) VALUES (1, $1, 'default', $2, 'etag-thumb', 'ready'), (1, $3, 'default', $2, 'etag-large', 'ready') @@ -795,13 +795,13 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); await pg.query( - `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, + `UPDATE files_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, [originId] ); await pg.query('RESET ROLE'); origin = await pg.query( - 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + 'SELECT status, processing_started_at FROM files_store_public.files WHERE id = $1', [originId] ); expect(origin.rows[0].status).toBe('ready'); @@ -816,7 +816,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); const allFiles = await pg.query( - `SELECT key, status FROM object_store_public.files + `SELECT key, status FROM files_store_public.files 
WHERE key LIKE '1/default/full_e2e%' ORDER BY key` ); @@ -839,7 +839,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); await pg.query( - `UPDATE object_store_public.files SET status = 'deleting' + `UPDATE files_store_public.files SET status = 'deleting' WHERE key LIKE '1/default/full_e2e%' AND database_id = 1` ); @@ -860,7 +860,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); const deleted = await pg.query( - `DELETE FROM object_store_public.files + `DELETE FROM files_store_public.files WHERE key LIKE '1/default/full_e2e%' AND database_id = 1` ); expect(deleted.rowCount).toBe(3); @@ -868,7 +868,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { // Verify: no files remain await pg.query('RESET ROLE'); const remaining = await pg.query( - `SELECT * FROM object_store_public.files WHERE key LIKE '1/default/full_e2e%'` + `SELECT * FROM files_store_public.files WHERE key LIKE '1/default/full_e2e%'` ); expect(remaining.rowCount).toBe(0); }); diff --git a/migrations/__tests__/object-store-rls.test.ts b/migrations/__tests__/object-store-rls.test.ts index 2c8c7ab86..62b7ed40b 100644 --- a/migrations/__tests__/object-store-rls.test.ts +++ b/migrations/__tests__/object-store-rls.test.ts @@ -4,7 +4,7 @@ import { resolve } from 'path'; import { getConnections, PgTestClient, seed } from 'pgsql-test'; -const MIGRATION_PATH = resolve(__dirname, '../object_store.sql'); +const MIGRATION_PATH = resolve(__dirname, '../files_store.sql'); const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; const USER_B = 'bbbbbbbb-0000-0000-0000-000000000002'; @@ -29,7 +29,7 @@ async function switchRole( async function insertBuckets() { await pg.query(` - INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config) + INSERT INTO files_store_public.buckets (database_id, key, name, is_public, config) VALUES (1, 'default', 'Default Bucket', false, '{}'), (1, 'public-assets', 'Public Assets', true, '{}'), @@ -39,7 +39,7 @@ async function 
insertBuckets() { async function insertFixtures() { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) VALUES ('11111111-0000-0000-0000-000000000001', 1, 'default', '1/default/aaa_origin', 'ready', $1, 'etag1'), ('11111111-0000-0000-0000-000000000002', 1, 'default', '1/default/bbb_origin', 'pending', $1, 'etag2'), @@ -48,21 +48,21 @@ async function insertFixtures() { `, [USER_A]); await pg.query(` - INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) VALUES ('22222222-0000-0000-0000-000000000001', 1, 'default', '1/default/eee_origin', 'ready', $1, 'etag5'), ('22222222-0000-0000-0000-000000000002', 1, 'default', '1/default/fff_origin', 'pending', $1, 'etag6') `, [USER_B]); await pg.query(` - INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) VALUES ('33333333-0000-0000-0000-000000000001', 1, 'public-assets', '1/public-assets/ggg_origin', 'ready', $1, 'etag7'), ('33333333-0000-0000-0000-000000000002', 1, 'public-assets', '1/public-assets/hhh_origin', 'pending', $1, 'etag8') `, [USER_A]); await pg.query(` - INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) VALUES ('44444444-0000-0000-0000-000000000001', 2, 'default', '2/default/iii_origin', 'ready', $1, 'etag9') `, [USER_C]); @@ -87,18 +87,18 @@ beforeAll(async () => { END $$ `); - // The migration assumes object_store_public schema USAGE is already granted + // The migration assumes files_store_public schema USAGE is 
already granted // (from the original object-store pgpm extension). In isolation, grant explicitly. - await pg.query('GRANT USAGE ON SCHEMA object_store_public TO authenticated'); - await pg.query('GRANT USAGE ON SCHEMA object_store_public TO service_role'); - await pg.query('GRANT USAGE ON SCHEMA object_store_public TO anonymous'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO authenticated'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO service_role'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO anonymous'); // Grant SELECT on buckets to roles that need it for the public_bucket_read policy subquery. // Without this, the EXISTS subquery in files_public_bucket_read fails with // "permission denied for table buckets". - await pg.query('GRANT SELECT ON object_store_public.buckets TO authenticated'); - await pg.query('GRANT SELECT ON object_store_public.buckets TO service_role'); - await pg.query('GRANT SELECT ON object_store_public.buckets TO anonymous'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO authenticated'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO service_role'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO anonymous'); }); afterAll(async () => { @@ -121,13 +121,13 @@ describe('RLS-07: Superuser Bypass', () => { }); it('RLS-07a: superuser sees all tenants', async () => { - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); expect(result.rowCount).toBe(9); }); it('RLS-07b: superuser can INSERT into any tenant', async () => { const result = await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (999, '999/default/su_origin', 'default', 'su-etag') RETURNING id `); @@ -136,7 +136,7 @@ describe('RLS-07: Superuser Bypass', () => { 
it('RLS-07c: superuser can DELETE any row', async () => { const result = await pg.query( - 'DELETE FROM object_store_public.files WHERE database_id = 2' + 'DELETE FROM files_store_public.files WHERE database_id = 2' ); expect(result.rowCount).toBeGreaterThan(0); }); @@ -163,7 +163,7 @@ describe('RLS-01: Tenant Isolation', () => { await switchRole('authenticated'); await expect( - pg.query('SELECT * FROM object_store_public.files') + pg.query('SELECT * FROM files_store_public.files') ).rejects.toThrow(/app\.database_id|invalid input syntax for type integer/); }); @@ -173,7 +173,7 @@ describe('RLS-01: Tenant Isolation', () => { 'app.user_id': USER_A, }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); // Must return rows (prevents vacuous pass on empty result from Array.every) expect(result.rowCount).toBeGreaterThan(0); expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); @@ -188,7 +188,7 @@ describe('RLS-01: Tenant Isolation', () => { await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, bucket_key, key, created_by, etag) + INSERT INTO files_store_public.files (database_id, bucket_key, key, created_by, etag) VALUES (2, 'default', '2/default/bad_origin', $1, 'bad-etag') `, [USER_A]) ).rejects.toThrow(/row-level security/i); @@ -201,7 +201,7 @@ describe('RLS-01: Tenant Isolation', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status_reason = 'test' WHERE id = '44444444-0000-0000-0000-000000000001' AND database_id = 2 `); @@ -231,7 +231,7 @@ describe('RLS-02: Visibility', () => { }); const result = await pg.query( - 'SELECT * FROM object_store_public.files WHERE created_by = $1', + 'SELECT * FROM files_store_public.files WHERE created_by = $1', [USER_A] ); expect(result.rowCount).toBe(6); @@ -244,7 +244,7 @@ describe('RLS-02: Visibility', () => { }); const 
result = await pg.query( - 'SELECT * FROM object_store_public.files WHERE created_by = $1', + 'SELECT * FROM files_store_public.files WHERE created_by = $1', [USER_B] ); expect(result.rowCount).toBe(1); @@ -258,7 +258,7 @@ describe('RLS-02: Visibility', () => { }); const result = await pg.query( - 'SELECT * FROM object_store_public.files WHERE created_by = $1', + 'SELECT * FROM files_store_public.files WHERE created_by = $1', [USER_B] ); expect(result.rowCount).toBe(2); @@ -271,7 +271,7 @@ describe('RLS-02: Visibility', () => { }); const result = await pg.query( - `SELECT * FROM object_store_public.files + `SELECT * FROM files_store_public.files WHERE created_by = $1 AND status != 'ready'`, [USER_A] ); @@ -301,7 +301,7 @@ describe('RLS-03: INSERT/UPDATE Permissions', () => { }); const result = await pg.query(` - INSERT INTO object_store_public.files (database_id, bucket_key, key, created_by, etag) + INSERT INTO files_store_public.files (database_id, bucket_key, key, created_by, etag) VALUES (1, 'default', '1/default/new_origin', $1, 'newtag') RETURNING * `, [USER_A]); @@ -316,7 +316,7 @@ describe('RLS-03: INSERT/UPDATE Permissions', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status_reason = 'user note' WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 `); @@ -331,7 +331,7 @@ describe('RLS-03: INSERT/UPDATE Permissions', () => { await expect( pg.query(` - DELETE FROM object_store_public.files + DELETE FROM files_store_public.files WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 `) ).rejects.toThrow(/permission denied/i); @@ -344,7 +344,7 @@ describe('RLS-03: INSERT/UPDATE Permissions', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status_reason = 'hacked' WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 `); @@ -371,7 +371,7 @@ describe('RLS-04: Anonymous -- No 
Access', () => { await switchRole('anonymous', { 'app.database_id': '1' }); await expect( - pg.query('SELECT * FROM object_store_public.files') + pg.query('SELECT * FROM files_store_public.files') ).rejects.toThrow(/permission denied/i); }); @@ -380,7 +380,7 @@ describe('RLS-04: Anonymous -- No Access', () => { await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '1/default/anon_origin', 'default', 'x') `) ).rejects.toThrow(/permission denied/i); @@ -388,11 +388,11 @@ describe('RLS-04: Anonymous -- No Access', () => { it('RLS-04c: public bucket policy works with temporary GRANT', async () => { // Temporarily grant SELECT to anonymous (rolled back in afterEach) - await pg.query('GRANT SELECT ON object_store_public.files TO anonymous'); + await pg.query('GRANT SELECT ON files_store_public.files TO anonymous'); await switchRole('anonymous', { 'app.database_id': '1' }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); // Anonymous only has files_public_bucket_read (files_visibility is TO authenticated). // Should see only public-assets bucket + ready status. 
@@ -424,7 +424,7 @@ describe('RLS-05: Administrator Override', () => { 'app.role': 'administrator', }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); expect(result.rowCount).toBe(8); expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); }); @@ -436,7 +436,7 @@ describe('RLS-05: Administrator Override', () => { }); const result = await pg.query(` - SELECT * FROM object_store_public.files + SELECT * FROM files_store_public.files WHERE status IN ('pending', 'error') `); expect(result.rowCount).toBe(4); @@ -449,7 +449,7 @@ describe('RLS-05: Administrator Override', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status_reason = 'admin override' WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 `); @@ -463,7 +463,7 @@ describe('RLS-05: Administrator Override', () => { }); const result = await pg.query( - 'SELECT * FROM object_store_public.files WHERE database_id = 2' + 'SELECT * FROM files_store_public.files WHERE database_id = 2' ); expect(result.rowCount).toBe(0); }); @@ -476,7 +476,7 @@ describe('RLS-05: Administrator Override', () => { await expect( pg.query(` - DELETE FROM object_store_public.files + DELETE FROM files_store_public.files WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 `) ).rejects.toThrow(/permission denied/i); @@ -504,7 +504,7 @@ describe('RLS-06: service_role', () => { 'app.role': 'administrator', }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); expect(result.rowCount).toBe(8); expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); }); @@ -515,7 +515,7 @@ describe('RLS-06: service_role', () => { 'app.role': 'administrator', }); - const result = await pg.query('SELECT * FROM 
object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); expect(result.rowCount).toBe(8); }); @@ -524,7 +524,7 @@ describe('RLS-06: service_role', () => { 'app.database_id': '1', }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); // Without app.role and without app.user_id, visibility policy reduces to // status = 'ready' (NULLIF on empty user_id → NULL → created_by check is NULL). // Expect ready files in tenant 1: 111...01, 222...01, 333...01 = 3 @@ -539,7 +539,7 @@ describe('RLS-06: service_role', () => { }); const result = await pg.query(` - DELETE FROM object_store_public.files + DELETE FROM files_store_public.files WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 `); expect(result.rowCount).toBe(1); @@ -552,7 +552,7 @@ describe('RLS-06: service_role', () => { }); const result = await pg.query(` - DELETE FROM object_store_public.files + DELETE FROM files_store_public.files WHERE id = '44444444-0000-0000-0000-000000000001' AND database_id = 2 `); expect(result.rowCount).toBe(0); @@ -576,7 +576,7 @@ describe('RLS-08: Buckets Table Access', () => { it('RLS-08a: authenticated role can read buckets (GRANT added for policy subquery)', async () => { await switchRole('authenticated', { 'app.database_id': '1' }); - const result = await pg.query('SELECT * FROM object_store_public.buckets'); + const result = await pg.query('SELECT * FROM files_store_public.buckets'); // Buckets has no RLS -- all 3 seeded buckets visible (2 tenant 1 + 1 tenant 2) expect(result.rowCount).toBe(3); }); @@ -584,7 +584,7 @@ describe('RLS-08: Buckets Table Access', () => { it('RLS-08b: service_role can read buckets (GRANT added for policy subquery)', async () => { await switchRole('service_role', { 'app.database_id': '1' }); - const result = await pg.query('SELECT * FROM object_store_public.buckets'); + const result = await 
pg.query('SELECT * FROM files_store_public.buckets'); expect(result.rowCount).toBe(3); }); }); @@ -611,7 +611,7 @@ describe('RLS-09: Edge Cases', () => { }); await expect( - pg.query('SELECT * FROM object_store_public.files') + pg.query('SELECT * FROM files_store_public.files') ).rejects.toThrow(/invalid input syntax for type integer/); }); @@ -622,7 +622,7 @@ describe('RLS-09: Edge Cases', () => { }); await expect( - pg.query('SELECT * FROM object_store_public.files') + pg.query('SELECT * FROM files_store_public.files') ).rejects.toThrow(/invalid input syntax for type uuid/); }); @@ -632,7 +632,7 @@ describe('RLS-09: Edge Cases', () => { 'app.user_id': USER_A, }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); expect(result.rowCount).toBe(0); }); @@ -646,7 +646,7 @@ describe('RLS-09: Edge Cases', () => { // Note: RETURNING * would fail here because SELECT policies block reading // the row back (created_by=USER_B != app.user_id=USER_A and status='pending'). 
const result = await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, created_by, etag) VALUES (1, '1/default/spoof_origin', 'default', $1, 'x') `, [USER_B]); expect(result.rowCount).toBe(1); @@ -654,7 +654,7 @@ describe('RLS-09: Edge Cases', () => { // Verify the spoofed created_by was persisted by reading as superuser await pg.query('RESET ROLE'); const verify = await pg.query( - `SELECT created_by FROM object_store_public.files WHERE key = '1/default/spoof_origin'` + `SELECT created_by FROM files_store_public.files WHERE key = '1/default/spoof_origin'` ); expect(verify.rows[0].created_by).toBe(USER_B); }); @@ -666,7 +666,7 @@ describe('RLS-09: Edge Cases', () => { 'app.role': 'authenticated', }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); // Policies: RESTRICTIVE(tenant_isolation) AND PERMISSIVE(visibility OR public_bucket_read OR admin_override) // User A sees: own files (all 6) + User B's ready file (1) = 7 // (User B's pending file is invisible; admin_override is false) @@ -696,7 +696,7 @@ describe('RLS-10: State Machine with RLS', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status = 'processing' WHERE id = '11111111-0000-0000-0000-000000000002' AND database_id = 1 RETURNING * @@ -712,7 +712,7 @@ describe('RLS-10: State Machine with RLS', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status = 'processing' WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 `); @@ -727,7 +727,7 @@ describe('RLS-10: State Machine with RLS', () => { await expect( pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status = 'deleting' WHERE id = 
'11111111-0000-0000-0000-000000000002' AND database_id = 1 `) diff --git a/migrations/files_store.sql b/migrations/files_store.sql new file mode 100644 index 000000000..169ec8f99 --- /dev/null +++ b/migrations/files_store.sql @@ -0,0 +1,639 @@ +-- ============================================================================= +-- Constructive Upload System -- files_store_public schema +-- ============================================================================= +-- Run: psql -h localhost -U postgres -d constructive < migrations/files_store.sql +-- ============================================================================= + +BEGIN; + +-- Ensure required roles exist (idempotent for dev environments) +DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'authenticated') THEN + CREATE ROLE authenticated NOLOGIN; + END IF; + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'service_role') THEN + CREATE ROLE service_role NOLOGIN; + END IF; +END $$; + +-- Ensure app_jobs schema + stub add_job exist (required by trigger functions). +-- In production, app_jobs is deployed by the database-jobs pgpm module. +-- This stub is a no-op that prevents trigger creation from failing in dev. +CREATE SCHEMA IF NOT EXISTS app_jobs; + +CREATE OR REPLACE FUNCTION app_jobs.add_job( + identifier text, + payload json DEFAULT '{}'::json, + queue_name text DEFAULT NULL, + run_at timestamptz DEFAULT NULL, + max_attempts integer DEFAULT NULL, + job_key text DEFAULT NULL, + priority integer DEFAULT NULL, + flags text[] DEFAULT NULL +) RETURNS void AS $$ +BEGIN + -- Stub: in production this is provided by database-jobs pgpm module. + -- In dev, this stub only emits a NOTICE; jobs are discarded, not enqueued.
+ RAISE NOTICE 'app_jobs.add_job stub called: % %', identifier, payload; +END; +$$ LANGUAGE plpgsql; + +-- Ensure schema exists +CREATE SCHEMA IF NOT EXISTS files_store_public; + +-- --------------------------------------------------------------------------- +-- 1. Status ENUM +-- --------------------------------------------------------------------------- + +CREATE TYPE files_store_public.file_status AS ENUM ( + 'pending', + 'processing', + 'ready', + 'error', + 'deleting' +); + +COMMENT ON TYPE files_store_public.file_status IS + 'Lifecycle states for managed files. Transitions: pending->{processing,error}, processing->{ready,error,deleting}, ready->deleting, error->{deleting,pending(retry)}.'; + +-- --------------------------------------------------------------------------- +-- 2. Files Table +-- --------------------------------------------------------------------------- + +CREATE TABLE files_store_public.files ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + database_id integer NOT NULL, + bucket_key text NOT NULL DEFAULT 'default', + key text NOT NULL, + status files_store_public.file_status NOT NULL DEFAULT 'pending', + status_reason text, + etag text, + source_table text, + source_column text, + source_id uuid, + processing_started_at timestamptz, + created_by uuid, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + + CONSTRAINT files_pkey PRIMARY KEY (id, database_id), + CONSTRAINT files_key_unique UNIQUE (key, database_id), + CONSTRAINT files_key_not_empty CHECK (key <> ''), + CONSTRAINT files_key_max_length CHECK (length(key) <= 1024), + CONSTRAINT files_bucket_key_format CHECK (bucket_key ~ '^[a-z][a-z0-9_-]*$'), + CONSTRAINT files_source_table_format CHECK ( + source_table IS NULL OR source_table ~ '^[a-z_]+\.[a-z_]+$' + ), + CONSTRAINT files_source_complete CHECK ( + (source_table IS NULL AND source_column IS NULL AND source_id IS NULL) + OR (source_table IS NOT NULL AND source_column IS NOT NULL AND 
source_id IS NOT NULL) + ) +); + +COMMENT ON TABLE files_store_public.files IS + 'Operational index for S3 objects. Each row = one physical S3 object (including generated versions). NOT a source of truth for file metadata -- domain tables own that.'; +COMMENT ON COLUMN files_store_public.files.key IS + 'Full S3 object key. Format: {database_id}/{bucket_key}/{uuid}_{version_name}. Origin files use _origin suffix.'; +COMMENT ON COLUMN files_store_public.files.etag IS + 'S3 ETag for reconciliation and cache validation.'; +COMMENT ON COLUMN files_store_public.files.status_reason IS + 'Human-readable reason for current status (error details, deletion reason).'; +COMMENT ON COLUMN files_store_public.files.processing_started_at IS + 'Timestamp when processing began. Used to detect stuck jobs (alert at 15 min).'; +COMMENT ON COLUMN files_store_public.files.source_table IS + 'Schema-qualified table name referencing this file (e.g. constructive_users_public.users). NULL until the domain trigger populates it. Free text -- no FK possible.'; +COMMENT ON COLUMN files_store_public.files.source_column IS + 'Column name on the source table (e.g. profile_picture). NULL until domain trigger populates it.'; +COMMENT ON COLUMN files_store_public.files.source_id IS + 'Primary key of the row in the source table. NULL until domain trigger populates it.'; + +-- --------------------------------------------------------------------------- +-- 3. 
Buckets Table +-- --------------------------------------------------------------------------- + +CREATE TABLE files_store_public.buckets ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + database_id integer NOT NULL, + key text NOT NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT false, + config jsonb NOT NULL DEFAULT '{}'::jsonb, + created_by uuid, + updated_by uuid, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + + CONSTRAINT buckets_pkey PRIMARY KEY (id, database_id), + CONSTRAINT buckets_key_unique UNIQUE (key, database_id), + CONSTRAINT buckets_key_format CHECK (key ~ '^[a-z][a-z0-9_-]*$') +); + +COMMENT ON TABLE files_store_public.buckets IS + 'Logical bucket configuration per tenant. The bucket key maps to the S3 key prefix segment. is_public controls RLS policy for anonymous reads.'; + +-- --------------------------------------------------------------------------- +-- 4. Indexes +-- --------------------------------------------------------------------------- + +-- Tenant queries +CREATE INDEX files_database_id_idx + ON files_store_public.files (database_id); + +-- Bucket + tenant queries +CREATE INDEX files_bucket_database_id_idx + ON files_store_public.files (bucket_key, database_id); + +-- "My uploads" queries +CREATE INDEX files_created_by_database_id_created_at_idx + ON files_store_public.files (created_by, database_id, created_at DESC); + +-- Back-reference lookups (cleanup worker, attachment queries) +CREATE INDEX files_source_ref_idx + ON files_store_public.files (source_table, source_column, source_id); + +-- Pending file reaper (hourly cron) +CREATE INDEX files_pending_created_at_idx + ON files_store_public.files (created_at) + WHERE status = 'pending'; + +-- Stuck processing detection +CREATE INDEX files_processing_idx + ON files_store_public.files (processing_started_at) + WHERE status = 'processing'; + +-- Deletion job queue +CREATE INDEX files_deleting_idx + ON 
files_store_public.files (updated_at) + WHERE status = 'deleting'; + +-- Time-range scans on large tables +CREATE INDEX files_created_at_brin_idx + ON files_store_public.files USING brin (created_at); + +-- --------------------------------------------------------------------------- +-- 5. Triggers +-- --------------------------------------------------------------------------- + +-- 5a. AFTER INSERT -- enqueue process-image job +-- NOTE: Version rows are inserted with status = 'ready', which intentionally +-- bypasses this trigger (condition: NEW.status = 'pending'). Only origin +-- uploads (status = 'pending') need processing. + +CREATE OR REPLACE FUNCTION files_store_public.files_after_insert_queue_processing() +RETURNS trigger AS $$ +BEGIN + PERFORM app_jobs.add_job( + 'process-image', + json_build_object( + 'file_id', NEW.id, + 'database_id', NEW.database_id + ), + job_key := 'file:' || NEW.id::text + ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_insert_queue_processing + AFTER INSERT ON files_store_public.files + FOR EACH ROW + WHEN (NEW.status = 'pending') + EXECUTE FUNCTION files_store_public.files_after_insert_queue_processing(); + +COMMENT ON TRIGGER files_after_insert_queue_processing ON files_store_public.files IS + 'Enqueues process-image job for new origin uploads. Version rows inserted as ready intentionally bypass this trigger -- they do not need processing.'; + +-- 5b. 
BEFORE UPDATE -- timestamp + state machine + +CREATE OR REPLACE FUNCTION files_store_public.files_before_update_timestamp() +RETURNS trigger AS $$ +BEGIN + -- Always update timestamp + NEW.updated_at := now(); + + -- State machine validation (only when status changes) + IF OLD.status IS DISTINCT FROM NEW.status THEN + IF NOT ( + (OLD.status = 'pending' AND NEW.status IN ('processing', 'error')) + OR (OLD.status = 'processing' AND NEW.status IN ('ready', 'error', 'deleting')) + OR (OLD.status = 'ready' AND NEW.status = 'deleting') + OR (OLD.status = 'error' AND NEW.status IN ('deleting', 'pending')) + ) THEN + RAISE EXCEPTION 'Invalid status transition from % to %', OLD.status, NEW.status; + END IF; + + -- Track processing start/end + IF NEW.status = 'processing' THEN + NEW.processing_started_at := now(); + ELSIF OLD.status = 'processing' AND NEW.status <> 'processing' THEN + NEW.processing_started_at := NULL; + END IF; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_before_update_timestamp + BEFORE UPDATE ON files_store_public.files + FOR EACH ROW + EXECUTE FUNCTION files_store_public.files_before_update_timestamp(); + +COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS + 'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.'; + +-- 5c. 
AFTER UPDATE -- enqueue delete_s3_object job + +CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion() +RETURNS trigger AS $$ +BEGIN + PERFORM app_jobs.add_job( + 'delete_s3_object', + json_build_object( + 'file_id', NEW.id, + 'database_id', NEW.database_id, + 'key', NEW.key + ), + job_key := 'delete:' || NEW.id::text + ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_update_queue_deletion + AFTER UPDATE ON files_store_public.files + FOR EACH ROW + WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting') + EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion(); + +COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS + 'Enqueues delete_s3_object job when a file transitions to deleting status. Each version row gets its own deletion job.'; + +-- 5d. AFTER UPDATE -- re-enqueue process-image on error->pending retry + +CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_retry() +RETURNS trigger AS $$ +BEGIN + PERFORM app_jobs.add_job( + 'process-image', + json_build_object( + 'file_id', NEW.id, + 'database_id', NEW.database_id + ), + job_key := 'file:' || NEW.id::text + ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_update_queue_retry + AFTER UPDATE ON files_store_public.files + FOR EACH ROW + WHEN (OLD.status = 'error' AND NEW.status = 'pending') + EXECUTE FUNCTION files_store_public.files_after_update_queue_retry(); + +COMMENT ON TRIGGER files_after_update_queue_retry ON files_store_public.files IS + 'Re-enqueues process-image job when a file is retried (error->pending). Without this trigger, the retry would change status but never re-enqueue the processing job.'; + +-- --------------------------------------------------------------------------- +-- 6. 
RLS Policies & Grants +-- --------------------------------------------------------------------------- + +ALTER TABLE files_store_public.files ENABLE ROW LEVEL SECURITY; +ALTER TABLE files_store_public.files FORCE ROW LEVEL SECURITY; + +-- Policy 1: Tenant isolation (RESTRICTIVE -- always ANDed with all other policies) +-- Without this being RESTRICTIVE, permissive policies would OR together and +-- allow cross-tenant access (e.g. a ready file in tenant 2 visible via files_visibility). +CREATE POLICY files_tenant_isolation ON files_store_public.files + AS RESTRICTIVE + FOR ALL + USING (database_id = current_setting('app.database_id')::integer) + WITH CHECK (database_id = current_setting('app.database_id')::integer); + +-- Policy 2: Visibility for SELECT (authenticated + service_role only) +-- Non-ready files visible only to the uploader. Uses NULLIF for safe uuid handling +-- when app.user_id is missing or empty (returns NULL instead of cast error). +-- Scoped to authenticated/service_role so anonymous only gets public_bucket_read. 
+CREATE POLICY files_visibility ON files_store_public.files + FOR SELECT + TO authenticated, service_role + USING ( + status = 'ready' + OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid + ); + +-- Policy 3: Public bucket read for SELECT (all roles including anonymous) +CREATE POLICY files_public_bucket_read ON files_store_public.files + FOR SELECT + USING ( + EXISTS ( + SELECT 1 FROM files_store_public.buckets b + WHERE b.key = bucket_key + AND b.database_id = files.database_id + AND b.is_public = true + ) + AND status = 'ready' + ); + +-- Policy 4: Admin override (all operations, authenticated + service_role) +CREATE POLICY files_admin_override ON files_store_public.files + FOR ALL + TO authenticated, service_role + USING (current_setting('app.role', true) = 'administrator') + WITH CHECK (current_setting('app.role', true) = 'administrator'); + +-- Policy 5: INSERT access (permissive base so non-admin users can insert) +CREATE POLICY files_insert_access ON files_store_public.files + FOR INSERT + TO authenticated, service_role + WITH CHECK (true); + +-- Policy 6: UPDATE access (replicates visibility for row targeting) +-- Non-admin users can only update rows they can see (ready or own). +-- Admin override policy covers admin UPDATE access separately. 
+CREATE POLICY files_update_access ON files_store_public.files + FOR UPDATE + TO authenticated, service_role + USING ( + status = 'ready' + OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid + ) + WITH CHECK (true); + +-- Policy 7: DELETE access (service_role only, grants already restrict authenticated) +CREATE POLICY files_delete_access ON files_store_public.files + FOR DELETE + TO service_role + USING (true); + +-- Grants +GRANT SELECT, INSERT, UPDATE ON files_store_public.files TO authenticated; +GRANT SELECT, INSERT, UPDATE, DELETE ON files_store_public.files TO service_role; + +COMMENT ON POLICY files_tenant_isolation ON files_store_public.files IS + 'Every query is scoped to the current tenant via app.database_id session variable.'; +COMMENT ON POLICY files_visibility ON files_store_public.files IS + 'Users see all ready files in their tenant. Non-ready files visible only to the uploader.'; +COMMENT ON POLICY files_public_bucket_read ON files_store_public.files IS + 'Allows unauthenticated reads on ready files in public buckets.'; +COMMENT ON POLICY files_admin_override ON files_store_public.files IS + 'Administrators can see and modify all files in the tenant regardless of status or creator.'; + +-- --------------------------------------------------------------------------- +-- 7. Domain Table Triggers +-- --------------------------------------------------------------------------- + +-- 7a. Generic trigger function: back-reference population +-- +-- When a domain table's image/upload/attachment column is updated with an S3 key, +-- find the files row by key and populate source_table, source_column, source_id. +-- Also finds version rows by key prefix and populates the same back-reference. +-- +-- Parameters (passed via TG_ARGV): +-- TG_ARGV[0] = column name (e.g. 'profile_picture') +-- TG_ARGV[1] = schema-qualified table name (e.g. 
'constructive_users_public.users') + +CREATE OR REPLACE FUNCTION files_store_public.populate_file_back_reference() +RETURNS trigger AS $$ +DECLARE + col_name text := TG_ARGV[0]; + table_name text := TG_ARGV[1]; + new_val jsonb; + old_val jsonb; + new_key text; + old_key text; + base_key text; + db_id integer; +BEGIN + -- Get the database_id from session context + db_id := current_setting('app.database_id')::integer; + + -- Extract the jsonb value from the specified column (dynamic) + EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO new_val USING NEW; + EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO old_val USING OLD; + + -- Extract the key from the new and old values + new_key := new_val ->> 'key'; + old_key := old_val ->> 'key'; + + -- If no key change, nothing to do + IF new_key IS NOT DISTINCT FROM old_key THEN + RETURN NEW; + END IF; + + -- Handle file replacement: mark old files as deleting + IF old_key IS NOT NULL AND old_key <> '' THEN + -- Derive base key for the old file (strip version suffix) + base_key := regexp_replace(old_key, '_[^_]+$', ''); + + -- Mark old origin + all versions as deleting + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'replaced by new file' + WHERE database_id = db_id + AND (key = old_key OR key LIKE base_key || '_%') + AND status NOT IN ('deleting'); + END IF; + + -- Populate back-reference on new file (origin + versions) + IF new_key IS NOT NULL AND new_key <> '' THEN + -- Derive base key for the new file + base_key := regexp_replace(new_key, '_[^_]+$', ''); + + -- Set back-reference on origin + all version rows + UPDATE files_store_public.files + SET source_table = table_name, + source_column = col_name, + source_id = NEW.id + WHERE database_id = db_id + AND (key = new_key OR key LIKE base_key || '_%'); + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +COMMENT ON FUNCTION files_store_public.populate_file_back_reference() IS + 'Generic trigger function for domain tables. 
Populates source_table/source_column/source_id on files rows when image/upload/attachment columns are updated. Handles file replacement by marking old files as deleting.'; + +-- 7b. Generic trigger function: source row deletion +-- +-- When a domain row is deleted, mark all associated files as deleting. + +CREATE OR REPLACE FUNCTION files_store_public.mark_files_deleting_on_source_delete() +RETURNS trigger AS $$ +DECLARE + col_name text := TG_ARGV[0]; + table_name text := TG_ARGV[1]; + db_id integer; +BEGIN + db_id := current_setting('app.database_id')::integer; + + -- Mark all files for this source row + column as deleting + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'source row deleted' + WHERE database_id = db_id + AND source_table = table_name + AND source_column = col_name + AND source_id = OLD.id + AND status NOT IN ('deleting'); + + RETURN OLD; +END; +$$ LANGUAGE plpgsql; + +COMMENT ON FUNCTION files_store_public.mark_files_deleting_on_source_delete() IS + 'Generic trigger function for domain tables. Marks all associated files as deleting when a domain row is deleted.'; + +-- 7c. CREATE TRIGGER statements for all 6 tables, 9 columns +-- +-- Each domain column gets two triggers: +-- - AFTER UPDATE: back-reference population + file replacement +-- - BEFORE DELETE: mark files deleting on source row deletion +-- +-- These are wrapped in a DO block so they gracefully skip tables that +-- don't exist yet (e.g. in fresh dev environments). In production, +-- domain tables will exist before this migration runs. 
+ +DO $domain_triggers$ +DECLARE + _tbl text; +BEGIN + -- constructive_users_public.users.profile_picture + SELECT 'constructive_users_public.users' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'constructive_users_public' AND table_name = 'users'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER users_profile_picture_file_ref + AFTER UPDATE OF profile_picture ON constructive_users_public.users + FOR EACH ROW + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''profile_picture'', ''constructive_users_public.users'')'; + EXECUTE 'CREATE TRIGGER users_profile_picture_file_delete + BEFORE DELETE ON constructive_users_public.users + FOR EACH ROW + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''profile_picture'', ''constructive_users_public.users'')'; + RAISE NOTICE 'Created triggers for constructive_users_public.users.profile_picture'; + ELSE + RAISE NOTICE 'Skipped triggers for constructive_users_public.users (table not found)'; + END IF; + + -- constructive_status_public.app_levels.image + SELECT 'constructive_status_public.app_levels' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'constructive_status_public' AND table_name = 'app_levels'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER app_levels_image_file_ref + AFTER UPDATE OF image ON constructive_status_public.app_levels + FOR EACH ROW + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''image'', ''constructive_status_public.app_levels'')'; + EXECUTE 'CREATE TRIGGER app_levels_image_file_delete + BEFORE DELETE ON constructive_status_public.app_levels + FOR EACH ROW + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''image'', ''constructive_status_public.app_levels'')'; + RAISE NOTICE 'Created triggers for constructive_status_public.app_levels.image'; + ELSE + RAISE NOTICE 'Skipped triggers for constructive_status_public.app_levels (table not found)'; + END IF; + + -- services_public.sites (og_image, 
apple_touch_icon, logo, favicon) + SELECT 'services_public.sites' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'services_public' AND table_name = 'sites'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER sites_og_image_file_ref + AFTER UPDATE OF og_image ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_og_image_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.sites'')'; + + EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_ref + AFTER UPDATE OF apple_touch_icon ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''apple_touch_icon'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''apple_touch_icon'', ''services_public.sites'')'; + + EXECUTE 'CREATE TRIGGER sites_logo_file_ref + AFTER UPDATE OF logo ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''logo'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_logo_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''logo'', ''services_public.sites'')'; + + EXECUTE 'CREATE TRIGGER sites_favicon_file_ref + AFTER UPDATE OF favicon ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''favicon'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_favicon_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''favicon'', 
''services_public.sites'')'; + RAISE NOTICE 'Created triggers for services_public.sites (og_image, apple_touch_icon, logo, favicon)'; + ELSE + RAISE NOTICE 'Skipped triggers for services_public.sites (table not found)'; + END IF; + + -- services_public.apps.app_image + SELECT 'services_public.apps' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'services_public' AND table_name = 'apps'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER apps_app_image_file_ref + AFTER UPDATE OF app_image ON services_public.apps + FOR EACH ROW + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''app_image'', ''services_public.apps'')'; + EXECUTE 'CREATE TRIGGER apps_app_image_file_delete + BEFORE DELETE ON services_public.apps + FOR EACH ROW + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''app_image'', ''services_public.apps'')'; + RAISE NOTICE 'Created triggers for services_public.apps.app_image'; + ELSE + RAISE NOTICE 'Skipped triggers for services_public.apps (table not found)'; + END IF; + + -- services_public.site_metadata.og_image + SELECT 'services_public.site_metadata' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'services_public' AND table_name = 'site_metadata'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_ref + AFTER UPDATE OF og_image ON services_public.site_metadata + FOR EACH ROW + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.site_metadata'')'; + EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_delete + BEFORE DELETE ON services_public.site_metadata + FOR EACH ROW + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.site_metadata'')'; + RAISE NOTICE 'Created triggers for services_public.site_metadata.og_image'; + ELSE + RAISE NOTICE 'Skipped triggers for services_public.site_metadata (table not found)'; + END IF; + + -- db_migrate.migrate_files.upload + SELECT 
'db_migrate.migrate_files' INTO _tbl
+  FROM information_schema.tables
+  WHERE table_schema = 'db_migrate' AND table_name = 'migrate_files';
+  IF FOUND THEN
+    EXECUTE 'CREATE TRIGGER migrate_files_upload_file_ref
+      AFTER UPDATE OF upload ON db_migrate.migrate_files
+      FOR EACH ROW
+      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''upload'', ''db_migrate.migrate_files'')';
+    EXECUTE 'CREATE TRIGGER migrate_files_upload_file_delete
+      BEFORE DELETE ON db_migrate.migrate_files
+      FOR EACH ROW
+      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''upload'', ''db_migrate.migrate_files'')';
+    RAISE NOTICE 'Created triggers for db_migrate.migrate_files.upload';
+  ELSE
+    RAISE NOTICE 'Skipped triggers for db_migrate.migrate_files (table not found)';
+  END IF;
+END
+$domain_triggers$;
+
+COMMIT;
diff --git a/migrations/object_store.sql b/migrations/object_store.sql
index 658b7169e..169ec8f99 100644
--- a/migrations/object_store.sql
+++ b/migrations/object_store.sql
@@ -1,7 +1,7 @@
 -- =============================================================================
--- Constructive Upload System -- object_store_public schema
+-- Constructive Upload System -- files_store_public schema
 -- =============================================================================
--- Run: psql -h localhost -U postgres -d constructive < migrations/object_store.sql
+-- Run: psql -h localhost -U postgres -d constructive < migrations/object_store.sql
 -- =============================================================================
 
 BEGIN;
@@ -39,13 +39,13 @@ END;
 $$ LANGUAGE plpgsql;
 
 -- Ensure schema exists
-CREATE SCHEMA IF NOT EXISTS object_store_public;
+CREATE SCHEMA IF NOT EXISTS files_store_public;
 
 -- ---------------------------------------------------------------------------
 -- 1. 
Status ENUM -- --------------------------------------------------------------------------- -CREATE TYPE object_store_public.file_status AS ENUM ( +CREATE TYPE files_store_public.file_status AS ENUM ( 'pending', 'processing', 'ready', @@ -53,19 +53,19 @@ CREATE TYPE object_store_public.file_status AS ENUM ( 'deleting' ); -COMMENT ON TYPE object_store_public.file_status IS +COMMENT ON TYPE files_store_public.file_status IS 'Lifecycle states for managed files. Transitions: pending->{processing,error}, processing->{ready,error,deleting}, ready->deleting, error->{deleting,pending(retry)}.'; -- --------------------------------------------------------------------------- -- 2. Files Table -- --------------------------------------------------------------------------- -CREATE TABLE object_store_public.files ( +CREATE TABLE files_store_public.files ( id uuid NOT NULL DEFAULT gen_random_uuid(), database_id integer NOT NULL, bucket_key text NOT NULL DEFAULT 'default', key text NOT NULL, - status object_store_public.file_status NOT NULL DEFAULT 'pending', + status files_store_public.file_status NOT NULL DEFAULT 'pending', status_reason text, etag text, source_table text, @@ -90,28 +90,28 @@ CREATE TABLE object_store_public.files ( ) ); -COMMENT ON TABLE object_store_public.files IS +COMMENT ON TABLE files_store_public.files IS 'Operational index for S3 objects. Each row = one physical S3 object (including generated versions). NOT a source of truth for file metadata -- domain tables own that.'; -COMMENT ON COLUMN object_store_public.files.key IS +COMMENT ON COLUMN files_store_public.files.key IS 'Full S3 object key. Format: {database_id}/{bucket_key}/{uuid}_{version_name}. 
Origin files use _origin suffix.'; -COMMENT ON COLUMN object_store_public.files.etag IS +COMMENT ON COLUMN files_store_public.files.etag IS 'S3 ETag for reconciliation and cache validation.'; -COMMENT ON COLUMN object_store_public.files.status_reason IS +COMMENT ON COLUMN files_store_public.files.status_reason IS 'Human-readable reason for current status (error details, deletion reason).'; -COMMENT ON COLUMN object_store_public.files.processing_started_at IS +COMMENT ON COLUMN files_store_public.files.processing_started_at IS 'Timestamp when processing began. Used to detect stuck jobs (alert at 15 min).'; -COMMENT ON COLUMN object_store_public.files.source_table IS +COMMENT ON COLUMN files_store_public.files.source_table IS 'Schema-qualified table name referencing this file (e.g. constructive_users_public.users). NULL until the domain trigger populates it. Free text -- no FK possible.'; -COMMENT ON COLUMN object_store_public.files.source_column IS +COMMENT ON COLUMN files_store_public.files.source_column IS 'Column name on the source table (e.g. profile_picture). NULL until domain trigger populates it.'; -COMMENT ON COLUMN object_store_public.files.source_id IS +COMMENT ON COLUMN files_store_public.files.source_id IS 'Primary key of the row in the source table. NULL until domain trigger populates it.'; -- --------------------------------------------------------------------------- -- 3. Buckets Table -- --------------------------------------------------------------------------- -CREATE TABLE object_store_public.buckets ( +CREATE TABLE files_store_public.buckets ( id uuid NOT NULL DEFAULT gen_random_uuid(), database_id integer NOT NULL, key text NOT NULL, @@ -128,7 +128,7 @@ CREATE TABLE object_store_public.buckets ( CONSTRAINT buckets_key_format CHECK (key ~ '^[a-z][a-z0-9_-]*$') ); -COMMENT ON TABLE object_store_public.buckets IS +COMMENT ON TABLE files_store_public.buckets IS 'Logical bucket configuration per tenant. 
The bucket key maps to the S3 key prefix segment. is_public controls RLS policy for anonymous reads.'; -- --------------------------------------------------------------------------- @@ -137,38 +137,38 @@ COMMENT ON TABLE object_store_public.buckets IS -- Tenant queries CREATE INDEX files_database_id_idx - ON object_store_public.files (database_id); + ON files_store_public.files (database_id); -- Bucket + tenant queries CREATE INDEX files_bucket_database_id_idx - ON object_store_public.files (bucket_key, database_id); + ON files_store_public.files (bucket_key, database_id); -- "My uploads" queries CREATE INDEX files_created_by_database_id_created_at_idx - ON object_store_public.files (created_by, database_id, created_at DESC); + ON files_store_public.files (created_by, database_id, created_at DESC); -- Back-reference lookups (cleanup worker, attachment queries) CREATE INDEX files_source_ref_idx - ON object_store_public.files (source_table, source_column, source_id); + ON files_store_public.files (source_table, source_column, source_id); -- Pending file reaper (hourly cron) CREATE INDEX files_pending_created_at_idx - ON object_store_public.files (created_at) + ON files_store_public.files (created_at) WHERE status = 'pending'; -- Stuck processing detection CREATE INDEX files_processing_idx - ON object_store_public.files (processing_started_at) + ON files_store_public.files (processing_started_at) WHERE status = 'processing'; -- Deletion job queue CREATE INDEX files_deleting_idx - ON object_store_public.files (updated_at) + ON files_store_public.files (updated_at) WHERE status = 'deleting'; -- Time-range scans on large tables CREATE INDEX files_created_at_brin_idx - ON object_store_public.files USING brin (created_at); + ON files_store_public.files USING brin (created_at); -- --------------------------------------------------------------------------- -- 5. 
Triggers @@ -179,7 +179,7 @@ CREATE INDEX files_created_at_brin_idx -- bypasses this trigger (condition: NEW.status = 'pending'). Only origin -- uploads (status = 'pending') need processing. -CREATE OR REPLACE FUNCTION object_store_public.files_after_insert_queue_processing() +CREATE OR REPLACE FUNCTION files_store_public.files_after_insert_queue_processing() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( @@ -195,17 +195,17 @@ END; $$ LANGUAGE plpgsql; CREATE TRIGGER files_after_insert_queue_processing - AFTER INSERT ON object_store_public.files + AFTER INSERT ON files_store_public.files FOR EACH ROW WHEN (NEW.status = 'pending') - EXECUTE FUNCTION object_store_public.files_after_insert_queue_processing(); + EXECUTE FUNCTION files_store_public.files_after_insert_queue_processing(); -COMMENT ON TRIGGER files_after_insert_queue_processing ON object_store_public.files IS +COMMENT ON TRIGGER files_after_insert_queue_processing ON files_store_public.files IS 'Enqueues process-image job for new origin uploads. Version rows inserted as ready intentionally bypass this trigger -- they do not need processing.'; -- 5b. BEFORE UPDATE -- timestamp + state machine -CREATE OR REPLACE FUNCTION object_store_public.files_before_update_timestamp() +CREATE OR REPLACE FUNCTION files_store_public.files_before_update_timestamp() RETURNS trigger AS $$ BEGIN -- Always update timestamp @@ -235,16 +235,16 @@ END; $$ LANGUAGE plpgsql; CREATE TRIGGER files_before_update_timestamp - BEFORE UPDATE ON object_store_public.files + BEFORE UPDATE ON files_store_public.files FOR EACH ROW - EXECUTE FUNCTION object_store_public.files_before_update_timestamp(); + EXECUTE FUNCTION files_store_public.files_before_update_timestamp(); -COMMENT ON TRIGGER files_before_update_timestamp ON object_store_public.files IS +COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS 'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.'; -- 5c. 
AFTER UPDATE -- enqueue delete_s3_object job -CREATE OR REPLACE FUNCTION object_store_public.files_after_update_queue_deletion() +CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( @@ -261,17 +261,17 @@ END; $$ LANGUAGE plpgsql; CREATE TRIGGER files_after_update_queue_deletion - AFTER UPDATE ON object_store_public.files + AFTER UPDATE ON files_store_public.files FOR EACH ROW WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting') - EXECUTE FUNCTION object_store_public.files_after_update_queue_deletion(); + EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion(); -COMMENT ON TRIGGER files_after_update_queue_deletion ON object_store_public.files IS +COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS 'Enqueues delete_s3_object job when a file transitions to deleting status. Each version row gets its own deletion job.'; -- 5d. AFTER UPDATE -- re-enqueue process-image on error->pending retry -CREATE OR REPLACE FUNCTION object_store_public.files_after_update_queue_retry() +CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_retry() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( @@ -287,25 +287,25 @@ END; $$ LANGUAGE plpgsql; CREATE TRIGGER files_after_update_queue_retry - AFTER UPDATE ON object_store_public.files + AFTER UPDATE ON files_store_public.files FOR EACH ROW WHEN (OLD.status = 'error' AND NEW.status = 'pending') - EXECUTE FUNCTION object_store_public.files_after_update_queue_retry(); + EXECUTE FUNCTION files_store_public.files_after_update_queue_retry(); -COMMENT ON TRIGGER files_after_update_queue_retry ON object_store_public.files IS +COMMENT ON TRIGGER files_after_update_queue_retry ON files_store_public.files IS 'Re-enqueues process-image job when a file is retried (error->pending). 
Without this trigger, the retry would change status but never re-enqueue the processing job.'; -- --------------------------------------------------------------------------- -- 6. RLS Policies & Grants -- --------------------------------------------------------------------------- -ALTER TABLE object_store_public.files ENABLE ROW LEVEL SECURITY; -ALTER TABLE object_store_public.files FORCE ROW LEVEL SECURITY; +ALTER TABLE files_store_public.files ENABLE ROW LEVEL SECURITY; +ALTER TABLE files_store_public.files FORCE ROW LEVEL SECURITY; -- Policy 1: Tenant isolation (RESTRICTIVE -- always ANDed with all other policies) -- Without this being RESTRICTIVE, permissive policies would OR together and -- allow cross-tenant access (e.g. a ready file in tenant 2 visible via files_visibility). -CREATE POLICY files_tenant_isolation ON object_store_public.files +CREATE POLICY files_tenant_isolation ON files_store_public.files AS RESTRICTIVE FOR ALL USING (database_id = current_setting('app.database_id')::integer) @@ -315,7 +315,7 @@ CREATE POLICY files_tenant_isolation ON object_store_public.files -- Non-ready files visible only to the uploader. Uses NULLIF for safe uuid handling -- when app.user_id is missing or empty (returns NULL instead of cast error). -- Scoped to authenticated/service_role so anonymous only gets public_bucket_read. 
-CREATE POLICY files_visibility ON object_store_public.files +CREATE POLICY files_visibility ON files_store_public.files FOR SELECT TO authenticated, service_role USING ( @@ -324,11 +324,11 @@ CREATE POLICY files_visibility ON object_store_public.files ); -- Policy 3: Public bucket read for SELECT (all roles including anonymous) -CREATE POLICY files_public_bucket_read ON object_store_public.files +CREATE POLICY files_public_bucket_read ON files_store_public.files FOR SELECT USING ( EXISTS ( - SELECT 1 FROM object_store_public.buckets b + SELECT 1 FROM files_store_public.buckets b WHERE b.key = bucket_key AND b.database_id = files.database_id AND b.is_public = true @@ -337,14 +337,14 @@ CREATE POLICY files_public_bucket_read ON object_store_public.files ); -- Policy 4: Admin override (all operations, authenticated + service_role) -CREATE POLICY files_admin_override ON object_store_public.files +CREATE POLICY files_admin_override ON files_store_public.files FOR ALL TO authenticated, service_role USING (current_setting('app.role', true) = 'administrator') WITH CHECK (current_setting('app.role', true) = 'administrator'); -- Policy 5: INSERT access (permissive base so non-admin users can insert) -CREATE POLICY files_insert_access ON object_store_public.files +CREATE POLICY files_insert_access ON files_store_public.files FOR INSERT TO authenticated, service_role WITH CHECK (true); @@ -352,7 +352,7 @@ CREATE POLICY files_insert_access ON object_store_public.files -- Policy 6: UPDATE access (replicates visibility for row targeting) -- Non-admin users can only update rows they can see (ready or own). -- Admin override policy covers admin UPDATE access separately. 
-CREATE POLICY files_update_access ON object_store_public.files +CREATE POLICY files_update_access ON files_store_public.files FOR UPDATE TO authenticated, service_role USING ( @@ -362,22 +362,22 @@ CREATE POLICY files_update_access ON object_store_public.files WITH CHECK (true); -- Policy 7: DELETE access (service_role only, grants already restrict authenticated) -CREATE POLICY files_delete_access ON object_store_public.files +CREATE POLICY files_delete_access ON files_store_public.files FOR DELETE TO service_role USING (true); -- Grants -GRANT SELECT, INSERT, UPDATE ON object_store_public.files TO authenticated; -GRANT SELECT, INSERT, UPDATE, DELETE ON object_store_public.files TO service_role; +GRANT SELECT, INSERT, UPDATE ON files_store_public.files TO authenticated; +GRANT SELECT, INSERT, UPDATE, DELETE ON files_store_public.files TO service_role; -COMMENT ON POLICY files_tenant_isolation ON object_store_public.files IS +COMMENT ON POLICY files_tenant_isolation ON files_store_public.files IS 'Every query is scoped to the current tenant via app.database_id session variable.'; -COMMENT ON POLICY files_visibility ON object_store_public.files IS +COMMENT ON POLICY files_visibility ON files_store_public.files IS 'Users see all ready files in their tenant. Non-ready files visible only to the uploader.'; -COMMENT ON POLICY files_public_bucket_read ON object_store_public.files IS +COMMENT ON POLICY files_public_bucket_read ON files_store_public.files IS 'Allows unauthenticated reads on ready files in public buckets.'; -COMMENT ON POLICY files_admin_override ON object_store_public.files IS +COMMENT ON POLICY files_admin_override ON files_store_public.files IS 'Administrators can see and modify all files in the tenant regardless of status or creator.'; -- --------------------------------------------------------------------------- @@ -394,7 +394,7 @@ COMMENT ON POLICY files_admin_override ON object_store_public.files IS -- TG_ARGV[0] = column name (e.g. 
'profile_picture') -- TG_ARGV[1] = schema-qualified table name (e.g. 'constructive_users_public.users') -CREATE OR REPLACE FUNCTION object_store_public.populate_file_back_reference() +CREATE OR REPLACE FUNCTION files_store_public.populate_file_back_reference() RETURNS trigger AS $$ DECLARE col_name text := TG_ARGV[0]; @@ -428,7 +428,7 @@ BEGIN base_key := regexp_replace(old_key, '_[^_]+$', ''); -- Mark old origin + all versions as deleting - UPDATE object_store_public.files + UPDATE files_store_public.files SET status = 'deleting', status_reason = 'replaced by new file' WHERE database_id = db_id AND (key = old_key OR key LIKE base_key || '_%') @@ -441,7 +441,7 @@ BEGIN base_key := regexp_replace(new_key, '_[^_]+$', ''); -- Set back-reference on origin + all version rows - UPDATE object_store_public.files + UPDATE files_store_public.files SET source_table = table_name, source_column = col_name, source_id = NEW.id @@ -453,14 +453,14 @@ BEGIN END; $$ LANGUAGE plpgsql; -COMMENT ON FUNCTION object_store_public.populate_file_back_reference() IS +COMMENT ON FUNCTION files_store_public.populate_file_back_reference() IS 'Generic trigger function for domain tables. Populates source_table/source_column/source_id on files rows when image/upload/attachment columns are updated. Handles file replacement by marking old files as deleting.'; -- 7b. Generic trigger function: source row deletion -- -- When a domain row is deleted, mark all associated files as deleting. 
-CREATE OR REPLACE FUNCTION object_store_public.mark_files_deleting_on_source_delete() +CREATE OR REPLACE FUNCTION files_store_public.mark_files_deleting_on_source_delete() RETURNS trigger AS $$ DECLARE col_name text := TG_ARGV[0]; @@ -470,7 +470,7 @@ BEGIN db_id := current_setting('app.database_id')::integer; -- Mark all files for this source row + column as deleting - UPDATE object_store_public.files + UPDATE files_store_public.files SET status = 'deleting', status_reason = 'source row deleted' WHERE database_id = db_id AND source_table = table_name @@ -482,7 +482,7 @@ BEGIN END; $$ LANGUAGE plpgsql; -COMMENT ON FUNCTION object_store_public.mark_files_deleting_on_source_delete() IS +COMMENT ON FUNCTION files_store_public.mark_files_deleting_on_source_delete() IS 'Generic trigger function for domain tables. Marks all associated files as deleting when a domain row is deleted.'; -- 7c. CREATE TRIGGER statements for all 6 tables, 9 columns @@ -507,11 +507,11 @@ BEGIN EXECUTE 'CREATE TRIGGER users_profile_picture_file_ref AFTER UPDATE OF profile_picture ON constructive_users_public.users FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''profile_picture'', ''constructive_users_public.users'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''profile_picture'', ''constructive_users_public.users'')'; EXECUTE 'CREATE TRIGGER users_profile_picture_file_delete BEFORE DELETE ON constructive_users_public.users FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''profile_picture'', ''constructive_users_public.users'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''profile_picture'', ''constructive_users_public.users'')'; RAISE NOTICE 'Created triggers for constructive_users_public.users.profile_picture'; ELSE RAISE NOTICE 'Skipped triggers for constructive_users_public.users (table not found)'; @@ -525,11 +525,11 @@ BEGIN EXECUTE 'CREATE TRIGGER 
app_levels_image_file_ref AFTER UPDATE OF image ON constructive_status_public.app_levels FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''image'', ''constructive_status_public.app_levels'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''image'', ''constructive_status_public.app_levels'')'; EXECUTE 'CREATE TRIGGER app_levels_image_file_delete BEFORE DELETE ON constructive_status_public.app_levels FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''image'', ''constructive_status_public.app_levels'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''image'', ''constructive_status_public.app_levels'')'; RAISE NOTICE 'Created triggers for constructive_status_public.app_levels.image'; ELSE RAISE NOTICE 'Skipped triggers for constructive_status_public.app_levels (table not found)'; @@ -543,38 +543,38 @@ BEGIN EXECUTE 'CREATE TRIGGER sites_og_image_file_ref AFTER UPDATE OF og_image ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''og_image'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_og_image_file_delete BEFORE DELETE ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_ref AFTER UPDATE OF apple_touch_icon ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''apple_touch_icon'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''apple_touch_icon'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER 
sites_apple_touch_icon_file_delete BEFORE DELETE ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''apple_touch_icon'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''apple_touch_icon'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_logo_file_ref AFTER UPDATE OF logo ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''logo'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''logo'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_logo_file_delete BEFORE DELETE ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''logo'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''logo'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_favicon_file_ref AFTER UPDATE OF favicon ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''favicon'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''favicon'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_favicon_file_delete BEFORE DELETE ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''favicon'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''favicon'', ''services_public.sites'')'; RAISE NOTICE 'Created triggers for services_public.sites (og_image, apple_touch_icon, logo, favicon)'; ELSE RAISE NOTICE 'Skipped triggers for services_public.sites (table not found)'; @@ -588,11 +588,11 @@ BEGIN EXECUTE 'CREATE TRIGGER apps_app_image_file_ref AFTER UPDATE OF app_image ON services_public.apps FOR EACH ROW - EXECUTE FUNCTION 
object_store_public.populate_file_back_reference(''app_image'', ''services_public.apps'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''app_image'', ''services_public.apps'')'; EXECUTE 'CREATE TRIGGER apps_app_image_file_delete BEFORE DELETE ON services_public.apps FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''app_image'', ''services_public.apps'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''app_image'', ''services_public.apps'')'; RAISE NOTICE 'Created triggers for services_public.apps.app_image'; ELSE RAISE NOTICE 'Skipped triggers for services_public.apps (table not found)'; @@ -606,11 +606,11 @@ BEGIN EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_ref AFTER UPDATE OF og_image ON services_public.site_metadata FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''og_image'', ''services_public.site_metadata'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.site_metadata'')'; EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_delete BEFORE DELETE ON services_public.site_metadata FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.site_metadata'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.site_metadata'')'; RAISE NOTICE 'Created triggers for services_public.site_metadata.og_image'; ELSE RAISE NOTICE 'Skipped triggers for services_public.site_metadata (table not found)'; @@ -624,11 +624,11 @@ BEGIN EXECUTE 'CREATE TRIGGER migrate_files_upload_file_ref AFTER UPDATE OF upload ON db_migrate.migrate_files FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''upload'', ''db_migrate.migrate_files'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''upload'', ''db_migrate.migrate_files'')'; EXECUTE 
'CREATE TRIGGER migrate_files_upload_file_delete BEFORE DELETE ON db_migrate.migrate_files FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''upload'', ''db_migrate.migrate_files'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''upload'', ''db_migrate.migrate_files'')'; RAISE NOTICE 'Created triggers for db_migrate.migrate_files.upload'; ELSE RAISE NOTICE 'Skipped triggers for db_migrate.migrate_files (table not found)'; From f5ddd9adcfa016350fbccb466aaa5cb18ccb64b2 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Sat, 14 Mar 2026 13:26:40 +0800 Subject: [PATCH 06/15] app jobs fix --- migrations/__tests__/app-jobs-stub.sql | 20 ++++++++++++ .../__tests__/object-store-lifecycle.test.ts | 3 +- migrations/__tests__/object-store-rls.test.ts | 3 +- migrations/files_store.sql | 32 ++++++++----------- migrations/object_store.sql | 32 ++++++++----------- 5 files changed, 50 insertions(+), 40 deletions(-) create mode 100644 migrations/__tests__/app-jobs-stub.sql diff --git a/migrations/__tests__/app-jobs-stub.sql b/migrations/__tests__/app-jobs-stub.sql new file mode 100644 index 000000000..29989606c --- /dev/null +++ b/migrations/__tests__/app-jobs-stub.sql @@ -0,0 +1,20 @@ +-- Test-only stub for app_jobs.add_job. +-- In production, this is provided by pgpm-database-jobs. +-- This file must be loaded BEFORE files_store.sql in test seeds. 
+ +CREATE SCHEMA IF NOT EXISTS app_jobs; + +CREATE FUNCTION app_jobs.add_job( + identifier text, + payload json DEFAULT '{}'::json, + queue_name text DEFAULT NULL, + run_at timestamptz DEFAULT NULL, + max_attempts integer DEFAULT NULL, + job_key text DEFAULT NULL, + priority integer DEFAULT NULL, + flags text[] DEFAULT NULL +) RETURNS void AS $$ +BEGIN + RAISE NOTICE '[TEST STUB] app_jobs.add_job: % %', identifier, payload; +END; +$$ LANGUAGE plpgsql; diff --git a/migrations/__tests__/object-store-lifecycle.test.ts b/migrations/__tests__/object-store-lifecycle.test.ts index e18cf0aad..49f91a01c 100644 --- a/migrations/__tests__/object-store-lifecycle.test.ts +++ b/migrations/__tests__/object-store-lifecycle.test.ts @@ -4,6 +4,7 @@ import { resolve } from 'path'; import { getConnections, PgTestClient, seed } from 'pgsql-test'; +const APP_JOBS_STUB_PATH = resolve(__dirname, 'app-jobs-stub.sql'); const MIGRATION_PATH = resolve(__dirname, '../files_store.sql'); const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; @@ -44,7 +45,7 @@ async function clearJobLog() { beforeAll(async () => { ({ pg, teardown } = await getConnections( {}, - [seed.sqlfile([MIGRATION_PATH])] + [seed.sqlfile([APP_JOBS_STUB_PATH, MIGRATION_PATH])] )); // Ensure anonymous role exists diff --git a/migrations/__tests__/object-store-rls.test.ts b/migrations/__tests__/object-store-rls.test.ts index 62b7ed40b..f06df2883 100644 --- a/migrations/__tests__/object-store-rls.test.ts +++ b/migrations/__tests__/object-store-rls.test.ts @@ -4,6 +4,7 @@ import { resolve } from 'path'; import { getConnections, PgTestClient, seed } from 'pgsql-test'; +const APP_JOBS_STUB_PATH = resolve(__dirname, 'app-jobs-stub.sql'); const MIGRATION_PATH = resolve(__dirname, '../files_store.sql'); const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; @@ -75,7 +76,7 @@ async function insertFixtures() { beforeAll(async () => { ({ pg, teardown } = await getConnections( {}, - [seed.sqlfile([MIGRATION_PATH])] + 
[seed.sqlfile([APP_JOBS_STUB_PATH, MIGRATION_PATH])] )); // Ensure anonymous role exists (cluster-wide, idempotent) diff --git a/migrations/files_store.sql b/migrations/files_store.sql index 169ec8f99..c3f522200 100644 --- a/migrations/files_store.sql +++ b/migrations/files_store.sql @@ -16,27 +16,21 @@ DO $$ BEGIN END IF; END $$; --- Ensure app_jobs schema + stub add_job exist (required by trigger functions). --- In production, app_jobs is deployed by the database-jobs pgpm module. --- This stub is a no-op that prevents trigger creation from failing in dev. +-- Require app_jobs.add_job to exist (provided by pgpm-database-jobs). +-- Deploy pgpm-database-jobs BEFORE running this migration. +-- DO NOT stub this function here -- CREATE OR REPLACE would silently overwrite +-- the production implementation, causing all trigger-enqueued jobs to be lost. CREATE SCHEMA IF NOT EXISTS app_jobs; -CREATE OR REPLACE FUNCTION app_jobs.add_job( - identifier text, - payload json DEFAULT '{}'::json, - queue_name text DEFAULT NULL, - run_at timestamptz DEFAULT NULL, - max_attempts integer DEFAULT NULL, - job_key text DEFAULT NULL, - priority integer DEFAULT NULL, - flags text[] DEFAULT NULL -) RETURNS void AS $$ -BEGIN - -- Stub: in production this is provided by database-jobs pgpm module. - -- In dev, jobs are enqueued but not processed unless the job worker is running. - RAISE NOTICE 'app_jobs.add_job stub called: % %', identifier, payload; -END; -$$ LANGUAGE plpgsql; +DO $$ BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_proc p + JOIN pg_namespace n ON p.pronamespace = n.oid + WHERE n.nspname = 'app_jobs' AND p.proname = 'add_job' + ) THEN + RAISE EXCEPTION 'app_jobs.add_job not found. 
Deploy pgpm-database-jobs before running this migration.'; + END IF; +END $$; -- Ensure schema exists CREATE SCHEMA IF NOT EXISTS files_store_public; diff --git a/migrations/object_store.sql b/migrations/object_store.sql index 169ec8f99..c3f522200 100644 --- a/migrations/object_store.sql +++ b/migrations/object_store.sql @@ -16,27 +16,21 @@ DO $$ BEGIN END IF; END $$; --- Ensure app_jobs schema + stub add_job exist (required by trigger functions). --- In production, app_jobs is deployed by the database-jobs pgpm module. --- This stub is a no-op that prevents trigger creation from failing in dev. +-- Require app_jobs.add_job to exist (provided by pgpm-database-jobs). +-- Deploy pgpm-database-jobs BEFORE running this migration. +-- DO NOT stub this function here -- CREATE OR REPLACE would silently overwrite +-- the production implementation, causing all trigger-enqueued jobs to be lost. CREATE SCHEMA IF NOT EXISTS app_jobs; -CREATE OR REPLACE FUNCTION app_jobs.add_job( - identifier text, - payload json DEFAULT '{}'::json, - queue_name text DEFAULT NULL, - run_at timestamptz DEFAULT NULL, - max_attempts integer DEFAULT NULL, - job_key text DEFAULT NULL, - priority integer DEFAULT NULL, - flags text[] DEFAULT NULL -) RETURNS void AS $$ -BEGIN - -- Stub: in production this is provided by database-jobs pgpm module. - -- In dev, jobs are enqueued but not processed unless the job worker is running. - RAISE NOTICE 'app_jobs.add_job stub called: % %', identifier, payload; -END; -$$ LANGUAGE plpgsql; +DO $$ BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_proc p + JOIN pg_namespace n ON p.pronamespace = n.oid + WHERE n.nspname = 'app_jobs' AND p.proname = 'add_job' + ) THEN + RAISE EXCEPTION 'app_jobs.add_job not found. 
Deploy pgpm-database-jobs before running this migration.'; + END IF; +END $$; -- Ensure schema exists CREATE SCHEMA IF NOT EXISTS files_store_public; From f6a70413639cfe3259bf290cec62c653a1357bb5 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Sat, 14 Mar 2026 13:38:55 +0800 Subject: [PATCH 07/15] Job identifier mismatch --- migrations/__tests__/object-store-lifecycle.test.ts | 8 ++++---- migrations/files_store.sql | 6 +++--- migrations/object_store.sql | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/migrations/__tests__/object-store-lifecycle.test.ts b/migrations/__tests__/object-store-lifecycle.test.ts index 49f91a01c..83d827c5f 100644 --- a/migrations/__tests__/object-store-lifecycle.test.ts +++ b/migrations/__tests__/object-store-lifecycle.test.ts @@ -429,7 +429,7 @@ describe('E2E-03: Deletion Flow', () => { await pg.afterEach(); }); - it('ready → deleting queues delete_s3_object job', async () => { + it('ready → deleting queues delete-s3-object job', async () => { await pg.query(` INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'ready') @@ -443,7 +443,7 @@ describe('E2E-03: Deletion Flow', () => { const jobs = await getJobLog(); expect(jobs).toHaveLength(1); - expect(jobs[0].identifier).toBe('delete_s3_object'); + expect(jobs[0].identifier).toBe('delete-s3-object'); expect(jobs[0].job_key).toBe(`delete:${ORIGIN_ID}`); expect(jobs[0].payload.key).toBe(ORIGIN_KEY); }); @@ -489,7 +489,7 @@ describe('E2E-03: Deletion Flow', () => { const jobs = await getJobLog(); expect(jobs).toHaveLength(1); - expect(jobs[0].identifier).toBe('delete_s3_object'); + expect(jobs[0].identifier).toBe('delete-s3-object'); }); it('service_role can hard-DELETE after marking as deleting', async () => { @@ -848,7 +848,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { await pg.query('RESET ROLE'); jobs = await getJobLog(); expect(jobs).toHaveLength(3); - 
expect(jobs.every((j: any) => j.identifier === 'delete_s3_object')).toBe(true); + expect(jobs.every((j: any) => j.identifier === 'delete-s3-object')).toBe(true); const deletedKeys = jobs.map((j: any) => j.payload.key).sort(); expect(deletedKeys).toEqual([LARGE_KEY, ORIGIN_KEY, THUMB_KEY]); diff --git a/migrations/files_store.sql b/migrations/files_store.sql index c3f522200..76a6a8026 100644 --- a/migrations/files_store.sql +++ b/migrations/files_store.sql @@ -236,13 +236,13 @@ CREATE TRIGGER files_before_update_timestamp COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS 'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.'; --- 5c. AFTER UPDATE -- enqueue delete_s3_object job +-- 5c. AFTER UPDATE -- enqueue delete-s3-object job CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( - 'delete_s3_object', + 'delete-s3-object', json_build_object( 'file_id', NEW.id, 'database_id', NEW.database_id, @@ -261,7 +261,7 @@ CREATE TRIGGER files_after_update_queue_deletion EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion(); COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS - 'Enqueues delete_s3_object job when a file transitions to deleting status. Each version row gets its own deletion job.'; + 'Enqueues delete-s3-object job when a file transitions to deleting status. Each version row gets its own deletion job.'; -- 5d. 
AFTER UPDATE -- re-enqueue process-image on error->pending retry diff --git a/migrations/object_store.sql b/migrations/object_store.sql index c3f522200..76a6a8026 100644 --- a/migrations/object_store.sql +++ b/migrations/object_store.sql @@ -236,13 +236,13 @@ CREATE TRIGGER files_before_update_timestamp COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS 'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.'; --- 5c. AFTER UPDATE -- enqueue delete_s3_object job +-- 5c. AFTER UPDATE -- enqueue delete-s3-object job CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( - 'delete_s3_object', + 'delete-s3-object', json_build_object( 'file_id', NEW.id, 'database_id', NEW.database_id, @@ -261,7 +261,7 @@ CREATE TRIGGER files_after_update_queue_deletion EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion(); COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS - 'Enqueues delete_s3_object job when a file transitions to deleting status. Each version row gets its own deletion job.'; + 'Enqueues delete-s3-object job when a file transitions to deleting status. Each version row gets its own deletion job.'; -- 5d. 
AFTER UPDATE -- re-enqueue process-image on error->pending retry

From 443a56bd16df6564f46ca62f87b68cb14b4ed2e7 Mon Sep 17 00:00:00 2001
From: zetazzz
Date: Sat, 14 Mar 2026 14:03:00 +0800
Subject: [PATCH 08/15] Cron scheduling implemented

---
 migrations/__tests__/file-cleanup.test.ts | 370 ++++++++++++++++++++++
 migrations/files_store.sql                |  75 +++++
 migrations/object_store.sql               |  75 +++++
 3 files changed, 520 insertions(+)
 create mode 100644 migrations/__tests__/file-cleanup.test.ts

diff --git a/migrations/__tests__/file-cleanup.test.ts b/migrations/__tests__/file-cleanup.test.ts
new file mode 100644
index 000000000..95502ac47
--- /dev/null
+++ b/migrations/__tests__/file-cleanup.test.ts
@@ -0,0 +1,370 @@
+jest.setTimeout(60000);
+
+import { resolve } from 'path';
+
+import { getConnections, PgTestClient, seed } from 'pgsql-test';
+
+const APP_JOBS_STUB_PATH = resolve(__dirname, 'app-jobs-stub.sql');
+const MIGRATION_PATH = resolve(__dirname, '../files_store.sql');
+
+const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001';
+
+let pg: PgTestClient;
+let teardown: () => Promise<void>;
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+/** Read all recorded jobs from the job_log table */
+async function getJobLog() {
+  const result = await pg.query(
+    'SELECT identifier, payload, job_key FROM _test_job_log ORDER BY logged_at'
+  );
+  return result.rows;
+}
+
+async function clearJobLog() {
+  await pg.query('DELETE FROM _test_job_log');
+}
+
+// ---------------------------------------------------------------------------
+// Setup
+// ---------------------------------------------------------------------------
+
+beforeAll(async () => {
+  ({ pg, teardown } = await getConnections(
+    {},
+    [seed.sqlfile([APP_JOBS_STUB_PATH, MIGRATION_PATH])]
+  ));
+
+  // Ensure anonymous role exists
+  await pg.query(`
+    DO $$ BEGIN
+      IF NOT EXISTS (SELECT 1 FROM pg_roles
WHERE rolname = 'anonymous') THEN + CREATE ROLE anonymous NOLOGIN; + END IF; + END $$ + `); + + // Grants needed for isolated test + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO authenticated'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO service_role'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO authenticated'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO service_role'); + + // Replace the app_jobs.add_job stub with one that records calls + await pg.query(` + CREATE TABLE _test_job_log ( + logged_at timestamptz NOT NULL DEFAULT now(), + identifier text NOT NULL, + payload json, + job_key text + ) + `); + + await pg.query(` + CREATE OR REPLACE FUNCTION app_jobs.add_job( + identifier text, + payload json DEFAULT '{}'::json, + queue_name text DEFAULT NULL, + run_at timestamptz DEFAULT NULL, + max_attempts integer DEFAULT NULL, + job_key text DEFAULT NULL, + priority integer DEFAULT NULL, + flags text[] DEFAULT NULL + ) RETURNS void AS $$ + BEGIN + INSERT INTO _test_job_log (identifier, payload, job_key) + VALUES (identifier, payload, job_key); + END; + $$ LANGUAGE plpgsql + `); + + await pg.query('GRANT USAGE ON SCHEMA app_jobs TO authenticated, service_role'); + await pg.query('GRANT EXECUTE ON FUNCTION app_jobs.add_job(text, json, text, timestamptz, integer, text, integer, text[]) TO authenticated, service_role'); + await pg.query('GRANT INSERT ON _test_job_log TO authenticated, service_role'); + + // Seed a default bucket + await pg.query(` + INSERT INTO files_store_public.buckets (database_id, key, name, is_public, config) + VALUES (1, 'default', 'Default Bucket', false, '{}') + `); +}); + +afterAll(async () => { + await teardown(); +}); + +// ========================================================================== +// Cleanup-01: pending_reaper -- pending → error (valid transition) +// ========================================================================== + +describe('Cleanup-01: 
pending_reaper', () => { + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('marks stale pending files as error', async () => { + // Insert a pending file with created_at older than 24 hours + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status, created_at) + VALUES + ('c1000000-0000-0000-0000-000000000001', 1, '1/default/stale_pending', 'default', $1, 'etag1', 'pending', now() - interval '25 hours') + `, [USER_A]); + await clearJobLog(); + + // Run the cleanup query directly (simulates what the handler does) + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'error', status_reason = 'upload timeout' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'pending' AND created_at < now() - interval '24 hours' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(1); + + // Verify the file is now in error status + const file = await pg.query( + "SELECT status, status_reason FROM files_store_public.files WHERE id = 'c1000000-0000-0000-0000-000000000001'" + ); + expect(file.rows[0].status).toBe('error'); + expect(file.rows[0].status_reason).toBe('upload timeout'); + }); + + it('does not affect recent pending files', async () => { + // Insert a pending file with recent created_at + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES + ('c1000000-0000-0000-0000-000000000002', 1, '1/default/recent_pending', 'default', $1, 'etag2', 'pending') + `, [USER_A]); + + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'error', status_reason = 'upload timeout' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'pending' AND created_at < now() - interval '24 hours' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(0); + + // File should 
still be pending + const file = await pg.query( + "SELECT status FROM files_store_public.files WHERE id = 'c1000000-0000-0000-0000-000000000002'" + ); + expect(file.rows[0].status).toBe('pending'); + }); +}); + +// ========================================================================== +// Cleanup-02: error_cleanup -- error → deleting (valid transition) +// ========================================================================== + +describe('Cleanup-02: error_cleanup', () => { + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('marks old error files as deleting', async () => { + // Insert an error file with updated_at older than 30 days + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status, updated_at) + VALUES + ('c2000000-0000-0000-0000-000000000001', 1, '1/default/old_error', 'default', $1, 'etag1', 'error', now() - interval '31 days') + `, [USER_A]); + await clearJobLog(); + + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'expired error' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'error' AND updated_at < now() - interval '30 days' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(1); + + const file = await pg.query( + "SELECT status, status_reason FROM files_store_public.files WHERE id = 'c2000000-0000-0000-0000-000000000001'" + ); + expect(file.rows[0].status).toBe('deleting'); + expect(file.rows[0].status_reason).toBe('expired error'); + + // Verify the delete-s3-object job was auto-enqueued by the trigger + const jobs = await getJobLog(); + expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('delete-s3-object'); + }); + + it('does not affect recent error files', async () => { + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, 
status) + VALUES + ('c2000000-0000-0000-0000-000000000002', 1, '1/default/recent_error', 'default', $1, 'etag2', 'error') + `, [USER_A]); + + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'expired error' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'error' AND updated_at < now() - interval '30 days' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// Cleanup-03: unattached_cleanup -- ready → deleting (valid transition) +// This is the ISSUE-006 fix regression test. +// ========================================================================== + +describe('Cleanup-03: unattached_cleanup', () => { + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('marks unattached ready files as deleting (not error)', async () => { + // Insert a ready file with no source_table, older than 7 days + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status, created_at) + VALUES + ('c3000000-0000-0000-0000-000000000001', 1, '1/default/unattached', 'default', $1, 'etag1', 'ready', now() - interval '8 days') + `, [USER_A]); + await clearJobLog(); + + // Run the FIXED cleanup query (ready → deleting, NOT ready → error) + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'never attached' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'ready' AND source_table IS NULL AND created_at < now() - interval '7 days' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(1); + + const file = await pg.query( + "SELECT status, status_reason FROM files_store_public.files WHERE id = 'c3000000-0000-0000-0000-000000000001'" + ); + 
expect(file.rows[0].status).toBe('deleting'); + expect(file.rows[0].status_reason).toBe('never attached'); + + // Verify the delete-s3-object job was auto-enqueued + const jobs = await getJobLog(); + expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('delete-s3-object'); + }); + + it('ready → error is rejected by state machine (regression for ISSUE-006)', async () => { + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status, created_at) + VALUES + ('c3000000-0000-0000-0000-000000000002', 1, '1/default/unattached2', 'default', $1, 'etag2', 'ready', now() - interval '8 days') + `, [USER_A]); + + // The OLD buggy query (ready → error) should be rejected + await expect( + pg.query(` + UPDATE files_store_public.files + SET status = 'error', status_reason = 'never attached' + WHERE id = 'c3000000-0000-0000-0000-000000000002' + `) + ).rejects.toThrow(/Invalid status transition from ready to error/); + }); + + it('does not affect attached ready files', async () => { + // Insert a ready file WITH source_table (attached) + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status, created_at, + source_table, source_column, source_id) + VALUES + ('c3000000-0000-0000-0000-000000000003', 1, '1/default/attached', 'default', $1, 'etag3', 'ready', + now() - interval '8 days', 'some_schema.some_table', 'image', gen_random_uuid()) + `, [USER_A]); + + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'never attached' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'ready' AND source_table IS NULL AND created_at < now() - interval '7 days' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(0); + }); + + it('does not affect recent unattached files', async () => { + // Insert a ready file with no source_table but recent created_at + await pg.query(` + INSERT INTO 
files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES + ('c3000000-0000-0000-0000-000000000004', 1, '1/default/recent_unattached', 'default', $1, 'etag4', 'ready') + `, [USER_A]); + + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'never attached' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'ready' AND source_table IS NULL AND created_at < now() - interval '7 days' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// Cleanup-04: Scheduled job registration +// ========================================================================== + +describe('Cleanup-04: Scheduled job registration', () => { + it('migration registers file-cleanup scheduled jobs when metaschema is present', async () => { + // The migration's cron block looks up metaschema_public.database. + // In isolated test DBs this table doesn't exist, so scheduled jobs + // are not registered (the block skips silently). This test verifies + // the skip path doesn't error. + // + // To test actual registration, we'd need to deploy metaschema first. + // Instead, we verify the schedule SQL is syntactically valid by checking + // it didn't abort the migration transaction. + const result = await pg.query( + "SELECT COUNT(*) as cnt FROM files_store_public.files WHERE 1=0" + ); + // If migration committed successfully, table exists + expect(result.rows[0].cnt).toBe('0'); + }); +}); diff --git a/migrations/files_store.sql b/migrations/files_store.sql index 76a6a8026..d98a2c042 100644 --- a/migrations/files_store.sql +++ b/migrations/files_store.sql @@ -630,4 +630,79 @@ BEGIN END $domain_triggers$; +-- --------------------------------------------------------------------------- +-- 8. 
Scheduled cleanup jobs (requires pgpm-database-jobs with scheduling) +-- --------------------------------------------------------------------------- +-- Register recurring file-cleanup jobs via app_jobs.add_scheduled_job. +-- The scheduler (knative-job-service) picks these up and spawns one-shot jobs +-- on the configured schedule. Each job calls the file-cleanup function with +-- the appropriate cleanup type. +-- +-- Schedules: +-- pending_reaper: every hour (clear stale pending uploads) +-- error_cleanup: daily at 03:00 UTC (expire old error files) +-- unattached_cleanup: daily at 04:00 UTC (clean unattached ready files) +-- --------------------------------------------------------------------------- + +DO $cron$ +DECLARE + v_db_id uuid; +BEGIN + -- Look up the database ID for the current database. + -- If metaschema_public.database is not deployed yet, skip silently. + BEGIN + SELECT id INTO v_db_id + FROM metaschema_public.database + ORDER BY created_at + LIMIT 1; + EXCEPTION WHEN undefined_table THEN + RAISE NOTICE 'metaschema_public.database not found, skipping scheduled job registration.'; + RETURN; + END; + + IF v_db_id IS NULL THEN + RAISE NOTICE 'No database row found, skipping scheduled job registration.'; + RETURN; + END IF; + + -- pending_reaper: every hour (minute 0) + PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"pending_reaper"}'::json, + schedule_info := '{"minute": 0}'::json, + job_key := 'file-cleanup:pending_reaper', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + -- error_cleanup: daily at 03:00 UTC + PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"error_cleanup"}'::json, + schedule_info := '{"hour": 3, "minute": 0}'::json, + job_key := 'file-cleanup:error_cleanup', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + -- unattached_cleanup: daily at 04:00 UTC + 
PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"unattached_cleanup"}'::json, + schedule_info := '{"hour": 4, "minute": 0}'::json, + job_key := 'file-cleanup:unattached_cleanup', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + RAISE NOTICE 'Registered 3 file-cleanup scheduled jobs for database %', v_db_id; +END +$cron$; + COMMIT; diff --git a/migrations/object_store.sql b/migrations/object_store.sql index 76a6a8026..d98a2c042 100644 --- a/migrations/object_store.sql +++ b/migrations/object_store.sql @@ -630,4 +630,79 @@ BEGIN END $domain_triggers$; +-- --------------------------------------------------------------------------- +-- 8. Scheduled cleanup jobs (requires pgpm-database-jobs with scheduling) +-- --------------------------------------------------------------------------- +-- Register recurring file-cleanup jobs via app_jobs.add_scheduled_job. +-- The scheduler (knative-job-service) picks these up and spawns one-shot jobs +-- on the configured schedule. Each job calls the file-cleanup function with +-- the appropriate cleanup type. +-- +-- Schedules: +-- pending_reaper: every hour (clear stale pending uploads) +-- error_cleanup: daily at 03:00 UTC (expire old error files) +-- unattached_cleanup: daily at 04:00 UTC (clean unattached ready files) +-- --------------------------------------------------------------------------- + +DO $cron$ +DECLARE + v_db_id uuid; +BEGIN + -- Look up the database ID for the current database. + -- If metaschema_public.database is not deployed yet, skip silently. 
+ BEGIN + SELECT id INTO v_db_id + FROM metaschema_public.database + ORDER BY created_at + LIMIT 1; + EXCEPTION WHEN undefined_table THEN + RAISE NOTICE 'metaschema_public.database not found, skipping scheduled job registration.'; + RETURN; + END; + + IF v_db_id IS NULL THEN + RAISE NOTICE 'No database row found, skipping scheduled job registration.'; + RETURN; + END IF; + + -- pending_reaper: every hour (minute 0) + PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"pending_reaper"}'::json, + schedule_info := '{"minute": 0}'::json, + job_key := 'file-cleanup:pending_reaper', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + -- error_cleanup: daily at 03:00 UTC + PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"error_cleanup"}'::json, + schedule_info := '{"hour": 3, "minute": 0}'::json, + job_key := 'file-cleanup:error_cleanup', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + -- unattached_cleanup: daily at 04:00 UTC + PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"unattached_cleanup"}'::json, + schedule_info := '{"hour": 4, "minute": 0}'::json, + job_key := 'file-cleanup:unattached_cleanup', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + RAISE NOTICE 'Registered 3 file-cleanup scheduled jobs for database %', v_db_id; +END +$cron$; + COMMIT; From 3a80ebcd0c33b29b3eed63591c092117a61c5b4a Mon Sep 17 00:00:00 2001 From: zetazzz Date: Sat, 14 Mar 2026 14:33:07 +0800 Subject: [PATCH 09/15] remove missleading flag --- .../__tests__/upload-resolver.e2e.test.ts | 1 - .../__tests__/upload-resolver.test.ts | 121 +-- .../graphile-settings/src/upload-resolver.ts | 159 +--- graphql/explorer/src/resolvers/uploads.ts | 32 +- migrations/object_store.sql | 708 ------------------ 
uploads/s3-streamer/src/index.ts | 1 + 6 files changed, 129 insertions(+), 893 deletions(-) delete mode 100644 migrations/object_store.sql diff --git a/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts index a06dd603e..e9b6c30fd 100644 --- a/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts +++ b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts @@ -101,7 +101,6 @@ describe('upload-resolver e2e', () => { const uploadedKeys = new Set(); beforeAll(async () => { - process.env.UPLOAD_V2_ENABLED = 'true'; process.env.BUCKET_PROVIDER = 'minio'; process.env.BUCKET_NAME = BUCKET; process.env.AWS_REGION = 'us-east-1'; diff --git a/graphile/graphile-settings/__tests__/upload-resolver.test.ts b/graphile/graphile-settings/__tests__/upload-resolver.test.ts index d7f0d1caa..349b4a36f 100644 --- a/graphile/graphile-settings/__tests__/upload-resolver.test.ts +++ b/graphile/graphile-settings/__tests__/upload-resolver.test.ts @@ -1,31 +1,29 @@ import { Readable } from 'stream'; -interface MockUploadResult { - upload: { Location: string }; - contentType: string; -} - async function loadUploadResolverModule(opts: { detectedContentType: string; - uploadResultContentType?: string; }) { jest.resetModules(); - const mockDetectContentType = jest.fn().mockResolvedValue({ + const mockStreamContentType = jest.fn().mockResolvedValue({ stream: Readable.from([Buffer.alloc(16)]), magic: { type: opts.detectedContentType, charset: 'binary' }, contentType: opts.detectedContentType, }); - const mockUploadWithContentType = jest.fn().mockResolvedValue({ - upload: { Location: 'https://cdn.example.com/uploaded-file' }, - contentType: opts.uploadResultContentType ?? 
opts.detectedContentType, - } as MockUploadResult); + const mockUpload = jest.fn().mockResolvedValue({ etag: 'test-etag' }); + const mockPresignGet = jest.fn().mockResolvedValue('https://cdn.example.com/signed-url'); - const mockUpload = jest.fn().mockResolvedValue({ - upload: { Location: 'https://cdn.example.com/storage-upload' }, - contentType: 'application/octet-stream', - } as MockUploadResult); + const MockS3StorageProvider = jest.fn().mockImplementation(() => ({ + upload: mockUpload, + presignGet: mockPresignGet, + })); + + const mockPoolQuery = jest.fn().mockResolvedValue({ rows: [], rowCount: 0 }); + const MockPool = jest.fn().mockImplementation(() => ({ + query: mockPoolQuery, + end: jest.fn(), + })); jest.doMock('@constructive-io/graphql-env', () => ({ getEnvOptions: jest.fn(() => ({ @@ -40,25 +38,24 @@ async function loadUploadResolverModule(opts: { })), })); - jest.doMock('@constructive-io/s3-streamer', () => { - const StreamerMock = jest.fn().mockImplementation(() => ({ - upload: mockUpload, - uploadWithContentType: mockUploadWithContentType, - detectContentType: mockDetectContentType, - })); - return { - __esModule: true, - default: StreamerMock, - }; - }); + jest.doMock('@constructive-io/s3-streamer', () => ({ + __esModule: true, + S3StorageProvider: MockS3StorageProvider, + streamContentType: mockStreamContentType, + })); + + jest.doMock('pg', () => ({ + Pool: MockPool, + })); const mod = await import('../src/upload-resolver'); return { ...mod, - mockDetectContentType, - mockUploadWithContentType, + mockStreamContentType, mockUpload, + mockPresignGet, + mockPoolQuery, }; } @@ -69,12 +66,21 @@ function makeFakeUpload(filename: string) { }; } +function makeFakeContext(databaseId?: string, userId?: string) { + return { + req: { + api: { databaseId }, + token: { user_id: userId }, + }, + }; +} + describe('uploadResolver MIME validation', () => { it('rejects disallowed MIME before uploading to storage', async () => { const { 
constructiveUploadFieldDefinitions, - mockDetectContentType, - mockUploadWithContentType, + mockStreamContentType, + mockUpload, } = await loadUploadResolverModule({ detectedContentType: 'application/pdf', }); @@ -92,23 +98,23 @@ describe('uploadResolver MIME validation', () => { imageDef.resolve( fakeUpload as any, {}, - {}, + makeFakeContext('1'), { uploadPlugin: { tags: {}, type: 'image' } }, ), ).rejects.toThrow('UPLOAD_MIMETYPE'); - expect(mockDetectContentType).toHaveBeenCalledTimes(1); - expect(mockUploadWithContentType).not.toHaveBeenCalled(); + expect(mockStreamContentType).toHaveBeenCalledTimes(1); + expect(mockUpload).not.toHaveBeenCalled(); }); it('uploads and returns image metadata when MIME is allowed', async () => { const { constructiveUploadFieldDefinitions, - mockDetectContentType, - mockUploadWithContentType, + mockStreamContentType, + mockUpload, + mockPresignGet, } = await loadUploadResolverModule({ detectedContentType: 'image/png', - uploadResultContentType: 'image/png', }); const imageDef = constructiveUploadFieldDefinitions.find( @@ -123,21 +129,44 @@ describe('uploadResolver MIME validation', () => { const result = await imageDef.resolve( fakeUpload as any, {}, - {}, + makeFakeContext('1', 'user-123'), { uploadPlugin: { tags: {}, type: 'image' } }, ); - expect(result).toEqual({ - filename: 'photo.png', - mime: 'image/png', - url: 'https://cdn.example.com/uploaded-file', - }); - expect(mockDetectContentType).toHaveBeenCalledTimes(1); - expect(mockUploadWithContentType).toHaveBeenCalledTimes(1); - expect(mockUploadWithContentType).toHaveBeenCalledWith( + expect(result).toEqual( expect.objectContaining({ - contentType: 'image/png', + filename: 'photo.png', + mime: 'image/png', + url: 'https://cdn.example.com/signed-url', + key: expect.stringMatching(/^1\/default\/[0-9a-f-]+_origin$/), }), ); + expect(mockStreamContentType).toHaveBeenCalledTimes(1); + expect(mockUpload).toHaveBeenCalledTimes(1); + expect(mockPresignGet).toHaveBeenCalledTimes(1); 
+ }); + + it('throws when databaseId is missing', async () => { + const { constructiveUploadFieldDefinitions } = await loadUploadResolverModule({ + detectedContentType: 'image/png', + }); + + const imageDef = constructiveUploadFieldDefinitions.find( + (def) => 'name' in def && def.name === 'image', + ); + if (!imageDef) { + throw new Error('Missing image upload field definition'); + } + + const fakeUpload = makeFakeUpload('photo.png'); + + await expect( + imageDef.resolve( + fakeUpload as any, + {}, + {}, // no databaseId + { uploadPlugin: { tags: {}, type: 'image' } }, + ), + ).rejects.toThrow('databaseId is required'); }); }); diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index 2e5d08647..a60a1704b 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -4,18 +4,16 @@ * Reads CDN/S3/MinIO configuration from environment variables (via getEnvOptions) * and streams uploaded files to the configured storage backend. * - * Lazily initializes the S3 streamer on first upload to avoid requiring + * Lazily initializes the S3 storage provider on first upload to avoid requiring * env vars at module load time. * - * V2 mode (UPLOAD_V2_ENABLED=true): - * - Key format: {database_id}/{bucket_key}/{uuid}_origin - * - INSERT into files_store_public.files after S3 upload - * - Returns { key, url, mime, filename } for image/upload types + * Key format: {database_id}/{bucket_key}/{uuid}_origin + * INSERTs into files_store_public.files after S3 upload. + * The AFTER INSERT trigger enqueues a process-image job automatically. 
* - * Legacy mode (UPLOAD_V2_ENABLED=false, default): - * - Key format: {random24hex}-{sanitized-filename} - * - No files table INSERT - * - Returns { url, mime, filename } for image/upload types + * Callers must associate the returned metadata with a domain table row via a + * GraphQL mutation; the domain trigger automatically populates source_* fields; + * files not associated within 7 days are cleaned up by unattached_cleanup cron. * * ENV VARS: * BUCKET_PROVIDER - 'minio' | 's3' (default: 'minio') @@ -24,16 +22,14 @@ * AWS_ACCESS_KEY - access key (default: 'minioadmin') * AWS_SECRET_KEY - secret key (default: 'minioadmin') * MINIO_ENDPOINT - MinIO endpoint (default: 'http://localhost:9000') - * UPLOAD_V2_ENABLED - enable v2 upload with files index (default: 'false') */ -import Streamer from '@constructive-io/s3-streamer'; -import { S3StorageProvider } from '@constructive-io/s3-streamer'; +import { S3StorageProvider, streamContentType } from '@constructive-io/s3-streamer'; import type { StorageProvider } from '@constructive-io/s3-streamer'; import uploadNames from '@constructive-io/upload-names'; import { getEnvOptions } from '@constructive-io/graphql-env'; import { Logger } from '@pgpmjs/logger'; -import { randomBytes, randomUUID } from 'crypto'; +import { randomUUID } from 'crypto'; import { Pool } from 'pg'; import type { Readable } from 'stream'; import type { @@ -45,14 +41,10 @@ import type { const log = new Logger('upload-resolver'); const DEFAULT_IMAGE_MIME_TYPES = ['image/jpeg', 'image/png', 'image/svg+xml']; -let streamer: Streamer | null = null; let storageProvider: StorageProvider | null = null; let bucketName: string; let pgPool: Pool | null = null; -const isV2Enabled = (): boolean => - process.env.UPLOAD_V2_ENABLED === 'true' || process.env.UPLOAD_V2_ENABLED === '1'; - function getCdnConfig() { const opts = getEnvOptions(); const cdn = opts.cdn || {}; @@ -66,8 +58,8 @@ function getCdnConfig() { }; } -function getStreamer(): Streamer { - if 
(streamer) return streamer; +function getStorageProvider(): StorageProvider { + if (storageProvider) return storageProvider; const cdn = getCdnConfig(); bucketName = cdn.bucketName; @@ -82,24 +74,6 @@ function getStreamer(): Streamer { `[upload-resolver] Initializing: provider=${cdn.provider} bucket=${bucketName}`, ); - streamer = new Streamer({ - defaultBucket: bucketName, - awsRegion: cdn.awsRegion, - awsSecretKey: cdn.awsSecretKey, - awsAccessKey: cdn.awsAccessKey, - minioEndpoint: cdn.minioEndpoint, - provider: cdn.provider, - }); - - return streamer; -} - -function getStorageProvider(): StorageProvider { - if (storageProvider) return storageProvider; - - const cdn = getCdnConfig(); - bucketName = cdn.bucketName; - storageProvider = new S3StorageProvider({ bucket: cdn.bucketName, awsRegion: cdn.awsRegion, @@ -125,15 +99,6 @@ function getPgPool(): Pool { return pgPool; } -/** - * Generates a randomized storage key from a filename (legacy format). - * Format: {random24hex}-{sanitized-filename} - */ -function generateLegacyKey(filename: string): string { - const rand = randomBytes(12).toString('hex'); - return `${rand}-${uploadNames(filename)}`; -} - /** * Generates a v2 storage key. * Format: {database_id}/{bucket_key}/{uuid}_origin @@ -169,7 +134,6 @@ async function insertFileRecord( * In PostGraphile, context contains the Express request. */ function extractContextInfo(context: any): { databaseId: string | null; userId: string | null } { - // PostGraphile v5 stores the request on context const req = context?.req || context?.request; const databaseId = req?.api?.databaseId || req?.databaseId || null; const userId = req?.token?.user_id || null; @@ -180,53 +144,34 @@ function extractContextInfo(context: any): { databaseId: string | null; userId: * Streams a file to S3/MinIO storage and returns the URL and metadata. * * Reusable by both the GraphQL upload resolver and REST /upload endpoint. 
- * - * When UPLOAD_V2_ENABLED, uses the new key format and INSERTs a files row. */ export async function streamToStorage( readStream: Readable, filename: string, opts?: { databaseId?: string; userId?: string; bucketKey?: string }, ): Promise<{ url: string; filename: string; mime: string; key?: string }> { - if (isV2Enabled() && opts?.databaseId) { - const storage = getStorageProvider(); - const bucketKey = opts.bucketKey || 'default'; - const { key, fileId } = generateV2Key(opts.databaseId, bucketKey); + const storage = getStorageProvider(); + const bucketKey = opts?.bucketKey || 'default'; + const databaseId = opts?.databaseId; - const s3 = getStreamer(); - const detected = await s3.detectContentType({ readStream, filename }); - const contentType = detected.contentType; + if (!databaseId) { + throw new Error('[upload-resolver] databaseId is required for file uploads'); + } - const result = await storage.upload(key, detected.stream, { contentType }); + const { key, fileId } = generateV2Key(databaseId, bucketKey); - await insertFileRecord(fileId, opts.databaseId, bucketKey, key, result.etag, opts.userId || null); + const detected = await streamContentType({ readStream, filename }); + const contentType = detected.contentType; - const url = await storage.presignGet(key, 3600); - return { key, url, filename, mime: contentType }; - } + const result = await storage.upload(key, detected.stream, { contentType }); - // Legacy path - const s3 = getStreamer(); - const key = generateLegacyKey(filename); - const uploadResult = await s3.upload({ - readStream, - filename, - key, - bucket: bucketName, - }); - return { - url: uploadResult.upload.Location, - filename, - mime: uploadResult.contentType, - }; + await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, opts?.userId || null); + + const url = await storage.presignGet(key, 3600); + return { key, url, filename, mime: contentType }; } export async function __resetUploadResolverForTests(): Promise { - if 
(streamer && typeof (streamer as { destroy?: () => void }).destroy === 'function') { - streamer.destroy(); - } - streamer = null; - if ( storageProvider && typeof (storageProvider as StorageProvider & { destroy?: () => void }).destroy === 'function' @@ -245,7 +190,7 @@ export async function __resetUploadResolverForTests(): Promise { * Upload resolver that streams files to S3/MinIO. * * Returns different shapes based on the column's type hint: - * - 'image' / 'upload' → { key, url, mime, filename } (v2) or { url, mime, filename } (legacy) + * - 'image' / 'upload' → { key, url, mime, filename } * - 'attachment' / default → url string (for text domain columns) * * MIME validation happens before persistence: content type is detected from @@ -258,7 +203,6 @@ async function uploadResolver( info: { uploadPlugin: UploadPluginInfo }, ): Promise { const { tags, type } = info.uploadPlugin; - const s3 = getStreamer(); const { filename } = upload; // MIME type validation from smart tags @@ -274,7 +218,7 @@ async function uploadResolver( ? 
DEFAULT_IMAGE_MIME_TYPES : []; - const detected = await s3.detectContentType({ + const detected = await streamContentType({ readStream: upload.createReadStream(), filename, }); @@ -285,54 +229,29 @@ async function uploadResolver( throw new Error('UPLOAD_MIMETYPE'); } - // V2 path: new key format + files table INSERT - if (isV2Enabled()) { - const { databaseId, userId } = extractContextInfo(_context); - - if (databaseId) { - const storage = getStorageProvider(); - const bucketKey = 'default'; - const { key, fileId } = generateV2Key(databaseId, bucketKey); - - const result = await storage.upload(key, detected.stream, { - contentType: detectedContentType, - }); - - await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, userId); + const { databaseId, userId } = extractContextInfo(_context); - const url = await storage.presignGet(key, 3600); - - switch (typ) { - case 'image': - case 'upload': - return { key, filename, mime: detectedContentType, url }; - case 'attachment': - default: - return url; - } - } - - log.warn('[upload-resolver] V2 enabled but no databaseId in context, falling back to legacy'); + if (!databaseId) { + detected.stream.destroy(); + throw new Error('[upload-resolver] databaseId is required for file uploads'); } - // Legacy path - const key = generateLegacyKey(filename); + const storage = getStorageProvider(); + const bucketKey = 'default'; + const { key, fileId } = generateV2Key(databaseId, bucketKey); - const result = await s3.uploadWithContentType({ - readStream: detected.stream, + const result = await storage.upload(key, detected.stream, { contentType: detectedContentType, - magic: detected.magic, - key, - bucket: bucketName, }); - const url = result.upload.Location; - const { contentType } = result; + await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, userId); + + const url = await storage.presignGet(key, 3600); switch (typ) { case 'image': case 'upload': - return { filename, mime: contentType, url }; + 
return { key, filename, mime: detectedContentType, url }; case 'attachment': default: return url; diff --git a/graphql/explorer/src/resolvers/uploads.ts b/graphql/explorer/src/resolvers/uploads.ts index b06aed646..1561c7b71 100644 --- a/graphql/explorer/src/resolvers/uploads.ts +++ b/graphql/explorer/src/resolvers/uploads.ts @@ -1,4 +1,4 @@ -import Streamer from '@constructive-io/s3-streamer'; +import { S3StorageProvider, streamContentType } from '@constructive-io/s3-streamer'; import uploadNames from '@constructive-io/upload-names'; import { ReadStream } from 'fs'; import type { GraphQLResolveInfo } from 'graphql'; @@ -26,16 +26,18 @@ interface UploadPluginInfo { } export class UploadHandler { - private streamer: Streamer; + private storage: S3StorageProvider; + private bucketName: string; constructor(private options: UploaderOptions) { - this.streamer = new Streamer({ - defaultBucket: options.bucketName, + this.bucketName = options.bucketName; + this.storage = new S3StorageProvider({ + bucket: options.bucketName, awsRegion: options.awsRegion, awsSecretKey: options.awsSecretKey, awsAccessKey: options.awsAccessKey, minioEndpoint: options.minioEndpoint, - provider: options.provider + provider: options.provider, }); } @@ -50,25 +52,15 @@ export class UploadHandler { } = info; const readStream = upload.createReadStream() as ReadStream; - const { filename, mimetype } = upload; + const { filename } = upload; const rand = Math.random().toString(36).substring(2, 7) + Math.random().toString(36).substring(2, 7); const key = rand + '-' + uploadNames(filename); - const result = await this.streamer.upload({ - readStream, - filename, - key, - bucket: this.options.bucketName - }); - - const url = result.upload.Location; - const { - contentType, - magic: { charset } - } = result; + const detected = await streamContentType({ readStream, filename }); + const { contentType } = detected; const typ = type || tags.type; @@ -79,9 +71,13 @@ export class UploadHandler { : []; if 
(mim.length && !mim.includes(contentType)) { + detected.stream.destroy(); throw new Error(`UPLOAD_MIMETYPE ${mim.join(',')}`); } + await this.storage.upload(key, detected.stream, { contentType }); + const url = await this.storage.presignGet(key, 3600); + switch (typ) { case 'image': case 'upload': diff --git a/migrations/object_store.sql b/migrations/object_store.sql deleted file mode 100644 index d98a2c042..000000000 --- a/migrations/object_store.sql +++ /dev/null @@ -1,708 +0,0 @@ --- ============================================================================= --- Constructive Upload System -- files_store_public schema --- ============================================================================= --- Run: psql -h localhost -U postgres -d constructive < migrations/files_store.sql --- ============================================================================= - -BEGIN; - --- Ensure required roles exist (idempotent for dev environments) -DO $$ BEGIN - IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'authenticated') THEN - CREATE ROLE authenticated NOLOGIN; - END IF; - IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'service_role') THEN - CREATE ROLE service_role NOLOGIN; - END IF; -END $$; - --- Require app_jobs.add_job to exist (provided by pgpm-database-jobs). --- Deploy pgpm-database-jobs BEFORE running this migration. --- DO NOT stub this function here -- CREATE OR REPLACE would silently overwrite --- the production implementation, causing all trigger-enqueued jobs to be lost. -CREATE SCHEMA IF NOT EXISTS app_jobs; - -DO $$ BEGIN - IF NOT EXISTS ( - SELECT 1 FROM pg_proc p - JOIN pg_namespace n ON p.pronamespace = n.oid - WHERE n.nspname = 'app_jobs' AND p.proname = 'add_job' - ) THEN - RAISE EXCEPTION 'app_jobs.add_job not found. 
Deploy pgpm-database-jobs before running this migration.'; - END IF; -END $$; - --- Ensure schema exists -CREATE SCHEMA IF NOT EXISTS files_store_public; - --- --------------------------------------------------------------------------- --- 1. Status ENUM --- --------------------------------------------------------------------------- - -CREATE TYPE files_store_public.file_status AS ENUM ( - 'pending', - 'processing', - 'ready', - 'error', - 'deleting' -); - -COMMENT ON TYPE files_store_public.file_status IS - 'Lifecycle states for managed files. Transitions: pending->{processing,error}, processing->{ready,error,deleting}, ready->deleting, error->{deleting,pending(retry)}.'; - --- --------------------------------------------------------------------------- --- 2. Files Table --- --------------------------------------------------------------------------- - -CREATE TABLE files_store_public.files ( - id uuid NOT NULL DEFAULT gen_random_uuid(), - database_id integer NOT NULL, - bucket_key text NOT NULL DEFAULT 'default', - key text NOT NULL, - status files_store_public.file_status NOT NULL DEFAULT 'pending', - status_reason text, - etag text, - source_table text, - source_column text, - source_id uuid, - processing_started_at timestamptz, - created_by uuid, - created_at timestamptz NOT NULL DEFAULT now(), - updated_at timestamptz NOT NULL DEFAULT now(), - - CONSTRAINT files_pkey PRIMARY KEY (id, database_id), - CONSTRAINT files_key_unique UNIQUE (key, database_id), - CONSTRAINT files_key_not_empty CHECK (key <> ''), - CONSTRAINT files_key_max_length CHECK (length(key) <= 1024), - CONSTRAINT files_bucket_key_format CHECK (bucket_key ~ '^[a-z][a-z0-9_-]*$'), - CONSTRAINT files_source_table_format CHECK ( - source_table IS NULL OR source_table ~ '^[a-z_]+\.[a-z_]+$' - ), - CONSTRAINT files_source_complete CHECK ( - (source_table IS NULL AND source_column IS NULL AND source_id IS NULL) - OR (source_table IS NOT NULL AND source_column IS NOT NULL AND source_id IS NOT NULL) - ) 
-); - -COMMENT ON TABLE files_store_public.files IS - 'Operational index for S3 objects. Each row = one physical S3 object (including generated versions). NOT a source of truth for file metadata -- domain tables own that.'; -COMMENT ON COLUMN files_store_public.files.key IS - 'Full S3 object key. Format: {database_id}/{bucket_key}/{uuid}_{version_name}. Origin files use _origin suffix.'; -COMMENT ON COLUMN files_store_public.files.etag IS - 'S3 ETag for reconciliation and cache validation.'; -COMMENT ON COLUMN files_store_public.files.status_reason IS - 'Human-readable reason for current status (error details, deletion reason).'; -COMMENT ON COLUMN files_store_public.files.processing_started_at IS - 'Timestamp when processing began. Used to detect stuck jobs (alert at 15 min).'; -COMMENT ON COLUMN files_store_public.files.source_table IS - 'Schema-qualified table name referencing this file (e.g. constructive_users_public.users). NULL until the domain trigger populates it. Free text -- no FK possible.'; -COMMENT ON COLUMN files_store_public.files.source_column IS - 'Column name on the source table (e.g. profile_picture). NULL until domain trigger populates it.'; -COMMENT ON COLUMN files_store_public.files.source_id IS - 'Primary key of the row in the source table. NULL until domain trigger populates it.'; - --- --------------------------------------------------------------------------- --- 3. 
Buckets Table --- --------------------------------------------------------------------------- - -CREATE TABLE files_store_public.buckets ( - id uuid NOT NULL DEFAULT gen_random_uuid(), - database_id integer NOT NULL, - key text NOT NULL, - name text NOT NULL, - is_public boolean NOT NULL DEFAULT false, - config jsonb NOT NULL DEFAULT '{}'::jsonb, - created_by uuid, - updated_by uuid, - created_at timestamptz NOT NULL DEFAULT now(), - updated_at timestamptz NOT NULL DEFAULT now(), - - CONSTRAINT buckets_pkey PRIMARY KEY (id, database_id), - CONSTRAINT buckets_key_unique UNIQUE (key, database_id), - CONSTRAINT buckets_key_format CHECK (key ~ '^[a-z][a-z0-9_-]*$') -); - -COMMENT ON TABLE files_store_public.buckets IS - 'Logical bucket configuration per tenant. The bucket key maps to the S3 key prefix segment. is_public controls RLS policy for anonymous reads.'; - --- --------------------------------------------------------------------------- --- 4. Indexes --- --------------------------------------------------------------------------- - --- Tenant queries -CREATE INDEX files_database_id_idx - ON files_store_public.files (database_id); - --- Bucket + tenant queries -CREATE INDEX files_bucket_database_id_idx - ON files_store_public.files (bucket_key, database_id); - --- "My uploads" queries -CREATE INDEX files_created_by_database_id_created_at_idx - ON files_store_public.files (created_by, database_id, created_at DESC); - --- Back-reference lookups (cleanup worker, attachment queries) -CREATE INDEX files_source_ref_idx - ON files_store_public.files (source_table, source_column, source_id); - --- Pending file reaper (hourly cron) -CREATE INDEX files_pending_created_at_idx - ON files_store_public.files (created_at) - WHERE status = 'pending'; - --- Stuck processing detection -CREATE INDEX files_processing_idx - ON files_store_public.files (processing_started_at) - WHERE status = 'processing'; - --- Deletion job queue -CREATE INDEX files_deleting_idx - ON 
files_store_public.files (updated_at) - WHERE status = 'deleting'; - --- Time-range scans on large tables -CREATE INDEX files_created_at_brin_idx - ON files_store_public.files USING brin (created_at); - --- --------------------------------------------------------------------------- --- 5. Triggers --- --------------------------------------------------------------------------- - --- 5a. AFTER INSERT -- enqueue process-image job --- NOTE: Version rows are inserted with status = 'ready', which intentionally --- bypasses this trigger (condition: NEW.status = 'pending'). Only origin --- uploads (status = 'pending') need processing. - -CREATE OR REPLACE FUNCTION files_store_public.files_after_insert_queue_processing() -RETURNS trigger AS $$ -BEGIN - PERFORM app_jobs.add_job( - 'process-image', - json_build_object( - 'file_id', NEW.id, - 'database_id', NEW.database_id - ), - job_key := 'file:' || NEW.id::text - ); - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER files_after_insert_queue_processing - AFTER INSERT ON files_store_public.files - FOR EACH ROW - WHEN (NEW.status = 'pending') - EXECUTE FUNCTION files_store_public.files_after_insert_queue_processing(); - -COMMENT ON TRIGGER files_after_insert_queue_processing ON files_store_public.files IS - 'Enqueues process-image job for new origin uploads. Version rows inserted as ready intentionally bypass this trigger -- they do not need processing.'; - --- 5b. 
BEFORE UPDATE -- timestamp + state machine - -CREATE OR REPLACE FUNCTION files_store_public.files_before_update_timestamp() -RETURNS trigger AS $$ -BEGIN - -- Always update timestamp - NEW.updated_at := now(); - - -- State machine validation (only when status changes) - IF OLD.status IS DISTINCT FROM NEW.status THEN - IF NOT ( - (OLD.status = 'pending' AND NEW.status IN ('processing', 'error')) - OR (OLD.status = 'processing' AND NEW.status IN ('ready', 'error', 'deleting')) - OR (OLD.status = 'ready' AND NEW.status = 'deleting') - OR (OLD.status = 'error' AND NEW.status IN ('deleting', 'pending')) - ) THEN - RAISE EXCEPTION 'Invalid status transition from % to %', OLD.status, NEW.status; - END IF; - - -- Track processing start/end - IF NEW.status = 'processing' THEN - NEW.processing_started_at := now(); - ELSIF OLD.status = 'processing' AND NEW.status <> 'processing' THEN - NEW.processing_started_at := NULL; - END IF; - END IF; - - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER files_before_update_timestamp - BEFORE UPDATE ON files_store_public.files - FOR EACH ROW - EXECUTE FUNCTION files_store_public.files_before_update_timestamp(); - -COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS - 'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.'; - --- 5c. 
AFTER UPDATE -- enqueue delete-s3-object job - -CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion() -RETURNS trigger AS $$ -BEGIN - PERFORM app_jobs.add_job( - 'delete-s3-object', - json_build_object( - 'file_id', NEW.id, - 'database_id', NEW.database_id, - 'key', NEW.key - ), - job_key := 'delete:' || NEW.id::text - ); - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER files_after_update_queue_deletion - AFTER UPDATE ON files_store_public.files - FOR EACH ROW - WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting') - EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion(); - -COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS - 'Enqueues delete-s3-object job when a file transitions to deleting status. Each version row gets its own deletion job.'; - --- 5d. AFTER UPDATE -- re-enqueue process-image on error->pending retry - -CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_retry() -RETURNS trigger AS $$ -BEGIN - PERFORM app_jobs.add_job( - 'process-image', - json_build_object( - 'file_id', NEW.id, - 'database_id', NEW.database_id - ), - job_key := 'file:' || NEW.id::text - ); - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER files_after_update_queue_retry - AFTER UPDATE ON files_store_public.files - FOR EACH ROW - WHEN (OLD.status = 'error' AND NEW.status = 'pending') - EXECUTE FUNCTION files_store_public.files_after_update_queue_retry(); - -COMMENT ON TRIGGER files_after_update_queue_retry ON files_store_public.files IS - 'Re-enqueues process-image job when a file is retried (error->pending). Without this trigger, the retry would change status but never re-enqueue the processing job.'; - --- --------------------------------------------------------------------------- --- 6. 
RLS Policies & Grants --- --------------------------------------------------------------------------- - -ALTER TABLE files_store_public.files ENABLE ROW LEVEL SECURITY; -ALTER TABLE files_store_public.files FORCE ROW LEVEL SECURITY; - --- Policy 1: Tenant isolation (RESTRICTIVE -- always ANDed with all other policies) --- Without this being RESTRICTIVE, permissive policies would OR together and --- allow cross-tenant access (e.g. a ready file in tenant 2 visible via files_visibility). -CREATE POLICY files_tenant_isolation ON files_store_public.files - AS RESTRICTIVE - FOR ALL - USING (database_id = current_setting('app.database_id')::integer) - WITH CHECK (database_id = current_setting('app.database_id')::integer); - --- Policy 2: Visibility for SELECT (authenticated + service_role only) --- Non-ready files visible only to the uploader. Uses NULLIF for safe uuid handling --- when app.user_id is missing or empty (returns NULL instead of cast error). --- Scoped to authenticated/service_role so anonymous only gets public_bucket_read. 
-CREATE POLICY files_visibility ON files_store_public.files - FOR SELECT - TO authenticated, service_role - USING ( - status = 'ready' - OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid - ); - --- Policy 3: Public bucket read for SELECT (all roles including anonymous) -CREATE POLICY files_public_bucket_read ON files_store_public.files - FOR SELECT - USING ( - EXISTS ( - SELECT 1 FROM files_store_public.buckets b - WHERE b.key = bucket_key - AND b.database_id = files.database_id - AND b.is_public = true - ) - AND status = 'ready' - ); - --- Policy 4: Admin override (all operations, authenticated + service_role) -CREATE POLICY files_admin_override ON files_store_public.files - FOR ALL - TO authenticated, service_role - USING (current_setting('app.role', true) = 'administrator') - WITH CHECK (current_setting('app.role', true) = 'administrator'); - --- Policy 5: INSERT access (permissive base so non-admin users can insert) -CREATE POLICY files_insert_access ON files_store_public.files - FOR INSERT - TO authenticated, service_role - WITH CHECK (true); - --- Policy 6: UPDATE access (replicates visibility for row targeting) --- Non-admin users can only update rows they can see (ready or own). --- Admin override policy covers admin UPDATE access separately. 
-CREATE POLICY files_update_access ON files_store_public.files - FOR UPDATE - TO authenticated, service_role - USING ( - status = 'ready' - OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid - ) - WITH CHECK (true); - --- Policy 7: DELETE access (service_role only, grants already restrict authenticated) -CREATE POLICY files_delete_access ON files_store_public.files - FOR DELETE - TO service_role - USING (true); - --- Grants -GRANT SELECT, INSERT, UPDATE ON files_store_public.files TO authenticated; -GRANT SELECT, INSERT, UPDATE, DELETE ON files_store_public.files TO service_role; - -COMMENT ON POLICY files_tenant_isolation ON files_store_public.files IS - 'Every query is scoped to the current tenant via app.database_id session variable.'; -COMMENT ON POLICY files_visibility ON files_store_public.files IS - 'Users see all ready files in their tenant. Non-ready files visible only to the uploader.'; -COMMENT ON POLICY files_public_bucket_read ON files_store_public.files IS - 'Allows unauthenticated reads on ready files in public buckets.'; -COMMENT ON POLICY files_admin_override ON files_store_public.files IS - 'Administrators can see and modify all files in the tenant regardless of status or creator.'; - --- --------------------------------------------------------------------------- --- 7. Domain Table Triggers --- --------------------------------------------------------------------------- - --- 7a. Generic trigger function: back-reference population --- --- When a domain table's image/upload/attachment column is updated with an S3 key, --- find the files row by key and populate source_table, source_column, source_id. --- Also finds version rows by key prefix and populates the same back-reference. --- --- Parameters (passed via TG_ARGV): --- TG_ARGV[0] = column name (e.g. 'profile_picture') --- TG_ARGV[1] = schema-qualified table name (e.g. 
'constructive_users_public.users') - -CREATE OR REPLACE FUNCTION files_store_public.populate_file_back_reference() -RETURNS trigger AS $$ -DECLARE - col_name text := TG_ARGV[0]; - table_name text := TG_ARGV[1]; - new_val jsonb; - old_val jsonb; - new_key text; - old_key text; - base_key text; - db_id integer; -BEGIN - -- Get the database_id from session context - db_id := current_setting('app.database_id')::integer; - - -- Extract the jsonb value from the specified column (dynamic) - EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO new_val USING NEW; - EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO old_val USING OLD; - - -- Extract the key from the new and old values - new_key := new_val ->> 'key'; - old_key := old_val ->> 'key'; - - -- If no key change, nothing to do - IF new_key IS NOT DISTINCT FROM old_key THEN - RETURN NEW; - END IF; - - -- Handle file replacement: mark old files as deleting - IF old_key IS NOT NULL AND old_key <> '' THEN - -- Derive base key for the old file (strip version suffix) - base_key := regexp_replace(old_key, '_[^_]+$', ''); - - -- Mark old origin + all versions as deleting - UPDATE files_store_public.files - SET status = 'deleting', status_reason = 'replaced by new file' - WHERE database_id = db_id - AND (key = old_key OR key LIKE base_key || '_%') - AND status NOT IN ('deleting'); - END IF; - - -- Populate back-reference on new file (origin + versions) - IF new_key IS NOT NULL AND new_key <> '' THEN - -- Derive base key for the new file - base_key := regexp_replace(new_key, '_[^_]+$', ''); - - -- Set back-reference on origin + all version rows - UPDATE files_store_public.files - SET source_table = table_name, - source_column = col_name, - source_id = NEW.id - WHERE database_id = db_id - AND (key = new_key OR key LIKE base_key || '_%'); - END IF; - - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -COMMENT ON FUNCTION files_store_public.populate_file_back_reference() IS - 'Generic trigger function for domain tables. 
Populates source_table/source_column/source_id on files rows when image/upload/attachment columns are updated. Handles file replacement by marking old files as deleting.'; - --- 7b. Generic trigger function: source row deletion --- --- When a domain row is deleted, mark all associated files as deleting. - -CREATE OR REPLACE FUNCTION files_store_public.mark_files_deleting_on_source_delete() -RETURNS trigger AS $$ -DECLARE - col_name text := TG_ARGV[0]; - table_name text := TG_ARGV[1]; - db_id integer; -BEGIN - db_id := current_setting('app.database_id')::integer; - - -- Mark all files for this source row + column as deleting - UPDATE files_store_public.files - SET status = 'deleting', status_reason = 'source row deleted' - WHERE database_id = db_id - AND source_table = table_name - AND source_column = col_name - AND source_id = OLD.id - AND status NOT IN ('deleting'); - - RETURN OLD; -END; -$$ LANGUAGE plpgsql; - -COMMENT ON FUNCTION files_store_public.mark_files_deleting_on_source_delete() IS - 'Generic trigger function for domain tables. Marks all associated files as deleting when a domain row is deleted.'; - --- 7c. CREATE TRIGGER statements for all 6 tables, 9 columns --- --- Each domain column gets two triggers: --- - AFTER UPDATE: back-reference population + file replacement --- - BEFORE DELETE: mark files deleting on source row deletion --- --- These are wrapped in a DO block so they gracefully skip tables that --- don't exist yet (e.g. in fresh dev environments). In production, --- domain tables will exist before this migration runs. 
- -DO $domain_triggers$ -DECLARE - _tbl text; -BEGIN - -- constructive_users_public.users.profile_picture - SELECT 'constructive_users_public.users' INTO _tbl - FROM information_schema.tables - WHERE table_schema = 'constructive_users_public' AND table_name = 'users'; - IF FOUND THEN - EXECUTE 'CREATE TRIGGER users_profile_picture_file_ref - AFTER UPDATE OF profile_picture ON constructive_users_public.users - FOR EACH ROW - EXECUTE FUNCTION files_store_public.populate_file_back_reference(''profile_picture'', ''constructive_users_public.users'')'; - EXECUTE 'CREATE TRIGGER users_profile_picture_file_delete - BEFORE DELETE ON constructive_users_public.users - FOR EACH ROW - EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''profile_picture'', ''constructive_users_public.users'')'; - RAISE NOTICE 'Created triggers for constructive_users_public.users.profile_picture'; - ELSE - RAISE NOTICE 'Skipped triggers for constructive_users_public.users (table not found)'; - END IF; - - -- constructive_status_public.app_levels.image - SELECT 'constructive_status_public.app_levels' INTO _tbl - FROM information_schema.tables - WHERE table_schema = 'constructive_status_public' AND table_name = 'app_levels'; - IF FOUND THEN - EXECUTE 'CREATE TRIGGER app_levels_image_file_ref - AFTER UPDATE OF image ON constructive_status_public.app_levels - FOR EACH ROW - EXECUTE FUNCTION files_store_public.populate_file_back_reference(''image'', ''constructive_status_public.app_levels'')'; - EXECUTE 'CREATE TRIGGER app_levels_image_file_delete - BEFORE DELETE ON constructive_status_public.app_levels - FOR EACH ROW - EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''image'', ''constructive_status_public.app_levels'')'; - RAISE NOTICE 'Created triggers for constructive_status_public.app_levels.image'; - ELSE - RAISE NOTICE 'Skipped triggers for constructive_status_public.app_levels (table not found)'; - END IF; - - -- services_public.sites (og_image, 
apple_touch_icon, logo, favicon) - SELECT 'services_public.sites' INTO _tbl - FROM information_schema.tables - WHERE table_schema = 'services_public' AND table_name = 'sites'; - IF FOUND THEN - EXECUTE 'CREATE TRIGGER sites_og_image_file_ref - AFTER UPDATE OF og_image ON services_public.sites - FOR EACH ROW - EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.sites'')'; - EXECUTE 'CREATE TRIGGER sites_og_image_file_delete - BEFORE DELETE ON services_public.sites - FOR EACH ROW - EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.sites'')'; - - EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_ref - AFTER UPDATE OF apple_touch_icon ON services_public.sites - FOR EACH ROW - EXECUTE FUNCTION files_store_public.populate_file_back_reference(''apple_touch_icon'', ''services_public.sites'')'; - EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_delete - BEFORE DELETE ON services_public.sites - FOR EACH ROW - EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''apple_touch_icon'', ''services_public.sites'')'; - - EXECUTE 'CREATE TRIGGER sites_logo_file_ref - AFTER UPDATE OF logo ON services_public.sites - FOR EACH ROW - EXECUTE FUNCTION files_store_public.populate_file_back_reference(''logo'', ''services_public.sites'')'; - EXECUTE 'CREATE TRIGGER sites_logo_file_delete - BEFORE DELETE ON services_public.sites - FOR EACH ROW - EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''logo'', ''services_public.sites'')'; - - EXECUTE 'CREATE TRIGGER sites_favicon_file_ref - AFTER UPDATE OF favicon ON services_public.sites - FOR EACH ROW - EXECUTE FUNCTION files_store_public.populate_file_back_reference(''favicon'', ''services_public.sites'')'; - EXECUTE 'CREATE TRIGGER sites_favicon_file_delete - BEFORE DELETE ON services_public.sites - FOR EACH ROW - EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''favicon'', 
''services_public.sites'')'; - RAISE NOTICE 'Created triggers for services_public.sites (og_image, apple_touch_icon, logo, favicon)'; - ELSE - RAISE NOTICE 'Skipped triggers for services_public.sites (table not found)'; - END IF; - - -- services_public.apps.app_image - SELECT 'services_public.apps' INTO _tbl - FROM information_schema.tables - WHERE table_schema = 'services_public' AND table_name = 'apps'; - IF FOUND THEN - EXECUTE 'CREATE TRIGGER apps_app_image_file_ref - AFTER UPDATE OF app_image ON services_public.apps - FOR EACH ROW - EXECUTE FUNCTION files_store_public.populate_file_back_reference(''app_image'', ''services_public.apps'')'; - EXECUTE 'CREATE TRIGGER apps_app_image_file_delete - BEFORE DELETE ON services_public.apps - FOR EACH ROW - EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''app_image'', ''services_public.apps'')'; - RAISE NOTICE 'Created triggers for services_public.apps.app_image'; - ELSE - RAISE NOTICE 'Skipped triggers for services_public.apps (table not found)'; - END IF; - - -- services_public.site_metadata.og_image - SELECT 'services_public.site_metadata' INTO _tbl - FROM information_schema.tables - WHERE table_schema = 'services_public' AND table_name = 'site_metadata'; - IF FOUND THEN - EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_ref - AFTER UPDATE OF og_image ON services_public.site_metadata - FOR EACH ROW - EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.site_metadata'')'; - EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_delete - BEFORE DELETE ON services_public.site_metadata - FOR EACH ROW - EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.site_metadata'')'; - RAISE NOTICE 'Created triggers for services_public.site_metadata.og_image'; - ELSE - RAISE NOTICE 'Skipped triggers for services_public.site_metadata (table not found)'; - END IF; - - -- db_migrate.migrate_files.upload - SELECT 
'db_migrate.migrate_files' INTO _tbl - FROM information_schema.tables - WHERE table_schema = 'db_migrate' AND table_name = 'migrate_files'; - IF FOUND THEN - EXECUTE 'CREATE TRIGGER migrate_files_upload_file_ref - AFTER UPDATE OF upload ON db_migrate.migrate_files - FOR EACH ROW - EXECUTE FUNCTION files_store_public.populate_file_back_reference(''upload'', ''db_migrate.migrate_files'')'; - EXECUTE 'CREATE TRIGGER migrate_files_upload_file_delete - BEFORE DELETE ON db_migrate.migrate_files - FOR EACH ROW - EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''upload'', ''db_migrate.migrate_files'')'; - RAISE NOTICE 'Created triggers for db_migrate.migrate_files.upload'; - ELSE - RAISE NOTICE 'Skipped triggers for db_migrate.migrate_files (table not found)'; - END IF; -END -$domain_triggers$; - --- --------------------------------------------------------------------------- --- 8. Scheduled cleanup jobs (requires pgpm-database-jobs with scheduling) --- --------------------------------------------------------------------------- --- Register recurring file-cleanup jobs via app_jobs.add_scheduled_job. --- The scheduler (knative-job-service) picks these up and spawns one-shot jobs --- on the configured schedule. Each job calls the file-cleanup function with --- the appropriate cleanup type. --- --- Schedules: --- pending_reaper: every hour (clear stale pending uploads) --- error_cleanup: daily at 03:00 UTC (expire old error files) --- unattached_cleanup: daily at 04:00 UTC (clean unattached ready files) --- --------------------------------------------------------------------------- - -DO $cron$ -DECLARE - v_db_id uuid; -BEGIN - -- Look up the database ID for the current database. - -- If metaschema_public.database is not deployed yet, skip silently. 
- BEGIN - SELECT id INTO v_db_id - FROM metaschema_public.database - ORDER BY created_at - LIMIT 1; - EXCEPTION WHEN undefined_table THEN - RAISE NOTICE 'metaschema_public.database not found, skipping scheduled job registration.'; - RETURN; - END; - - IF v_db_id IS NULL THEN - RAISE NOTICE 'No database row found, skipping scheduled job registration.'; - RETURN; - END IF; - - -- pending_reaper: every hour (minute 0) - PERFORM app_jobs.add_scheduled_job( - db_id := v_db_id, - identifier := 'file-cleanup', - payload := '{"type":"pending_reaper"}'::json, - schedule_info := '{"minute": 0}'::json, - job_key := 'file-cleanup:pending_reaper', - queue_name := 'maintenance', - max_attempts := 3, - priority := 100 - ); - - -- error_cleanup: daily at 03:00 UTC - PERFORM app_jobs.add_scheduled_job( - db_id := v_db_id, - identifier := 'file-cleanup', - payload := '{"type":"error_cleanup"}'::json, - schedule_info := '{"hour": 3, "minute": 0}'::json, - job_key := 'file-cleanup:error_cleanup', - queue_name := 'maintenance', - max_attempts := 3, - priority := 100 - ); - - -- unattached_cleanup: daily at 04:00 UTC - PERFORM app_jobs.add_scheduled_job( - db_id := v_db_id, - identifier := 'file-cleanup', - payload := '{"type":"unattached_cleanup"}'::json, - schedule_info := '{"hour": 4, "minute": 0}'::json, - job_key := 'file-cleanup:unattached_cleanup', - queue_name := 'maintenance', - max_attempts := 3, - priority := 100 - ); - - RAISE NOTICE 'Registered 3 file-cleanup scheduled jobs for database %', v_db_id; -END -$cron$; - -COMMIT; diff --git a/uploads/s3-streamer/src/index.ts b/uploads/s3-streamer/src/index.ts index 90cf965e1..8cd5fa38f 100644 --- a/uploads/s3-streamer/src/index.ts +++ b/uploads/s3-streamer/src/index.ts @@ -3,6 +3,7 @@ import Streamer from './streamer'; export * from './utils'; export * from './storage-provider'; +export { streamContentType } from '@constructive-io/content-type-stream'; export { getClient }; export { Streamer }; From 
38cd62047310e0bf66cd7e68313d94dfbdf26f14 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Tue, 17 Mar 2026 11:29:45 +0800 Subject: [PATCH 10/15] apply origin id for versions of imgs --- .../graphile-settings/src/upload-resolver.ts | 11 +- migrations/files_store.sql | 138 +++++++++++++++--- 2 files changed, 123 insertions(+), 26 deletions(-) diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index a60a1704b..fd84d6a00 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -119,13 +119,14 @@ async function insertFileRecord( key: string, etag: string, createdBy: string | null, + contentType: string | null, ): Promise { const pool = getPgPool(); await pool.query( `INSERT INTO files_store_public.files - (id, database_id, bucket_key, key, etag, created_by) - VALUES ($1, $2, $3, $4, $5, $6)`, - [fileId, Number(databaseId), bucketKey, key, etag, createdBy], + (id, database_id, bucket_key, key, etag, created_by, mime_type) + VALUES ($1, $2, $3, $4, $5, $6, $7)`, + [fileId, Number(databaseId), bucketKey, key, etag, createdBy, contentType], ); } @@ -165,7 +166,7 @@ export async function streamToStorage( const result = await storage.upload(key, detected.stream, { contentType }); - await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, opts?.userId || null); + await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, opts?.userId || null, contentType); const url = await storage.presignGet(key, 3600); return { key, url, filename, mime: contentType }; @@ -244,7 +245,7 @@ async function uploadResolver( contentType: detectedContentType, }); - await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, userId); + await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, userId, detectedContentType); const url = await storage.presignGet(key, 3600); diff --git a/migrations/files_store.sql 
b/migrations/files_store.sql index d98a2c042..2a352c897 100644 --- a/migrations/files_store.sql +++ b/migrations/files_store.sql @@ -67,6 +67,8 @@ CREATE TABLE files_store_public.files ( source_id uuid, processing_started_at timestamptz, created_by uuid, + origin_id uuid, + mime_type text, created_at timestamptz NOT NULL DEFAULT now(), updated_at timestamptz NOT NULL DEFAULT now(), @@ -100,6 +102,22 @@ COMMENT ON COLUMN files_store_public.files.source_column IS 'Column name on the source table (e.g. profile_picture). NULL until domain trigger populates it.'; COMMENT ON COLUMN files_store_public.files.source_id IS 'Primary key of the row in the source table. NULL until domain trigger populates it.'; +COMMENT ON COLUMN files_store_public.files.origin_id IS + 'Self-referential FK to the origin file. NULL for origin rows, set for version rows (thumbnail, medium).'; +COMMENT ON COLUMN files_store_public.files.mime_type IS + 'Detected MIME type of the file. Set at upload time for origins, at processing time for versions.'; + +-- Self-referential FK (version -> origin, same table) +-- ON DELETE CASCADE: DB-level safety net. The primary deletion path is +-- per-row delete-s3-object jobs (each row gets its own job via trigger). +-- CASCADE only fires if an origin row is directly DELETEd before its +-- version rows -- in that case, version DB rows are removed but version +-- S3 objects are still cleaned up by their already-enqueued jobs. +ALTER TABLE files_store_public.files + ADD CONSTRAINT files_origin_fk + FOREIGN KEY (origin_id, database_id) + REFERENCES files_store_public.files (id, database_id) + ON DELETE CASCADE; -- --------------------------------------------------------------------------- -- 3. 
Buckets Table @@ -164,6 +182,11 @@ CREATE INDEX files_deleting_idx CREATE INDEX files_created_at_brin_idx ON files_store_public.files USING brin (created_at); +-- Version lookups: "find all versions of this origin" +CREATE INDEX files_origin_id_idx + ON files_store_public.files (origin_id, database_id) + WHERE origin_id IS NOT NULL; + -- --------------------------------------------------------------------------- -- 5. Triggers -- --------------------------------------------------------------------------- @@ -397,8 +420,10 @@ DECLARE old_val jsonb; new_key text; old_key text; - base_key text; db_id integer; + origin_file_id uuid; + old_origin_file_id uuid; + versions_json json; BEGIN -- Get the database_id from session context db_id := current_setting('app.database_id')::integer; @@ -418,29 +443,75 @@ BEGIN -- Handle file replacement: mark old files as deleting IF old_key IS NOT NULL AND old_key <> '' THEN - -- Derive base key for the old file (strip version suffix) - base_key := regexp_replace(old_key, '_[^_]+$', ''); - - -- Mark old origin + all versions as deleting - UPDATE files_store_public.files - SET status = 'deleting', status_reason = 'replaced by new file' - WHERE database_id = db_id - AND (key = old_key OR key LIKE base_key || '_%') - AND status NOT IN ('deleting'); + -- Find old origin by exact key match + SELECT id INTO old_origin_file_id + FROM files_store_public.files + WHERE key = old_key AND database_id = db_id; + + IF old_origin_file_id IS NOT NULL THEN + -- Mark old origin as deleting + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'replaced by new file' + WHERE id = old_origin_file_id AND database_id = db_id + AND status NOT IN ('deleting'); + + -- Mark old versions as deleting (index hit on origin_id) + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'replaced by new file' + WHERE origin_id = old_origin_file_id AND database_id = db_id + AND status NOT IN ('deleting'); + END IF; END IF; -- 
Populate back-reference on new file (origin + versions) IF new_key IS NOT NULL AND new_key <> '' THEN - -- Derive base key for the new file - base_key := regexp_replace(new_key, '_[^_]+$', ''); - - -- Set back-reference on origin + all version rows - UPDATE files_store_public.files - SET source_table = table_name, - source_column = col_name, - source_id = NEW.id - WHERE database_id = db_id - AND (key = new_key OR key LIKE base_key || '_%'); + -- Find origin by exact key match + SELECT id INTO origin_file_id + FROM files_store_public.files + WHERE key = new_key AND database_id = db_id; + + IF origin_file_id IS NOT NULL THEN + -- Update origin row + UPDATE files_store_public.files + SET source_table = table_name, source_column = col_name, source_id = NEW.id + WHERE id = origin_file_id AND database_id = db_id; + + -- Update version rows (index hit on origin_id) + UPDATE files_store_public.files + SET source_table = table_name, source_column = col_name, source_id = NEW.id + WHERE origin_id = origin_file_id AND database_id = db_id; + + -- Backfill versions into domain JSONB if process-image already completed. + -- This fixes the race condition where process-image runs before domain + -- association (two-step upload path) and can't write back versions. + -- Uses mime_type column for accurate MIME (not hardcoded). + SELECT json_agg(json_build_object( + 'key', f.key, + 'mime', COALESCE(f.mime_type, 'image/jpeg'), + 'width', 0, + 'height', 0 + )) + INTO versions_json + FROM files_store_public.files f + WHERE f.origin_id = origin_file_id + AND f.database_id = db_id + AND f.status = 'ready'; + + IF versions_json IS NOT NULL THEN + -- RECURSION GUARD: This UPDATE re-fires the current trigger on the + -- domain table. It is safe because only the 'versions' subfield of + -- the JSONB column is modified -- the 'key' field is unchanged. 
+ -- The IS NOT DISTINCT FROM check at the top of this function + -- compares old_key vs new_key (both extracted via ->> 'key'), + -- detects they are equal, and returns early. + -- DO NOT change the early-return comparison to use the full JSONB + -- value instead of just the 'key' field, or this will infinite-loop. + EXECUTE format( + 'UPDATE %s SET %I = jsonb_set(COALESCE(%I, ''{}''::jsonb), ''{versions}'', $1::jsonb) WHERE id = $2', + table_name, col_name, col_name + ) USING versions_json, NEW.id; + END IF; + END IF; END IF; RETURN NEW; @@ -479,7 +550,32 @@ $$ LANGUAGE plpgsql; COMMENT ON FUNCTION files_store_public.mark_files_deleting_on_source_delete() IS 'Generic trigger function for domain tables. Marks all associated files as deleting when a domain row is deleted.'; --- 7c. CREATE TRIGGER statements for all 6 tables, 9 columns +-- 7c. Propagate deleting status from origin to version rows. +-- When an origin transitions to 'deleting', mark all its versions as 'deleting' too. +-- Each version row's AFTER UPDATE trigger then enqueues its own delete-s3-object job. + +CREATE OR REPLACE FUNCTION files_store_public.files_propagate_deleting_to_versions() +RETURNS trigger AS $$ +BEGIN + UPDATE files_store_public.files + SET status = 'deleting', status_reason = COALESCE(NEW.status_reason, 'origin marked deleting') + WHERE origin_id = NEW.id + AND database_id = NEW.database_id + AND status NOT IN ('deleting'); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_update_propagate_deleting + AFTER UPDATE ON files_store_public.files + FOR EACH ROW + WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting' AND NEW.origin_id IS NULL) + EXECUTE FUNCTION files_store_public.files_propagate_deleting_to_versions(); + +COMMENT ON TRIGGER files_after_update_propagate_deleting ON files_store_public.files IS + 'When an origin file transitions to deleting, propagate that status to all version rows via origin_id. 
Each version then gets its own delete-s3-object job via the existing files_after_update_queue_deletion trigger. The WHEN clause filters to origin rows only (origin_id IS NULL).'; + +-- 7d. CREATE TRIGGER statements for all 6 tables, 9 columns -- -- Each domain column gets two triggers: -- - AFTER UPDATE: back-reference population + file replacement From cc689cfafff821df9ad51281d09c0b5416bb682a Mon Sep 17 00:00:00 2001 From: zetazzz Date: Tue, 17 Mar 2026 18:57:40 +0800 Subject: [PATCH 11/15] merge --- pnpm-lock.yaml | 1763 +++++++++--------------------- uploads/s3-streamer/package.json | 1 + 2 files changed, 515 insertions(+), 1249 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a6462591d..5736ee0ef 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -31,10 +31,10 @@ importers: version: 8.18.0 '@typescript-eslint/eslint-plugin': specifier: ^8.57.0 - version: 8.57.0(@typescript-eslint/parser@8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) + version: 8.57.1(@typescript-eslint/parser@8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/parser': specifier: ^8.57.0 - version: 8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) + version: 8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) eslint: specifier: ^9.39.2 version: 9.39.2(jiti@2.6.1) @@ -46,7 +46,7 @@ importers: version: 12.1.1(eslint@9.39.2(jiti@2.6.1)) eslint-plugin-unused-imports: specifier: ^4.4.1 - version: 4.4.1(@typescript-eslint/eslint-plugin@8.57.0(@typescript-eslint/parser@8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1)) + version: 4.4.1(@typescript-eslint/eslint-plugin@8.57.1(@typescript-eslint/parser@8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1)) jest: specifier: ^30.3.0 version: 
30.3.0(@types/node@22.19.11)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)) @@ -67,7 +67,7 @@ importers: version: 6.1.3 ts-jest: specifier: ^29.4.6 - version: 29.4.6(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(jest-util@30.3.0)(jest@30.3.0(@types/node@22.19.11)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)))(typescript@5.9.3) + version: 29.4.6(@babel/core@7.28.6)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.28.6))(jest-util@30.3.0)(jest@30.3.0(@types/node@22.19.11)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)))(typescript@5.9.3) ts-node: specifier: ^10.9.2 version: 10.9.2(@types/node@22.19.11)(typescript@5.9.3) @@ -119,10 +119,10 @@ importers: version: link:../../packages/postmaster/dist '@launchql/mjml': specifier: 0.1.1 - version: 0.1.1(@babel/core@7.29.0)(encoding@0.1.13)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4) + version: 0.1.1(@babel/core@7.28.6)(encoding@0.1.13)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4) '@launchql/styled-email': specifier: 0.1.0 - version: 0.1.0(@babel/core@7.29.0)(encoding@0.1.13)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4) + version: 0.1.0(@babel/core@7.28.6)(encoding@0.1.13)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4) '@pgpmjs/env': specifier: workspace:^ version: link:../../pgpm/env/dist @@ -177,7 +177,7 @@ importers: version: 5.2.1 grafserv: specifier: 1.0.0-rc.7 - version: 1.0.0-rc.7(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0) + version: 
1.0.0-rc.7(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0) lru-cache: specifier: ^11.2.7 version: 11.2.7 @@ -186,7 +186,7 @@ importers: version: link:../../postgres/pg-cache/dist postgraphile: specifier: 5.0.0-rc.10 - version: 5.0.0-rc.10(01f6c3872a4afea0bb2f9f52d380dd87) + version: 5.0.0-rc.10(0096e7e6f6d7d6a9d120788f0238f495) devDependencies: '@types/express': specifier: ^5.0.6 @@ -199,7 +199,7 @@ importers: version: 3.1.14 ts-node: specifier: ^10.9.2 - version: 10.9.2(@types/node@25.5.0)(typescript@5.9.3) + version: 10.9.2(@types/node@25.3.3)(typescript@5.9.3) publishDirectory: dist graphile/graphile-connection-filter: @@ -312,7 +312,7 @@ importers: version: 8.20.0 postgraphile: specifier: 5.0.0-rc.10 - version: 5.0.0-rc.10(01f6c3872a4afea0bb2f9f52d380dd87) + version: 5.0.0-rc.10(0096e7e6f6d7d6a9d120788f0238f495) devDependencies: '@types/pg': specifier: ^8.18.0 @@ -354,7 +354,7 @@ importers: version: 0.1.12 ts-node: specifier: ^10.9.2 - version: 10.9.2(@types/node@25.5.0)(typescript@5.9.3) + version: 10.9.2(@types/node@25.3.3)(typescript@5.9.3) publishDirectory: dist graphile/graphile-search: @@ -447,7 +447,7 @@ importers: version: 1.0.0-rc.9(graphql@16.13.0) grafserv: specifier: 1.0.0-rc.7 - version: 1.0.0-rc.7(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0) + version: 
1.0.0-rc.7(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0) graphile-build: specifier: 5.0.0-rc.6 version: 5.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0) @@ -495,7 +495,7 @@ importers: version: 5.0.0-rc.5 postgraphile: specifier: 5.0.0-rc.10 - version: 5.0.0-rc.10(01f6c3872a4afea0bb2f9f52d380dd87) + version: 5.0.0-rc.10(0096e7e6f6d7d6a9d120788f0238f495) request-ip: specifier: ^3.3.0 version: 3.3.0 @@ -529,7 +529,7 @@ importers: version: link:../../postgres/pgsql-test/dist ts-node: specifier: ^10.9.2 - version: 10.9.2(@types/node@25.5.0)(typescript@5.9.3) + version: 10.9.2(@types/node@25.3.3)(typescript@5.9.3) publishDirectory: dist graphile/graphile-sql-expression-validator: @@ -551,10 +551,10 @@ importers: version: 16.13.0 pgsql-deparser: specifier: ^17.18.1 - version: 17.18.1 + version: 17.18.2 pgsql-parser: specifier: ^17.9.13 - version: 17.9.13 + version: 17.9.14 devDependencies: '@types/node': specifier: ^22.19.11 @@ -601,7 +601,7 @@ importers: version: link:../../postgres/pgsql-test/dist postgraphile: specifier: 5.0.0-rc.10 - version: 5.0.0-rc.10(01f6c3872a4afea0bb2f9f52d380dd87) + version: 5.0.0-rc.10(0096e7e6f6d7d6a9d120788f0238f495) devDependencies: '@types/pg': specifier: ^8.18.0 @@ -710,7 +710,7 @@ importers: version: link:../../postgres/pgsql-seed/dist undici: specifier: ^7.24.3 - version: 7.24.3 + version: 7.24.4 devDependencies: '@tanstack/react-query': specifier: ^5.90.21 @@ -735,7 +735,7 @@ importers: version: 19.2.4 ts-jest: specifier: ^29.2.5 - version: 
29.4.6(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(jest-util@30.3.0)(jest@30.3.0(@types/node@22.19.11)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)))(typescript@5.9.3) + version: 29.4.6(@babel/core@7.28.6)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.28.6))(jest-util@30.3.0)(jest@30.3.0(@types/node@22.19.11)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)))(typescript@5.9.3) tsx: specifier: ^4.21.0 version: 4.21.0 @@ -780,7 +780,7 @@ importers: version: 5.2.1 grafserv: specifier: 1.0.0-rc.7 - version: 1.0.0-rc.7(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0) + version: 1.0.0-rc.7(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0) graphile-cache: specifier: workspace:^ version: link:../../graphile/graphile-cache/dist @@ -801,7 +801,7 @@ importers: version: link:../../postgres/pg-env/dist postgraphile: specifier: 5.0.0-rc.10 - version: 5.0.0-rc.10(01f6c3872a4afea0bb2f9f52d380dd87) + version: 5.0.0-rc.10(0096e7e6f6d7d6a9d120788f0238f495) devDependencies: '@types/express': specifier: ^5.0.6 @@ -814,7 +814,7 @@ importers: version: 3.1.14 ts-node: specifier: ^10.9.2 - version: 10.9.2(@types/node@25.5.0)(typescript@5.9.3) + version: 10.9.2(@types/node@25.3.3)(typescript@5.9.3) publishDirectory: dist graphql/gql-ast: @@ -909,7 +909,7 @@ importers: version: 11.2.7 postgraphile: specifier: 5.0.0-rc.10 - version: 5.0.0-rc.10(01f6c3872a4afea0bb2f9f52d380dd87) + version: 5.0.0-rc.10(0096e7e6f6d7d6a9d120788f0238f495) devDependencies: 
makage: specifier: ^0.1.10 @@ -1006,7 +1006,7 @@ importers: version: 1.0.0-rc.9(graphql@16.13.0) grafserv: specifier: 1.0.0-rc.7 - version: 1.0.0-rc.7(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0) + version: 1.0.0-rc.7(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0) graphile-build: specifier: 5.0.0-rc.6 version: 5.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0) @@ -1054,7 +1054,7 @@ importers: version: 5.0.0-rc.5 postgraphile: specifier: 5.0.0-rc.10 - version: 5.0.0-rc.10(01f6c3872a4afea0bb2f9f52d380dd87) + version: 5.0.0-rc.10(0096e7e6f6d7d6a9d120788f0238f495) postgraphile-plugin-connection-filter: specifier: 3.0.0-rc.1 version: 3.0.0-rc.1 @@ -1064,7 +1064,7 @@ importers: devDependencies: '@aws-sdk/client-s3': specifier: ^3.1009.0 - version: 3.1009.0 + version: 3.1010.0 '@types/cors': specifier: ^2.8.17 version: 2.8.19 @@ -1094,7 +1094,7 @@ importers: version: 3.1.14 ts-node: specifier: ^10.9.2 - version: 10.9.2(@types/node@25.5.0)(typescript@5.9.3) + version: 10.9.2(@types/node@25.3.3)(typescript@5.9.3) publishDirectory: dist graphql/server-test: @@ -1190,7 +1190,7 @@ importers: version: link:../../postgres/pgsql-test/dist postgraphile: specifier: 5.0.0-rc.10 - version: 5.0.0-rc.10(01f6c3872a4afea0bb2f9f52d380dd87) + version: 5.0.0-rc.10(0096e7e6f6d7d6a9d120788f0238f495) devDependencies: '@types/pg': specifier: ^8.18.0 @@ -1238,7 +1238,7 @@ importers: version: 19.2.3(@types/react@19.2.14) '@vitejs/plugin-react': specifier: ^4.5.2 - version: 
4.7.0(vite@6.4.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.7.0(vite@6.4.1(@types/node@25.3.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) react-test-renderer: specifier: ^19.2.4 version: 19.2.4(react@19.2.4) @@ -1253,7 +1253,7 @@ importers: version: 5.9.3 vite: specifier: ^6.3.5 - version: 6.4.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2) + version: 6.4.1(@types/node@25.3.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2) graphql/types: dependencies: @@ -1486,7 +1486,7 @@ importers: version: 7.2.2 ts-node: specifier: ^10.9.2 - version: 10.9.2(@types/node@25.5.0)(typescript@5.9.3) + version: 10.9.2(@types/node@25.3.3)(typescript@5.9.3) publishDirectory: dist jobs/knative-job-worker: @@ -1670,7 +1670,7 @@ importers: version: 17.6.2 '@pgsql/utils': specifier: ^17.8.14 - version: 17.8.14 + version: 17.8.15 csv-parser: specifier: ^3.2.0 version: 3.2.0 @@ -1682,7 +1682,7 @@ importers: version: 4.1.1 pgsql-deparser: specifier: ^17.18.1 - version: 17.18.1 + version: 17.18.2 devDependencies: '@types/js-yaml': specifier: ^4.0.9 @@ -1692,7 +1692,7 @@ importers: version: 0.1.12 pgsql-parser: specifier: ^17.9.13 - version: 17.9.13 + version: 17.9.14 publishDirectory: dist packages/oauth: @@ -1783,7 +1783,7 @@ importers: version: 0.1.12 ts-node: specifier: ^10.9.2 - version: 10.9.2(@types/node@25.5.0)(typescript@5.9.3) + version: 10.9.2(@types/node@25.3.3)(typescript@5.9.3) publishDirectory: dist packages/smtppostmaster: @@ -1812,7 +1812,7 @@ importers: version: 3.18.1 ts-node: specifier: ^10.9.2 - version: 10.9.2(@types/node@25.5.0)(typescript@5.9.3) + version: 10.9.2(@types/node@25.3.3)(typescript@5.9.3) publishDirectory: dist packages/url-domains: @@ -1872,7 +1872,7 @@ importers: version: link:../../postgres/pg-env/dist pgsql-deparser: specifier: ^17.18.1 - version: 17.18.1 + version: 17.18.2 semver: specifier: ^7.7.4 version: 7.7.4 @@ -1961,10 +1961,10 @@ importers: version: link:../../postgres/pg-env/dist pgsql-deparser: specifier: ^17.18.1 - version: 
17.18.1 + version: 17.18.2 pgsql-parser: specifier: ^17.9.13 - version: 17.9.13 + version: 17.9.14 yanse: specifier: ^0.2.1 version: 0.2.1 @@ -2026,7 +2026,7 @@ importers: dependencies: pg: specifier: ^8.16.0 - version: 8.20.0 + version: 8.19.0 pgsql-test: specifier: workspace:^ version: link:../pgsql-test/dist @@ -2036,7 +2036,7 @@ importers: version: 8.18.0 drizzle-orm: specifier: ^0.45.1 - version: 0.45.1(@types/pg@8.18.0)(pg@8.20.0) + version: 0.45.1(@types/pg@8.18.0)(pg@8.19.0) makage: specifier: ^0.1.12 version: 0.1.12 @@ -2088,7 +2088,7 @@ importers: version: 1.30.5 pgsql-deparser: specifier: ^17.18.1 - version: 17.18.1 + version: 17.18.2 publishDirectory: dist postgres/pg-cache: @@ -2472,7 +2472,7 @@ importers: devDependencies: '@types/node': specifier: ^22.19.11 - version: 22.19.15 + version: 22.19.11 makage: specifier: ^0.1.12 version: 0.1.12 @@ -2533,18 +2533,13 @@ importers: dependencies: '@aws-sdk/client-s3': specifier: ^3.1009.0 - version: 3.1009.0 + version: 3.1010.0 '@aws-sdk/lib-storage': -<<<<<<< HEAD - specifier: ^3.1001.0 - version: 3.1001.0(@aws-sdk/client-s3@3.1001.0) - '@aws-sdk/s3-request-presigner': - specifier: ^3.1001.0 - version: 3.1007.0 -======= specifier: ^3.1009.0 - version: 3.1009.0(@aws-sdk/client-s3@3.1009.0) ->>>>>>> main + version: 3.1010.0(@aws-sdk/client-s3@3.1010.0) + '@aws-sdk/s3-request-presigner': + specifier: ^3.1010.0 + version: 3.1010.0 '@constructive-io/content-type-stream': specifier: workspace:^ version: link:../content-type-stream/dist @@ -2570,10 +2565,10 @@ importers: dependencies: '@aws-sdk/client-s3': specifier: ^3.1009.0 - version: 3.1009.0 + version: 3.1010.0 '@aws-sdk/lib-storage': specifier: ^3.1009.0 - version: 3.1009.0(@aws-sdk/client-s3@3.1009.0) + version: 3.1010.0(@aws-sdk/client-s3@3.1010.0) devDependencies: '@types/node': specifier: ^22.19.11 @@ -2658,25 +2653,16 @@ packages: '@aws-crypto/util@5.2.0': resolution: {integrity: 
sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==} - '@aws-sdk/client-s3@3.1009.0': - resolution: {integrity: sha512-luy8CxallkoiGWTqU86ca/BbvkWJjs0oala7uIIRN1JtQxMb5i4Yl/PBZVcQFhbK9kQi0PK0GfD8gIpLkI91fw==} + '@aws-sdk/client-s3@3.1010.0': + resolution: {integrity: sha512-XUqXFrn/FGLLzO5OXu9iAtt492kj9Z7Yk8b0iPFxeJoIhaa61YOgR84chOExvnjm2+JTYyGNZiVPmgnFB3jxXA==} engines: {node: '>=20.0.0'} '@aws-sdk/core@3.973.20': resolution: {integrity: sha512-i3GuX+lowD892F3IuJf8o6AbyDupMTdyTxQrCJGcn71ni5hTZ82L4nQhcdumxZ7XPJRJJVHS/CR3uYOIIs0PVA==} engines: {node: '>=20.0.0'} -<<<<<<< HEAD - '@aws-sdk/core@3.973.19': - resolution: {integrity: sha512-56KePyOcZnKTWCd89oJS1G6j3HZ9Kc+bh/8+EbvtaCCXdP6T7O7NzCiPuHRhFLWnzXIaXX3CxAz0nI5My9spHQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/crc64-nvme@3.972.3': - resolution: {integrity: sha512-UExeK+EFiq5LAcbHm96CQLSia+5pvpUVSAsVApscBzayb7/6dJBJKwV4/onsk4VbWSmqxDMcfuTD+pC4RxgZHg==} -======= '@aws-sdk/crc64-nvme@3.972.5': resolution: {integrity: sha512-2VbTstbjKdT+yKi8m7b3a9CiVac+pL/IY2PHJwsaGkkHmuuqkJZIErPck1h6P3T9ghQMLSdMPyW6Qp7Di5swFg==} ->>>>>>> main engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-env@3.972.18': @@ -2711,11 +2697,11 @@ packages: resolution: {integrity: sha512-rWCmh8o7QY4CsUj63qopzMzkDq/yPpkrpb+CnjBEFSOg/02T/we7sSTVg4QsDiVS9uwZ8VyONhq98qt+pIh3KA==} engines: {node: '>=20.0.0'} - '@aws-sdk/lib-storage@3.1009.0': - resolution: {integrity: sha512-gHQh1sNeTuxZxPSMSQWOq/Xli8I5499uWyRKMakMSv8N7IYfoyDdyT52Ul6697qcqVaoPHixmYTllfEWMo1AKg==} + '@aws-sdk/lib-storage@3.1010.0': + resolution: {integrity: sha512-jafLXyFGKrlMz6BaiTpfQQYn2Lro5mKMOzBaprwIs1zY4j+W299cB+vf2wFrUAqw+MAPj2+hHRZTze7nMDdwoQ==} engines: {node: '>=20.0.0'} peerDependencies: - '@aws-sdk/client-s3': ^3.1009.0 + '@aws-sdk/client-s3': ^3.1010.0 '@aws-sdk/middleware-bucket-endpoint@3.972.8': resolution: {integrity: sha512-WR525Rr2QJSETa9a050isktyWi/4yIGcmY3BQ1kpHqb0LqUglQHCS8R27dTJxxWNZvQ0RVGtEZjTCbZJpyF3Aw==} @@ 
-2725,8 +2711,8 @@ packages: resolution: {integrity: sha512-5DTBTiotEES1e2jOHAq//zyzCjeMB78lEHd35u15qnrid4Nxm7diqIf9fQQ3Ov0ChH1V3Vvt13thOnrACmfGVQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-flexible-checksums@3.973.6': - resolution: {integrity: sha512-0nYEgkJH7Yt9k+nZJyllTghnkKaz17TWFcr5Mi0XMVMzYlF4ytDZADQpF2/iJo36cKL5AYSzRsvlykE4M/ErTA==} + '@aws-sdk/middleware-flexible-checksums@3.974.0': + resolution: {integrity: sha512-BmdDjqvnuYaC4SY7ypHLXfCSsGYGUZkjCLSZyUAAYn1YT28vbNMJNDwhlfkvvE+hQHG5RJDlEmYuvBxcB9jX1g==} engines: {node: '>=20.0.0'} '@aws-sdk/middleware-host-header@3.972.8': @@ -2749,17 +2735,8 @@ packages: resolution: {integrity: sha512-yhva/xL5H4tWQgsBjwV+RRD0ByCzg0TcByDCLp3GXdn/wlyRNfy8zsswDtCvr1WSKQkSQYlyEzPuWkJG0f5HvQ==} engines: {node: '>=20.0.0'} -<<<<<<< HEAD - '@aws-sdk/middleware-sdk-s3@3.972.19': - resolution: {integrity: sha512-/CtOHHVFg4ZuN6CnLnYkrqWgVEnbOBC4kNiKa+4fldJ9cioDt3dD/f5vpq0cWLOXwmGL2zgVrVxNhjxWpxNMkg==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-ssec@3.972.6': - resolution: {integrity: sha512-acvMUX9jF4I2Ew+Z/EA6gfaFaz9ehci5wxBmXCZeulLuv8m+iGf6pY9uKz8TPjg39bdAz3hxoE0eLP8Qz+IYlA==} -======= '@aws-sdk/middleware-ssec@3.972.8': resolution: {integrity: sha512-wqlK0yO/TxEC2UsY9wIlqeeutF6jjLe0f96Pbm40XscTo57nImUk9lBcw0dPgsm0sppFtAkSlDrfpK+pC30Wqw==} ->>>>>>> main engines: {node: '>=20.0.0'} '@aws-sdk/middleware-user-agent@3.972.21': @@ -2774,70 +2751,36 @@ packages: resolution: {integrity: sha512-1eD4uhTDeambO/PNIDVG19A6+v4NdD7xzwLHDutHsUqz0B+i661MwQB2eYO4/crcCvCiQG4SRm1k81k54FEIvw==} engines: {node: '>=20.0.0'} -<<<<<<< HEAD - '@aws-sdk/s3-request-presigner@3.1007.0': - resolution: {integrity: sha512-TZmNzomZxwmIlyi+h8i0j561j4ryDNazUnoEszJTYOuk57RA7NUKQzNvRYUoKOChbFfvDzTy6PR5SRXfu0vaVw==} + '@aws-sdk/s3-request-presigner@3.1010.0': + resolution: {integrity: sha512-EP+LRZ5+FM9IFZz9vmRSCHAqtaD39Y3TcMbBApn2NgHIogvYxuLx+KUV9rcjGOyJFSDlbSUa0R9s1bM/YmeSCg==} engines: {node: '>=20.0.0'} - 
'@aws-sdk/signature-v4-multi-region@3.996.4': - resolution: {integrity: sha512-MGa8ro0onekYIiesHX60LwKdkxK3Kd61p7TTbLwZemBqlnD9OLrk9sXZdFOIxXanJ+3AaJnV/jiX866eD/4PDg==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/signature-v4-multi-region@3.996.7': - resolution: {integrity: sha512-mYhh7FY+7OOqjkYkd6+6GgJOsXK1xBWmuR+c5mxJPj2kr5TBNeZq+nUvE9kANWAux5UxDVrNOSiEM/wlHzC3Lg==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/token-providers@3.1001.0': - resolution: {integrity: sha512-09XAq/uIYgeZhohuGRrR/R+ek3+ljFNdzWCXdqb9rlIERDjSfNiLjTtpHgSK1xTPmC5G4yWoEAyMfTXiggS6wA==} -======= '@aws-sdk/signature-v4-multi-region@3.996.8': resolution: {integrity: sha512-n1qYFD+tbqZuyskVaxUE+t10AUz9g3qzDw3Tp6QZDKmqsjfDmZBd4GIk2EKJJNtcCBtE5YiUjDYA+3djFAFBBg==} engines: {node: '>=20.0.0'} '@aws-sdk/token-providers@3.1009.0': resolution: {integrity: sha512-KCPLuTqN9u0Rr38Arln78fRG9KXpzsPWmof+PZzfAHMMQq2QED6YjQrkrfiH7PDefLWEposY1o4/eGwrmKA4JA==} ->>>>>>> main engines: {node: '>=20.0.0'} '@aws-sdk/types@3.973.6': resolution: {integrity: sha512-Atfcy4E++beKtwJHiDln2Nby8W/mam64opFPTiHEqgsthqeydFS1pY+OUlN1ouNOmf8ArPU/6cDS65anOP3KQw==} engines: {node: '>=20.0.0'} -<<<<<<< HEAD - '@aws-sdk/types@3.973.5': - resolution: {integrity: sha512-hl7BGwDCWsjH8NkZfx+HgS7H2LyM2lTMAI7ba9c8O0KqdBLTdNJivsHpqjg9rNlAlPyREb6DeDRXUl0s8uFdmQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/util-arn-parser@3.972.2': - resolution: {integrity: sha512-VkykWbqMjlSgBFDyrY3nOSqupMc6ivXuGmvci6Q3NnLq5kC+mKQe2QBZ4nrWRE/jqOxeFP2uYzLtwncYYcvQDg==} - engines: {node: '>=20.0.0'} - '@aws-sdk/util-arn-parser@3.972.3': resolution: {integrity: sha512-HzSD8PMFrvgi2Kserxuff5VitNq2sgf3w9qxmskKDiDTThWfVteJxuCS9JXiPIPtmCrp+7N9asfIaVhBFORllA==} engines: {node: '>=20.0.0'} - '@aws-sdk/util-endpoints@3.996.3': - resolution: {integrity: sha512-yWIQSNiCjykLL+ezN5A+DfBb1gfXTytBxm57e64lYmwxDHNmInYHRJYYRAGWG1o77vKEiWaw4ui28e3yb1k5aQ==} + '@aws-sdk/util-endpoints@3.996.5': + resolution: {integrity: 
sha512-Uh93L5sXFNbyR5sEPMzUU8tJ++Ku97EY4udmC01nB8Zu+xfBPwpIwJ6F7snqQeq8h2pf+8SGN5/NoytfKgYPIw==} engines: {node: '>=20.0.0'} - '@aws-sdk/util-format-url@3.972.7': - resolution: {integrity: sha512-V+PbnWfUl93GuFwsOHsAq7hY/fnm9kElRqR8IexIJr5Rvif9e614X5sGSyz3mVSf1YAZ+VTy63W1/pGdA55zyA==} + '@aws-sdk/util-format-url@3.972.8': + resolution: {integrity: sha512-J6DS9oocrgxM8xlUTTmQOuwRF6rnAGEujAN9SAzllcrQmwn5iJ58ogxy3SEhD0Q7JZvlA5jvIXBkpQRqEqlE9A==} engines: {node: '>=20.0.0'} '@aws-sdk/util-locate-window@3.965.4': resolution: {integrity: sha512-H1onv5SkgPBK2P6JR2MjGgbOnttoNzSPIRoeZTNPZYyaplwGg50zS3amXvXqF0/qfXpWEC9rLWU564QTB9bSog==} -======= - '@aws-sdk/util-arn-parser@3.972.3': - resolution: {integrity: sha512-HzSD8PMFrvgi2Kserxuff5VitNq2sgf3w9qxmskKDiDTThWfVteJxuCS9JXiPIPtmCrp+7N9asfIaVhBFORllA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/util-endpoints@3.996.5': - resolution: {integrity: sha512-Uh93L5sXFNbyR5sEPMzUU8tJ++Ku97EY4udmC01nB8Zu+xfBPwpIwJ6F7snqQeq8h2pf+8SGN5/NoytfKgYPIw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/util-locate-window@3.965.5': - resolution: {integrity: sha512-WhlJNNINQB+9qtLtZJcpQdgZw3SCDCpXdUJP7cToGwHbCWCnRckGlc6Bx/OhWwIYFNAn+FIydY8SZ0QmVu3xTQ==} ->>>>>>> main engines: {node: '>=20.0.0'} '@aws-sdk/util-user-agent-browser@3.972.8': @@ -2852,21 +2795,12 @@ packages: aws-crt: optional: true -<<<<<<< HEAD - '@aws-sdk/xml-builder@3.972.10': - resolution: {integrity: sha512-OnejAIVD+CxzyAUrVic7lG+3QRltyja9LoNqCE/1YVs8ichoTbJlVSaZ9iSMcnHLyzrSNtvaOGjSDRP+d/ouFA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/xml-builder@3.972.9': - resolution: {integrity: sha512-ItnlMgSqkPrUfJs7EsvU/01zw5UeIb2tNPhD09LBLHbg+g+HDiKibSLwpkuz/ZIlz4F2IMn+5XgE4AK/pfPuog==} -======= '@aws-sdk/xml-builder@3.972.11': resolution: {integrity: sha512-iitV/gZKQMvY9d7ovmyFnFuTHbBAtrmLnvaSb/3X8vOKyevwtpmEtyc8AdhVWZe0pI/1GsHxlEvQeOePFzy7KQ==} ->>>>>>> main engines: {node: '>=20.0.0'} - '@aws/lambda-invoke-store@0.2.4': - resolution: {integrity: 
sha512-iY8yvjE0y651BixKNPgmv1WrQc+GZ142sb0z4gYnChDDY2YqI4P/jsSopBWrKfAt7LOJAkOXt7rC/hms+WclQQ==} + '@aws/lambda-invoke-store@0.2.3': + resolution: {integrity: sha512-oLvsaPMTBejkkmHhjf09xTgk71mOqyr/409NKhRIL08If7AhVfUsJhVsx386uJaqNd42v9kWamQ9lFbkoC2dYw==} engines: {node: '>=18.0.0'} '@babel/code-frame@7.27.1': @@ -2877,10 +2811,6 @@ packages: resolution: {integrity: sha512-JYgintcMjRiCvS8mMECzaEn+m3PfoQiyqukOMCCVQtoJGYJw8j/8LBJEiqkHLkfwCcs74E3pbAUFNg7d9VNJ+Q==} engines: {node: '>=6.9.0'} - '@babel/code-frame@7.29.0': - resolution: {integrity: sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==} - engines: {node: '>=6.9.0'} - '@babel/compat-data@7.28.6': resolution: {integrity: sha512-2lfu57JtzctfIrcGMz992hyLlByuzgIk58+hhGCxjKZ3rWI82NnVLjXcaTqkI2NvlcvOskZaiZ5kjUALo3Lpxg==} engines: {node: '>=6.9.0'} @@ -2889,10 +2819,6 @@ packages: resolution: {integrity: sha512-H3mcG6ZDLTlYfaSNi0iOKkigqMFvkTKlGUYlD8GW7nNOYRrevuA46iTypPyv+06V3fEmvvazfntkBU34L0azAw==} engines: {node: '>=6.9.0'} - '@babel/core@7.29.0': - resolution: {integrity: sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==} - engines: {node: '>=6.9.0'} - '@babel/generator@7.29.1': resolution: {integrity: sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==} engines: {node: '>=6.9.0'} @@ -3086,10 +3012,6 @@ packages: resolution: {integrity: sha512-fgWX62k02qtjqdSNTAGxmKYY/7FSL9WAS1o2Hu5+I5m9T0yxZzr4cnrfXQ/MX0rIifthCSs6FKTlzYbJcPtMNg==} engines: {node: '>=6.9.0'} - '@babel/traverse@7.29.0': - resolution: {integrity: sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==} - engines: {node: '>=6.9.0'} - '@babel/types@7.28.5': resolution: {integrity: sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==} engines: {node: '>=6.9.0'} @@ -3128,21 +3050,18 @@ packages: '@emnapi/core@1.7.1': resolution: 
{integrity: sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==} - '@emnapi/core@1.9.0': - resolution: {integrity: sha512-0DQ98G9ZQZOxfUcQn1waV2yS8aWdZ6kJMbYCJB3oUBecjWYO1fqJ+a1DRfPF3O5JEkwqwP1A9QEN/9mYm2Yd0w==} + '@emnapi/core@1.8.1': + resolution: {integrity: sha512-AvT9QFpxK0Zd8J0jopedNm+w/2fIzvtPKPjqyw9jwvBaReTTqPBk9Hixaz7KbjimP+QNz605/XnjFcDAL2pqBg==} '@emnapi/runtime@1.7.1': resolution: {integrity: sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==} - '@emnapi/runtime@1.9.0': - resolution: {integrity: sha512-QN75eB0IH2ywSpRpNddCRfQIhmJYBCJ1x5Lb3IscKAL8bMnVAKnRg8dCoXbHzVLLH7P38N2Z3mtulB7W0J0FKw==} + '@emnapi/runtime@1.8.1': + resolution: {integrity: sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==} '@emnapi/wasi-threads@1.1.0': resolution: {integrity: sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==} - '@emnapi/wasi-threads@1.2.0': - resolution: {integrity: sha512-N10dEJNSsUx41Z6pZsXU8FjPjpBEplgH24sfkmITrBED1/U2Esum9F3lfLrMjKHHjmi557zQn7kR9R+XWXu5Rg==} - '@emotion/is-prop-valid@1.4.0': resolution: {integrity: sha512-QgD4fyscGcbbKwJmqNvUMSE02OsHUa+lAWKdEUIJKgqe5IwRSKd7+KhibEWdaKwgjLj0DRSHA9biAIqGBk05lw==} @@ -3511,14 +3430,14 @@ packages: resolution: {integrity: sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@floating-ui/core@1.7.5': - resolution: {integrity: sha512-1Ih4WTWyw0+lKyFMcBHGbb5U5FtuHJuujoyyr5zTaWS5EYMeT6Jb2AuDeftsCsEuchO+mM2ij5+q9crhydzLhQ==} + '@floating-ui/core@1.7.4': + resolution: {integrity: sha512-C3HlIdsBxszvm5McXlB8PeOEWfBhcGBTZGkGlWc2U0KFY5IwG5OQEuQ8rq52DZmcHDlPLd+YFBK+cZcytwIFWg==} - '@floating-ui/dom@1.7.6': - resolution: {integrity: sha512-9gZSAI5XM36880PPMm//9dfiEngYoC6Am2izES1FF406YFsjvyBMmeJ2g4SAju3xWwtuynNRFL2s9hgxpLI5SQ==} + '@floating-ui/dom@1.7.5': + 
resolution: {integrity: sha512-N0bD2kIPInNHUHehXhMke1rBGs1dwqvC9O9KYMyyjK7iXt7GAhnro7UlcuYcGdS/yYOlq0MAVgrow8IbWJwyqg==} - '@floating-ui/react-dom@2.1.8': - resolution: {integrity: sha512-cC52bHwM/n/CxS87FH0yWdngEZrjdtLW/qVruo68qg+prK7ZQ4YGdut2GyDVpoGeAYe/h899rVeOVm6Oi40k2A==} + '@floating-ui/react-dom@2.1.7': + resolution: {integrity: sha512-0tLRojf/1Go2JgEVm+3Frg9A3IW8bJgKgdO0BN5RkF//ufuz2joZM63Npau2ff3J6lUVYgDSNzNkR+aH3IVfjg==} peerDependencies: react: '>=16.8.0' react-dom: '>=16.8.0' @@ -3529,8 +3448,8 @@ packages: react: '>=16.8.0' react-dom: '>=16.8.0' - '@floating-ui/utils@0.2.11': - resolution: {integrity: sha512-RiB/yIh78pcIxl6lLMG0CgBXAZ2Y0eVHqMPYugu+9U0AeT6YBeiJpf7lbdJNIugFP5SIjwNRgo4DhR1Qxi26Gg==} + '@floating-ui/utils@0.2.10': + resolution: {integrity: sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==} '@graphile-contrib/pg-many-to-many@2.0.0-rc.2': resolution: {integrity: sha512-aPu/oPWIsljTmlj58UNy95+JzXwHrClQA51bvfZUgj3l7kaUiwCCBYCFql2nSrMwdlFgexphs3faJbHiqsEDrw==} @@ -3788,7 +3707,6 @@ packages: '@lerna/create@8.2.4': resolution: {integrity: sha512-A8AlzetnS2WIuhijdAzKUyFpR5YbLLfV3luQ4lzBgIBgRfuoBDZeF+RSZPhra+7A6/zTUlrbhKZIOi/MNhqgvQ==} engines: {node: '>=18.0.0'} - deprecated: This package is an implementation detail of Lerna and is no longer published separately. 
'@n1ru4l/push-pull-async-iterable-iterator@3.2.0': resolution: {integrity: sha512-3fkKj25kEjsfObL6IlKPAlHYPq/oYwUkkQ03zsTTiDjD7vg/RxjdiLeCydqtxHZP0JgsXL3D/X5oAkMGzuUp/Q==} @@ -4153,8 +4071,8 @@ packages: '@pgsql/types@17.6.2': resolution: {integrity: sha512-1UtbELdbqNdyOShhrVfSz3a1gDi0s9XXiQemx+6QqtsrXe62a6zOGU+vjb2GRfG5jeEokI1zBBcfD42enRv0Rw==} - '@pgsql/utils@17.8.14': - resolution: {integrity: sha512-DpKLTbmwuej0z7VUDB9xWIIU8BnZuUmFBRWXuloromBpPdqko+me4SefjYW5F/cbi9aDrjdYHX01qWGla/BQbg==} + '@pgsql/utils@17.8.15': + resolution: {integrity: sha512-Q9szjg1ztXUhyoi49wt1kJvO/H+ohtaKXpkGxVlAlpmxh4/t7AXt1ldQX/LeKrlVqnisHrEKP9XgvR02pq+1oQ==} '@pkgjs/parseargs@0.11.0': resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} @@ -4539,14 +4457,14 @@ packages: '@radix-ui/rect@1.1.1': resolution: {integrity: sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==} - '@react-aria/focus@3.21.5': - resolution: {integrity: sha512-V18fwCyf8zqgJdpLQeDU5ZRNd9TeOfBbhLgmX77Zr5ae9XwaoJ1R3SFJG1wCJX60t34AW+aLZSEEK+saQElf3Q==} + '@react-aria/focus@3.21.4': + resolution: {integrity: sha512-6gz+j9ip0/vFRTKJMl3R30MHopn4i19HqqLfSQfElxJD+r9hBnYG1Q6Wd/kl/WRR1+CALn2F+rn06jUnf5sT8Q==} peerDependencies: react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/interactions@3.27.1': - resolution: {integrity: sha512-M3wLpTTmDflI0QGNK0PJNUaBXXfeBXue8ZxLMngfc1piHNiH4G5lUvWd9W14XVbqrSCVY8i8DfGrNYpyyZu0tw==} + '@react-aria/interactions@3.27.0': + resolution: {integrity: sha512-D27pOy+0jIfHK60BB26AgqjjRFOYdvVSkwC31b2LicIzRCSPOSP06V4gMHuGmkhNTF4+YWDi1HHYjxIvMeiSlA==} peerDependencies: react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 @@ -4557,8 +4475,8 @@ packages: peerDependencies: react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - 
'@react-aria/utils@3.33.1': - resolution: {integrity: sha512-kIx1Sj6bbAT0pdqCegHuPanR9zrLn5zMRiM7LN12rgRf55S19ptd9g3ncahArifYTRkfEU9VIn+q0HjfMqS9/w==} + '@react-aria/utils@3.33.0': + resolution: {integrity: sha512-yvz7CMH8d2VjwbSa5nGXqjU031tYhD8ddax95VzJsHSPyqHDEGfxul8RkhGV6oO7bVqZxVs6xY66NIgae+FHjw==} peerDependencies: react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 @@ -4571,8 +4489,8 @@ packages: peerDependencies: react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/shared@3.33.1': - resolution: {integrity: sha512-oJHtjvLG43VjwemQDadlR5g/8VepK56B/xKO2XORPHt9zlW6IZs3tZrYlvH29BMvoqC7RtE7E5UjgbnbFtDGag==} + '@react-types/shared@3.33.0': + resolution: {integrity: sha512-xuUpP6MyuPmJtzNOqF5pzFUIHH2YogyOQfUQHag54PRmWB7AbjuGWBUv0l1UDmz6+AbzAYGmDVAzcRDOu2PFpw==} peerDependencies: react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 @@ -4757,17 +4675,8 @@ packages: resolution: {integrity: sha512-xolrFw6b+2iYGl6EcOL7IJY71vvyZ0DJ3mcKtpykqPe2uscwtzDZJa1uVQXyP7w9Dd+kGwYnPbMsJrGISKiY/Q==} engines: {node: '>=18.0.0'} -<<<<<<< HEAD - '@smithy/abort-controller@4.2.11': - resolution: {integrity: sha512-Hj4WoYWMJnSpM6/kchsm4bUNTL9XiSyhvoMb2KIq4VJzyDt7JpGHUZHkVNPZVC7YE1tf8tPeVauxpFBKGW4/KQ==} - engines: {node: '>=18.0.0'} - - '@smithy/chunked-blob-reader-native@4.2.2': - resolution: {integrity: sha512-QzzYIlf4yg0w5TQaC9VId3B3ugSk1MI/wb7tgcHtd7CBV9gNRKZrhc2EPSxSZuDy10zUZ0lomNMgkc6/VVe8xg==} -======= '@smithy/chunked-blob-reader-native@4.2.3': resolution: {integrity: sha512-jA5k5Udn7Y5717L86h4EIv06wIr3xn8GM1qHRi/Nf31annXcXHJjBKvgztnbn2TxH3xWrPBfgwHsOwZf0UmQWw==} ->>>>>>> main engines: {node: '>=18.0.0'} '@smithy/chunked-blob-reader@5.2.2': @@ -4778,21 +4687,12 @@ packages: resolution: {integrity: sha512-YxFiiG4YDAtX7WMN7RuhHZLeTmRRAOyCbr+zB8e3AQzHPnUhS8zXjB1+cniPVQI3xbWsQPM0X2aaIkO/ME0ymw==} engines: {node: '>=18.0.0'} - '@smithy/core@3.23.11': - resolution: {integrity: 
sha512-952rGf7hBRnhUIaeLp6q4MptKW8sPFe5VvkoZ5qIzFAtx6c/QZ/54FS3yootsyUSf9gJX/NBqEBNdNR7jMIlpQ==} + '@smithy/core@3.23.12': + resolution: {integrity: sha512-o9VycsYNtgC+Dy3I0yrwCqv9CWicDnke0L7EVOrZtJpjb2t0EjaEofmMrYc0T1Kn3yk32zm6cspxF9u9Bj7e5w==} engines: {node: '>=18.0.0'} -<<<<<<< HEAD - '@smithy/core@3.23.9': - resolution: {integrity: sha512-1Vcut4LEL9HZsdpI0vFiRYIsaoPwZLjAxnVQDUMQK8beMS+EYPLDQCXtbzfxmM5GzSgjfe2Q9M7WaXwIMQllyQ==} - engines: {node: '>=18.0.0'} - - '@smithy/credential-provider-imds@4.2.10': - resolution: {integrity: sha512-3bsMLJJLTZGZqVGGeBVFfLzuRulVsGTj12BzRKODTHqUABpIr0jMN1vN3+u6r2OfyhAQ2pXaMZWX/swBK5I6PQ==} -======= '@smithy/credential-provider-imds@4.2.12': resolution: {integrity: sha512-cr2lR792vNZcYMriSIj+Um3x9KWrjcu98kn234xA6reOAFMmbRpQMOv8KPgEmLLtx3eldU6c5wALKFqNOhugmg==} ->>>>>>> main engines: {node: '>=18.0.0'} '@smithy/eventstream-codec@4.2.12': @@ -4819,17 +4719,8 @@ packages: resolution: {integrity: sha512-T4jFU5N/yiIfrtrsb9uOQn7RdELdM/7HbyLNr6uO/mpkj1ctiVs7CihVr51w4LyQlXWDpXFn4BElf1WmQvZu/A==} engines: {node: '>=18.0.0'} -<<<<<<< HEAD - '@smithy/fetch-http-handler@5.3.13': - resolution: {integrity: sha512-U2Hcfl2s3XaYjikN9cT4mPu8ybDbImV3baXR0PkVlC0TTx808bRP3FaPGAzPtB8OByI+JqJ1kyS+7GEgae7+qQ==} - engines: {node: '>=18.0.0'} - - '@smithy/hash-blob-browser@4.2.11': - resolution: {integrity: sha512-DrcAx3PM6AEbWZxsKl6CWAGnVwiz28Wp1ZhNu+Hi4uI/6C1PIZBIaPM2VoqBDAsOWbM6ZVzOEQMxFLLdmb4eBQ==} -======= '@smithy/hash-blob-browser@4.2.13': resolution: {integrity: sha512-YrF4zWKh+ghLuquldj6e/RzE3xZYL8wIPfkt0MqCRphVICjyyjH8OwKD7LLlKpVEbk4FLizFfC1+gwK6XQdR3g==} ->>>>>>> main engines: {node: '>=18.0.0'} '@smithy/hash-node@4.2.12': @@ -4852,108 +4743,26 @@ packages: resolution: {integrity: sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow==} engines: {node: '>=18.0.0'} -<<<<<<< HEAD - '@smithy/is-array-buffer@4.2.2': - resolution: {integrity: 
sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow==} - engines: {node: '>=18.0.0'} - - '@smithy/md5-js@4.2.10': - resolution: {integrity: sha512-Op+Dh6dPLWTjWITChFayDllIaCXRofOed8ecpggTC5fkh8yXes0vAEX7gRUfjGK+TlyxoCAA05gHbZW/zB9JwQ==} -======= '@smithy/md5-js@4.2.12': resolution: {integrity: sha512-W/oIpHCpWU2+iAkfZYyGWE+qkpuf3vEXHLxQQDx9FPNZTTdnul0dZ2d/gUFrtQ5je1G2kp4cjG0/24YueG2LbQ==} ->>>>>>> main engines: {node: '>=18.0.0'} '@smithy/middleware-content-length@4.2.12': resolution: {integrity: sha512-YE58Yz+cvFInWI/wOTrB+DbvUVz/pLn5mC5MvOV4fdRUc6qGwygyngcucRQjAhiCEbmfLOXX0gntSIcgMvAjmA==} engines: {node: '>=18.0.0'} - '@smithy/middleware-endpoint@4.4.25': - resolution: {integrity: sha512-dqjLwZs2eBxIUG6Qtw8/YZ4DvzHGIf0DA18wrgtfP6a50UIO7e2nY0FPdcbv5tVJKqWCCU5BmGMOUwT7Puan+A==} - engines: {node: '>=18.0.0'} - -<<<<<<< HEAD - '@smithy/middleware-endpoint@4.4.23': - resolution: {integrity: sha512-UEFIejZy54T1EJn2aWJ45voB7RP2T+IRzUqocIdM6GFFa5ClZncakYJfcYnoXt3UsQrZZ9ZRauGm77l9UCbBLw==} - engines: {node: '>=18.0.0'} - - '@smithy/middleware-retry@4.4.38': - resolution: {integrity: sha512-WdHvdhjE6Fj78vxFwDKFDwlqGOGRUWrwGeuENUbTVE46Su9mnQM+dXHtbnCaQvwuSYrRsjpe8zUsFpwUp/azlA==} -======= - '@smithy/middleware-retry@4.4.42': - resolution: {integrity: sha512-vbwyqHRIpIZutNXZpLAozakzamcINaRCpEy1MYmK6xBeW3xN+TyPRA123GjXnuxZIjc9848MRRCugVMTXxC4Eg==} ->>>>>>> main - engines: {node: '>=18.0.0'} - - '@smithy/middleware-serde@4.2.14': - resolution: {integrity: sha512-+CcaLoLa5apzSRtloOyG7lQvkUw2ZDml3hRh4QiG9WyEPfW5Ke/3tPOPiPjUneuT59Tpn8+c3RVaUvvkkwqZwg==} - engines: {node: '>=18.0.0'} - -<<<<<<< HEAD - '@smithy/middleware-serde@4.2.12': - resolution: {integrity: sha512-W9g1bOLui7Xn5FABRVS0o3rXL0gfN37d/8I/W7i0N7oxjx9QecUmXEMSUMADTODwdtka9cN43t5BI2CodLJpng==} - engines: {node: '>=18.0.0'} - - '@smithy/middleware-stack@4.2.10': - resolution: {integrity: 
sha512-pmts/WovNcE/tlyHa8z/groPeOtqtEpp61q3W0nW1nDJuMq/x+hWa/OVQBtgU0tBqupeXq0VBOLA4UZwE8I0YA==} - engines: {node: '>=18.0.0'} - - '@smithy/middleware-stack@4.2.11': - resolution: {integrity: sha512-s+eenEPW6RgliDk2IhjD2hWOxIx1NKrOHxEwNUaUXxYBxIyCcDfNULZ2Mu15E3kwcJWBedTET/kEASPV1A1Akg==} - engines: {node: '>=18.0.0'} - - '@smithy/node-config-provider@4.3.10': - resolution: {integrity: sha512-UALRbJtVX34AdP2VECKVlnNgidLHA2A7YgcJzwSBg1hzmnO/bZBHl/LDQQyYifzUwp1UOODnl9JJ3KNawpUJ9w==} + '@smithy/middleware-endpoint@4.4.26': + resolution: {integrity: sha512-8Qfikvd2GVKSm8S6IbjfwFlRY9VlMrj0Dp4vTwAuhqbX7NhJKE5DQc2bnfJIcY0B+2YKMDBWfvexbSZeejDgeg==} engines: {node: '>=18.0.0'} - '@smithy/node-config-provider@4.3.11': - resolution: {integrity: sha512-xD17eE7kaLgBBGf5CZQ58hh2YmwK1Z0O8YhffwB/De2jsL0U3JklmhVYJ9Uf37OtUDLF2gsW40Xwwag9U869Gg==} + '@smithy/middleware-retry@4.4.43': + resolution: {integrity: sha512-ZwsifBdyuNHrFGmbc7bAfP2b54+kt9J2rhFd18ilQGAB+GDiP4SrawqyExbB7v455QVR7Psyhb2kjULvBPIhvA==} engines: {node: '>=18.0.0'} - '@smithy/node-http-handler@4.4.13': - resolution: {integrity: sha512-o8CP8w6tlUA0lk+Qfwm6Ed0jCWk3bEY6iBOJjdBaowbXKCSClk8zIHQvUL6RUZMvuNafF27cbRCMYqw6O1v4aA==} + '@smithy/middleware-serde@4.2.15': + resolution: {integrity: sha512-ExYhcltZSli0pgAKOpQQe1DLFBLryeZ22605y/YS+mQpdNWekum9Ujb/jMKfJKgjtz1AZldtwA/wCYuKJgjjlg==} engines: {node: '>=18.0.0'} - '@smithy/node-http-handler@4.4.14': - resolution: {integrity: sha512-DamSqaU8nuk0xTJDrYnRzZndHwwRnyj/n/+RqGGCcBKB4qrQem0mSDiWdupaNWdwxzyMU91qxDmHOCazfhtO3A==} - engines: {node: '>=18.0.0'} - - '@smithy/property-provider@4.2.10': - resolution: {integrity: sha512-5jm60P0CU7tom0eNrZ7YrkgBaoLFXzmqB0wVS+4uK8PPGmosSrLNf6rRd50UBvukztawZ7zyA8TxlrKpF5z9jw==} - engines: {node: '>=18.0.0'} - - '@smithy/property-provider@4.2.11': - resolution: {integrity: sha512-14T1V64o6/ndyrnl1ze1ZhyLzIeYNN47oF/QU6P5m82AEtyOkMJTb0gO1dPubYjyyKuPD6OSVMPDKe+zioOnCg==} - engines: {node: '>=18.0.0'} - - '@smithy/protocol-http@5.3.10': - 
resolution: {integrity: sha512-2NzVWpYY0tRdfeCJLsgrR89KE3NTWT2wGulhNUxYlRmtRmPwLQwKzhrfVaiNlA9ZpJvbW7cjTVChYKgnkqXj1A==} - engines: {node: '>=18.0.0'} - - '@smithy/protocol-http@5.3.11': - resolution: {integrity: sha512-hI+barOVDJBkNt4y0L2mu3Ugc0w7+BpJ2CZuLwXtSltGAAwCb3IvnalGlbDV/UCS6a9ZuT3+exd1WxNdLb5IlQ==} - engines: {node: '>=18.0.0'} - - '@smithy/querystring-builder@4.2.10': - resolution: {integrity: sha512-HeN7kEvuzO2DmAzLukE9UryiUvejD3tMp9a1D1NJETerIfKobBUCLfviP6QEk500166eD2IATaXM59qgUI+YDA==} - engines: {node: '>=18.0.0'} - - '@smithy/querystring-builder@4.2.11': - resolution: {integrity: sha512-7spdikrYiljpket6u0up2Ck2mxhy7dZ0+TDd+S53Dg2DHd6wg+YNJrTCHiLdgZmEXZKI7LJZcwL3721ZRDFiqA==} - engines: {node: '>=18.0.0'} - - '@smithy/querystring-parser@4.2.10': - resolution: {integrity: sha512-4Mh18J26+ao1oX5wXJfWlTT+Q1OpDR8ssiC9PDOuEgVBGloqg18Fw7h5Ct8DyT9NBYwJgtJ2nLjKKFU6RP1G1Q==} - engines: {node: '>=18.0.0'} - - '@smithy/querystring-parser@4.2.11': - resolution: {integrity: sha512-nE3IRNjDltvGcoThD2abTozI1dkSy8aX+a2N1Rs55en5UsdyyIXgGEmevUL3okZFoJC77JgRGe99xYohhsjivQ==} - engines: {node: '>=18.0.0'} - - '@smithy/service-error-classification@4.2.10': - resolution: {integrity: sha512-0R/+/Il5y8nB/By90o8hy/bWVYptbIfvoTYad0igYQO5RefhNCDmNzqxaMx7K1t/QWo0d6UynqpqN5cCQt1MCg==} -======= '@smithy/middleware-stack@4.2.12': resolution: {integrity: sha512-kruC5gRHwsCOuyCd4ouQxYjgRAym2uDlCvQ5acuMtRrcdfg7mFBg6blaxcJ09STpt3ziEkis6bhg1uwrWU7txw==} engines: {node: '>=18.0.0'} @@ -4962,8 +4771,8 @@ packages: resolution: {integrity: sha512-tr2oKX2xMcO+rBOjobSwVAkV05SIfUKz8iI53rzxEmgW3GOOPOv0UioSDk+J8OpRQnpnhsO3Af6IEBabQBVmiw==} engines: {node: '>=18.0.0'} - '@smithy/node-http-handler@4.4.16': - resolution: {integrity: sha512-ULC8UCS/HivdCB3jhi+kLFYe4B5gxH2gi9vHBfEIiRrT2jfKiZNiETJSlzRtE6B26XbBHjPtc8iZKSNqMol9bw==} + '@smithy/node-http-handler@4.5.0': + resolution: {integrity: sha512-Rnq9vQWiR1+/I6NZZMNzJHV6pZYyEHt2ZnuV3MG8z2NNenC4i/8Kzttz7CjZiHSmsN5frhXhg17z3Zqjjhmz1A==} engines: 
{node: '>=18.0.0'} '@smithy/property-provider@4.2.12': @@ -4984,78 +4793,28 @@ packages: '@smithy/service-error-classification@4.2.12': resolution: {integrity: sha512-LlP29oSQN0Tw0b6D0Xo6BIikBswuIiGYbRACy5ujw/JgWSzTdYj46U83ssf6Ux0GyNJVivs2uReU8pt7Eu9okQ==} ->>>>>>> main engines: {node: '>=18.0.0'} '@smithy/shared-ini-file-loader@4.4.7': resolution: {integrity: sha512-HrOKWsUb+otTeo1HxVWeEb99t5ER1XrBi/xka2Wv6NVmTbuCUC1dvlrksdvxFtODLBjsC+PHK+fuy2x/7Ynyiw==} engines: {node: '>=18.0.0'} -<<<<<<< HEAD - '@smithy/shared-ini-file-loader@4.4.6': - resolution: {integrity: sha512-IB/M5I8G0EeXZTHsAxpx51tMQ5R719F3aq+fjEB6VtNcCHDc0ajFDIGDZw+FW9GxtEkgTduiPpjveJdA/CX7sw==} - engines: {node: '>=18.0.0'} - - '@smithy/signature-v4@5.3.10': - resolution: {integrity: sha512-Wab3wW8468WqTKIxI+aZe3JYO52/RYT/8sDOdzkUhjnLakLe9qoQqIcfih/qxcF4qWEFoWBszY0mj5uxffaVXA==} - engines: {node: '>=18.0.0'} - - '@smithy/signature-v4@5.3.11': - resolution: {integrity: sha512-V1L6N9aKOBAN4wEHLyqjLBnAz13mtILU0SeDrjOaIZEeN6IFa6DxwRt1NNpOdmSpQUfkBj0qeD3m6P77uzMhgQ==} - engines: {node: '>=18.0.0'} - - '@smithy/smithy-client@4.12.1': - resolution: {integrity: sha512-Xf9UFHlAihewfkmLNZ6I/Ek6kcYBKoU3cbRS9Z4q++9GWoW0YFbAHs7wMbuXm+nGuKHZ5OKheZMuDdaWPv8DJw==} - engines: {node: '>=18.0.0'} - - '@smithy/smithy-client@4.12.3': - resolution: {integrity: sha512-7k4UxjSpHmPN2AxVhvIazRSzFQjWnud3sOsXcFStzagww17j1cFQYqTSiQ8xuYK3vKLR1Ni8FzuT3VlKr3xCNw==} - engines: {node: '>=18.0.0'} - - '@smithy/types@4.13.0': - resolution: {integrity: sha512-COuLsZILbbQsdrwKQpkkpyep7lCsByxwj7m0Mg5v66/ZTyenlfBc40/QFQ5chO0YN/PNEH1Bi3fGtfXPnYNeDw==} -======= '@smithy/signature-v4@5.3.12': resolution: {integrity: sha512-B/FBwO3MVOL00DaRSXfXfa/TRXRheagt/q5A2NM13u7q+sHS59EOVGQNfG7DkmVtdQm5m3vOosoKAXSqn/OEgw==} engines: {node: '>=18.0.0'} - '@smithy/smithy-client@4.12.5': - resolution: {integrity: sha512-UqwYawyqSr/aog8mnLnfbPurS0gi4G7IYDcD28cUIBhsvWs1+rQcL2IwkUQ+QZ7dibaoRzhNF99fAQ9AUcO00w==} + '@smithy/smithy-client@4.12.6': + resolution: 
{integrity: sha512-aib3f0jiMsJ6+cvDnXipBsGDL7ztknYSVqJs1FdN9P+u9tr/VzOR7iygSh6EUOdaBeMCMSh3N0VdyYsG4o91DQ==} engines: {node: '>=18.0.0'} '@smithy/types@4.13.1': resolution: {integrity: sha512-787F3yzE2UiJIQ+wYW1CVg2odHjmaWLGksnKQHUrK/lYZSEcy1msuLVvxaR/sI2/aDe9U+TBuLsXnr3vod1g0g==} ->>>>>>> main engines: {node: '>=18.0.0'} '@smithy/url-parser@4.2.12': resolution: {integrity: sha512-wOPKPEpso+doCZGIlr+e1lVI6+9VAKfL4kZWFgzVgGWY2hZxshNKod4l2LXS3PRC9otH/JRSjtEHqQ/7eLciRA==} engines: {node: '>=18.0.0'} -<<<<<<< HEAD - '@smithy/url-parser@4.2.11': - resolution: {integrity: sha512-oTAGGHo8ZYc5VZsBREzuf5lf2pAurJQsccMusVZ85wDkX66ojEc/XauiGjzCj50A61ObFTPe6d7Pyt6UBYaing==} - engines: {node: '>=18.0.0'} - - '@smithy/util-base64@4.3.1': - resolution: {integrity: sha512-BKGuawX4Doq/bI/uEmg+Zyc36rJKWuin3py89PquXBIBqmbnJwBBsmKhdHfNEp0+A4TDgLmT/3MSKZ1SxHcR6w==} - engines: {node: '>=18.0.0'} - - '@smithy/util-base64@4.3.2': - resolution: {integrity: sha512-XRH6b0H/5A3SgblmMa5ErXQ2XKhfbQB+Fm/oyLZ2O2kCUrwgg55bU0RekmzAhuwOjA9qdN5VU2BprOvGGUkOOQ==} - engines: {node: '>=18.0.0'} - - '@smithy/util-body-length-browser@4.2.1': - resolution: {integrity: sha512-SiJeLiozrAoCrgDBUgsVbmqHmMgg/2bA15AzcbcW+zan7SuyAVHN4xTSbq0GlebAIwlcaX32xacnrG488/J/6g==} - engines: {node: '>=18.0.0'} - - '@smithy/util-body-length-browser@4.2.2': - resolution: {integrity: sha512-JKCrLNOup3OOgmzeaKQwi4ZCTWlYR5H4Gm1r2uTMVBXoemo1UEghk5vtMi1xSu2ymgKVGW631e2fp9/R610ZjQ==} - engines: {node: '>=18.0.0'} - - '@smithy/util-body-length-node@4.2.2': - resolution: {integrity: sha512-4rHqBvxtJEBvsZcFQSPQqXP2b/yy/YlB66KlcEgcH2WNoOKCKB03DSLzXmOsXjbl8dJ4OEYTn31knhdznwk7zw==} -======= '@smithy/util-base64@4.3.2': resolution: {integrity: sha512-XRH6b0H/5A3SgblmMa5ErXQ2XKhfbQB+Fm/oyLZ2O2kCUrwgg55bU0RekmzAhuwOjA9qdN5VU2BprOvGGUkOOQ==} engines: {node: '>=18.0.0'} @@ -5066,7 +4825,6 @@ packages: '@smithy/util-body-length-node@4.2.3': resolution: {integrity: 
sha512-ZkJGvqBzMHVHE7r/hcuCxlTY8pQr1kMtdsVPs7ex4mMU+EAbcXppfo5NmyxMYi2XU49eqaz56j2gsk4dHHPG/g==} ->>>>>>> main engines: {node: '>=18.0.0'} '@smithy/util-buffer-from@2.2.0': @@ -5077,33 +4835,16 @@ packages: resolution: {integrity: sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q==} engines: {node: '>=18.0.0'} -<<<<<<< HEAD - '@smithy/util-buffer-from@4.2.2': - resolution: {integrity: sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q==} - engines: {node: '>=18.0.0'} - - '@smithy/util-config-provider@4.2.1': - resolution: {integrity: sha512-462id/00U8JWFw6qBuTSWfN5TxOHvDu4WliI97qOIOnuC/g+NDAknTU8eoGXEPlLkRVgWEr03jJBLV4o2FL8+A==} - engines: {node: '>=18.0.0'} - - '@smithy/util-config-provider@4.2.2': - resolution: {integrity: sha512-dWU03V3XUprJwaUIFVv4iOnS1FC9HnMHDfUrlNDSh4315v0cWyaIErP8KiqGVbf5z+JupoVpNM7ZB3jFiTejvQ==} - engines: {node: '>=18.0.0'} - - '@smithy/util-defaults-mode-browser@4.3.37': - resolution: {integrity: sha512-JlPZhV1kQCGNJgofRTU6E8kHrjCKsb6cps8gco8QDVaFl7biFYzHg0p1x89ytIWyVyCkY3nOpO8tJPM47Vqlww==} -======= '@smithy/util-config-provider@4.2.2': resolution: {integrity: sha512-dWU03V3XUprJwaUIFVv4iOnS1FC9HnMHDfUrlNDSh4315v0cWyaIErP8KiqGVbf5z+JupoVpNM7ZB3jFiTejvQ==} engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-browser@4.3.41': - resolution: {integrity: sha512-M1w1Ux0rSVvBOxIIiqbxvZvhnjQ+VUjJrugtORE90BbadSTH+jsQL279KRL3Hv0w69rE7EuYkV/4Lepz/NBW9g==} ->>>>>>> main + '@smithy/util-defaults-mode-browser@4.3.42': + resolution: {integrity: sha512-0vjwmcvkWAUtikXnWIUOyV6IFHTEeQUYh3JUZcDgcszF+hD/StAsQ3rCZNZEPHgI9kVNcbnyc8P2CBHnwgmcwg==} engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-node@4.2.44': - resolution: {integrity: sha512-YPze3/lD1KmWuZsl9JlfhcgGLX7AXhSoaCDtiPntUjNW5/YY0lOHjkcgxyE9x/h5vvS1fzDifMGjzqnNlNiqOQ==} + '@smithy/util-defaults-mode-node@4.2.45': + resolution: {integrity: 
sha512-q5dOqqfTgUcLe38TAGiFn9srToKj2YCHJ34QGOLzM+xYLLA+qRZv7N+33kl1MERVusue36ZHnlNaNEvY/PzSrw==} engines: {node: '>=18.0.0'} '@smithy/util-endpoints@3.3.3': @@ -5114,46 +4855,16 @@ packages: resolution: {integrity: sha512-Qcz3W5vuHK4sLQdyT93k/rfrUwdJ8/HZ+nMUOyGdpeGA1Wxt65zYwi3oEl9kOM+RswvYq90fzkNDahPS8K0OIg==} engines: {node: '>=18.0.0'} -<<<<<<< HEAD - '@smithy/util-hex-encoding@4.2.2': - resolution: {integrity: sha512-Qcz3W5vuHK4sLQdyT93k/rfrUwdJ8/HZ+nMUOyGdpeGA1Wxt65zYwi3oEl9kOM+RswvYq90fzkNDahPS8K0OIg==} - engines: {node: '>=18.0.0'} - - '@smithy/util-middleware@4.2.10': - resolution: {integrity: sha512-LxaQIWLp4y0r72eA8mwPNQ9va4h5KeLM0I3M/HV9klmFaY2kN766wf5vsTzmaOpNNb7GgXAd9a25P3h8T49PSA==} - engines: {node: '>=18.0.0'} - - '@smithy/util-middleware@4.2.11': - resolution: {integrity: sha512-r3dtF9F+TpSZUxpOVVtPfk09Rlo4lT6ORBqEvX3IBT6SkQAdDSVKR5GcfmZbtl7WKhKnmb3wbDTQ6ibR2XHClw==} - engines: {node: '>=18.0.0'} - - '@smithy/util-retry@4.2.10': - resolution: {integrity: sha512-HrBzistfpyE5uqTwiyLsFHscgnwB0kgv8vySp7q5kZ0Eltn/tjosaSGGDj/jJ9ys7pWzIP/icE2d+7vMKXLv7A==} -======= '@smithy/util-middleware@4.2.12': resolution: {integrity: sha512-Er805uFUOvgc0l8nv0e0su0VFISoxhJ/AwOn3gL2NWNY2LUEldP5WtVcRYSQBcjg0y9NfG8JYrCJaYDpupBHJQ==} engines: {node: '>=18.0.0'} '@smithy/util-retry@4.2.12': resolution: {integrity: sha512-1zopLDUEOwumjcHdJ1mwBHddubYF8GMQvstVCLC54Y46rqoHwlIU+8ZzUeaBcD+WCJHyDGSeZ2ml9YSe9aqcoQ==} ->>>>>>> main - engines: {node: '>=18.0.0'} - - '@smithy/util-stream@4.5.19': - resolution: {integrity: sha512-v4sa+3xTweL1CLO2UP0p7tvIMH/Rq1X4KKOxd568mpe6LSLMQCnDHs4uv7m3ukpl3HvcN2JH6jiCS0SNRXKP/w==} engines: {node: '>=18.0.0'} -<<<<<<< HEAD - '@smithy/util-stream@4.5.17': - resolution: {integrity: sha512-793BYZ4h2JAQkNHcEnyFxDTcZbm9bVybD0UV/LEWmZ5bkTms7JqjfrLMi2Qy0E5WFcCzLwCAPgcvcvxoeALbAQ==} - engines: {node: '>=18.0.0'} - - '@smithy/util-uri-escape@4.2.1': - resolution: {integrity: 
sha512-YmiUDn2eo2IOiWYYvGQkgX5ZkBSiTQu4FlDo5jNPpAxng2t6Sjb6WutnZV9l6VR4eJul1ABmCrnWBC9hKHQa6Q==} -======= - '@smithy/util-uri-escape@4.2.2': - resolution: {integrity: sha512-2kAStBlvq+lTXHyAZYfJRb/DfS3rsinLiwb+69SstC9Vb0s9vNWkRwpnj918Pfi85mzi42sOqdV72OLxWAISnw==} ->>>>>>> main + '@smithy/util-stream@4.5.20': + resolution: {integrity: sha512-4yXLm5n/B5SRBR2p8cZ90Sbv4zL4NKsgxdzCzp/83cXw2KxLEumt5p+GAVyRNZgQOSrzXn9ARpO0lUe8XSlSDw==} engines: {node: '>=18.0.0'} '@smithy/util-uri-escape@4.2.2': @@ -5168,21 +4879,8 @@ packages: resolution: {integrity: sha512-75MeYpjdWRe8M5E3AW0O4Cx3UadweS+cwdXjwYGBW5h/gxxnbeZ877sLPX/ZJA9GVTlL/qG0dXP29JWFCD1Ayw==} engines: {node: '>=18.0.0'} -<<<<<<< HEAD - '@smithy/util-utf8@4.2.2': - resolution: {integrity: sha512-75MeYpjdWRe8M5E3AW0O4Cx3UadweS+cwdXjwYGBW5h/gxxnbeZ877sLPX/ZJA9GVTlL/qG0dXP29JWFCD1Ayw==} - engines: {node: '>=18.0.0'} - - '@smithy/util-waiter@4.2.10': - resolution: {integrity: sha512-4eTWph/Lkg1wZEDAyObwme0kmhEb7J/JjibY2znJdrYRgKbKqB7YoEhhJVJ4R1g/SYih4zuwX7LpJaM8RsnTVg==} -======= '@smithy/util-waiter@4.2.13': resolution: {integrity: sha512-2zdZ9DTHngRtcYxJK1GUDxruNr53kv5W2Lupe0LMU+Imr6ohQg8M2T14MNkj1Y0wS3FFwpgpGQyvuaMF7CiTmQ==} ->>>>>>> main - engines: {node: '>=18.0.0'} - - '@smithy/uuid@1.1.2': - resolution: {integrity: sha512-O/IEdcCUKkubz60tFbGA7ceITTAJsty+lBjNoorP4Z6XRqaFb/OjQjZODophEcuq68nKm6/0r+6/lLQ+XVpk8g==} engines: {node: '>=18.0.0'} '@smithy/uuid@1.1.2': @@ -5228,8 +4926,8 @@ packages: '@styled-system/variant@5.1.5': resolution: {integrity: sha512-Yn8hXAFoWIro8+Q5J8YJd/mP85Teiut3fsGVR9CAxwgNfIAiqlYxsk5iHU7VHJks/0KjL4ATSjmbtCDC/4l1qw==} - '@swc/helpers@0.5.19': - resolution: {integrity: sha512-QamiFeIK3txNjgUTNppE6MiG3p7TdninpZu0E0PbqVh1a9FNLT2FRhisaa4NcaX52XVhA5l7Pk58Ft7Sqi/2sA==} + '@swc/helpers@0.5.18': + resolution: {integrity: sha512-TXTnIcNJQEKwThMMqBXsZ4VGAza6bvN4pa41Rkqoio6QBKMvo+5lexeTMScGCIxtzgQJzElcvIltani+adC5PQ==} '@tanstack/query-core@5.90.20': resolution: {integrity: 
sha512-OMD2HLpNouXEfZJWcKeVKUgQ5n+n3A2JFmBaScpNDUqSrQSjiveC7dKMe53uJUg1nDG16ttFPz2xfilz6i2uVg==} @@ -5239,14 +4937,14 @@ packages: peerDependencies: react: ^18 || ^19 - '@tanstack/react-virtual@3.13.22': - resolution: {integrity: sha512-EaOrBBJLi3M0bTMQRjGkxLXRw7Gizwntoy5E2Q2UnSbML7Mo2a1P/Hfkw5tw9FLzK62bj34Jl6VNbQfRV6eJcA==} + '@tanstack/react-virtual@3.13.18': + resolution: {integrity: sha512-dZkhyfahpvlaV0rIKnvQiVoWPyURppl6w4m9IwMDpuIjcJ1sD9YGWrt0wISvgU7ewACXx2Ct46WPgI6qAD4v6A==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 - '@tanstack/virtual-core@3.13.22': - resolution: {integrity: sha512-isuUGKsc5TAPDoHSbWTbl1SCil54zOS2MiWz/9GCWHPUQOvNTQx8qJEWC7UWR0lShhbK0Lmkcf0SZYxvch7G3g==} + '@tanstack/virtual-core@3.13.18': + resolution: {integrity: sha512-Mx86Hqu1k39icq2Zusq+Ey2J6dDWTjDvEv43PJtRCoEYTLyfaPnxIQ6iy7YAOK0NV/qOEmZQ/uCufrppZxTgcg==} '@testing-library/dom@7.31.2': resolution: {integrity: sha512-3UqjCpey6HiTZT92vODYLPxTBWlM8ZOOjr3LX5F37/VRipW2M1kX6I/Cm4VXzteZqfGfagg8yXywpcOgQBlNsQ==} @@ -5397,11 +5095,8 @@ packages: '@types/node@22.19.11': resolution: {integrity: sha512-BH7YwL6rA93ReqeQS1c4bsPpcfOmJasG+Fkr6Y59q83f9M1WcBRHR2vM+P9eOisYRcN3ujQoiZY8uk5W+1WL8w==} - '@types/node@22.19.15': - resolution: {integrity: sha512-F0R/h2+dsy5wJAUe3tAU6oqa2qbWY5TpNfL/RGmo1y38hiyO1w3x2jPtt76wmuaJI4DQnOBu21cNXQ2STIUUWg==} - - '@types/node@25.5.0': - resolution: {integrity: sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw==} + '@types/node@25.3.3': + resolution: {integrity: sha512-DpzbrH7wIcBaJibpKo9nnSQL0MTRdnWttGyE5haGwK86xgMOkFLp7vEyfQPGLOJh5wNYiJ3V9PmUMDhV9u8kkQ==} '@types/nodemailer@7.0.11': resolution: {integrity: sha512-E+U4RzR2dKrx+u3N4DlsmLaDC6mMZOM/TPROxA0UAPiTgI0y4CEFBmZE+coGWTjakDriRsXG368lNk1u9Q0a2g==} @@ -5471,63 +5166,63 @@ packages: '@types/yargs@17.0.35': resolution: {integrity: 
sha512-qUHkeCyQFxMXg79wQfTtfndEC+N9ZZg76HJftDJp+qH2tV7Gj4OJi7l+PiWwJ+pWtW8GwSmqsDj/oymhrTWXjg==} - '@typescript-eslint/eslint-plugin@8.57.0': - resolution: {integrity: sha512-qeu4rTHR3/IaFORbD16gmjq9+rEs9fGKdX0kF6BKSfi+gCuG3RCKLlSBYzn/bGsY9Tj7KE/DAQStbp8AHJGHEQ==} + '@typescript-eslint/eslint-plugin@8.57.1': + resolution: {integrity: sha512-Gn3aqnvNl4NGc6x3/Bqk1AOn0thyTU9bqDRhiRnUWezgvr2OnhYCWCgC8zXXRVqBsIL1pSDt7T9nJUe0oM0kDQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: - '@typescript-eslint/parser': ^8.57.0 + '@typescript-eslint/parser': ^8.57.1 eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/parser@8.57.0': - resolution: {integrity: sha512-XZzOmihLIr8AD1b9hL9ccNMzEMWt/dE2u7NyTY9jJG6YNiNthaD5XtUHVF2uCXZ15ng+z2hT3MVuxnUYhq6k1g==} + '@typescript-eslint/parser@8.57.1': + resolution: {integrity: sha512-k4eNDan0EIMTT/dUKc/g+rsJ6wcHYhNPdY19VoX/EOtaAG8DLtKCykhrUnuHPYvinn5jhAPgD2Qw9hXBwrahsw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/project-service@8.57.0': - resolution: {integrity: sha512-pR+dK0BlxCLxtWfaKQWtYr7MhKmzqZxuii+ZjuFlZlIGRZm22HnXFqa2eY+90MUz8/i80YJmzFGDUsi8dMOV5w==} + '@typescript-eslint/project-service@8.57.1': + resolution: {integrity: sha512-vx1F37BRO1OftsYlmG9xay1TqnjNVlqALymwWVuYTdo18XuKxtBpCj1QlzNIEHlvlB27osvXFWptYiEWsVdYsg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/scope-manager@8.57.0': - resolution: {integrity: sha512-nvExQqAHF01lUM66MskSaZulpPL5pgy5hI5RfrxviLgzZVffB5yYzw27uK/ft8QnKXI2X0LBrHJFr1TaZtAibw==} + '@typescript-eslint/scope-manager@8.57.1': + resolution: {integrity: sha512-hs/QcpCwlwT2L5S+3fT6gp0PabyGk4Q0Rv2doJXA0435/OpnSR3VRgvrp8Xdoc3UAYSg9cyUjTeFXZEPg/3OKg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/tsconfig-utils@8.57.0': - resolution: {integrity: 
sha512-LtXRihc5ytjJIQEH+xqjB0+YgsV4/tW35XKX3GTZHpWtcC8SPkT/d4tqdf1cKtesryHm2bgp6l555NYcT2NLvA==} + '@typescript-eslint/tsconfig-utils@8.57.1': + resolution: {integrity: sha512-0lgOZB8cl19fHO4eI46YUx2EceQqhgkPSuCGLlGi79L2jwYY1cxeYc1Nae8Aw1xjgW3PKVDLlr3YJ6Bxx8HkWg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/type-utils@8.57.0': - resolution: {integrity: sha512-yjgh7gmDcJ1+TcEg8x3uWQmn8ifvSupnPfjP21twPKrDP/pTHlEQgmKcitzF/rzPSmv7QjJ90vRpN4U+zoUjwQ==} + '@typescript-eslint/type-utils@8.57.1': + resolution: {integrity: sha512-+Bwwm0ScukFdyoJsh2u6pp4S9ktegF98pYUU0hkphOOqdMB+1sNQhIz8y5E9+4pOioZijrkfNO/HUJVAFFfPKA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/types@8.57.0': - resolution: {integrity: sha512-dTLI8PEXhjUC7B9Kre+u0XznO696BhXcTlOn0/6kf1fHaQW8+VjJAVHJ3eTI14ZapTxdkOmc80HblPQLaEeJdg==} + '@typescript-eslint/types@8.57.1': + resolution: {integrity: sha512-S29BOBPJSFUiblEl6RzPPjJt6w25A6XsBqRVDt53tA/tlL8q7ceQNZHTjPeONt/3S7KRI4quk+yP9jK2WjBiPQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/typescript-estree@8.57.0': - resolution: {integrity: sha512-m7faHcyVg0BT3VdYTlX8GdJEM7COexXxS6KqGopxdtkQRvBanK377QDHr4W/vIPAR+ah9+B/RclSW5ldVniO1Q==} + '@typescript-eslint/typescript-estree@8.57.1': + resolution: {integrity: sha512-ybe2hS9G6pXpqGtPli9Gx9quNV0TWLOmh58ADlmZe9DguLq0tiAKVjirSbtM1szG6+QH6rVXyU6GTLQbWnMY+g==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/utils@8.57.0': - resolution: {integrity: sha512-5iIHvpD3CZe06riAsbNxxreP+MuYgVUsV0n4bwLH//VJmgtt54sQeY2GszntJ4BjYCpMzrfVh2SBnUQTtys2lQ==} + '@typescript-eslint/utils@8.57.1': + resolution: {integrity: sha512-XUNSJ/lEVFttPMMoDVA2r2bwrl8/oPx8cURtczkSEswY5T3AeLmCy+EKWQNdL4u0MmAHOjcWrqJp2cdvgjn8dQ==} engines: {node: ^18.18.0 || ^20.9.0 || 
>=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/visitor-keys@8.57.0': - resolution: {integrity: sha512-zm6xx8UT/Xy2oSr2ZXD0pZo7Jx2XsCoID2IUh9YSTFRu7z+WdwYTRk6LhUftm1crwqbuoF6I8zAFeCMw0YjwDg==} + '@typescript-eslint/visitor-keys@8.57.1': + resolution: {integrity: sha512-YWnmJkXbofiz9KbnbbwuA2rpGkFPLbAIetcCNO6mJ8gdhdZ/v7WDXsoGFAJuM6ikUFKTlSQnjWnVO4ux+UzS6A==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} '@ungap/structured-clone@1.3.0': @@ -6015,10 +5710,6 @@ packages: resolution: {integrity: sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA==} engines: {node: '>=8'} - ci-info@4.4.0: - resolution: {integrity: sha512-77PSwercCZU2Fc4sX94eF8k8Pxte6JAwL4/ICZLFjJLqegs7kCuAsqqj/70NQF6TvDpgFjkubQB2FW2ZZddvQg==} - engines: {node: '>=8'} - cjs-module-lexer@2.2.0: resolution: {integrity: sha512-4bHTS2YuzUvtoLjdy+98ykbNB5jS0+07EvFNXerqZQJ89F7DI6ET7OQo/HJuW6K0aVsKA9hj9/RVb2kQVOrPDQ==} @@ -6313,8 +6004,8 @@ packages: babel-plugin-macros: optional: true - dedent@1.7.2: - resolution: {integrity: sha512-WzMx3mW98SN+zn3hgemf4OzdmyNhhhKz5Ay0pUfQiMQ3e1g+xmTJWp/pKdwKVXhdSkAEGIIzqeuWrL3mV/AXbA==} + dedent@1.7.1: + resolution: {integrity: sha512-9JmrhGZpOlEgOLdQgSm0zxFaYoQon408V1v49aqTWuXENVlnCuY9JBZcXZiCsZQWDjTm5Qf/nIvAy77mXDAjEg==} peerDependencies: babel-plugin-macros: ^3.1.0 peerDependenciesMeta: @@ -6797,8 +6488,8 @@ packages: fast-uri@3.1.0: resolution: {integrity: sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==} - fast-xml-builder@1.1.3: - resolution: {integrity: sha512-1o60KoFw2+LWKQu3IdcfcFlGTW4dpqEWmjhYec6H82AYZU2TVBXep6tMl8Z1Y+wM+ZrzCwe3BZ9Vyd9N2rIvmg==} + fast-xml-builder@1.0.0: + resolution: {integrity: sha512-fpZuDogrAgnyt9oDDz+5DBz0zgPdPZz6D4IR7iESxRXElrlGTRkHJ9eEt+SACRJwT0FNFrt71DFQIUFBJfX/uQ==} fast-xml-parser@5.4.1: resolution: {integrity: 
sha512-BQ30U1mKkvXQXXkAGcuyUA/GA26oEB7NzOtsxCDtyu62sjGw5QraKFhx2Em3WQNjPw9PG6MQ9yuIIgkSDfGu5A==} @@ -6889,8 +6580,8 @@ packages: resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==} engines: {node: '>= 0.6'} - framer-motion@12.36.0: - resolution: {integrity: sha512-4PqYHAT7gev0ke0wos+PyrcFxI0HScjm3asgU8nSYa8YzJFuwgIvdj3/s3ZaxLq0bUSboIn19A2WS/MHwLCvfw==} + framer-motion@12.34.0: + resolution: {integrity: sha512-+/H49owhzkzQyxtn7nZeF4kdH++I2FWrESQ184Zbcw5cEqNHYkE5yxWxcTLSj5lNx3NWdbIRy5FHqUvetD8FWg==} peerDependencies: '@emotion/is-prop-valid': '*' react: ^18.0.0 || ^19.0.0 @@ -8259,11 +7950,11 @@ packages: monaco-editor: '>= 0.20.0 < 0.53' prettier: ^2.8.0 || ^3.0.0 - motion-dom@12.36.0: - resolution: {integrity: sha512-Ep1pq8P88rGJ75om8lTCA13zqd7ywPGwCqwuWwin6BKc0hMLkVfcS6qKlRqEo2+t0DwoUcgGJfXwaiFn4AOcQA==} + motion-dom@12.34.0: + resolution: {integrity: sha512-Lql3NuEcScRDxTAO6GgUsRHBZOWI/3fnMlkMcH5NftzcN37zJta+bpbMAV9px4Nj057TuvRooMK7QrzMCgtz6Q==} - motion-utils@12.36.0: - resolution: {integrity: sha512-eHWisygbiwVvf6PZ1vhaHCLamvkSbPIeAYxWUuL3a2PD/TROgE7FvfHWTIH4vMl798QLfMw15nRqIaRDXTlYRg==} + motion-utils@12.29.2: + resolution: {integrity: sha512-G3kc34H2cX2gI63RqU+cZq+zWRRPSsNIOjpdl9TN4AQwC4sgwYPl/Q/Obf/d53nOm569T0fYK+tcoSV50BWx8A==} ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} @@ -8618,10 +8309,6 @@ packages: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} - path-expression-matcher@1.1.3: - resolution: {integrity: sha512-qdVgY8KXmVdJZRSS1JdEPOKPdTiEK/pi0RkcT2sw1RhXxohdujUlJFPuS1TSkevZ9vzd3ZlL7ULl1MHGTApKzQ==} - engines: {node: '>=14.0.0'} - path-is-absolute@1.0.1: resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} engines: {node: '>=0.10.0'} @@ 
-8655,6 +8342,9 @@ packages: pg-cloudflare@1.3.0: resolution: {integrity: sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ==} + pg-connection-string@2.11.0: + resolution: {integrity: sha512-kecgoJwhOpxYU21rZjULrmrBJ698U2RxXofKVzOn5UDj61BPj/qMb7diYUR1nLScCDbrztQFl1TaQZT0t1EtzQ==} + pg-connection-string@2.12.0: resolution: {integrity: sha512-U7qg+bpswf3Cs5xLzRqbXbQl85ng0mfSV/J0nnA31MCLgvEaAo7CIhmeyrmJpOr7o+zm0rXK+hNnT5l9RHkCkQ==} @@ -8669,6 +8359,11 @@ packages: resolution: {integrity: sha512-PCqAR+q/IKmC5f/UpPS1/2lBDOs07ALPsor4PzdbACrOI/MdMarN5so3IOI7rh4SQMTDT9xXv67Ye4k2uWTQ+A==} engines: {node: '>=22'} + pg-pool@3.12.0: + resolution: {integrity: sha512-eIJ0DES8BLaziFHW7VgJEBPi5hg3Nyng5iKpYtj3wbcAUV9A1wLgWiY7ajf/f/oO1wfxt83phXPY8Emztg7ITg==} + peerDependencies: + pg: '>=8.0' + pg-pool@3.13.0: resolution: {integrity: sha512-gB+R+Xud1gLFuRD/QgOIgGOBE2KCQPaPwkzBBGC9oG69pHTkhQeIuejVIk3/cnDyX39av2AxomQiyPT13WKHQA==} peerDependencies: @@ -8691,6 +8386,15 @@ packages: resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==} engines: {node: '>=4'} + pg@8.19.0: + resolution: {integrity: sha512-QIcLGi508BAHkQ3pJNptsFz5WQMlpGbuBGBaIaXsWK8mel2kQ/rThYI+DbgjUvZrIr7MiuEuc9LcChJoEZK1xQ==} + engines: {node: '>= 16.0.0'} + peerDependencies: + pg-native: '>=3.0.1' + peerDependenciesMeta: + pg-native: + optional: true + pg@8.20.0: resolution: {integrity: sha512-ldhMxz2r8fl/6QkXnBD3CR9/xg694oT6DZQ2s6c/RI28OjtSOpxnPrUCGOBJ46RCUxcWdx3p6kw/xnDHjKvaRA==} engines: {node: '>= 16.0.0'} @@ -8703,11 +8407,11 @@ packages: pgpass@1.0.5: resolution: {integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==} - pgsql-deparser@17.18.1: - resolution: {integrity: sha512-rFC4B4c+Zp9pOkR0p7UkqnR981djT5Tl7l0J36/Y9lP6OuIoGfhWr81LY0d89spRLoTOY7hZXz7gqCDHsGbhhQ==} + pgsql-deparser@17.18.2: + resolution: {integrity: 
sha512-mnoT6ti7IFLwSUxe0UkxMjfUtyyWmwClf2sJyLbbRGbZ53SgiT493INFyxmeRvQxc99lmpz6aCxUnjj0ZhGlJA==} - pgsql-parser@17.9.13: - resolution: {integrity: sha512-km0RuHt082Xoh7/UcUnCuHmu3/eMWA6h74Bcuqepv71yYbKDA3Yri36rGnUrAMUo263pHGd31oftl5eInWd9yw==} + pgsql-parser@17.9.14: + resolution: {integrity: sha512-2qhO2DXkIbqtRdXN4dj8dD/RmSRtNfWxK08dYQ630WwJe1AF6ExuDV0zYGN8BR2NhCHqWmU3qKqJJPBXdib5DQ==} picocolors@1.1.1: resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} @@ -9392,8 +9096,8 @@ packages: resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} engines: {node: '>=10.0.0'} - strfy-js@3.2.1: - resolution: {integrity: sha512-HSw2lkUJVPZ75I+E3HM7UqHMKvBCwjRt1MIAxPPNtLFjuqCrnDVKQQGfotdj/3qHxuhB6NDQ1rYmNjVpPBiNEA==} + strfy-js@3.1.10: + resolution: {integrity: sha512-KQXNrvhnWpn4ya25WSG6EvJC6oqdeXlwMoitGl3qEJ2wnELV/sQO6uBy6CsIWTsVOMAt0B7/xvM40ucu5c8AuA==} string-length@4.0.2: resolution: {integrity: sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==} @@ -9700,8 +9404,8 @@ packages: undici-types@7.18.2: resolution: {integrity: sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==} - undici@7.24.3: - resolution: {integrity: sha512-eJdUmK/Wrx2d+mnWWmwwLRyA7OQCkLap60sk3dOK4ViZR7DKwwptwuIvFBg2HaiP9ESaEdhtpSymQPvytpmkCA==} + undici@7.24.4: + resolution: {integrity: sha512-BM/JzwwaRXxrLdElV2Uo6cTLEjhSb3WXboncJamZ15NgUURmvlXvxa6xkwIOILIjPNo9i8ku136ZvWV0Uly8+w==} engines: {node: '>=20.18.1'} unique-filename@3.0.0: @@ -10043,7 +9747,7 @@ snapshots: '@aws-crypto/supports-web-crypto': 5.2.0 '@aws-crypto/util': 5.2.0 '@aws-sdk/types': 3.973.6 - '@aws-sdk/util-locate-window': 3.965.5 + '@aws-sdk/util-locate-window': 3.965.4 '@smithy/util-utf8': 2.3.0 tslib: 2.8.1 @@ -10053,7 +9757,7 @@ snapshots: '@aws-crypto/supports-web-crypto': 5.2.0 '@aws-crypto/util': 5.2.0 
'@aws-sdk/types': 3.973.6 - '@aws-sdk/util-locate-window': 3.965.5 + '@aws-sdk/util-locate-window': 3.965.4 '@smithy/util-utf8': 2.3.0 tslib: 2.8.1 @@ -10073,7 +9777,7 @@ snapshots: '@smithy/util-utf8': 2.3.0 tslib: 2.8.1 - '@aws-sdk/client-s3@3.1009.0': + '@aws-sdk/client-s3@3.1010.0': dependencies: '@aws-crypto/sha1-browser': 5.2.0 '@aws-crypto/sha256-browser': 5.2.0 @@ -10082,7 +9786,7 @@ snapshots: '@aws-sdk/credential-provider-node': 3.972.21 '@aws-sdk/middleware-bucket-endpoint': 3.972.8 '@aws-sdk/middleware-expect-continue': 3.972.8 - '@aws-sdk/middleware-flexible-checksums': 3.973.6 + '@aws-sdk/middleware-flexible-checksums': 3.974.0 '@aws-sdk/middleware-host-header': 3.972.8 '@aws-sdk/middleware-location-constraint': 3.972.8 '@aws-sdk/middleware-logger': 3.972.8 @@ -10097,7 +9801,7 @@ snapshots: '@aws-sdk/util-user-agent-browser': 3.972.8 '@aws-sdk/util-user-agent-node': 3.973.7 '@smithy/config-resolver': 4.4.11 - '@smithy/core': 3.23.11 + '@smithy/core': 3.23.12 '@smithy/eventstream-serde-browser': 4.2.12 '@smithy/eventstream-serde-config-resolver': 4.3.12 '@smithy/eventstream-serde-node': 4.2.12 @@ -10108,25 +9812,25 @@ snapshots: '@smithy/invalid-dependency': 4.2.12 '@smithy/md5-js': 4.2.12 '@smithy/middleware-content-length': 4.2.12 - '@smithy/middleware-endpoint': 4.4.25 - '@smithy/middleware-retry': 4.4.42 - '@smithy/middleware-serde': 4.2.14 + '@smithy/middleware-endpoint': 4.4.26 + '@smithy/middleware-retry': 4.4.43 + '@smithy/middleware-serde': 4.2.15 '@smithy/middleware-stack': 4.2.12 '@smithy/node-config-provider': 4.3.12 - '@smithy/node-http-handler': 4.4.16 + '@smithy/node-http-handler': 4.5.0 '@smithy/protocol-http': 5.3.12 - '@smithy/smithy-client': 4.12.5 + '@smithy/smithy-client': 4.12.6 '@smithy/types': 4.13.1 '@smithy/url-parser': 4.2.12 '@smithy/util-base64': 4.3.2 '@smithy/util-body-length-browser': 4.2.2 '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.41 - '@smithy/util-defaults-mode-node': 4.2.44 + 
'@smithy/util-defaults-mode-browser': 4.3.42 + '@smithy/util-defaults-mode-node': 4.2.45 '@smithy/util-endpoints': 3.3.3 '@smithy/util-middleware': 4.2.12 '@smithy/util-retry': 4.2.12 - '@smithy/util-stream': 4.5.19 + '@smithy/util-stream': 4.5.20 '@smithy/util-utf8': 4.2.2 '@smithy/util-waiter': 4.2.13 tslib: 2.8.1 @@ -10137,39 +9841,19 @@ snapshots: dependencies: '@aws-sdk/types': 3.973.6 '@aws-sdk/xml-builder': 3.972.11 - '@smithy/core': 3.23.11 + '@smithy/core': 3.23.12 '@smithy/node-config-provider': 4.3.12 '@smithy/property-provider': 4.2.12 '@smithy/protocol-http': 5.3.12 '@smithy/signature-v4': 5.3.12 - '@smithy/smithy-client': 4.12.5 + '@smithy/smithy-client': 4.12.6 '@smithy/types': 4.13.1 '@smithy/util-base64': 4.3.2 '@smithy/util-middleware': 4.2.12 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 -<<<<<<< HEAD - '@aws-sdk/core@3.973.19': - dependencies: - '@aws-sdk/types': 3.973.5 - '@aws-sdk/xml-builder': 3.972.10 - '@smithy/core': 3.23.9 - '@smithy/node-config-provider': 4.3.11 - '@smithy/property-provider': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/signature-v4': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/util-base64': 4.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - - '@aws-sdk/crc64-nvme@3.972.3': -======= '@aws-sdk/crc64-nvme@3.972.5': ->>>>>>> main dependencies: '@smithy/types': 4.13.1 tslib: 2.8.1 @@ -10187,12 +9871,12 @@ snapshots: '@aws-sdk/core': 3.973.20 '@aws-sdk/types': 3.973.6 '@smithy/fetch-http-handler': 5.3.15 - '@smithy/node-http-handler': 4.4.16 + '@smithy/node-http-handler': 4.5.0 '@smithy/property-provider': 4.2.12 '@smithy/protocol-http': 5.3.12 - '@smithy/smithy-client': 4.12.5 + '@smithy/smithy-client': 4.12.6 '@smithy/types': 4.13.1 - '@smithy/util-stream': 4.5.19 + '@smithy/util-stream': 4.5.20 tslib: 2.8.1 '@aws-sdk/credential-provider-ini@3.972.20': @@ -10278,12 +9962,12 @@ snapshots: transitivePeerDependencies: - aws-crt - 
'@aws-sdk/lib-storage@3.1009.0(@aws-sdk/client-s3@3.1009.0)': + '@aws-sdk/lib-storage@3.1010.0(@aws-sdk/client-s3@3.1010.0)': dependencies: - '@aws-sdk/client-s3': 3.1009.0 + '@aws-sdk/client-s3': 3.1010.0 '@smithy/abort-controller': 4.2.12 - '@smithy/middleware-endpoint': 4.4.25 - '@smithy/smithy-client': 4.12.5 + '@smithy/middleware-endpoint': 4.4.26 + '@smithy/smithy-client': 4.12.6 buffer: 5.6.0 events: 3.3.0 stream-browserify: 3.0.0 @@ -10306,7 +9990,7 @@ snapshots: '@smithy/types': 4.13.1 tslib: 2.8.1 - '@aws-sdk/middleware-flexible-checksums@3.973.6': + '@aws-sdk/middleware-flexible-checksums@3.974.0': dependencies: '@aws-crypto/crc32': 5.2.0 '@aws-crypto/crc32c': 5.2.0 @@ -10319,7 +10003,7 @@ snapshots: '@smithy/protocol-http': 5.3.12 '@smithy/types': 4.13.1 '@smithy/util-middleware': 4.2.12 - '@smithy/util-stream': 4.5.19 + '@smithy/util-stream': 4.5.20 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 @@ -10345,7 +10029,7 @@ snapshots: '@aws-sdk/middleware-recursion-detection@3.972.8': dependencies: '@aws-sdk/types': 3.973.6 - '@aws/lambda-invoke-store': 0.2.4 + '@aws/lambda-invoke-store': 0.2.3 '@smithy/protocol-http': 5.3.12 '@smithy/types': 4.13.1 tslib: 2.8.1 @@ -10355,40 +10039,19 @@ snapshots: '@aws-sdk/core': 3.973.20 '@aws-sdk/types': 3.973.6 '@aws-sdk/util-arn-parser': 3.972.3 - '@smithy/core': 3.23.11 + '@smithy/core': 3.23.12 '@smithy/node-config-provider': 4.3.12 '@smithy/protocol-http': 5.3.12 '@smithy/signature-v4': 5.3.12 - '@smithy/smithy-client': 4.12.5 + '@smithy/smithy-client': 4.12.6 '@smithy/types': 4.13.1 '@smithy/util-config-provider': 4.2.2 '@smithy/util-middleware': 4.2.12 - '@smithy/util-stream': 4.5.19 + '@smithy/util-stream': 4.5.20 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 -<<<<<<< HEAD - '@aws-sdk/middleware-sdk-s3@3.972.19': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-arn-parser': 3.972.3 - '@smithy/core': 3.23.9 - '@smithy/node-config-provider': 4.3.11 - '@smithy/protocol-http': 5.3.11 - 
'@smithy/signature-v4': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/util-config-provider': 4.2.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-stream': 4.5.17 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - - '@aws-sdk/middleware-ssec@3.972.6': -======= '@aws-sdk/middleware-ssec@3.972.8': ->>>>>>> main dependencies: '@aws-sdk/types': 3.973.6 '@smithy/types': 4.13.1 @@ -10399,7 +10062,7 @@ snapshots: '@aws-sdk/core': 3.973.20 '@aws-sdk/types': 3.973.6 '@aws-sdk/util-endpoints': 3.996.5 - '@smithy/core': 3.23.11 + '@smithy/core': 3.23.12 '@smithy/protocol-http': 5.3.12 '@smithy/types': 4.13.1 '@smithy/util-retry': 4.2.12 @@ -10420,26 +10083,26 @@ snapshots: '@aws-sdk/util-user-agent-browser': 3.972.8 '@aws-sdk/util-user-agent-node': 3.973.7 '@smithy/config-resolver': 4.4.11 - '@smithy/core': 3.23.11 + '@smithy/core': 3.23.12 '@smithy/fetch-http-handler': 5.3.15 '@smithy/hash-node': 4.2.12 '@smithy/invalid-dependency': 4.2.12 '@smithy/middleware-content-length': 4.2.12 - '@smithy/middleware-endpoint': 4.4.25 - '@smithy/middleware-retry': 4.4.42 - '@smithy/middleware-serde': 4.2.14 + '@smithy/middleware-endpoint': 4.4.26 + '@smithy/middleware-retry': 4.4.43 + '@smithy/middleware-serde': 4.2.15 '@smithy/middleware-stack': 4.2.12 '@smithy/node-config-provider': 4.3.12 - '@smithy/node-http-handler': 4.4.16 + '@smithy/node-http-handler': 4.5.0 '@smithy/protocol-http': 5.3.12 - '@smithy/smithy-client': 4.12.5 + '@smithy/smithy-client': 4.12.6 '@smithy/types': 4.13.1 '@smithy/url-parser': 4.2.12 '@smithy/util-base64': 4.3.2 '@smithy/util-body-length-browser': 4.2.2 '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.41 - '@smithy/util-defaults-mode-node': 4.2.44 + '@smithy/util-defaults-mode-browser': 4.3.42 + '@smithy/util-defaults-mode-node': 4.2.45 '@smithy/util-endpoints': 3.3.3 '@smithy/util-middleware': 4.2.12 '@smithy/util-retry': 4.2.12 @@ -10456,22 +10119,18 @@ snapshots: '@smithy/types': 4.13.1 
tslib: 2.8.1 -<<<<<<< HEAD - '@aws-sdk/s3-request-presigner@3.1007.0': + '@aws-sdk/s3-request-presigner@3.1010.0': dependencies: - '@aws-sdk/signature-v4-multi-region': 3.996.7 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-format-url': 3.972.7 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 + '@aws-sdk/signature-v4-multi-region': 3.996.8 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-format-url': 3.972.8 + '@smithy/middleware-endpoint': 4.4.26 + '@smithy/protocol-http': 5.3.12 + '@smithy/smithy-client': 4.12.6 + '@smithy/types': 4.13.1 tslib: 2.8.1 - '@aws-sdk/signature-v4-multi-region@3.996.4': -======= '@aws-sdk/signature-v4-multi-region@3.996.8': ->>>>>>> main dependencies: '@aws-sdk/middleware-sdk-s3': 3.972.20 '@aws-sdk/types': 3.973.6 @@ -10480,20 +10139,7 @@ snapshots: '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@aws-sdk/signature-v4-multi-region@3.996.7': - dependencies: - '@aws-sdk/middleware-sdk-s3': 3.972.19 - '@aws-sdk/types': 3.973.5 - '@smithy/protocol-http': 5.3.11 - '@smithy/signature-v4': 5.3.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@aws-sdk/token-providers@3.1001.0': -======= '@aws-sdk/token-providers@3.1009.0': ->>>>>>> main dependencies: '@aws-sdk/core': 3.973.20 '@aws-sdk/nested-clients': 3.996.10 @@ -10510,28 +10156,11 @@ snapshots: '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@aws-sdk/types@3.973.5': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@aws-sdk/util-arn-parser@3.972.2': - dependencies: - tslib: 2.8.1 - - '@aws-sdk/util-arn-parser@3.972.3': - dependencies: - tslib: 2.8.1 - - '@aws-sdk/util-endpoints@3.996.3': -======= '@aws-sdk/util-arn-parser@3.972.3': dependencies: tslib: 2.8.1 '@aws-sdk/util-endpoints@3.996.5': ->>>>>>> main dependencies: '@aws-sdk/types': 3.973.6 '@smithy/types': 4.13.1 @@ -10539,18 +10168,14 @@ snapshots: '@smithy/util-endpoints': 3.3.3 tslib: 2.8.1 -<<<<<<< HEAD - 
'@aws-sdk/util-format-url@3.972.7': + '@aws-sdk/util-format-url@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 - '@smithy/querystring-builder': 4.2.11 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@smithy/querystring-builder': 4.2.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/util-locate-window@3.965.4': -======= - '@aws-sdk/util-locate-window@3.965.5': ->>>>>>> main dependencies: tslib: 2.8.1 @@ -10570,23 +10195,13 @@ snapshots: '@smithy/util-config-provider': 4.2.2 tslib: 2.8.1 -<<<<<<< HEAD - '@aws-sdk/xml-builder@3.972.10': - dependencies: - '@smithy/types': 4.13.0 - fast-xml-parser: 5.4.1 - tslib: 2.8.1 - - '@aws-sdk/xml-builder@3.972.9': -======= '@aws-sdk/xml-builder@3.972.11': ->>>>>>> main dependencies: '@smithy/types': 4.13.1 fast-xml-parser: 5.4.1 tslib: 2.8.1 - '@aws/lambda-invoke-store@0.2.4': {} + '@aws/lambda-invoke-store@0.2.3': {} '@babel/code-frame@7.27.1': dependencies: @@ -10600,12 +10215,6 @@ snapshots: js-tokens: 4.0.0 picocolors: 1.1.1 - '@babel/code-frame@7.29.0': - dependencies: - '@babel/helper-validator-identifier': 7.28.5 - js-tokens: 4.0.0 - picocolors: 1.1.1 - '@babel/compat-data@7.28.6': {} '@babel/core@7.28.6': @@ -10628,26 +10237,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@babel/core@7.29.0': - dependencies: - '@babel/code-frame': 7.29.0 - '@babel/generator': 7.29.1 - '@babel/helper-compilation-targets': 7.28.6 - '@babel/helper-module-transforms': 7.28.6(@babel/core@7.29.0) - '@babel/helpers': 7.28.6 - '@babel/parser': 7.29.0 - '@babel/template': 7.28.6 - '@babel/traverse': 7.29.0 - '@babel/types': 7.29.0 - '@jridgewell/remapping': 2.3.5 - convert-source-map: 2.0.0 - debug: 4.4.3(supports-color@5.5.0) - gensync: 1.0.0-beta.2 - json5: 2.2.3 - semver: 6.3.1 - transitivePeerDependencies: - - supports-color - '@babel/generator@7.29.1': dependencies: '@babel/parser': 7.29.0 @@ -10693,15 +10282,6 @@ snapshots: transitivePeerDependencies: - supports-color - 
'@babel/helper-module-transforms@7.28.6(@babel/core@7.29.0)': - dependencies: - '@babel/core': 7.29.0 - '@babel/helper-module-imports': 7.28.6 - '@babel/helper-validator-identifier': 7.28.5 - '@babel/traverse': 7.28.6(supports-color@5.5.0) - transitivePeerDependencies: - - supports-color - '@babel/helper-plugin-utils@7.27.1': {} '@babel/helper-plugin-utils@7.28.6': {} @@ -10725,94 +10305,94 @@ snapshots: dependencies: '@babel/types': 7.29.0 - '@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.29.0)': + '@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-bigint@7.8.3(@babel/core@7.29.0)': + '@babel/plugin-syntax-bigint@7.8.3(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.29.0)': + '@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.29.0)': + '@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-import-attributes@7.28.6(@babel/core@7.29.0)': + '@babel/plugin-syntax-import-attributes@7.28.6(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.29.0)': + '@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.29.0)': + '@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.28.6)': dependencies: - 
'@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-jsx@7.27.1(@babel/core@7.29.0)': + '@babel/plugin-syntax-jsx@7.27.1(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.27.1 - '@babel/plugin-syntax-jsx@7.28.6(@babel/core@7.29.0)': + '@babel/plugin-syntax-jsx@7.28.6(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.29.0)': + '@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.29.0)': + '@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.29.0)': + '@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.29.0)': + '@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.29.0)': + '@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.29.0)': + '@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 
7.28.6 - '@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.29.0)': + '@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.29.0)': + '@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 - '@babel/plugin-syntax-typescript@7.28.6(@babel/core@7.29.0)': + '@babel/plugin-syntax-typescript@7.28.6(@babel/core@7.28.6)': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/helper-plugin-utils': 7.28.6 '@babel/plugin-transform-react-jsx-self@7.27.1(@babel/core@7.28.6)': @@ -10839,12 +10419,12 @@ snapshots: '@babel/traverse@7.28.5': dependencies: - '@babel/code-frame': 7.29.0 + '@babel/code-frame': 7.28.6 '@babel/generator': 7.29.1 '@babel/helper-globals': 7.28.0 '@babel/parser': 7.29.0 '@babel/template': 7.28.6 - '@babel/types': 7.28.5 + '@babel/types': 7.29.0 debug: 4.4.3(supports-color@5.5.0) transitivePeerDependencies: - supports-color @@ -10861,18 +10441,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@babel/traverse@7.29.0': - dependencies: - '@babel/code-frame': 7.29.0 - '@babel/generator': 7.29.1 - '@babel/helper-globals': 7.28.0 - '@babel/parser': 7.29.0 - '@babel/template': 7.28.6 - '@babel/types': 7.29.0 - debug: 4.4.3(supports-color@5.5.0) - transitivePeerDependencies: - - supports-color - '@babel/types@7.28.5': dependencies: '@babel/helper-string-parser': 7.27.1 @@ -10899,7 +10467,7 @@ snapshots: dependencies: '@dataplan/json': 1.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0)) '@graphile/lru': 5.0.0-rc.5 - '@types/node': 22.19.15 + '@types/node': 22.19.11 chalk: 4.1.2 debug: 4.4.3(supports-color@5.5.0) eventemitter3: 5.0.4 @@ -10920,9 +10488,9 @@ snapshots: '@emnapi/wasi-threads': 1.1.0 tslib: 2.8.1 - 
'@emnapi/core@1.9.0': + '@emnapi/core@1.8.1': dependencies: - '@emnapi/wasi-threads': 1.2.0 + '@emnapi/wasi-threads': 1.1.0 tslib: 2.8.1 optional: true @@ -10930,7 +10498,7 @@ snapshots: dependencies: tslib: 2.8.1 - '@emnapi/runtime@1.9.0': + '@emnapi/runtime@1.8.1': dependencies: tslib: 2.8.1 optional: true @@ -10939,11 +10507,6 @@ snapshots: dependencies: tslib: 2.8.1 - '@emnapi/wasi-threads@1.2.0': - dependencies: - tslib: 2.8.1 - optional: true - '@emotion/is-prop-valid@1.4.0': dependencies: '@emotion/memoize': 0.9.0 @@ -11161,30 +10724,30 @@ snapshots: '@eslint/core': 0.17.0 levn: 0.4.1 - '@floating-ui/core@1.7.5': + '@floating-ui/core@1.7.4': dependencies: - '@floating-ui/utils': 0.2.11 + '@floating-ui/utils': 0.2.10 - '@floating-ui/dom@1.7.6': + '@floating-ui/dom@1.7.5': dependencies: - '@floating-ui/core': 1.7.5 - '@floating-ui/utils': 0.2.11 + '@floating-ui/core': 1.7.4 + '@floating-ui/utils': 0.2.10 - '@floating-ui/react-dom@2.1.8(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': + '@floating-ui/react-dom@2.1.7(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: - '@floating-ui/dom': 1.7.6 + '@floating-ui/dom': 1.7.5 react: 19.2.4 react-dom: 19.2.4(react@19.2.4) '@floating-ui/react@0.26.28(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: - '@floating-ui/react-dom': 2.1.8(react-dom@19.2.4(react@19.2.4))(react@19.2.4) - '@floating-ui/utils': 0.2.11 + '@floating-ui/react-dom': 2.1.7(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + '@floating-ui/utils': 0.2.10 react: 19.2.4 react-dom: 19.2.4(react@19.2.4) tabbable: 6.4.0 - '@floating-ui/utils@0.2.11': {} + '@floating-ui/utils@0.2.10': {} '@graphile-contrib/pg-many-to-many@2.0.0-rc.2': {} @@ -11208,9 +10771,9 @@ snapshots: - immer - use-sync-external-store - 
'@graphiql/plugin-doc-explorer@0.4.1(@graphiql/react@0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)))(@types/react@19.2.14)(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))': + '@graphiql/plugin-doc-explorer@0.4.1(@graphiql/react@0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)))(@types/react@19.2.14)(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))': dependencies: - '@graphiql/react': 0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) + '@graphiql/react': 0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) '@headlessui/react': 2.2.9(react-dom@19.2.4(react@19.2.4))(react@19.2.4) graphql: 16.13.0 react: 19.2.4 @@ -11238,10 +10801,10 @@ snapshots: - immer 
- use-sync-external-store - '@graphiql/plugin-history@0.4.1(@graphiql/react@0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)))(@types/node@25.5.0)(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))': + '@graphiql/plugin-history@0.4.1(@graphiql/react@0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)))(@types/node@25.3.3)(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))': dependencies: - '@graphiql/react': 0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) - '@graphiql/toolkit': 0.11.3(@types/node@25.5.0)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0) + '@graphiql/react': 
0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) + '@graphiql/toolkit': 0.11.3(@types/node@25.3.3)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0) react: 19.2.4 react-compiler-runtime: 19.1.0-rc.1(react@19.2.4) react-dom: 19.2.4(react@19.2.4) @@ -11262,7 +10825,7 @@ snapshots: '@radix-ui/react-tooltip': 1.2.8(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@radix-ui/react-visually-hidden': 1.2.4(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) clsx: 1.2.1 - framer-motion: 12.36.0(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + framer-motion: 12.34.0(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) get-value: 3.0.1 graphql: 16.13.0 graphql-language-service: 5.5.0(graphql@16.13.0) @@ -11285,15 +10848,15 @@ snapshots: - immer - use-sync-external-store - '@graphiql/react@0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))': + '@graphiql/react@0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))': dependencies: - '@graphiql/toolkit': 
0.11.3(@types/node@25.5.0)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0) + '@graphiql/toolkit': 0.11.3(@types/node@25.3.3)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0) '@radix-ui/react-dialog': 1.1.15(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@radix-ui/react-dropdown-menu': 2.1.16(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@radix-ui/react-tooltip': 1.2.8(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@radix-ui/react-visually-hidden': 1.2.4(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) clsx: 1.2.1 - framer-motion: 12.36.0(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + framer-motion: 12.34.0(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) get-value: 3.0.1 graphql: 16.13.0 graphql-language-service: 5.5.0(graphql@16.13.0) @@ -11326,11 +10889,11 @@ snapshots: transitivePeerDependencies: - '@types/node' - '@graphiql/toolkit@0.11.3(@types/node@25.5.0)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)': + '@graphiql/toolkit@0.11.3(@types/node@25.3.3)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)': dependencies: '@n1ru4l/push-pull-async-iterable-iterator': 3.2.0 graphql: 16.13.0 - meros: 1.3.2(@types/node@25.5.0) + meros: 1.3.2(@types/node@25.3.3) optionalDependencies: graphql-ws: 6.0.7(graphql@16.13.0)(ws@8.19.0) transitivePeerDependencies: @@ -11343,9 +10906,9 @@ snapshots: '@headlessui/react@2.2.9(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: '@floating-ui/react': 0.26.28(react-dom@19.2.4(react@19.2.4))(react@19.2.4) - '@react-aria/focus': 3.21.5(react-dom@19.2.4(react@19.2.4))(react@19.2.4) - '@react-aria/interactions': 
3.27.1(react-dom@19.2.4(react@19.2.4))(react@19.2.4) - '@tanstack/react-virtual': 3.13.22(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + '@react-aria/focus': 3.21.4(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + '@react-aria/interactions': 3.27.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + '@tanstack/react-virtual': 3.13.18(react-dom@19.2.4(react@19.2.4))(react@19.2.4) react: 19.2.4 react-dom: 19.2.4(react@19.2.4) use-sync-external-store: 1.6.0(react@19.2.4) @@ -11409,7 +10972,7 @@ snapshots: '@jest/console@30.3.0': dependencies: '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 chalk: 4.1.2 jest-message-util: 30.3.0 jest-util: 30.3.0 @@ -11423,14 +10986,14 @@ snapshots: '@jest/test-result': 30.3.0 '@jest/transform': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 ansi-escapes: 4.3.2 chalk: 4.1.2 - ci-info: 4.4.0 + ci-info: 4.3.1 exit-x: 0.2.2 graceful-fs: 4.2.11 jest-changed-files: 30.3.0 - jest-config: 30.3.0(@types/node@22.19.15)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)) + jest-config: 30.3.0(@types/node@22.19.11)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)) jest-haste-map: 30.3.0 jest-message-util: 30.3.0 jest-regex-util: 30.0.1 @@ -11458,7 +11021,7 @@ snapshots: dependencies: '@jest/fake-timers': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 jest-mock: 30.3.0 '@jest/expect-utils@30.2.0': @@ -11480,7 +11043,7 @@ snapshots: dependencies: '@jest/types': 30.3.0 '@sinonjs/fake-timers': 15.1.1 - '@types/node': 22.19.15 + '@types/node': 22.19.11 jest-message-util: 30.3.0 jest-mock: 30.3.0 jest-util: 30.3.0 @@ -11498,7 +11061,7 @@ snapshots: '@jest/pattern@30.0.1': dependencies: - '@types/node': 22.19.15 + '@types/node': 22.19.11 jest-regex-util: 30.0.1 '@jest/reporters@30.3.0': @@ -11509,7 +11072,7 @@ snapshots: '@jest/transform': 30.3.0 '@jest/types': 30.3.0 '@jridgewell/trace-mapping': 0.3.31 - '@types/node': 22.19.15 + '@types/node': 22.19.11 
chalk: 4.1.2 collect-v8-coverage: 1.0.3 exit-x: 0.2.2 @@ -11566,7 +11129,7 @@ snapshots: '@jest/transform@30.3.0': dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@jest/types': 30.3.0 '@jridgewell/trace-mapping': 0.3.31 babel-plugin-istanbul: 7.0.1 @@ -11587,7 +11150,7 @@ snapshots: dependencies: '@types/istanbul-lib-coverage': 2.0.6 '@types/istanbul-reports': 3.0.4 - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/yargs': 15.0.20 chalk: 4.1.2 @@ -11597,7 +11160,7 @@ snapshots: '@jest/schemas': 30.0.5 '@types/istanbul-lib-coverage': 2.0.6 '@types/istanbul-reports': 3.0.4 - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/yargs': 17.0.35 chalk: 4.1.2 @@ -11607,7 +11170,7 @@ snapshots: '@jest/schemas': 30.0.5 '@types/istanbul-lib-coverage': 2.0.6 '@types/istanbul-reports': 3.0.4 - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/yargs': 17.0.35 chalk: 4.1.2 @@ -11635,14 +11198,14 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.5 - '@launchql/mjml@0.1.1(@babel/core@7.29.0)(encoding@0.1.13)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4)': + '@launchql/mjml@0.1.1(@babel/core@7.28.6)(encoding@0.1.13)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4)': dependencies: '@babel/runtime': 7.28.4 mjml: 4.7.1(encoding@0.1.13) mjml-react: 1.0.59(mjml@4.7.1(encoding@0.1.13))(react-dom@19.2.4(react@19.2.4))(react@19.2.4) react: 19.2.4 react-dom: 19.2.4(react@19.2.4) - styled-components: 5.3.11(@babel/core@7.29.0)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4) + styled-components: 5.3.11(@babel/core@7.28.6)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4) styled-system: 5.1.5 transitivePeerDependencies: - '@babel/core' @@ -11661,16 +11224,16 @@ snapshots: '@protobufjs/path': 1.1.2 '@protobufjs/pool': 1.1.0 '@protobufjs/utf8': 1.1.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 long: 5.3.2 - 
'@launchql/styled-email@0.1.0(@babel/core@7.29.0)(encoding@0.1.13)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4)': + '@launchql/styled-email@0.1.0(@babel/core@7.28.6)(encoding@0.1.13)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4)': dependencies: '@babel/runtime': 7.28.4 juice: 7.0.0(encoding@0.1.13) react: 19.2.4 react-dom: 19.2.4(react@19.2.4) - styled-components: 5.3.11(@babel/core@7.29.0)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4) + styled-components: 5.3.11(@babel/core@7.28.6)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4) styled-system: 5.1.5 transitivePeerDependencies: - '@babel/core' @@ -11763,8 +11326,8 @@ snapshots: '@napi-rs/wasm-runtime@0.2.12': dependencies: - '@emnapi/core': 1.9.0 - '@emnapi/runtime': 1.9.0 + '@emnapi/core': 1.8.1 + '@emnapi/runtime': 1.8.1 '@tybys/wasm-util': 0.10.1 optional: true @@ -12123,7 +11686,7 @@ snapshots: '@pgsql/types@17.6.2': {} - '@pgsql/utils@17.8.14': + '@pgsql/utils@17.8.15': dependencies: '@pgsql/types': 17.6.2 nested-obj: 0.1.5 @@ -12303,7 +11866,7 @@ snapshots: '@radix-ui/react-popper@1.2.8(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: - '@floating-ui/react-dom': 2.1.8(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + '@floating-ui/react-dom': 2.1.7(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@radix-ui/react-arrow': 1.1.7(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.14)(react@19.2.4) '@radix-ui/react-context': 1.1.2(@types/react@19.2.14)(react@19.2.4) @@ -12476,52 +12039,52 @@ snapshots: '@radix-ui/rect@1.1.1': {} - '@react-aria/focus@3.21.5(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': + '@react-aria/focus@3.21.4(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: - '@react-aria/interactions': 
3.27.1(react-dom@19.2.4(react@19.2.4))(react@19.2.4) - '@react-aria/utils': 3.33.1(react-dom@19.2.4(react@19.2.4))(react@19.2.4) - '@react-types/shared': 3.33.1(react@19.2.4) - '@swc/helpers': 0.5.19 + '@react-aria/interactions': 3.27.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + '@react-aria/utils': 3.33.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + '@react-types/shared': 3.33.0(react@19.2.4) + '@swc/helpers': 0.5.18 clsx: 2.1.1 react: 19.2.4 react-dom: 19.2.4(react@19.2.4) - '@react-aria/interactions@3.27.1(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': + '@react-aria/interactions@3.27.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: '@react-aria/ssr': 3.9.10(react@19.2.4) - '@react-aria/utils': 3.33.1(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + '@react-aria/utils': 3.33.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@react-stately/flags': 3.1.2 - '@react-types/shared': 3.33.1(react@19.2.4) - '@swc/helpers': 0.5.19 + '@react-types/shared': 3.33.0(react@19.2.4) + '@swc/helpers': 0.5.18 react: 19.2.4 react-dom: 19.2.4(react@19.2.4) '@react-aria/ssr@3.9.10(react@19.2.4)': dependencies: - '@swc/helpers': 0.5.19 + '@swc/helpers': 0.5.18 react: 19.2.4 - '@react-aria/utils@3.33.1(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': + '@react-aria/utils@3.33.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: '@react-aria/ssr': 3.9.10(react@19.2.4) '@react-stately/flags': 3.1.2 '@react-stately/utils': 3.11.0(react@19.2.4) - '@react-types/shared': 3.33.1(react@19.2.4) - '@swc/helpers': 0.5.19 + '@react-types/shared': 3.33.0(react@19.2.4) + '@swc/helpers': 0.5.18 clsx: 2.1.1 react: 19.2.4 react-dom: 19.2.4(react@19.2.4) '@react-stately/flags@3.1.2': dependencies: - '@swc/helpers': 0.5.19 + '@swc/helpers': 0.5.18 '@react-stately/utils@3.11.0(react@19.2.4)': dependencies: - '@swc/helpers': 0.5.19 + '@swc/helpers': 0.5.18 react: 19.2.4 - '@react-types/shared@3.33.1(react@19.2.4)': + '@react-types/shared@3.33.0(react@19.2.4)': dependencies: 
react: 19.2.4 @@ -12651,16 +12214,7 @@ snapshots: '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/abort-controller@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/chunked-blob-reader-native@4.2.2': -======= '@smithy/chunked-blob-reader-native@4.2.3': ->>>>>>> main dependencies: '@smithy/util-base64': 4.3.2 tslib: 2.8.1 @@ -12678,7 +12232,7 @@ snapshots: '@smithy/util-middleware': 4.2.12 tslib: 2.8.1 - '@smithy/core@3.23.11': + '@smithy/core@3.23.12': dependencies: '@smithy/protocol-http': 5.3.12 '@smithy/types': 4.13.1 @@ -12686,29 +12240,12 @@ snapshots: '@smithy/util-base64': 4.3.2 '@smithy/util-body-length-browser': 4.2.2 '@smithy/util-middleware': 4.2.12 - '@smithy/util-stream': 4.5.19 - '@smithy/util-utf8': 4.2.2 - '@smithy/uuid': 1.1.2 - tslib: 2.8.1 - -<<<<<<< HEAD - '@smithy/core@3.23.9': - dependencies: - '@smithy/middleware-serde': 4.2.12 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - '@smithy/util-base64': 4.3.2 - '@smithy/util-body-length-browser': 4.2.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-stream': 4.5.17 + '@smithy/util-stream': 4.5.20 '@smithy/util-utf8': 4.2.2 '@smithy/uuid': 1.1.2 tslib: 2.8.1 - '@smithy/credential-provider-imds@4.2.10': -======= '@smithy/credential-provider-imds@4.2.12': ->>>>>>> main dependencies: '@smithy/node-config-provider': 4.3.12 '@smithy/property-provider': 4.2.12 @@ -12754,19 +12291,7 @@ snapshots: '@smithy/util-base64': 4.3.2 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/fetch-http-handler@5.3.13': - dependencies: - '@smithy/protocol-http': 5.3.11 - '@smithy/querystring-builder': 4.2.11 - '@smithy/types': 4.13.0 - '@smithy/util-base64': 4.3.2 - tslib: 2.8.1 - - '@smithy/hash-blob-browser@4.2.11': -======= '@smithy/hash-blob-browser@4.2.13': ->>>>>>> main dependencies: '@smithy/chunked-blob-reader': 5.2.2 '@smithy/chunked-blob-reader-native': 4.2.3 @@ -12799,15 +12324,7 @@ snapshots: dependencies: tslib: 2.8.1 -<<<<<<< HEAD - 
'@smithy/is-array-buffer@4.2.2': - dependencies: - tslib: 2.8.1 - - '@smithy/md5-js@4.2.10': -======= '@smithy/md5-js@4.2.12': ->>>>>>> main dependencies: '@smithy/types': 4.13.1 '@smithy/util-utf8': 4.2.2 @@ -12819,10 +12336,10 @@ snapshots: '@smithy/types': 4.13.1 tslib: 2.8.1 - '@smithy/middleware-endpoint@4.4.25': + '@smithy/middleware-endpoint@4.4.26': dependencies: - '@smithy/core': 3.23.11 - '@smithy/middleware-serde': 4.2.14 + '@smithy/core': 3.23.12 + '@smithy/middleware-serde': 4.2.15 '@smithy/node-config-provider': 4.3.12 '@smithy/shared-ini-file-loader': 4.4.7 '@smithy/types': 4.13.1 @@ -12830,83 +12347,38 @@ snapshots: '@smithy/util-middleware': 4.2.12 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/middleware-endpoint@4.4.23': - dependencies: - '@smithy/core': 3.23.9 - '@smithy/middleware-serde': 4.2.12 - '@smithy/node-config-provider': 4.3.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - '@smithy/util-middleware': 4.2.11 - tslib: 2.8.1 - - '@smithy/middleware-retry@4.4.38': -======= - '@smithy/middleware-retry@4.4.42': ->>>>>>> main + '@smithy/middleware-retry@4.4.43': dependencies: '@smithy/node-config-provider': 4.3.12 '@smithy/protocol-http': 5.3.12 '@smithy/service-error-classification': 4.2.12 - '@smithy/smithy-client': 4.12.5 + '@smithy/smithy-client': 4.12.6 '@smithy/types': 4.13.1 '@smithy/util-middleware': 4.2.12 '@smithy/util-retry': 4.2.12 '@smithy/uuid': 1.1.2 tslib: 2.8.1 - '@smithy/middleware-serde@4.2.14': + '@smithy/middleware-serde@4.2.15': dependencies: - '@smithy/core': 3.23.11 + '@smithy/core': 3.23.12 '@smithy/protocol-http': 5.3.12 '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/middleware-serde@4.2.12': - dependencies: - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/middleware-stack@4.2.10': -======= '@smithy/middleware-stack@4.2.12': ->>>>>>> main dependencies: '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - 
'@smithy/middleware-stack@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/node-config-provider@4.3.10': -======= '@smithy/node-config-provider@4.3.12': ->>>>>>> main dependencies: '@smithy/property-provider': 4.2.12 '@smithy/shared-ini-file-loader': 4.4.7 '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/node-config-provider@4.3.11': - dependencies: - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/node-http-handler@4.4.13': -======= - '@smithy/node-http-handler@4.4.16': ->>>>>>> main + '@smithy/node-http-handler@4.5.0': dependencies: '@smithy/abort-controller': 4.2.12 '@smithy/protocol-http': 5.3.12 @@ -12914,77 +12386,28 @@ snapshots: '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/node-http-handler@4.4.14': - dependencies: - '@smithy/abort-controller': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/querystring-builder': 4.2.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/property-provider@4.2.10': -======= '@smithy/property-provider@4.2.12': ->>>>>>> main dependencies: '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/property-provider@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/protocol-http@5.3.10': -======= '@smithy/protocol-http@5.3.12': ->>>>>>> main dependencies: '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/protocol-http@5.3.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/querystring-builder@4.2.10': -======= '@smithy/querystring-builder@4.2.12': ->>>>>>> main dependencies: '@smithy/types': 4.13.1 '@smithy/util-uri-escape': 4.2.2 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/querystring-builder@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - '@smithy/util-uri-escape': 4.2.2 - tslib: 2.8.1 - - '@smithy/querystring-parser@4.2.10': -======= '@smithy/querystring-parser@4.2.12': ->>>>>>> main dependencies: '@smithy/types': 
4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/querystring-parser@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/service-error-classification@4.2.10': -======= '@smithy/service-error-classification@4.2.12': ->>>>>>> main dependencies: '@smithy/types': 4.13.1 @@ -12993,16 +12416,7 @@ snapshots: '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/shared-ini-file-loader@4.4.6': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/signature-v4@5.3.10': -======= '@smithy/signature-v4@5.3.12': ->>>>>>> main dependencies: '@smithy/is-array-buffer': 4.2.2 '@smithy/protocol-http': 5.3.12 @@ -13013,46 +12427,17 @@ snapshots: '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/signature-v4@5.3.11': - dependencies: - '@smithy/is-array-buffer': 4.2.2 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - '@smithy/util-hex-encoding': 4.2.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-uri-escape': 4.2.2 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - - '@smithy/smithy-client@4.12.1': -======= - '@smithy/smithy-client@4.12.5': ->>>>>>> main + '@smithy/smithy-client@4.12.6': dependencies: - '@smithy/core': 3.23.11 - '@smithy/middleware-endpoint': 4.4.25 + '@smithy/core': 3.23.12 + '@smithy/middleware-endpoint': 4.4.26 '@smithy/middleware-stack': 4.2.12 '@smithy/protocol-http': 5.3.12 '@smithy/types': 4.13.1 - '@smithy/util-stream': 4.5.19 + '@smithy/util-stream': 4.5.20 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/smithy-client@4.12.3': - dependencies: - '@smithy/core': 3.23.9 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-stack': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - '@smithy/util-stream': 4.5.17 - tslib: 2.8.1 - - '@smithy/types@4.13.0': -======= '@smithy/types@4.13.1': ->>>>>>> main dependencies: tslib: 2.8.1 @@ -13062,45 +12447,17 @@ snapshots: '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/url-parser@4.2.11': - dependencies: - 
'@smithy/querystring-parser': 4.2.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/util-base64@4.3.1': -======= - '@smithy/util-base64@4.3.2': ->>>>>>> main - dependencies: - '@smithy/util-buffer-from': 4.2.2 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - -<<<<<<< HEAD '@smithy/util-base64@4.3.2': dependencies: '@smithy/util-buffer-from': 4.2.2 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 - '@smithy/util-body-length-browser@4.2.1': - dependencies: - tslib: 2.8.1 - - '@smithy/util-body-length-browser@4.2.2': - dependencies: - tslib: 2.8.1 - - '@smithy/util-body-length-node@4.2.2': -======= '@smithy/util-body-length-browser@4.2.2': dependencies: tslib: 2.8.1 '@smithy/util-body-length-node@4.2.3': ->>>>>>> main dependencies: tslib: 2.8.1 @@ -13114,41 +12471,24 @@ snapshots: '@smithy/is-array-buffer': 4.2.2 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/util-buffer-from@4.2.2': - dependencies: - '@smithy/is-array-buffer': 4.2.2 - tslib: 2.8.1 - - '@smithy/util-config-provider@4.2.1': - dependencies: - tslib: 2.8.1 - '@smithy/util-config-provider@4.2.2': dependencies: tslib: 2.8.1 - '@smithy/util-defaults-mode-browser@4.3.37': -======= - '@smithy/util-config-provider@4.2.2': - dependencies: - tslib: 2.8.1 - - '@smithy/util-defaults-mode-browser@4.3.41': ->>>>>>> main + '@smithy/util-defaults-mode-browser@4.3.42': dependencies: '@smithy/property-provider': 4.2.12 - '@smithy/smithy-client': 4.12.5 + '@smithy/smithy-client': 4.12.6 '@smithy/types': 4.13.1 tslib: 2.8.1 - '@smithy/util-defaults-mode-node@4.2.44': + '@smithy/util-defaults-mode-node@4.2.45': dependencies: '@smithy/config-resolver': 4.4.11 '@smithy/credential-provider-imds': 4.2.12 '@smithy/node-config-provider': 4.3.12 '@smithy/property-provider': 4.2.12 - '@smithy/smithy-client': 4.12.5 + '@smithy/smithy-client': 4.12.6 '@smithy/types': 4.13.1 tslib: 2.8.1 @@ -13162,38 +12502,21 @@ snapshots: dependencies: tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/util-hex-encoding@4.2.2': - dependencies: - tslib: 2.8.1 - - 
'@smithy/util-middleware@4.2.10': -======= '@smithy/util-middleware@4.2.12': ->>>>>>> main dependencies: '@smithy/types': 4.13.1 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/util-middleware@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@smithy/util-retry@4.2.10': -======= '@smithy/util-retry@4.2.12': ->>>>>>> main dependencies: '@smithy/service-error-classification': 4.2.12 '@smithy/types': 4.13.1 tslib: 2.8.1 - '@smithy/util-stream@4.5.19': + '@smithy/util-stream@4.5.20': dependencies: '@smithy/fetch-http-handler': 5.3.15 - '@smithy/node-http-handler': 4.4.16 + '@smithy/node-http-handler': 4.5.0 '@smithy/types': 4.13.1 '@smithy/util-base64': 4.3.2 '@smithy/util-buffer-from': 4.2.2 @@ -13201,25 +12524,6 @@ snapshots: '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/util-stream@4.5.17': - dependencies: - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/node-http-handler': 4.4.14 - '@smithy/types': 4.13.0 - '@smithy/util-base64': 4.3.2 - '@smithy/util-buffer-from': 4.2.2 - '@smithy/util-hex-encoding': 4.2.2 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - - '@smithy/util-uri-escape@4.2.1': -======= - '@smithy/util-uri-escape@4.2.2': ->>>>>>> main - dependencies: - tslib: 2.8.1 - '@smithy/util-uri-escape@4.2.2': dependencies: tslib: 2.8.1 @@ -13234,16 +12538,7 @@ snapshots: '@smithy/util-buffer-from': 4.2.2 tslib: 2.8.1 -<<<<<<< HEAD - '@smithy/util-utf8@4.2.2': - dependencies: - '@smithy/util-buffer-from': 4.2.2 - tslib: 2.8.1 - - '@smithy/util-waiter@4.2.10': -======= '@smithy/util-waiter@4.2.13': ->>>>>>> main dependencies: '@smithy/abort-controller': 4.2.12 '@smithy/types': 4.13.1 @@ -13253,10 +12548,6 @@ snapshots: dependencies: tslib: 2.8.1 - '@smithy/uuid@1.1.2': - dependencies: - tslib: 2.8.1 - '@styled-system/background@5.1.2': dependencies: '@styled-system/core': 5.1.2 @@ -13308,7 +12599,7 @@ snapshots: '@styled-system/core': 5.1.2 '@styled-system/css': 5.1.5 - '@swc/helpers@0.5.19': + '@swc/helpers@0.5.18': dependencies: tslib: 
2.8.1 @@ -13319,13 +12610,13 @@ snapshots: '@tanstack/query-core': 5.90.20 react: 19.2.4 - '@tanstack/react-virtual@3.13.22(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': + '@tanstack/react-virtual@3.13.18(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: - '@tanstack/virtual-core': 3.13.22 + '@tanstack/virtual-core': 3.13.18 react: 19.2.4 react-dom: 19.2.4(react@19.2.4) - '@tanstack/virtual-core@3.13.22': {} + '@tanstack/virtual-core@3.13.18': {} '@testing-library/dom@7.31.2': dependencies: @@ -13384,7 +12675,7 @@ snapshots: '@types/accepts@1.3.7': dependencies: - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/aria-query@4.2.2': {} @@ -13412,11 +12703,11 @@ snapshots: '@types/body-parser@1.19.6': dependencies: '@types/connect': 3.4.38 - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/connect@3.4.38': dependencies: - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/content-disposition@0.5.9': {} @@ -13427,7 +12718,7 @@ snapshots: '@types/connect': 3.4.38 '@types/express': 5.0.6 '@types/keygrip': 1.0.6 - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/cors@2.8.19': dependencies: @@ -13437,7 +12728,7 @@ snapshots: '@types/express-serve-static-core@5.1.0': dependencies: - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 1.2.1 @@ -13463,7 +12754,7 @@ snapshots: '@types/interpret@1.1.4': dependencies: - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/istanbul-lib-coverage@2.0.6': {} @@ -13504,7 +12795,7 @@ snapshots: '@types/http-errors': 2.0.5 '@types/keygrip': 1.0.6 '@types/koa-compose': 3.2.9 - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/methods@1.1.4': {} @@ -13520,11 +12811,7 @@ snapshots: dependencies: undici-types: 6.21.0 - '@types/node@22.19.15': - dependencies: - undici-types: 6.21.0 - - '@types/node@25.5.0': + '@types/node@25.3.3': dependencies: undici-types: 7.18.2 @@ -13567,12 +12854,12 @@ snapshots: 
'@types/send@1.2.1': dependencies: - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/serve-static@2.2.0': dependencies: '@types/http-errors': 2.0.5 - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/shelljs@0.10.0': dependencies: @@ -13590,7 +12877,7 @@ snapshots: dependencies: '@types/cookiejar': 2.1.5 '@types/methods': 1.1.4 - '@types/node': 22.19.15 + '@types/node': 22.19.11 form-data: 4.0.5 '@types/supertest@7.2.0': @@ -13612,14 +12899,14 @@ snapshots: dependencies: '@types/yargs-parser': 21.0.3 - '@typescript-eslint/eslint-plugin@8.57.0(@typescript-eslint/parser@8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/eslint-plugin@8.57.1(@typescript-eslint/parser@8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@eslint-community/regexpp': 4.12.2 - '@typescript-eslint/parser': 8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) - '@typescript-eslint/scope-manager': 8.57.0 - '@typescript-eslint/type-utils': 8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) - '@typescript-eslint/utils': 8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) - '@typescript-eslint/visitor-keys': 8.57.0 + '@typescript-eslint/parser': 8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.57.1 + '@typescript-eslint/type-utils': 8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.57.1 eslint: 9.39.2(jiti@2.6.1) ignore: 7.0.5 natural-compare: 1.4.0 @@ -13628,41 +12915,41 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/parser@8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@typescript-eslint/scope-manager': 8.57.0 - '@typescript-eslint/types': 
8.57.0 - '@typescript-eslint/typescript-estree': 8.57.0(typescript@5.9.3) - '@typescript-eslint/visitor-keys': 8.57.0 + '@typescript-eslint/scope-manager': 8.57.1 + '@typescript-eslint/types': 8.57.1 + '@typescript-eslint/typescript-estree': 8.57.1(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.57.1 debug: 4.4.3(supports-color@5.5.0) eslint: 9.39.2(jiti@2.6.1) typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/project-service@8.57.0(typescript@5.9.3)': + '@typescript-eslint/project-service@8.57.1(typescript@5.9.3)': dependencies: - '@typescript-eslint/tsconfig-utils': 8.57.0(typescript@5.9.3) - '@typescript-eslint/types': 8.57.0 + '@typescript-eslint/tsconfig-utils': 8.57.1(typescript@5.9.3) + '@typescript-eslint/types': 8.57.1 debug: 4.4.3(supports-color@5.5.0) typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/scope-manager@8.57.0': + '@typescript-eslint/scope-manager@8.57.1': dependencies: - '@typescript-eslint/types': 8.57.0 - '@typescript-eslint/visitor-keys': 8.57.0 + '@typescript-eslint/types': 8.57.1 + '@typescript-eslint/visitor-keys': 8.57.1 - '@typescript-eslint/tsconfig-utils@8.57.0(typescript@5.9.3)': + '@typescript-eslint/tsconfig-utils@8.57.1(typescript@5.9.3)': dependencies: typescript: 5.9.3 - '@typescript-eslint/type-utils@8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/type-utils@8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@typescript-eslint/types': 8.57.0 - '@typescript-eslint/typescript-estree': 8.57.0(typescript@5.9.3) - '@typescript-eslint/utils': 8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/types': 8.57.1 + '@typescript-eslint/typescript-estree': 8.57.1(typescript@5.9.3) + '@typescript-eslint/utils': 8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) debug: 4.4.3(supports-color@5.5.0) eslint: 9.39.2(jiti@2.6.1) ts-api-utils: 2.4.0(typescript@5.9.3) @@ -13670,14 +12957,14 @@ 
snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/types@8.57.0': {} + '@typescript-eslint/types@8.57.1': {} - '@typescript-eslint/typescript-estree@8.57.0(typescript@5.9.3)': + '@typescript-eslint/typescript-estree@8.57.1(typescript@5.9.3)': dependencies: - '@typescript-eslint/project-service': 8.57.0(typescript@5.9.3) - '@typescript-eslint/tsconfig-utils': 8.57.0(typescript@5.9.3) - '@typescript-eslint/types': 8.57.0 - '@typescript-eslint/visitor-keys': 8.57.0 + '@typescript-eslint/project-service': 8.57.1(typescript@5.9.3) + '@typescript-eslint/tsconfig-utils': 8.57.1(typescript@5.9.3) + '@typescript-eslint/types': 8.57.1 + '@typescript-eslint/visitor-keys': 8.57.1 debug: 4.4.3(supports-color@5.5.0) minimatch: 10.2.4 semver: 7.7.4 @@ -13687,20 +12974,20 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/utils@8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.2(jiti@2.6.1)) - '@typescript-eslint/scope-manager': 8.57.0 - '@typescript-eslint/types': 8.57.0 - '@typescript-eslint/typescript-estree': 8.57.0(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.57.1 + '@typescript-eslint/types': 8.57.1 + '@typescript-eslint/typescript-estree': 8.57.1(typescript@5.9.3) eslint: 9.39.2(jiti@2.6.1) typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/visitor-keys@8.57.0': + '@typescript-eslint/visitor-keys@8.57.1': dependencies: - '@typescript-eslint/types': 8.57.0 + '@typescript-eslint/types': 8.57.1 eslint-visitor-keys: 5.0.1 '@ungap/structured-clone@1.3.0': {} @@ -13764,7 +13051,7 @@ snapshots: '@unrs/resolver-binding-win32-x64-msvc@1.11.1': optional: true - '@vitejs/plugin-react@4.7.0(vite@6.4.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))': + 
'@vitejs/plugin-react@4.7.0(vite@6.4.1(@types/node@25.3.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/core': 7.28.6 '@babel/plugin-transform-react-jsx-self': 7.27.1(@babel/core@7.28.6) @@ -13772,7 +13059,7 @@ snapshots: '@rolldown/pluginutils': 1.0.0-beta.27 '@types/babel__core': 7.20.5 react-refresh: 0.17.0 - vite: 6.4.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2) + vite: 6.4.1(@types/node@25.3.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color @@ -13908,13 +13195,13 @@ snapshots: transitivePeerDependencies: - debug - babel-jest@30.3.0(@babel/core@7.29.0): + babel-jest@30.3.0(@babel/core@7.28.6): dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@jest/transform': 30.3.0 '@types/babel__core': 7.20.5 babel-plugin-istanbul: 7.0.1 - babel-preset-jest: 30.3.0(@babel/core@7.29.0) + babel-preset-jest: 30.3.0(@babel/core@7.28.6) chalk: 4.1.2 graceful-fs: 4.2.11 slash: 3.0.0 @@ -13935,42 +13222,42 @@ snapshots: dependencies: '@types/babel__core': 7.20.5 - babel-plugin-styled-components@2.1.4(@babel/core@7.29.0)(styled-components@5.3.11(@babel/core@7.29.0)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4))(supports-color@5.5.0): + babel-plugin-styled-components@2.1.4(@babel/core@7.28.6)(styled-components@5.3.11(@babel/core@7.28.6)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4))(supports-color@5.5.0): dependencies: '@babel/helper-annotate-as-pure': 7.27.3 '@babel/helper-module-imports': 7.27.1(supports-color@5.5.0) - '@babel/plugin-syntax-jsx': 7.27.1(@babel/core@7.29.0) + '@babel/plugin-syntax-jsx': 7.27.1(@babel/core@7.28.6) lodash: 4.17.23 picomatch: 2.3.1 - styled-components: 5.3.11(@babel/core@7.29.0)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4) + styled-components: 5.3.11(@babel/core@7.28.6)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4) transitivePeerDependencies: - '@babel/core' - supports-color - 
babel-preset-current-node-syntax@1.2.0(@babel/core@7.29.0): - dependencies: - '@babel/core': 7.29.0 - '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.29.0) - '@babel/plugin-syntax-bigint': 7.8.3(@babel/core@7.29.0) - '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.29.0) - '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.29.0) - '@babel/plugin-syntax-import-attributes': 7.28.6(@babel/core@7.29.0) - '@babel/plugin-syntax-import-meta': 7.10.4(@babel/core@7.29.0) - '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.29.0) - '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.29.0) - '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.29.0) - '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.29.0) - '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.29.0) - '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.29.0) - '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.29.0) - '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.29.0) - '@babel/plugin-syntax-top-level-await': 7.14.5(@babel/core@7.29.0) - - babel-preset-jest@30.3.0(@babel/core@7.29.0): - dependencies: - '@babel/core': 7.29.0 + babel-preset-current-node-syntax@1.2.0(@babel/core@7.28.6): + dependencies: + '@babel/core': 7.28.6 + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.28.6) + '@babel/plugin-syntax-bigint': 7.8.3(@babel/core@7.28.6) + '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.28.6) + '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.28.6) + '@babel/plugin-syntax-import-attributes': 7.28.6(@babel/core@7.28.6) + '@babel/plugin-syntax-import-meta': 7.10.4(@babel/core@7.28.6) + '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.28.6) + '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.28.6) + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.28.6) + 
'@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.28.6) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.28.6) + '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.28.6) + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.28.6) + '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.28.6) + '@babel/plugin-syntax-top-level-await': 7.14.5(@babel/core@7.28.6) + + babel-preset-jest@30.3.0(@babel/core@7.28.6): + dependencies: + '@babel/core': 7.28.6 babel-plugin-jest-hoist: 30.3.0 - babel-preset-current-node-syntax: 1.2.0(@babel/core@7.29.0) + babel-preset-current-node-syntax: 1.2.0(@babel/core@7.28.6) babel-runtime@6.25.0: dependencies: @@ -14190,7 +13477,7 @@ snapshots: parse5: 7.3.0 parse5-htmlparser2-tree-adapter: 7.1.0 parse5-parser-stream: 7.1.2 - undici: 7.24.3 + undici: 7.24.4 whatwg-mimetype: 4.0.0 chokidar@3.6.0: @@ -14211,8 +13498,6 @@ snapshots: ci-info@4.3.1: {} - ci-info@4.4.0: {} - cjs-module-lexer@2.2.0: {} clean-ansi@0.2.1: {} @@ -14350,7 +13635,7 @@ snapshots: handlebars: 4.7.8 json-stringify-safe: 5.0.1 meow: 8.1.2 - semver: 7.7.4 + semver: 7.7.3 split: 1.0.1 conventional-commits-filter@3.0.0: @@ -14491,7 +13776,7 @@ snapshots: dedent@1.5.3: {} - dedent@1.7.2: {} + dedent@1.7.1: {} deep-is@0.1.4: {} @@ -14608,10 +13893,10 @@ snapshots: dotenv@16.4.7: {} - drizzle-orm@0.45.1(@types/pg@8.18.0)(pg@8.20.0): + drizzle-orm@0.45.1(@types/pg@8.18.0)(pg@8.19.0): optionalDependencies: '@types/pg': 8.18.0 - pg: 8.20.0 + pg: 8.19.0 dunder-proto@1.0.1: dependencies: @@ -14781,11 +14066,11 @@ snapshots: dependencies: eslint: 9.39.2(jiti@2.6.1) - eslint-plugin-unused-imports@4.4.1(@typescript-eslint/eslint-plugin@8.57.0(@typescript-eslint/parser@8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1)): + 
eslint-plugin-unused-imports@4.4.1(@typescript-eslint/eslint-plugin@8.57.1(@typescript-eslint/parser@8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1)): dependencies: eslint: 9.39.2(jiti@2.6.1) optionalDependencies: - '@typescript-eslint/eslint-plugin': 8.57.0(@typescript-eslint/parser@8.57.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/eslint-plugin': 8.57.1(@typescript-eslint/parser@8.57.1(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) eslint-scope@8.4.0: dependencies: @@ -14964,13 +14249,11 @@ snapshots: fast-uri@3.1.0: {} - fast-xml-builder@1.1.3: - dependencies: - path-expression-matcher: 1.1.3 + fast-xml-builder@1.0.0: {} fast-xml-parser@5.4.1: dependencies: - fast-xml-builder: 1.1.3 + fast-xml-builder: 1.0.0 strnum: 2.2.0 fastq@1.20.1: @@ -15062,10 +14345,10 @@ snapshots: forwarded@0.2.0: {} - framer-motion@12.36.0(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4): + framer-motion@12.34.0(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4): dependencies: - motion-dom: 12.36.0 - motion-utils: 12.36.0 + motion-dom: 12.34.0 + motion-utils: 12.29.2 tslib: 2.8.1 optionalDependencies: '@emotion/is-prop-valid': 1.4.0 @@ -15174,7 +14457,7 @@ snapshots: git-semver-tags@5.0.1: dependencies: meow: 8.1.2 - semver: 7.7.4 + semver: 7.7.3 git-up@7.0.0: dependencies: @@ -15281,7 +14564,7 @@ snapshots: - supports-color - use-sync-external-store - grafserv@1.0.0-rc.7(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0): + 
grafserv@1.0.0-rc.7(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0): dependencies: '@graphile/lru': 5.0.0-rc.5 debug: 4.4.3(supports-color@5.5.0) @@ -15290,7 +14573,7 @@ snapshots: graphile-config: 1.0.0-rc.6 graphql: 16.13.0 graphql-ws: 6.0.7(graphql@16.13.0)(ws@8.19.0) - ruru: 2.0.0-rc.7(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(debug@4.4.3)(graphile-config@1.0.0-rc.6)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) + ruru: 2.0.0-rc.7(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(debug@4.4.3)(graphile-config@1.0.0-rc.6)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) tslib: 2.8.1 optionalDependencies: ws: 8.19.0 @@ -15309,7 +14592,7 @@ snapshots: graphile-build-pg@5.0.0-rc.8(@dataplan/pg@1.0.0-rc.8(@dataplan/json@1.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0)))(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(pg-sql2@5.0.0-rc.5)(pg@8.20.0))(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-build@5.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(pg-sql2@5.0.0-rc.5)(pg@8.20.0)(tamedevil@0.1.0-rc.6): dependencies: '@dataplan/pg': 1.0.0-rc.8(@dataplan/json@1.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0)))(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(pg-sql2@5.0.0-rc.5)(pg@8.20.0) - '@types/node': 22.19.15 + '@types/node': 22.19.11 debug: 4.4.3(supports-color@5.5.0) grafast: 
1.0.0-rc.9(graphql@16.13.0) graphile-build: 5.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0) @@ -15327,7 +14610,7 @@ snapshots: graphile-build@5.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0): dependencies: - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/pluralize': 0.0.33 '@types/semver': 7.7.1 chalk: 4.1.2 @@ -15347,7 +14630,7 @@ snapshots: graphile-config@1.0.0-rc.6: dependencies: '@types/interpret': 1.1.4 - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/semver': 7.7.1 chalk: 4.1.2 debug: 4.4.3(supports-color@5.5.0) @@ -15392,11 +14675,11 @@ snapshots: - immer - use-sync-external-store - graphiql@5.2.2(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)): + graphiql@5.2.2(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)): dependencies: - '@graphiql/plugin-doc-explorer': 0.4.1(@graphiql/react@0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)))(@types/react@19.2.14)(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) - '@graphiql/plugin-history': 
0.4.1(@graphiql/react@0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)))(@types/node@25.5.0)(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) - '@graphiql/react': 0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) + '@graphiql/plugin-doc-explorer': 0.4.1(@graphiql/react@0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)))(@types/react@19.2.14)(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) + '@graphiql/plugin-history': 
0.4.1(@graphiql/react@0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)))(@types/node@25.3.3)(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) + '@graphiql/react': 0.37.3(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-compiler-runtime@19.1.0-rc.1(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) graphql: 16.13.0 react: 19.2.4 react-compiler-runtime: 19.1.0-rc.1(react@19.2.4) @@ -15742,7 +15025,7 @@ snapshots: istanbul-lib-instrument@6.0.3: dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/parser': 7.29.0 '@istanbuljs/schema': 0.1.3 istanbul-lib-coverage: 3.2.2 @@ -15799,10 +15082,10 @@ snapshots: '@jest/expect': 30.3.0 '@jest/test-result': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 chalk: 4.1.2 co: 4.6.0 - dedent: 1.7.2 + dedent: 1.7.1 is-generator-fn: 2.1.0 jest-each: 30.3.0 jest-matcher-utils: 30.3.0 @@ -15840,14 +15123,14 @@ snapshots: jest-config@30.3.0(@types/node@22.19.11)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)): dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@jest/get-type': 30.1.0 '@jest/pattern': 30.0.1 '@jest/test-sequencer': 30.3.0 '@jest/types': 30.3.0 - babel-jest: 30.3.0(@babel/core@7.29.0) + babel-jest: 30.3.0(@babel/core@7.28.6) chalk: 4.1.2 - ci-info: 4.4.0 + ci-info: 4.3.1 deepmerge: 4.3.1 glob: 10.5.0 graceful-fs: 
4.2.11 @@ -15870,38 +15153,6 @@ snapshots: - babel-plugin-macros - supports-color - jest-config@30.3.0(@types/node@22.19.15)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)): - dependencies: - '@babel/core': 7.29.0 - '@jest/get-type': 30.1.0 - '@jest/pattern': 30.0.1 - '@jest/test-sequencer': 30.3.0 - '@jest/types': 30.3.0 - babel-jest: 30.3.0(@babel/core@7.29.0) - chalk: 4.1.2 - ci-info: 4.4.0 - deepmerge: 4.3.1 - glob: 10.5.0 - graceful-fs: 4.2.11 - jest-circus: 30.3.0 - jest-docblock: 30.2.0 - jest-environment-node: 30.3.0 - jest-regex-util: 30.0.1 - jest-resolve: 30.3.0 - jest-runner: 30.3.0 - jest-util: 30.3.0 - jest-validate: 30.3.0 - parse-json: 5.2.0 - pretty-format: 30.3.0 - slash: 3.0.0 - strip-json-comments: 3.1.1 - optionalDependencies: - '@types/node': 22.19.15 - ts-node: 10.9.2(@types/node@22.19.11)(typescript@5.9.3) - transitivePeerDependencies: - - babel-plugin-macros - - supports-color - jest-diff@29.7.0: dependencies: chalk: 4.1.2 @@ -15940,7 +15191,7 @@ snapshots: '@jest/environment': 30.3.0 '@jest/fake-timers': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 jest-mock: 30.3.0 jest-util: 30.3.0 jest-validate: 30.3.0 @@ -15950,7 +15201,7 @@ snapshots: jest-haste-map@30.3.0: dependencies: '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 anymatch: 3.1.3 fb-watchman: 2.0.2 graceful-fs: 4.2.11 @@ -15997,7 +15248,7 @@ snapshots: jest-message-util@30.3.0: dependencies: - '@babel/code-frame': 7.29.0 + '@babel/code-frame': 7.28.6 '@jest/types': 30.3.0 '@types/stack-utils': 2.0.3 chalk: 4.1.2 @@ -16010,13 +15261,13 @@ snapshots: jest-mock@30.2.0: dependencies: '@jest/types': 30.2.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 jest-util: 30.2.0 jest-mock@30.3.0: dependencies: '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 jest-util: 30.3.0 jest-pnp-resolver@1.2.3(jest-resolve@30.3.0): @@ -16050,7 +15301,7 @@ snapshots: '@jest/test-result': 30.3.0 
'@jest/transform': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 chalk: 4.1.2 emittery: 0.13.1 exit-x: 0.2.2 @@ -16079,7 +15330,7 @@ snapshots: '@jest/test-result': 30.3.0 '@jest/transform': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 chalk: 4.1.2 cjs-module-lexer: 2.2.0 collect-v8-coverage: 1.0.3 @@ -16099,17 +15350,17 @@ snapshots: jest-snapshot@30.3.0: dependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@babel/generator': 7.29.1 - '@babel/plugin-syntax-jsx': 7.28.6(@babel/core@7.29.0) - '@babel/plugin-syntax-typescript': 7.28.6(@babel/core@7.29.0) + '@babel/plugin-syntax-jsx': 7.28.6(@babel/core@7.28.6) + '@babel/plugin-syntax-typescript': 7.28.6(@babel/core@7.28.6) '@babel/types': 7.29.0 '@jest/expect-utils': 30.3.0 '@jest/get-type': 30.1.0 '@jest/snapshot-utils': 30.3.0 '@jest/transform': 30.3.0 '@jest/types': 30.3.0 - babel-preset-current-node-syntax: 1.2.0(@babel/core@7.29.0) + babel-preset-current-node-syntax: 1.2.0(@babel/core@7.28.6) chalk: 4.1.2 expect: 30.3.0 graceful-fs: 4.2.11 @@ -16126,7 +15377,7 @@ snapshots: jest-util@30.2.0: dependencies: '@jest/types': 30.2.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 chalk: 4.1.2 ci-info: 4.3.1 graceful-fs: 4.2.11 @@ -16135,9 +15386,9 @@ snapshots: jest-util@30.3.0: dependencies: '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 chalk: 4.1.2 - ci-info: 4.4.0 + ci-info: 4.3.1 graceful-fs: 4.2.11 picomatch: 4.0.3 @@ -16154,7 +15405,7 @@ snapshots: dependencies: '@jest/test-result': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 22.19.15 + '@types/node': 22.19.11 ansi-escapes: 4.3.2 chalk: 4.1.2 emittery: 0.13.1 @@ -16163,7 +15414,7 @@ snapshots: jest-worker@30.3.0: dependencies: - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@ungap/structured-clone': 1.3.0 jest-util: 30.3.0 merge-stream: 2.0.0 @@ -16600,9 +15851,9 @@ snapshots: optionalDependencies: '@types/node': 22.19.11 - 
meros@1.3.2(@types/node@25.5.0): + meros@1.3.2(@types/node@25.3.3): optionalDependencies: - '@types/node': 25.5.0 + '@types/node': 25.3.3 methods@1.1.2: {} @@ -17025,11 +16276,11 @@ snapshots: picomatch-browser: 2.2.6 prettier: 3.8.1 - motion-dom@12.36.0: + motion-dom@12.34.0: dependencies: - motion-utils: 12.36.0 + motion-utils: 12.29.2 - motion-utils@12.36.0: {} + motion-utils@12.29.2: {} ms@2.1.3: {} @@ -17099,7 +16350,7 @@ snapshots: make-fetch-happen: 13.0.1 nopt: 7.2.1 proc-log: 4.2.0 - semver: 7.7.4 + semver: 7.7.3 tar: 6.2.1 which: 4.0.0 transitivePeerDependencies: @@ -17154,7 +16405,7 @@ snapshots: dependencies: hosted-git-info: 4.1.0 is-core-module: 2.16.1 - semver: 7.7.4 + semver: 7.7.3 validate-npm-package-license: 3.0.4 normalize-package-data@6.0.2: @@ -17477,7 +16728,7 @@ snapshots: parse5@3.0.3: dependencies: - '@types/node': 22.19.15 + '@types/node': 22.19.11 parse5@7.3.0: dependencies: @@ -17489,8 +16740,6 @@ snapshots: path-exists@4.0.0: {} - path-expression-matcher@1.1.3: {} - path-is-absolute@1.0.1: {} path-key@3.1.1: {} @@ -17521,6 +16770,8 @@ snapshots: pg-cloudflare@1.3.0: optional: true + pg-connection-string@2.11.0: {} + pg-connection-string@2.12.0: {} pg-copy-streams@7.0.0: {} @@ -17531,6 +16782,10 @@ snapshots: dependencies: tslib: 2.8.1 + pg-pool@3.12.0(pg@8.19.0): + dependencies: + pg: 8.19.0 + pg-pool@3.13.0(pg@8.20.0): dependencies: pg: 8.20.0 @@ -17545,7 +16800,7 @@ snapshots: case: 1.6.3 deepmerge: 4.3.1 nested-obj: 0.1.10 - strfy-js: 3.2.1 + strfy-js: 3.1.10 transitivePeerDependencies: - supports-color @@ -17566,6 +16821,16 @@ snapshots: postgres-date: 1.0.7 postgres-interval: 1.2.0 + pg@8.19.0: + dependencies: + pg-connection-string: 2.11.0 + pg-pool: 3.12.0(pg@8.19.0) + pg-protocol: 1.12.0 + pg-types: 2.2.0 + pgpass: 1.0.5 + optionalDependencies: + pg-cloudflare: 1.3.0 + pg@8.20.0: dependencies: pg-connection-string: 2.12.0 @@ -17580,16 +16845,16 @@ snapshots: dependencies: split2: 4.2.0 - pgsql-deparser@17.18.1: + 
pgsql-deparser@17.18.2: dependencies: '@pgsql/quotes': 17.1.0 '@pgsql/types': 17.6.2 - pgsql-parser@17.9.13: + pgsql-parser@17.9.14: dependencies: '@pgsql/types': 17.6.2 libpg-query: 17.7.3 - pgsql-deparser: 17.18.1 + pgsql-deparser: 17.18.2 picocolors@1.1.1: {} @@ -17641,16 +16906,16 @@ snapshots: '@tsconfig/node20': 20.1.9 tslib: 2.8.1 - postgraphile@5.0.0-rc.10(01f6c3872a4afea0bb2f9f52d380dd87): + postgraphile@5.0.0-rc.10(0096e7e6f6d7d6a9d120788f0238f495): dependencies: '@dataplan/json': 1.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0)) '@dataplan/pg': 1.0.0-rc.8(@dataplan/json@1.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0)))(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(pg-sql2@5.0.0-rc.5)(pg@8.20.0) '@graphile/lru': 5.0.0-rc.5 - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/pg': 8.18.0 debug: 4.4.3(supports-color@5.5.0) grafast: 1.0.0-rc.9(graphql@16.13.0) - grafserv: 1.0.0-rc.7(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0) + grafserv: 1.0.0-rc.7(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))(ws@8.19.0) graphile-build: 5.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0) graphile-build-pg: 
5.0.0-rc.8(@dataplan/pg@1.0.0-rc.8(@dataplan/json@1.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0)))(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(pg-sql2@5.0.0-rc.5)(pg@8.20.0))(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-build@5.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(pg-sql2@5.0.0-rc.5)(pg@8.20.0)(tamedevil@0.1.0-rc.6) graphile-config: 1.0.0-rc.6 @@ -17673,7 +16938,7 @@ snapshots: '@dataplan/json': 1.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0)) '@dataplan/pg': 1.0.0-rc.8(@dataplan/json@1.0.0-rc.6(grafast@1.0.0-rc.9(graphql@16.13.0)))(grafast@1.0.0-rc.9(graphql@16.13.0))(graphile-config@1.0.0-rc.6)(graphql@16.13.0)(pg-sql2@5.0.0-rc.5)(pg@8.20.0) '@graphile/lru': 5.0.0-rc.5 - '@types/node': 22.19.15 + '@types/node': 22.19.11 '@types/pg': 8.18.0 debug: 4.4.3(supports-color@5.5.0) grafast: 1.0.0-rc.9(graphql@16.13.0) @@ -18060,10 +17325,10 @@ snapshots: - immer - use-sync-external-store - ruru-types@2.0.0-rc.6(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)): + ruru-types@2.0.0-rc.6(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)): dependencies: - '@graphiql/toolkit': 0.11.3(@types/node@25.5.0)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0) - graphiql: 
5.2.2(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) + '@graphiql/toolkit': 0.11.3(@types/node@25.3.3)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0) + graphiql: 5.2.2(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) graphql: 16.13.0 optionalDependencies: react: 19.2.4 @@ -18098,13 +17363,13 @@ snapshots: - immer - use-sync-external-store - ruru@2.0.0-rc.7(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(debug@4.4.3)(graphile-config@1.0.0-rc.6)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)): + ruru@2.0.0-rc.7(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(debug@4.4.3)(graphile-config@1.0.0-rc.6)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)): dependencies: '@emotion/is-prop-valid': 1.4.0 graphile-config: 1.0.0-rc.6 graphql: 16.13.0 http-proxy: 1.18.1(debug@4.4.3) - ruru-types: 2.0.0-rc.6(@emotion/is-prop-valid@1.4.0)(@types/node@25.5.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) + ruru-types: 
2.0.0-rc.6(@emotion/is-prop-valid@1.4.0)(@types/node@25.3.3)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(graphql-ws@6.0.7(graphql@16.13.0)(ws@8.19.0))(graphql@16.13.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)) tslib: 2.8.1 yargs: 17.7.2 optionalDependencies: @@ -18331,7 +17596,7 @@ snapshots: streamsearch@1.1.0: {} - strfy-js@3.2.1: + strfy-js@3.1.10: dependencies: minimatch: 10.2.4 @@ -18384,14 +17649,14 @@ snapshots: strnum@2.2.0: {} - styled-components@5.3.11(@babel/core@7.29.0)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4): + styled-components@5.3.11(@babel/core@7.28.6)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4): dependencies: '@babel/helper-module-imports': 7.27.1(supports-color@5.5.0) '@babel/traverse': 7.28.6(supports-color@5.5.0) '@emotion/is-prop-valid': 1.4.0 '@emotion/stylis': 0.8.5 '@emotion/unitless': 0.7.5 - babel-plugin-styled-components: 2.1.4(@babel/core@7.29.0)(styled-components@5.3.11(@babel/core@7.29.0)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4))(supports-color@5.5.0) + babel-plugin-styled-components: 2.1.4(@babel/core@7.28.6)(styled-components@5.3.11(@babel/core@7.28.6)(react-dom@19.2.4(react@19.2.4))(react-is@19.2.4)(react@19.2.4))(supports-color@5.5.0) css-to-react-native: 3.2.0 hoist-non-react-statics: 3.3.2 react: 19.2.4 @@ -18535,7 +17800,7 @@ snapshots: dependencies: typescript: 5.9.3 - ts-jest@29.4.6(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(jest-util@30.3.0)(jest@30.3.0(@types/node@22.19.11)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)))(typescript@5.9.3): + ts-jest@29.4.6(@babel/core@7.28.6)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.28.6))(jest-util@30.3.0)(jest@30.3.0(@types/node@22.19.11)(ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3)))(typescript@5.9.3): dependencies: bs-logger: 0.2.6 
fast-json-stable-stringify: 2.1.0 @@ -18549,10 +17814,10 @@ snapshots: typescript: 5.9.3 yargs-parser: 21.1.1 optionalDependencies: - '@babel/core': 7.29.0 + '@babel/core': 7.28.6 '@jest/transform': 30.3.0 '@jest/types': 30.3.0 - babel-jest: 30.3.0(@babel/core@7.29.0) + babel-jest: 30.3.0(@babel/core@7.28.6) jest-util: 30.3.0 ts-node@10.9.2(@types/node@22.19.11)(typescript@5.9.3): @@ -18573,14 +17838,14 @@ snapshots: v8-compile-cache-lib: 3.0.1 yn: 3.1.1 - ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3): + ts-node@10.9.2(@types/node@25.3.3)(typescript@5.9.3): dependencies: '@cspotcode/source-map-support': 0.8.1 '@tsconfig/node10': 1.0.12 '@tsconfig/node12': 1.0.11 '@tsconfig/node14': 1.0.3 '@tsconfig/node16': 1.0.4 - '@types/node': 25.5.0 + '@types/node': 25.3.3 acorn: 8.15.0 acorn-walk: 8.3.4 arg: 4.1.3 @@ -18663,7 +17928,7 @@ snapshots: undici-types@7.18.2: {} - undici@7.24.3: {} + undici@7.24.4: {} unique-filename@3.0.0: dependencies: @@ -18768,7 +18033,7 @@ snapshots: vary@1.1.2: {} - vite@6.4.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2): + vite@6.4.1(@types/node@25.3.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: esbuild: 0.25.12 fdir: 6.5.0(picomatch@4.0.3) @@ -18777,7 +18042,7 @@ snapshots: rollup: 4.57.1 tinyglobby: 0.2.15 optionalDependencies: - '@types/node': 25.5.0 + '@types/node': 25.3.3 fsevents: 2.3.3 jiti: 2.6.1 tsx: 4.21.0 diff --git a/uploads/s3-streamer/package.json b/uploads/s3-streamer/package.json index 82c1462dd..e2b7380cc 100644 --- a/uploads/s3-streamer/package.json +++ b/uploads/s3-streamer/package.json @@ -37,6 +37,7 @@ "dependencies": { "@aws-sdk/client-s3": "^3.1009.0", "@aws-sdk/lib-storage": "^3.1009.0", + "@aws-sdk/s3-request-presigner": "^3.1010.0", "@constructive-io/content-type-stream": "workspace:^", "@pgpmjs/types": "workspace:^" }, From 1e8d866e5b3847052634a8c9ba6175548ef3f81d Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 19 Mar 2026 07:29:18 +0800 Subject: [PATCH 12/15] fixed db id type --- 
graphile/graphile-settings/src/upload-resolver.ts | 2 +- migrations/files_store.sql | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index fd84d6a00..238828989 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -126,7 +126,7 @@ async function insertFileRecord( `INSERT INTO files_store_public.files (id, database_id, bucket_key, key, etag, created_by, mime_type) VALUES ($1, $2, $3, $4, $5, $6, $7)`, - [fileId, Number(databaseId), bucketKey, key, etag, createdBy, contentType], + [fileId, databaseId, bucketKey, key, etag, createdBy, contentType], ); } diff --git a/migrations/files_store.sql b/migrations/files_store.sql index 2a352c897..a68a7cf51 100644 --- a/migrations/files_store.sql +++ b/migrations/files_store.sql @@ -56,7 +56,7 @@ COMMENT ON TYPE files_store_public.file_status IS CREATE TABLE files_store_public.files ( id uuid NOT NULL DEFAULT gen_random_uuid(), - database_id integer NOT NULL, + database_id uuid NOT NULL, bucket_key text NOT NULL DEFAULT 'default', key text NOT NULL, status files_store_public.file_status NOT NULL DEFAULT 'pending', @@ -125,7 +125,7 @@ ALTER TABLE files_store_public.files CREATE TABLE files_store_public.buckets ( id uuid NOT NULL DEFAULT gen_random_uuid(), - database_id integer NOT NULL, + database_id uuid NOT NULL, key text NOT NULL, name text NOT NULL, is_public boolean NOT NULL DEFAULT false, @@ -200,6 +200,7 @@ CREATE OR REPLACE FUNCTION files_store_public.files_after_insert_queue_processin RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( + NEW.database_id, 'process-image', json_build_object( 'file_id', NEW.id, @@ -265,6 +266,7 @@ CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion( RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( + NEW.database_id, 'delete-s3-object', json_build_object( 
'file_id', NEW.id, @@ -292,6 +294,7 @@ CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_retry() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( + NEW.database_id, 'process-image', json_build_object( 'file_id', NEW.id, @@ -325,8 +328,8 @@ ALTER TABLE files_store_public.files FORCE ROW LEVEL SECURITY; CREATE POLICY files_tenant_isolation ON files_store_public.files AS RESTRICTIVE FOR ALL - USING (database_id = current_setting('app.database_id')::integer) - WITH CHECK (database_id = current_setting('app.database_id')::integer); + USING (database_id = current_setting('app.database_id')::uuid) + WITH CHECK (database_id = current_setting('app.database_id')::uuid); -- Policy 2: Visibility for SELECT (authenticated + service_role only) -- Non-ready files visible only to the uploader. Uses NULLIF for safe uuid handling @@ -426,7 +429,7 @@ DECLARE versions_json json; BEGIN -- Get the database_id from session context - db_id := current_setting('app.database_id')::integer; + db_id := current_setting('app.database_id')::uuid; -- Extract the jsonb value from the specified column (dynamic) EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO new_val USING NEW; @@ -532,7 +535,7 @@ DECLARE table_name text := TG_ARGV[1]; db_id integer; BEGIN - db_id := current_setting('app.database_id')::integer; + db_id := current_setting('app.database_id')::uuid; -- Mark all files for this source row + column as deleting UPDATE files_store_public.files From db44a4969200e7133e52bcbcc4f6c4011f752b3f Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 19 Mar 2026 18:25:54 +0800 Subject: [PATCH 13/15] adapt the jsonb image field --- migrations/files_store.sql | 137 ++++++++++--------------------------- 1 file changed, 35 insertions(+), 102 deletions(-) diff --git a/migrations/files_store.sql b/migrations/files_store.sql index a68a7cf51..03c0d21c6 100644 --- a/migrations/files_store.sql +++ b/migrations/files_store.sql @@ -67,7 +67,7 @@ CREATE TABLE files_store_public.files 
( source_id uuid, processing_started_at timestamptz, created_by uuid, - origin_id uuid, + versions jsonb, mime_type text, created_at timestamptz NOT NULL DEFAULT now(), updated_at timestamptz NOT NULL DEFAULT now(), @@ -87,7 +87,7 @@ CREATE TABLE files_store_public.files ( ); COMMENT ON TABLE files_store_public.files IS - 'Operational index for S3 objects. Each row = one physical S3 object (including generated versions). NOT a source of truth for file metadata -- domain tables own that.'; + 'Operational index for S3 objects. One row per uploaded file. Generated versions (thumbnail, medium) stored inline in the versions JSONB column.'; COMMENT ON COLUMN files_store_public.files.key IS 'Full S3 object key. Format: {database_id}/{bucket_key}/{uuid}_{version_name}. Origin files use _origin suffix.'; COMMENT ON COLUMN files_store_public.files.etag IS @@ -102,22 +102,11 @@ COMMENT ON COLUMN files_store_public.files.source_column IS 'Column name on the source table (e.g. profile_picture). NULL until domain trigger populates it.'; COMMENT ON COLUMN files_store_public.files.source_id IS 'Primary key of the row in the source table. NULL until domain trigger populates it.'; -COMMENT ON COLUMN files_store_public.files.origin_id IS - 'Self-referential FK to the origin file. NULL for origin rows, set for version rows (thumbnail, medium).'; +COMMENT ON COLUMN files_store_public.files.versions IS + 'JSONB array of generated versions. Each entry: { key, mime, width, height }. NULL until process-image completes. Non-image files remain NULL.'; COMMENT ON COLUMN files_store_public.files.mime_type IS 'Detected MIME type of the file. Set at upload time for origins, at processing time for versions.'; --- Self-referential FK (version -> origin, same table) --- ON DELETE CASCADE: DB-level safety net. The primary deletion path is --- per-row delete-s3-object jobs (each row gets its own job via trigger). 
--- CASCADE only fires if an origin row is directly DELETEd before its --- version rows -- in that case, version DB rows are removed but version --- S3 objects are still cleaned up by their already-enqueued jobs. -ALTER TABLE files_store_public.files - ADD CONSTRAINT files_origin_fk - FOREIGN KEY (origin_id, database_id) - REFERENCES files_store_public.files (id, database_id) - ON DELETE CASCADE; -- --------------------------------------------------------------------------- -- 3. Buckets Table @@ -182,19 +171,12 @@ CREATE INDEX files_deleting_idx CREATE INDEX files_created_at_brin_idx ON files_store_public.files USING brin (created_at); --- Version lookups: "find all versions of this origin" -CREATE INDEX files_origin_id_idx - ON files_store_public.files (origin_id, database_id) - WHERE origin_id IS NOT NULL; -- --------------------------------------------------------------------------- -- 5. Triggers -- --------------------------------------------------------------------------- -- 5a. AFTER INSERT -- enqueue process-image job --- NOTE: Version rows are inserted with status = 'ready', which intentionally --- bypasses this trigger (condition: NEW.status = 'pending'). Only origin --- uploads (status = 'pending') need processing. CREATE OR REPLACE FUNCTION files_store_public.files_after_insert_queue_processing() RETURNS trigger AS $$ @@ -219,7 +201,7 @@ CREATE TRIGGER files_after_insert_queue_processing EXECUTE FUNCTION files_store_public.files_after_insert_queue_processing(); COMMENT ON TRIGGER files_after_insert_queue_processing ON files_store_public.files IS - 'Enqueues process-image job for new origin uploads. Version rows inserted as ready intentionally bypass this trigger -- they do not need processing.'; + 'Enqueues process-image job for new uploads (status=pending).'; -- 5b. 
BEFORE UPDATE -- timestamp + state machine @@ -264,14 +246,25 @@ COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion() RETURNS trigger AS $$ +DECLARE + version_keys json; BEGIN + -- Collect version S3 keys from the versions JSONB column + IF NEW.versions IS NOT NULL THEN + SELECT json_agg(v->>'key') + INTO version_keys + FROM jsonb_array_elements(NEW.versions) v + WHERE v->>'key' IS NOT NULL; + END IF; + PERFORM app_jobs.add_job( NEW.database_id, 'delete-s3-object', json_build_object( 'file_id', NEW.id, 'database_id', NEW.database_id, - 'key', NEW.key + 'key', NEW.key, + 'version_keys', COALESCE(version_keys, '[]'::json) ), job_key := 'delete:' || NEW.id::text ); @@ -286,7 +279,7 @@ CREATE TRIGGER files_after_update_queue_deletion EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion(); COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS - 'Enqueues delete-s3-object job when a file transitions to deleting status. Each version row gets its own deletion job.'; + 'Enqueues delete-s3-object job when a file transitions to deleting status. Version S3 keys from the versions JSONB column are included in the job payload.'; -- 5d. 
AFTER UPDATE -- re-enqueue process-image on error->pending retry @@ -423,10 +416,10 @@ DECLARE old_val jsonb; new_key text; old_key text; - db_id integer; - origin_file_id uuid; - old_origin_file_id uuid; - versions_json json; + db_id uuid; + file_id uuid; + old_file_id uuid; + versions_json jsonb; BEGIN -- Get the database_id from session context db_id := current_setting('app.database_id')::uuid; @@ -444,71 +437,36 @@ BEGIN RETURN NEW; END IF; - -- Handle file replacement: mark old files as deleting + -- Handle file replacement: mark old file as deleting IF old_key IS NOT NULL AND old_key <> '' THEN - -- Find old origin by exact key match - SELECT id INTO old_origin_file_id + SELECT id INTO old_file_id FROM files_store_public.files WHERE key = old_key AND database_id = db_id; - IF old_origin_file_id IS NOT NULL THEN - -- Mark old origin as deleting - UPDATE files_store_public.files - SET status = 'deleting', status_reason = 'replaced by new file' - WHERE id = old_origin_file_id AND database_id = db_id - AND status NOT IN ('deleting'); - - -- Mark old versions as deleting (index hit on origin_id) + IF old_file_id IS NOT NULL THEN UPDATE files_store_public.files SET status = 'deleting', status_reason = 'replaced by new file' - WHERE origin_id = old_origin_file_id AND database_id = db_id + WHERE id = old_file_id AND database_id = db_id AND status NOT IN ('deleting'); END IF; END IF; - -- Populate back-reference on new file (origin + versions) + -- Populate back-reference on new file IF new_key IS NOT NULL AND new_key <> '' THEN - -- Find origin by exact key match - SELECT id INTO origin_file_id + SELECT id, versions INTO file_id, versions_json FROM files_store_public.files WHERE key = new_key AND database_id = db_id; - IF origin_file_id IS NOT NULL THEN - -- Update origin row - UPDATE files_store_public.files - SET source_table = table_name, source_column = col_name, source_id = NEW.id - WHERE id = origin_file_id AND database_id = db_id; - - -- Update version rows 
(index hit on origin_id) + IF file_id IS NOT NULL THEN + -- Update file row with source back-reference UPDATE files_store_public.files SET source_table = table_name, source_column = col_name, source_id = NEW.id - WHERE origin_id = origin_file_id AND database_id = db_id; + WHERE id = file_id AND database_id = db_id; -- Backfill versions into domain JSONB if process-image already completed. - -- This fixes the race condition where process-image runs before domain - -- association (two-step upload path) and can't write back versions. - -- Uses mime_type column for accurate MIME (not hardcoded). - SELECT json_agg(json_build_object( - 'key', f.key, - 'mime', COALESCE(f.mime_type, 'image/jpeg'), - 'width', 0, - 'height', 0 - )) - INTO versions_json - FROM files_store_public.files f - WHERE f.origin_id = origin_file_id - AND f.database_id = db_id - AND f.status = 'ready'; - + -- RECURSION GUARD: Only the 'versions' subfield is modified -- the 'key' + -- field is unchanged, so the IS NOT DISTINCT FROM check above returns early. IF versions_json IS NOT NULL THEN - -- RECURSION GUARD: This UPDATE re-fires the current trigger on the - -- domain table. It is safe because only the 'versions' subfield of - -- the JSONB column is modified -- the 'key' field is unchanged. - -- The IS NOT DISTINCT FROM check at the top of this function - -- compares old_key vs new_key (both extracted via ->> 'key'), - -- detects they are equal, and returns early. - -- DO NOT change the early-return comparison to use the full JSONB - -- value instead of just the 'key' field, or this will infinite-loop. 
EXECUTE format( 'UPDATE %s SET %I = jsonb_set(COALESCE(%I, ''{}''::jsonb), ''{versions}'', $1::jsonb) WHERE id = $2', table_name, col_name, col_name @@ -533,7 +491,7 @@ RETURNS trigger AS $$ DECLARE col_name text := TG_ARGV[0]; table_name text := TG_ARGV[1]; - db_id integer; + db_id uuid; BEGIN db_id := current_setting('app.database_id')::uuid; @@ -553,32 +511,7 @@ $$ LANGUAGE plpgsql; COMMENT ON FUNCTION files_store_public.mark_files_deleting_on_source_delete() IS 'Generic trigger function for domain tables. Marks all associated files as deleting when a domain row is deleted.'; --- 7c. Propagate deleting status from origin to version rows. --- When an origin transitions to 'deleting', mark all its versions as 'deleting' too. --- Each version row's AFTER UPDATE trigger then enqueues its own delete-s3-object job. - -CREATE OR REPLACE FUNCTION files_store_public.files_propagate_deleting_to_versions() -RETURNS trigger AS $$ -BEGIN - UPDATE files_store_public.files - SET status = 'deleting', status_reason = COALESCE(NEW.status_reason, 'origin marked deleting') - WHERE origin_id = NEW.id - AND database_id = NEW.database_id - AND status NOT IN ('deleting'); - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER files_after_update_propagate_deleting - AFTER UPDATE ON files_store_public.files - FOR EACH ROW - WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting' AND NEW.origin_id IS NULL) - EXECUTE FUNCTION files_store_public.files_propagate_deleting_to_versions(); - -COMMENT ON TRIGGER files_after_update_propagate_deleting ON files_store_public.files IS - 'When an origin file transitions to deleting, propagate that status to all version rows via origin_id. Each version then gets its own delete-s3-object job via the existing files_after_update_queue_deletion trigger. The WHEN clause filters to origin rows only (origin_id IS NULL).'; - --- 7d. CREATE TRIGGER statements for all 6 tables, 9 columns +-- 7c. 
CREATE TRIGGER statements for all 6 tables, 9 columns -- -- Each domain column gets two triggers: -- - AFTER UPDATE: back-reference population + file replacement From 653628cf04644319df60f6d1e028d3a748a6b98d Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 19 Mar 2026 20:01:30 +0800 Subject: [PATCH 14/15] fixed backfill logic --- .../graphile-settings/src/upload-resolver.ts | 20 ++++-- graphql/server/src/middleware/upload.ts | 61 +++++++++++++++++++ 2 files changed, 76 insertions(+), 5 deletions(-) diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index 238828989..92190e6ca 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -120,13 +120,18 @@ async function insertFileRecord( etag: string, createdBy: string | null, contentType: string | null, + source?: { table: string; column: string; id: string } | null, ): Promise { const pool = getPgPool(); await pool.query( `INSERT INTO files_store_public.files - (id, database_id, bucket_key, key, etag, created_by, mime_type) - VALUES ($1, $2, $3, $4, $5, $6, $7)`, - [fileId, databaseId, bucketKey, key, etag, createdBy, contentType], + (id, database_id, bucket_key, key, etag, created_by, mime_type, + source_table, source_column, source_id) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)`, + [ + fileId, databaseId, bucketKey, key, etag, createdBy, contentType, + source?.table || null, source?.column || null, source?.id || null, + ], ); } @@ -149,7 +154,12 @@ function extractContextInfo(context: any): { databaseId: string | null; userId: export async function streamToStorage( readStream: Readable, filename: string, - opts?: { databaseId?: string; userId?: string; bucketKey?: string }, + opts?: { + databaseId?: string; + userId?: string; + bucketKey?: string; + source?: { table: string; column: string; id: string } | null; + }, ): Promise<{ url: string; filename: string; mime: string; key?: 
string }> { const storage = getStorageProvider(); const bucketKey = opts?.bucketKey || 'default'; @@ -166,7 +176,7 @@ export async function streamToStorage( const result = await storage.upload(key, detected.stream, { contentType }); - await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, opts?.userId || null, contentType); + await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, opts?.userId || null, contentType, opts?.source); const url = await storage.presignGet(key, 3600); return { key, url, filename, mime: contentType }; diff --git a/graphql/server/src/middleware/upload.ts b/graphql/server/src/middleware/upload.ts index 0d34775c4..c9d4fd5a5 100644 --- a/graphql/server/src/middleware/upload.ts +++ b/graphql/server/src/middleware/upload.ts @@ -290,10 +290,71 @@ export const uploadRoute: RequestHandler[] = [ try { const readStream = fs.createReadStream(req.file.path); + + // Source back-reference from form fields (optional). + // Dashboard sends GraphQL-level names; we resolve PG-level names via metaschema. + const gqlTableName = req.body?.source_table; // GraphQL type, e.g. "Photo" + const gqlColumnName = req.body?.source_column; // GraphQL field, e.g. "img" + const sourceId = req.body?.source_id; // Row UUID + let source: { table: string; column: string; id: string } | null = null; + + if (gqlTableName && gqlColumnName && sourceId && req.api?.databaseId) { + try { + const pgCacheModule = await import('pg-cache'); + const pool = pgCacheModule.getPgPool({ + host: process.env.PGHOST || 'localhost', + port: Number(process.env.PGPORT || 5432), + database: process.env.PGDATABASE || 'constructive', + user: process.env.PGUSER || 'postgres', + password: process.env.PGPASSWORD || 'password', + }); + // Look up PG schema.table from metaschema. + // Try exact match on lowercase plural (Photo → photos), then singular. 
+ const candidates = [ + gqlTableName.toLowerCase() + 's', // Photo → photos + gqlTableName.toLowerCase(), // Photo → photo + ]; + const { rows } = await pool.query( + `SELECT s.name AS schema_name, t.name AS table_name + FROM metaschema_public."table" t + JOIN metaschema_public.schema s ON s.id = t.schema_id + WHERE s.database_id = $1 + AND t.name = ANY($2) + LIMIT 1`, + [req.api.databaseId, candidates] + ); + if (rows.length > 0) { + source = { + table: `${rows[0].schema_name}.${rows[0].table_name}`, + column: gqlColumnName, + id: sourceId, + }; + } else { + // Fallback: search information_schema for matching table name + const fallback = await pool.query( + `SELECT table_schema, table_name FROM information_schema.tables + WHERE table_name = ANY($1) AND table_schema NOT IN ('pg_catalog', 'information_schema') + LIMIT 1`, + [candidates] + ); + if (fallback.rows.length > 0) { + source = { + table: `${fallback.rows[0].table_schema}.${fallback.rows[0].table_name}`, + column: gqlColumnName, + id: sourceId, + }; + } + } + } catch (resolveErr) { + uploadLog.debug('[upload] Failed to resolve source back-reference', resolveErr); + } + } + const result = await streamToStorage(readStream, req.file.originalname, { databaseId: req.api?.databaseId, userId: req.token.user_id, bucketKey: 'default', + source, }); uploadLog.debug( From d4ab4811bd3748d645e306c4ee5819385f52babd Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 19 Mar 2026 21:19:42 +0800 Subject: [PATCH 15/15] fixed missing fields --- graphql/server/src/middleware/upload.ts | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/graphql/server/src/middleware/upload.ts b/graphql/server/src/middleware/upload.ts index c9d4fd5a5..f4efd5ab8 100644 --- a/graphql/server/src/middleware/upload.ts +++ b/graphql/server/src/middleware/upload.ts @@ -309,13 +309,14 @@ export const uploadRoute: RequestHandler[] = [ password: process.env.PGPASSWORD || 'password', }); // Look up PG schema.table from 
metaschema. + // s.schema_name is the physical PG schema (e.g. "testdb-31d39e18-app-public"). // Try exact match on lowercase plural (Photo → photos), then singular. const candidates = [ gqlTableName.toLowerCase() + 's', // Photo → photos gqlTableName.toLowerCase(), // Photo → photo ]; const { rows } = await pool.query( - `SELECT s.name AS schema_name, t.name AS table_name + `SELECT s.schema_name AS pg_schema, t.name AS table_name FROM metaschema_public."table" t JOIN metaschema_public.schema s ON s.id = t.schema_id WHERE s.database_id = $1 @@ -323,27 +324,12 @@ export const uploadRoute: RequestHandler[] = [ LIMIT 1`, [req.api.databaseId, candidates] ); - if (rows.length > 0) { + if (rows.length > 0 && rows[0].pg_schema) { source = { - table: `${rows[0].schema_name}.${rows[0].table_name}`, + table: `${rows[0].pg_schema}.${rows[0].table_name}`, column: gqlColumnName, id: sourceId, }; - } else { - // Fallback: search information_schema for matching table name - const fallback = await pool.query( - `SELECT table_schema, table_name FROM information_schema.tables - WHERE table_name = ANY($1) AND table_schema NOT IN ('pg_catalog', 'information_schema') - LIMIT 1`, - [candidates] - ); - if (fallback.rows.length > 0) { - source = { - table: `${fallback.rows[0].table_schema}.${fallback.rows[0].table_name}`, - column: gqlColumnName, - id: sourceId, - }; - } } } catch (resolveErr) { uploadLog.debug('[upload] Failed to resolve source back-reference', resolveErr);