From 3e10f853c1608e7a46de47335ceeddb3a4dde932 Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 12 Nov 2025 04:24:13 +0000 Subject: [PATCH 1/5] Add provider architecture for OSS/hosted split Implements flexible provider pattern for dual deployment modes: - Provider interfaces for context, billing, analytics, storage - No-op providers for OSS (unlimited, no tracking) - Database migrations for projects and project_members - RLS policies supporting both user-owned and project-owned collections - Feature flags and comprehensive documentation Note: Supabase types will be regenerated after migrations are applied --- .env.example | 42 +++ lib/providers/README.md | 290 ++++++++++++++++++ lib/providers/analytics/noop.ts | 21 ++ lib/providers/analytics/types.ts | 45 +++ lib/providers/billing/noop.ts | 30 ++ lib/providers/billing/types.ts | 55 ++++ lib/providers/context/types.ts | 53 ++++ lib/providers/context/user-context.ts | 59 ++++ lib/providers/features.ts | 47 +++ lib/providers/index.ts | 116 +++++++ lib/providers/storage/noop.ts | 27 ++ lib/providers/storage/types.ts | 35 +++ .../20251112000000_add_hosted_features.sql | 228 ++++++++++++++ ...112000001_migrate_existing_collections.sql | 58 ++++ 14 files changed, 1106 insertions(+) create mode 100644 lib/providers/README.md create mode 100644 lib/providers/analytics/noop.ts create mode 100644 lib/providers/analytics/types.ts create mode 100644 lib/providers/billing/noop.ts create mode 100644 lib/providers/billing/types.ts create mode 100644 lib/providers/context/types.ts create mode 100644 lib/providers/context/user-context.ts create mode 100644 lib/providers/features.ts create mode 100644 lib/providers/index.ts create mode 100644 lib/providers/storage/noop.ts create mode 100644 lib/providers/storage/types.ts create mode 100644 supabase/migrations/20251112000000_add_hosted_features.sql create mode 100644 supabase/migrations/20251112000001_migrate_existing_collections.sql diff --git a/.env.example b/.env.example index 
3639306..bfb5fb1 100644 --- a/.env.example +++ b/.env.example @@ -1,5 +1,47 @@ +# ===================================================================== +# SUPABASE CONFIGURATION +# ===================================================================== # Update these with your Supabase details from your project settings > API # https://app.supabase.com/project/_/settings/api + NEXT_PUBLIC_SUPABASE_URL=your-project-url NEXT_PUBLIC_SUPABASE_PUBLISHABLE_KEY=your-publishable-or-anon-key SUPABASE_SERVICE_ROLE_KEY=your-service-role-key + +# ===================================================================== +# HOSTED MODE (Optional - for paid hosted version only) +# ===================================================================== +# Set to 'true' to enable hosted features (projects, billing, analytics) +# Leave commented out or set to 'false' for self-hosted/OSS mode + +# PAPERCLIP_HOSTED=true +# NEXT_PUBLIC_PAPERCLIP_HOSTED=true + +# ===================================================================== +# BILLING (Hosted Mode Only) +# ===================================================================== +# Uncomment the provider you're using + +# Polar.sh +# POLAR_API_KEY=your-polar-api-key +# POLAR_WEBHOOK_SECRET=your-polar-webhook-secret +# POLAR_PRODUCT_ID=your-polar-product-id + +# Flowglad (alternative) +# FLOWGLAD_API_KEY=your-flowglad-api-key +# FLOWGLAD_WEBHOOK_SECRET=your-flowglad-webhook-secret + +# Stripe (alternative) +# STRIPE_SECRET_KEY=your-stripe-secret-key +# STRIPE_WEBHOOK_SECRET=your-stripe-webhook-secret +# STRIPE_PRICE_ID=your-stripe-price-id + +# ===================================================================== +# ANALYTICS (Hosted Mode Only) +# ===================================================================== + +# PostHog +# POSTHOG_API_KEY=your-posthog-api-key +# POSTHOG_HOST=https://app.posthog.com +# NEXT_PUBLIC_POSTHOG_KEY=your-posthog-public-key +# NEXT_PUBLIC_POSTHOG_HOST=https://app.posthog.com diff --git 
a/lib/providers/README.md b/lib/providers/README.md new file mode 100644 index 0000000..19a3d6a --- /dev/null +++ b/lib/providers/README.md @@ -0,0 +1,290 @@ +# Provider Architecture + +This directory contains the provider pattern implementation that enables the split between open-source (self-hosted) and hosted (paid) versions of Paperclip CMS. + +## Overview + +The provider pattern allows the same codebase to support two deployment modes: + +- **OSS Mode** (default): User-owned collections, no limits, no billing +- **Hosted Mode**: Project-based multi-tenancy, billing, analytics, team collaboration + +## How It Works + +### Environment-Based Activation + +Providers are initialized based on the `PAPERCLIP_HOSTED` environment variable: + +```bash +# OSS Mode (default) +# No environment variable needed + +# Hosted Mode +PAPERCLIP_HOSTED=true +NEXT_PUBLIC_PAPERCLIP_HOSTED=true +``` + +### Provider Types + +#### 1. Context Provider + +Abstracts the ownership model (user-owned vs project-owned). + +**OSS**: Collections belong directly to users +**Hosted**: Collections belong to projects, users access via membership + +```typescript +import { getContextProvider } from '@/lib/providers' + +const context = getContextProvider() +const collections = await context.getOwnedCollections(userId) +const canCreate = await context.canCreateCollection(userId) +``` + +#### 2. Billing Provider + +Manages subscriptions and payment processing. + +**OSS**: No-op, always returns active +**Hosted**: Integrates with Polar/Flowglad/Stripe + +```typescript +import { getBillingProvider } from '@/lib/providers' + +const billing = getBillingProvider() +const isActive = await billing.hasActiveSubscription(projectId) +``` + +#### 3. Analytics Provider + +Tracks user behavior and usage metrics. 
+ +**OSS**: Silent no-op +**Hosted**: PostHog integration + +```typescript +import { getAnalyticsProvider } from '@/lib/providers' + +const analytics = getAnalyticsProvider() +await analytics.track({ + event: 'collection_created', + userId, + projectId, + properties: { collection_name: 'Blog Posts' } +}) +``` + +#### 4. Storage Provider + +Monitors storage usage and enforces limits. + +**OSS**: Unlimited, no tracking +**Hosted**: Tracks usage, soft limits + +```typescript +import { getStorageProvider } from '@/lib/providers' + +const storage = getStorageProvider() +const info = await storage.getStorageInfo(projectId) +const canUpload = await storage.canUpload(projectId, fileSizeBytes) +``` + +## Directory Structure + +``` +/lib/providers/ +├── context/ +│ ├── types.ts # ContextProvider interface +│ └── user-context.ts # OSS: User-owned collections +├── billing/ +│ ├── types.ts # BillingProvider interface +│ └── noop.ts # OSS: No billing +├── analytics/ +│ ├── types.ts # AnalyticsProvider interface +│ └── noop.ts # OSS: No analytics +├── storage/ +│ ├── types.ts # StorageProvider interface +│ └── noop.ts # OSS: No limits +├── index.ts # Provider registry +├── features.ts # Client-side feature flags +└── README.md # This file + +/lib/hosted/ # Hosted implementations (BSL/proprietary) +├── context/ +│ └── project-context.ts # Hosted: Project-owned collections +├── billing/ +│ ├── polar-provider.ts # Polar.sh integration +│ ├── flowglad-provider.ts # Flowglad integration +│ └── stripe-provider.ts # Stripe integration +├── analytics/ +│ └── posthog-provider.ts # PostHog integration +└── storage/ + └── storage-provider.ts # Storage tracking & limits +``` + +## Database Schema + +### OSS Mode + +```sql +collections ( + id, slug, name, config, + user_id NOT NULL, -- Direct ownership + project_id NULL +) +``` + +### Hosted Mode + +```sql +projects ( + id, name, slug, + subscription_id, subscription_status, subscription_provider +) + +project_members ( + project_id, user_id, 
role +) + +collections ( + id, slug, name, config, + user_id NULL, + project_id NOT NULL -- Project ownership +) +``` + +Both modes coexist in the same database - a collection must have either `user_id` OR `project_id` (enforced by check constraint). + +## Usage in Application Code + +### API Routes + +```typescript +// app/api/collections/route.ts +import { getContextProvider, getAnalyticsProvider } from '@/lib/providers' + +export async function POST(req: Request) { + const user = await getUser(req) + const context = getContextProvider() + const analytics = getAnalyticsProvider() + + // Check permissions (mode-aware) + const canCreate = await context.canCreateCollection(user.id) + if (!canCreate) { + return Response.json({ error: 'Upgrade required' }, { status: 402 }) + } + + // Get context (OSS: {userId}, Hosted: {userId, projectId, role}) + const appContext = await context.getContext(user.id) + + // Create collection + const { data } = await supabase.from('collections').insert({ + ...body, + user_id: appContext.projectId ? null : appContext.userId, + project_id: appContext.projectId || null + }) + + // Track event (OSS: no-op, Hosted: PostHog) + await analytics.track({ + event: 'collection_created', + userId: user.id, + projectId: appContext.projectId, + properties: { collection_id: data.id } + }) + + return Response.json(data) +} +``` + +### Components + +```typescript +// components/settings/settings-page.tsx +import { useFeatures } from '@/lib/providers/features' + +export function SettingsPage() { + const features = useFeatures() + + return ( +
+    <div>
+      <h1>Settings</h1>
+
+      {/* Always shown */}
+      <AccountSettings />
+
+      {/* Hosted-only */}
+      {features.projects && <ProjectSettings />}
+      {features.billing && <BillingSettings />}
+      {features.analytics && <AnalyticsSettings />}
+ ) +} +``` + +## Adding New Hosted Features + +1. **Define the interface** in `/lib/providers/{feature}/types.ts` +2. **Create no-op implementation** in `/lib/providers/{feature}/noop.ts` +3. **Implement hosted version** in `/lib/hosted/{feature}/{provider}.ts` +4. **Register in provider registry** (`/lib/providers/index.ts`) +5. **Add feature flag** to `/lib/providers/features.ts` +6. **Use in application** via `get{Feature}Provider()` + +## Licensing + +- `/lib/providers/` → MIT (open source interfaces) +- `/lib/hosted/` → BSL 1.1 or Proprietary (hosted implementations) + +## Environment Variables + +See `.env.example` for full configuration options: + +```bash +# Hosted Mode +PAPERCLIP_HOSTED=true +NEXT_PUBLIC_PAPERCLIP_HOSTED=true + +# Billing (choose one) +POLAR_API_KEY=... +POLAR_WEBHOOK_SECRET=... +POLAR_PRODUCT_ID=... + +# Analytics +POSTHOG_API_KEY=... +POSTHOG_HOST=https://app.posthog.com +NEXT_PUBLIC_POSTHOG_KEY=... +NEXT_PUBLIC_POSTHOG_HOST=https://app.posthog.com +``` + +## Testing Both Modes + +### Test OSS Mode + +```bash +# No PAPERCLIP_HOSTED variable +npm run dev +``` + +### Test Hosted Mode + +```bash +# Set environment variable +PAPERCLIP_HOSTED=true npm run dev +``` + +Note: Hosted mode requires the `/lib/hosted/` implementations to be present. + +## Next Steps + +To complete the hosted implementation: + +1. **Implement hosted providers** in `/lib/hosted/` +2. **Add API routes** for billing webhooks (`/api/billing/webhook`) +3. **Build settings UI** for project/billing management +4. **Create onboarding flow** for new hosted users +5. **Add project creation** on first signup (hosted mode) +6. **Implement team invitation** system + +## Questions? + +See the main architecture document or open an issue. 
diff --git a/lib/providers/analytics/noop.ts b/lib/providers/analytics/noop.ts new file mode 100644 index 0000000..c9fe654 --- /dev/null +++ b/lib/providers/analytics/noop.ts @@ -0,0 +1,21 @@ +import type { AnalyticsProvider, AnalyticsEvent, UserTraits } from './types' + +/** + * No-Op Analytics Provider (OSS Default) + * + * In self-hosted mode, we don't track analytics. + * All methods are silent no-ops. + */ +export class NoOpAnalyticsProvider implements AnalyticsProvider { + async track(_event: AnalyticsEvent): Promise { + // Silent no-op + } + + async identify(_userId: string, _traits?: UserTraits): Promise { + // Silent no-op + } + + async page(_userId: string, _pageName: string, _properties?: Record): Promise { + // Silent no-op + } +} diff --git a/lib/providers/analytics/types.ts b/lib/providers/analytics/types.ts new file mode 100644 index 0000000..7a9cad3 --- /dev/null +++ b/lib/providers/analytics/types.ts @@ -0,0 +1,45 @@ +/** + * Analytics event + */ +export interface AnalyticsEvent { + event: string + properties?: Record + userId?: string + projectId?: string +} + +/** + * User traits for identification + */ +export interface UserTraits { + email?: string + name?: string + createdAt?: Date + [key: string]: any +} + +/** + * Analytics provider interface + */ +export interface AnalyticsProvider { + /** + * Track an event + * OSS: No-op + * Hosted: Sends to PostHog + */ + track(event: AnalyticsEvent): Promise + + /** + * Identify a user + * OSS: No-op + * Hosted: Sends to PostHog + */ + identify(userId: string, traits?: UserTraits): Promise + + /** + * Track a page view + * OSS: No-op + * Hosted: Sends to PostHog + */ + page(userId: string, pageName: string, properties?: Record): Promise +} diff --git a/lib/providers/billing/noop.ts b/lib/providers/billing/noop.ts new file mode 100644 index 0000000..4375069 --- /dev/null +++ b/lib/providers/billing/noop.ts @@ -0,0 +1,30 @@ +import type { BillingProvider, Subscription } from './types' + +/** + * No-Op 
Billing Provider (OSS Default) + * + * In self-hosted mode, there is no billing. + * All features are available, no subscriptions required. + */ +export class NoOpBillingProvider implements BillingProvider { + async hasActiveSubscription(_contextId: string): Promise { + // Self-hosted = always active + return true + } + + async getSubscription(_contextId: string): Promise { + return { + id: 'self-hosted', + status: 'active', + provider: 'none', + } + } + + async createCheckoutUrl(_contextId: string, _userId: string): Promise { + throw new Error('Billing is not available in self-hosted mode') + } + + async handleWebhook(_payload: any, _signature: string): Promise { + throw new Error('Billing webhooks are not available in self-hosted mode') + } +} diff --git a/lib/providers/billing/types.ts b/lib/providers/billing/types.ts new file mode 100644 index 0000000..2e005b7 --- /dev/null +++ b/lib/providers/billing/types.ts @@ -0,0 +1,55 @@ +/** + * Subscription status + */ +export type SubscriptionStatus = 'active' | 'canceled' | 'past_due' | 'trialing' + +/** + * Payment provider type + */ +export type PaymentProvider = 'polar' | 'flowglad' | 'stripe' | 'none' + +/** + * Subscription information + */ +export interface Subscription { + id: string + status: SubscriptionStatus + provider: PaymentProvider + + // Optional fields + currentPeriodEnd?: Date + cancelAtPeriodEnd?: boolean +} + +/** + * Billing provider interface + */ +export interface BillingProvider { + /** + * Check if a project/user has an active subscription + * OSS: Always returns true + * Hosted: Checks actual subscription status + */ + hasActiveSubscription(contextId: string): Promise + + /** + * Get subscription details + * OSS: Returns 'none' provider + * Hosted: Returns actual subscription + */ + getSubscription(contextId: string): Promise + + /** + * Create a checkout URL for subscribing + * OSS: Throws error + * Hosted: Returns Polar/Flowglad/Stripe checkout URL + */ + createCheckoutUrl(contextId: 
string, userId: string): Promise + + /** + * Handle webhook from payment provider + * OSS: Throws error + * Hosted: Processes subscription events + */ + handleWebhook(payload: any, signature: string): Promise +} diff --git a/lib/providers/context/types.ts b/lib/providers/context/types.ts new file mode 100644 index 0000000..7f5406a --- /dev/null +++ b/lib/providers/context/types.ts @@ -0,0 +1,53 @@ +import { Collection } from '@/lib/types' + +/** + * Application context - represents the current user's scope + * OSS: Just userId + * Hosted: userId + projectId + role + */ +export interface AppContext { + userId: string + + // Hosted-only fields + projectId?: string + projectSlug?: string + role?: 'owner' | 'admin' | 'member' +} + +/** + * Context provider - abstracts ownership model + * OSS: User owns collections directly + * Hosted: User accesses collections via project membership + */ +export interface ContextProvider { + /** + * Get the current application context for a user + */ + getContext(userId: string): Promise + + /** + * Get collections accessible to the user + */ + getOwnedCollections(userId: string): Promise + + /** + * Check if user can create a collection + * OSS: Always true + * Hosted: Checks role + subscription status + */ + canCreateCollection(userId: string): Promise + + /** + * Check if user can edit a specific collection + * OSS: Checks user_id ownership + * Hosted: Checks project membership + */ + canEditCollection(userId: string, collectionId: string): Promise + + /** + * Check if user can delete a specific collection + * OSS: Checks user_id ownership + * Hosted: Checks role (owner/admin only) + */ + canDeleteCollection(userId: string, collectionId: string): Promise +} diff --git a/lib/providers/context/user-context.ts b/lib/providers/context/user-context.ts new file mode 100644 index 0000000..039f2e9 --- /dev/null +++ b/lib/providers/context/user-context.ts @@ -0,0 +1,59 @@ +import { createClient } from '@/lib/supabase/server' +import type { 
AppContext, ContextProvider } from './types' +import type { Database } from '@/lib/supabase/types' + +type Collection = Database['public']['Tables']['collections']['Row'] + +/** + * User Context Provider (OSS Default) + * + * In self-hosted mode, users own collections directly. + * No projects, no team collaboration - just simple user → collection ownership. + */ +export class UserContextProvider implements ContextProvider { + async getContext(userId: string): Promise { + return { + userId, + } + } + + async getOwnedCollections(userId: string): Promise { + const supabase = await createClient() + + const { data, error } = await supabase + .from('collections') + .select('*') + .eq('user_id', userId) + .order('created_at', { ascending: false }) + + if (error) { + console.error('Error fetching collections:', error) + return [] + } + + return data || [] + } + + async canCreateCollection(userId: string): Promise { + // OSS: No limits, always allow + return true + } + + async canEditCollection(userId: string, collectionId: string): Promise { + const supabase = await createClient() + + const { data } = await supabase + .from('collections') + .select('user_id') + .eq('id', collectionId) + .single() + + // User can edit if they own the collection + return data?.user_id === userId + } + + async canDeleteCollection(userId: string, collectionId: string): Promise { + // Same as edit - user must own it + return this.canEditCollection(userId, collectionId) + } +} diff --git a/lib/providers/features.ts b/lib/providers/features.ts new file mode 100644 index 0000000..d9fe802 --- /dev/null +++ b/lib/providers/features.ts @@ -0,0 +1,47 @@ +'use client' + +/** + * Feature flags for conditional UI rendering + * These are based on environment variables and determine what features are available + */ +export interface Features { + /** Whether projects/teams are available (hosted only) */ + projects: boolean + + /** Whether billing is available (hosted only) */ + billing: boolean + + /** 
Whether analytics tracking is active (hosted only) */ + analytics: boolean + + /** Whether storage quotas are enforced (hosted only) */ + storageQuotas: boolean +} + +/** + * Get available features based on environment + */ +export function getFeatures(): Features { + const isHosted = process.env.NEXT_PUBLIC_PAPERCLIP_HOSTED === 'true' + + return { + projects: isHosted, + billing: isHosted, + analytics: isHosted, + storageQuotas: isHosted, + } +} + +/** + * React hook for accessing feature flags + */ +export function useFeatures(): Features { + return getFeatures() +} + +/** + * Check if running in hosted mode + */ +export function isHostedMode(): boolean { + return process.env.NEXT_PUBLIC_PAPERCLIP_HOSTED === 'true' +} diff --git a/lib/providers/index.ts b/lib/providers/index.ts new file mode 100644 index 0000000..415567e --- /dev/null +++ b/lib/providers/index.ts @@ -0,0 +1,116 @@ +import type { ContextProvider } from './context/types' +import type { BillingProvider } from './billing/types' +import type { AnalyticsProvider } from './analytics/types' +import type { StorageProvider } from './storage/types' + +// OSS implementations +import { UserContextProvider } from './context/user-context' +import { NoOpBillingProvider } from './billing/noop' +import { NoOpAnalyticsProvider } from './analytics/noop' +import { NoOpStorageProvider } from './storage/noop' + +// Global provider instances +let contextProvider: ContextProvider | null = null +let billingProvider: BillingProvider | null = null +let analyticsProvider: AnalyticsProvider | null = null +let storageProvider: StorageProvider | null = null + +/** + * Check if running in hosted mode + */ +export function isHostedMode(): boolean { + return process.env.PAPERCLIP_HOSTED === 'true' +} + +/** + * Initialize all providers based on environment + * Call this once at application startup + */ +export function initializeProviders() { + if (isHostedMode()) { + try { + // Load hosted implementations dynamically + // This 
allows the OSS build to work without the hosted code + const { ProjectContextProvider } = require('@/lib/hosted/context/project-context') + const { PolarBillingProvider } = require('@/lib/hosted/billing/polar-provider') + const { PostHogAnalyticsProvider } = require('@/lib/hosted/analytics/posthog-provider') + const { HostedStorageProvider } = require('@/lib/hosted/storage/storage-provider') + + contextProvider = new ProjectContextProvider() + billingProvider = new PolarBillingProvider() + analyticsProvider = new PostHogAnalyticsProvider() + storageProvider = new HostedStorageProvider() + + console.log('✓ Hosted mode enabled') + console.log(' - Projects: enabled') + console.log(' - Billing: Polar') + console.log(' - Analytics: PostHog') + } catch (err) { + console.error('Failed to load hosted providers:', err) + console.error('Falling back to OSS mode') + initializeOSSProviders() + } + } else { + initializeOSSProviders() + } +} + +/** + * Initialize OSS providers + */ +function initializeOSSProviders() { + contextProvider = new UserContextProvider() + billingProvider = new NoOpBillingProvider() + analyticsProvider = new NoOpAnalyticsProvider() + storageProvider = new NoOpStorageProvider() + + console.log('ℹ Self-hosted mode') + console.log(' - User-owned collections') + console.log(' - No billing or limits') +} + +/** + * Get the context provider instance + */ +export function getContextProvider(): ContextProvider { + if (!contextProvider) { + initializeProviders() + } + return contextProvider! +} + +/** + * Get the billing provider instance + */ +export function getBillingProvider(): BillingProvider { + if (!billingProvider) { + initializeProviders() + } + return billingProvider! +} + +/** + * Get the analytics provider instance + */ +export function getAnalyticsProvider(): AnalyticsProvider { + if (!analyticsProvider) { + initializeProviders() + } + return analyticsProvider! 
+} + +/** + * Get the storage provider instance + */ +export function getStorageProvider(): StorageProvider { + if (!storageProvider) { + initializeProviders() + } + return storageProvider! +} + +// Re-export types for convenience +export type { ContextProvider, AppContext } from './context/types' +export type { BillingProvider, Subscription } from './billing/types' +export type { AnalyticsProvider, AnalyticsEvent } from './analytics/types' +export type { StorageProvider, StorageInfo } from './storage/types' diff --git a/lib/providers/storage/noop.ts b/lib/providers/storage/noop.ts new file mode 100644 index 0000000..ebada5f --- /dev/null +++ b/lib/providers/storage/noop.ts @@ -0,0 +1,27 @@ +import type { StorageProvider, StorageInfo } from './types' + +/** + * No-Op Storage Provider (OSS Default) + * + * In self-hosted mode, storage is unlimited (within Supabase project limits). + * No tracking or enforcement. + */ +export class NoOpStorageProvider implements StorageProvider { + async getStorageInfo(_contextId: string): Promise { + return { + usedBytes: 0, + limitBytes: null, // Unlimited + withinLimits: true, + usedGb: 0, + } + } + + async trackUpload(_contextId: string, _bytes: number, _fileType: string): Promise { + // Silent no-op + } + + async canUpload(_contextId: string, _bytes: number): Promise { + // Always allow + return true + } +} diff --git a/lib/providers/storage/types.ts b/lib/providers/storage/types.ts new file mode 100644 index 0000000..5a30a99 --- /dev/null +++ b/lib/providers/storage/types.ts @@ -0,0 +1,35 @@ +/** + * Storage usage information + */ +export interface StorageInfo { + usedBytes: number + limitBytes: number | null // null = unlimited + withinLimits: boolean + usedGb: number // Convenience field +} + +/** + * Storage provider interface + */ +export interface StorageProvider { + /** + * Get storage usage information + * OSS: Returns unlimited + * Hosted: Returns actual usage from Supabase + */ + getStorageInfo(contextId: string): 
Promise + + /** + * Track an upload (for analytics/monitoring) + * OSS: No-op + * Hosted: Sends to analytics + */ + trackUpload(contextId: string, bytes: number, fileType: string): Promise + + /** + * Check if an upload is allowed + * OSS: Always true + * Hosted: Checks against soft limits (doesn't block yet) + */ + canUpload(contextId: string, bytes: number): Promise +} diff --git a/supabase/migrations/20251112000000_add_hosted_features.sql b/supabase/migrations/20251112000000_add_hosted_features.sql new file mode 100644 index 0000000..1d9b8e5 --- /dev/null +++ b/supabase/migrations/20251112000000_add_hosted_features.sql @@ -0,0 +1,228 @@ +-- ===================================================================== +-- HOSTED FEATURES MIGRATION +-- ===================================================================== +-- This migration adds support for project-based multi-tenancy (hosted mode) +-- while maintaining backward compatibility with user-owned collections (OSS mode) +-- +-- OSS Mode: Collections are owned by users directly (user_id NOT NULL, project_id NULL) +-- Hosted Mode: Collections are owned by projects (project_id NOT NULL, user_id NULL) +-- ===================================================================== + +-- ===================================================================== +-- 1. 
PROJECTS TABLE (Hosted Only) +-- ===================================================================== + +create table if not exists public.projects ( + id uuid primary key default gen_random_uuid(), + name text not null, + slug text unique not null, + + -- Billing fields + subscription_id text, + subscription_status text default 'trial' check (subscription_status in ('trial', 'active', 'canceled', 'past_due')), + subscription_provider text check (subscription_provider in ('polar', 'flowglad', 'stripe')), + + -- Metadata + created_at timestamptz not null default now(), + updated_at timestamptz not null default now() +); + +alter table public.projects enable row level security; + +-- Trigger to update updated_at +drop trigger if exists set_projects_updated_at on public.projects; +create trigger set_projects_updated_at + before update on public.projects + for each row execute function public.set_updated_at(); + +-- ===================================================================== +-- 2. PROJECT MEMBERS TABLE (Hosted Only) +-- ===================================================================== + +create table if not exists public.project_members ( + id uuid primary key default gen_random_uuid(), + project_id uuid not null references public.projects(id) on delete cascade, + user_id uuid not null references auth.users(id) on delete cascade, + role text not null default 'member' check (role in ('owner', 'admin', 'member')), + + created_at timestamptz not null default now(), + + unique(project_id, user_id) +); + +alter table public.project_members enable row level security; + +-- Index for fast membership lookups +create index if not exists idx_project_members_user on public.project_members(user_id); +create index if not exists idx_project_members_project on public.project_members(project_id); + +-- ===================================================================== +-- 3. 
UPDATE COLLECTIONS TABLE +-- ===================================================================== + +-- Add ownership columns (both nullable for now) +alter table public.collections add column if not exists user_id uuid references auth.users(id) on delete cascade; +alter table public.collections add column if not exists project_id uuid references public.projects(id) on delete cascade; + +-- Add constraint: Must have EITHER user_id OR project_id (not both, not neither) +alter table public.collections add constraint collections_ownership_check + check ( + (user_id is not null and project_id is null) or + (user_id is null and project_id is not null) + ); + +-- Add indexes for ownership lookups +create index if not exists idx_collections_user on public.collections(user_id); +create index if not exists idx_collections_project on public.collections(project_id); + +-- Add timestamps if missing +alter table public.collections add column if not exists created_at timestamptz not null default now(); + +-- Trigger to update updated_at +drop trigger if exists set_collections_updated_at on public.collections; +create trigger set_collections_updated_at + before update on public.collections + for each row execute function public.set_updated_at(); + +-- ===================================================================== +-- 4. 
ROW LEVEL SECURITY POLICIES +-- ===================================================================== + +-- Drop old permissive policy +drop policy if exists collections_editor_rw on public.collections; + +-- OSS Mode: Users can manage their own collections +create policy collections_user_owned_rw on public.collections + for all to authenticated + using (user_id = auth.uid()) + with check (user_id = auth.uid()); + +-- Hosted Mode: Users can manage collections in their projects +create policy collections_project_owned_rw on public.collections + for all to authenticated + using ( + project_id in ( + select project_id + from public.project_members + where user_id = auth.uid() + ) + ) + with check ( + project_id in ( + select project_id + from public.project_members + where user_id = auth.uid() + ) + ); + +-- Projects: Users can view their own projects +create policy projects_member_read on public.projects + for select to authenticated + using ( + id in ( + select project_id + from public.project_members + where user_id = auth.uid() + ) + ); + +-- Projects: Only owners can update projects +create policy projects_owner_update on public.projects + for update to authenticated + using ( + id in ( + select project_id + from public.project_members + where user_id = auth.uid() and role = 'owner' + ) + ); + +-- Projects: Only owners can delete projects +create policy projects_owner_delete on public.projects + for delete to authenticated + using ( + id in ( + select project_id + from public.project_members + where user_id = auth.uid() and role = 'owner' + ) + ); + +-- Project Members: Users can view members of their projects +create policy project_members_read on public.project_members + for select to authenticated + using ( + project_id in ( + select project_id + from public.project_members + where user_id = auth.uid() + ) + ); + +-- Project Members: Only owners/admins can manage members +create policy project_members_manage on public.project_members + for all to authenticated 
+ using ( + project_id in ( + select project_id + from public.project_members + where user_id = auth.uid() and role in ('owner', 'admin') + ) + ); + +-- ===================================================================== +-- 5. HELPER FUNCTIONS +-- ===================================================================== + +-- Function to get user's active project (for hosted mode) +-- Returns the first project the user is a member of, or NULL +create or replace function public.get_user_active_project(p_user_id uuid) +returns uuid +language plpgsql +security definer +as $$ +declare + v_project_id uuid; +begin + select project_id into v_project_id + from public.project_members + where user_id = p_user_id + limit 1; + + return v_project_id; +end; +$$; + +-- Function to check if user can create collections +-- OSS: Always true +-- Hosted: Must be in a project with active subscription +create or replace function public.can_create_collection(p_user_id uuid, p_project_id uuid default null) +returns boolean +language plpgsql +security definer +as $$ +declare + v_subscription_status text; +begin + -- If no project_id, assume OSS mode - always allow + if p_project_id is null then + return true; + end if; + + -- Check project subscription status + select subscription_status into v_subscription_status + from public.projects + where id = p_project_id; + + -- Allow if subscription is active or trialing + return v_subscription_status in ('active', 'trial'); +end; +$$; + +-- ===================================================================== +-- MIGRATION COMPLETE +-- ===================================================================== +-- Note: Existing collections will have NULL user_id and project_id. +-- You'll need to run a data migration to assign user_id to existing collections. 
+-- See: scripts/migrate-collections-to-user-owned.sql +-- ===================================================================== diff --git a/supabase/migrations/20251112000001_migrate_existing_collections.sql b/supabase/migrations/20251112000001_migrate_existing_collections.sql new file mode 100644 index 0000000..e101b83 --- /dev/null +++ b/supabase/migrations/20251112000001_migrate_existing_collections.sql @@ -0,0 +1,58 @@ +-- ===================================================================== +-- MIGRATE EXISTING COLLECTIONS TO USER-OWNED (OSS MODE) +-- ===================================================================== +-- This migration assigns existing collections (with NULL user_id/project_id) +-- to the user who created the first item in that collection. +-- +-- If a collection has no items, it will be assigned to the first user +-- in the system (for simplicity). +-- ===================================================================== + +do $$ +declare + v_first_user_id uuid; + v_collection record; + v_first_item_author uuid; +begin + -- Get the first user in the system (fallback) + select id into v_first_user_id + from auth.users + order by created_at asc + limit 1; + + -- Only proceed if we have at least one user + if v_first_user_id is not null then + -- Loop through collections with NULL ownership + for v_collection in + select id + from public.collections + where user_id is null and project_id is null + loop + -- Try to find the author of the first item in this collection + select author into v_first_item_author + from public.items + where collection_id = v_collection.id + order by created_at asc + limit 1; + + -- Assign collection to item author, or fallback to first user + update public.collections + set user_id = coalesce(v_first_item_author, v_first_user_id) + where id = v_collection.id; + + raise notice 'Assigned collection % to user %', + v_collection.id, + coalesce(v_first_item_author, v_first_user_id); + end loop; + else + raise notice 
'No users found - skipping collection migration'; + end if; +end; +$$; + +-- ===================================================================== +-- MIGRATION COMPLETE +-- ===================================================================== +-- All existing collections should now be owned by users (OSS mode). +-- In hosted mode, collections will be created with project_id instead. +-- ===================================================================== From 28062ed20f805d343f896fa190590df04c408a53 Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 12 Nov 2025 04:28:42 +0000 Subject: [PATCH 2/5] Move hosted migrations to separate directory Separates hosted-only migrations into /migrations-hosted/ to prevent OSS users from accidentally running them. Supabase CLI only reads from /migrations/ by default, so this ensures clean separation. Changes: - Move projects/project_members migrations to migrations-hosted/ - Add warning comments to hosted migration files - Add README explaining hosted-only migrations --- .../20251112000000_add_hosted_features.sql | 9 ++++--- ...112000001_migrate_existing_collections.sql | 7 +++++- supabase/migrations-hosted/README.md | 24 +++++++++++++++++++ 3 files changed, 36 insertions(+), 4 deletions(-) rename supabase/{migrations => migrations-hosted}/20251112000000_add_hosted_features.sql (95%) rename supabase/{migrations => migrations-hosted}/20251112000001_migrate_existing_collections.sql (91%) create mode 100644 supabase/migrations-hosted/README.md diff --git a/supabase/migrations/20251112000000_add_hosted_features.sql b/supabase/migrations-hosted/20251112000000_add_hosted_features.sql similarity index 95% rename from supabase/migrations/20251112000000_add_hosted_features.sql rename to supabase/migrations-hosted/20251112000000_add_hosted_features.sql index 1d9b8e5..5a18e05 100644 --- a/supabase/migrations/20251112000000_add_hosted_features.sql +++ b/supabase/migrations-hosted/20251112000000_add_hosted_features.sql @@ -1,8 +1,11 @@ -- 
===================================================================== --- HOSTED FEATURES MIGRATION +-- ⚠️ HOSTED-ONLY MIGRATION - DO NOT RUN IN OSS MODE ⚠️ -- ===================================================================== --- This migration adds support for project-based multi-tenancy (hosted mode) --- while maintaining backward compatibility with user-owned collections (OSS mode) +-- This migration is ONLY for the hosted (paid) version of Paperclip CMS. +-- OSS/self-hosted users should NOT run this migration. +-- +-- This adds support for project-based multi-tenancy (hosted mode) while +-- maintaining backward compatibility with user-owned collections (OSS mode). -- -- OSS Mode: Collections are owned by users directly (user_id NOT NULL, project_id NULL) -- Hosted Mode: Collections are owned by projects (project_id NOT NULL, user_id NULL) diff --git a/supabase/migrations/20251112000001_migrate_existing_collections.sql b/supabase/migrations-hosted/20251112000001_migrate_existing_collections.sql similarity index 91% rename from supabase/migrations/20251112000001_migrate_existing_collections.sql rename to supabase/migrations-hosted/20251112000001_migrate_existing_collections.sql index e101b83..41ab3f4 100644 --- a/supabase/migrations/20251112000001_migrate_existing_collections.sql +++ b/supabase/migrations-hosted/20251112000001_migrate_existing_collections.sql @@ -1,6 +1,11 @@ -- ===================================================================== --- MIGRATE EXISTING COLLECTIONS TO USER-OWNED (OSS MODE) +-- ⚠️ HOSTED-ONLY MIGRATION - DO NOT RUN IN OSS MODE ⚠️ -- ===================================================================== +-- This migration is ONLY for the hosted (paid) version of Paperclip CMS. +-- OSS/self-hosted users should NOT run this migration. 
+-- +-- MIGRATE EXISTING COLLECTIONS TO USER-OWNED (OSS MODE) +-- -- This migration assigns existing collections (with NULL user_id/project_id) -- to the user who created the first item in that collection. -- diff --git a/supabase/migrations-hosted/README.md b/supabase/migrations-hosted/README.md new file mode 100644 index 0000000..3cf7149 --- /dev/null +++ b/supabase/migrations-hosted/README.md @@ -0,0 +1,24 @@ +# Hosted-Only Migrations + +These migrations are **only for the hosted (paid) version** of Paperclip CMS. + +**OSS users should NOT run these migrations.** + +## What's in here? + +- `20251112000000_add_hosted_features.sql` - Creates `projects` and `project_members` tables +- `20251112000001_migrate_existing_collections.sql` - Migrates existing collections to user-owned mode + +## When to run these + +Only run these if you're deploying the hosted version with `PAPERCLIP_HOSTED=true`. + +## How to run + +```bash +# From your hosted Supabase instance +npx supabase migration up --file supabase/migrations-hosted/20251112000000_add_hosted_features.sql +npx supabase migration up --file supabase/migrations-hosted/20251112000001_migrate_existing_collections.sql +``` + +Or manually apply via Supabase SQL Editor. From 94fb839c297d978809812d32e03c1b5236d1b57a Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 12 Nov 2025 06:19:41 +0000 Subject: [PATCH 3/5] Add cache provider (opt-in for OSS and hosted) Adds content caching for fast queries without database hits. Unlike other providers, cache is opt-in for BOTH OSS and hosted. Features: - DisabledCacheProvider (default for OSS) - MemoryCacheProvider (in-process, dev/single-instance) - FileSystemCacheProvider (persistent, no external deps) - RedisCacheProvider stub (Redis/Upstash for production) OSS users can enable caching with CACHE_PROVIDER env var. Supports memory, filesystem, redis, and upstash options. 
Use case: Cache published items for public API without DB queries Example: Publish item -> cache.set(slug, data) -> fast retrieval --- .env.example | 32 ++++++++ .gitignore | 3 + lib/hosted/cache/redis-provider.ts | 78 ++++++++++++++++++ lib/providers/README.md | 71 +++++++++++++++- lib/providers/cache/disabled.ts | 33 ++++++++ lib/providers/cache/filesystem.ts | 128 +++++++++++++++++++++++++++++ lib/providers/cache/memory.ts | 96 ++++++++++++++++++++++ lib/providers/cache/types.ts | 75 +++++++++++++++++ lib/providers/index.ts | 70 ++++++++++++++++ 9 files changed, 584 insertions(+), 2 deletions(-) create mode 100644 lib/hosted/cache/redis-provider.ts create mode 100644 lib/providers/cache/disabled.ts create mode 100644 lib/providers/cache/filesystem.ts create mode 100644 lib/providers/cache/memory.ts create mode 100644 lib/providers/cache/types.ts diff --git a/.env.example b/.env.example index bfb5fb1..370e49f 100644 --- a/.env.example +++ b/.env.example @@ -45,3 +45,35 @@ SUPABASE_SERVICE_ROLE_KEY=your-service-role-key # POSTHOG_HOST=https://app.posthog.com # NEXT_PUBLIC_POSTHOG_KEY=your-posthog-public-key # NEXT_PUBLIC_POSTHOG_HOST=https://app.posthog.com + +# ===================================================================== +# CACHE (Optional - available for both OSS and Hosted) +# ===================================================================== +# Content caching for fast queries (e.g., published items) +# Choose a cache provider or leave disabled + +# Options: disabled (default), memory, filesystem, redis, upstash +# CACHE_PROVIDER=disabled + +# Memory cache (simple, in-process, not persistent) +# Good for development or single-instance deployments +# CACHE_PROVIDER=memory + +# File system cache (persistent, no external dependencies) +# Good for single-instance deployments with disk access +# CACHE_PROVIDER=filesystem +# CACHE_FILESYSTEM_DIR=./.cache + +# Redis cache (distributed, persistent, recommended for production) +# Requires ioredis package: 
npm install ioredis +# CACHE_PROVIDER=redis +# REDIS_URL=redis://localhost:6379 + +# Upstash Redis (serverless-friendly, recommended for hosted) +# Requires @upstash/redis package: npm install @upstash/redis +# CACHE_PROVIDER=upstash +# UPSTASH_REDIS_URL=your-upstash-redis-url +# UPSTASH_REDIS_TOKEN=your-upstash-redis-token + +# Default cache TTL in seconds (default: 3600 = 1 hour) +# CACHE_DEFAULT_TTL=3600 diff --git a/.gitignore b/.gitignore index 39223f7..77c2845 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,6 @@ next-env.d.ts .vscode notes + +# cache +.cache diff --git a/lib/hosted/cache/redis-provider.ts b/lib/hosted/cache/redis-provider.ts new file mode 100644 index 0000000..883ee98 --- /dev/null +++ b/lib/hosted/cache/redis-provider.ts @@ -0,0 +1,78 @@ +import type { CacheProvider, CacheKey } from '@/lib/providers/cache/types' + +/** + * Redis Cache Provider (Hosted) + * + * Uses Redis/Upstash/Vercel KV for fast, distributed caching. + * Suitable for multi-instance deployments and serverless environments. + * + * Install: npm install @upstash/redis + * Or: npm install ioredis + */ +export class RedisCacheProvider implements CacheProvider { + private redis: any // Redis client (Upstash or ioredis) + private defaultTtl: number + + constructor(config: { url: string; token?: string; defaultTtl?: number }) { + this.defaultTtl = config.defaultTtl || 3600 // 1 hour default + + // Initialize Redis client based on available packages + if (config.token) { + // Upstash Redis (serverless-friendly) + // const { Redis } = require('@upstash/redis') + // this.redis = new Redis({ url: config.url, token: config.token }) + throw new Error('Upstash Redis not yet implemented. Install @upstash/redis and uncomment.') + } else { + // Standard Redis (ioredis) + // const Redis = require('ioredis') + // this.redis = new Redis(config.url) + throw new Error('Redis not yet implemented. 
Install ioredis and uncomment.') + } + } + + async get(key: CacheKey): Promise { + const value = await this.redis.get(key) + + if (!value) { + return null + } + + try { + return JSON.parse(value) as T + } catch { + return value as T + } + } + + async set(key: CacheKey, value: T, ttl?: number): Promise { + const serialized = JSON.stringify(value) + const expiresIn = ttl || this.defaultTtl + + if (expiresIn > 0) { + await this.redis.setex(key, expiresIn, serialized) + } else { + await this.redis.set(key, serialized) + } + } + + async delete(key: CacheKey): Promise { + await this.redis.del(key) + } + + async deletePattern(pattern: string): Promise { + // Scan for keys matching pattern + const keys = await this.redis.keys(pattern) + + if (keys.length > 0) { + await this.redis.del(...keys) + } + } + + isEnabled(): boolean { + return true + } + + async clear(): Promise { + await this.redis.flushdb() + } +} diff --git a/lib/providers/README.md b/lib/providers/README.md index 19a3d6a..66a0a6e 100644 --- a/lib/providers/README.md +++ b/lib/providers/README.md @@ -89,6 +89,66 @@ const info = await storage.getStorageInfo(projectId) const canUpload = await storage.canUpload(projectId, fileSizeBytes) ``` +#### 5. Cache Provider (Opt-in for Both Modes) + +Caches published content for fast queries. + +**OSS**: Disabled by default, can opt-in to memory/filesystem/Redis +**Hosted**: Redis/Upstash for distributed caching + +Unlike other providers, cache is **opt-in for both OSS and hosted**. OSS users can enable caching by setting environment variables. 
+ +```typescript +import { getCacheProvider } from '@/lib/providers' + +const cache = getCacheProvider() + +// Cache a published item +await cache.set(`item:${slug}`, itemData, 3600) // 1 hour TTL + +// Retrieve from cache +const cachedItem = await cache.get(`item:${slug}`) + +// Invalidate cache +await cache.delete(`item:${slug}`) + +// Invalidate pattern (e.g., all blog posts) +await cache.deletePattern('collection:blog:*') +``` + +**Cache Options:** + +| Provider | OSS | Hosted | Persistent | Distributed | External Deps | +|----------|-----|--------|------------|-------------|---------------| +| `disabled` | ✓ (default) | - | - | - | None | +| `memory` | ✓ | ✓ | ❌ | ❌ | None | +| `filesystem` | ✓ | - | ✓ | ❌ | None | +| `redis` | ✓ | ✓ | ✓ | ✓ | ioredis | +| `upstash` | ✓ | ✓ (recommended) | ✓ | ✓ | @upstash/redis | + +**Configuration:** + +```bash +# Disabled (default) +CACHE_PROVIDER=disabled + +# Memory cache (good for dev) +CACHE_PROVIDER=memory + +# File system cache (good for single-instance) +CACHE_PROVIDER=filesystem +CACHE_FILESYSTEM_DIR=./.cache + +# Redis cache (good for production) +CACHE_PROVIDER=redis +REDIS_URL=redis://localhost:6379 + +# Upstash (serverless-friendly) +CACHE_PROVIDER=upstash +UPSTASH_REDIS_URL=your-url +UPSTASH_REDIS_TOKEN=your-token +``` + ## Directory Structure ``` @@ -105,6 +165,11 @@ const canUpload = await storage.canUpload(projectId, fileSizeBytes) ├── storage/ │ ├── types.ts # StorageProvider interface │ └── noop.ts # OSS: No limits +├── cache/ +│ ├── types.ts # CacheProvider interface +│ ├── disabled.ts # OSS default: No caching +│ ├── memory.ts # Opt-in: In-memory cache +│ └── filesystem.ts # Opt-in: File system cache ├── index.ts # Provider registry ├── features.ts # Client-side feature flags └── README.md # This file @@ -118,8 +183,10 @@ const canUpload = await storage.canUpload(projectId, fileSizeBytes) │ └── stripe-provider.ts # Stripe integration ├── analytics/ │ └── posthog-provider.ts # PostHog integration -└── 
storage/ - └── storage-provider.ts # Storage tracking & limits +├── storage/ +│ └── storage-provider.ts # Storage tracking & limits +└── cache/ + └── redis-provider.ts # Redis/Upstash cache ``` ## Database Schema diff --git a/lib/providers/cache/disabled.ts b/lib/providers/cache/disabled.ts new file mode 100644 index 0000000..e1c5b2c --- /dev/null +++ b/lib/providers/cache/disabled.ts @@ -0,0 +1,33 @@ +import type { CacheProvider, CacheKey } from './types' + +/** + * Disabled Cache Provider (Default for OSS) + * + * This provider does nothing - caching is disabled. + * OSS users can opt-in to caching by configuring a cache provider. + */ +export class DisabledCacheProvider implements CacheProvider { + async get(_key: CacheKey): Promise { + return null + } + + async set(_key: CacheKey, _value: T, _ttl?: number): Promise { + // No-op + } + + async delete(_key: CacheKey): Promise { + // No-op + } + + async deletePattern(_pattern: string): Promise { + // No-op + } + + isEnabled(): boolean { + return false + } + + async clear(): Promise { + // No-op + } +} diff --git a/lib/providers/cache/filesystem.ts b/lib/providers/cache/filesystem.ts new file mode 100644 index 0000000..5485310 --- /dev/null +++ b/lib/providers/cache/filesystem.ts @@ -0,0 +1,128 @@ +import type { CacheProvider, CacheKey, CacheEntry } from './types' +import { promises as fs } from 'fs' +import path from 'path' + +/** + * File System Cache Provider (OSS Opt-in) + * + * Stores cache entries as JSON files on disk. + * Persistent across restarts, no external dependencies. + * Good for single-instance deployments with disk access. + * + * WARNING: Not suitable for serverless environments or multi-instance deployments. 
+ */ +export class FileSystemCacheProvider implements CacheProvider { + private cacheDir: string + + constructor(cacheDir: string = './.cache') { + this.cacheDir = path.resolve(cacheDir) + } + + async get(key: CacheKey): Promise { + try { + const filePath = this.getFilePath(key) + const data = await fs.readFile(filePath, 'utf-8') + const entry: CacheEntry = JSON.parse(data) + + // Check if expired + if (entry.expiresAt && entry.expiresAt < Date.now()) { + await this.delete(key) + return null + } + + return entry.value + } catch (err) { + // File doesn't exist or read error + return null + } + } + + async set(key: CacheKey, value: T, ttl?: number): Promise { + const entry: CacheEntry = { + value, + expiresAt: ttl ? Date.now() + ttl * 1000 : undefined, + } + + const filePath = this.getFilePath(key) + + // Ensure directory exists + await fs.mkdir(path.dirname(filePath), { recursive: true }) + + // Write to temp file then rename (atomic operation) + const tempPath = `${filePath}.tmp` + await fs.writeFile(tempPath, JSON.stringify(entry), 'utf-8') + await fs.rename(tempPath, filePath) + } + + async delete(key: CacheKey): Promise { + try { + const filePath = this.getFilePath(key) + await fs.unlink(filePath) + } catch (err) { + // File doesn't exist - that's fine + } + } + + async deletePattern(pattern: string): Promise { + // Convert glob pattern to regex + const regex = new RegExp('^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$') + + try { + await this.walkDir(this.cacheDir, async (filePath: string) => { + const relativePath = path.relative(this.cacheDir, filePath) + const key = this.filePathToKey(relativePath) + + if (regex.test(key)) { + await fs.unlink(filePath) + } + }) + } catch (err) { + // Directory might not exist + } + } + + isEnabled(): boolean { + return true + } + + async clear(): Promise { + try { + await fs.rm(this.cacheDir, { recursive: true, force: true }) + } catch (err) { + // Directory might not exist + } + } + + /** + * Convert cache key 
to file path + */ + private getFilePath(key: CacheKey): string { + // Replace special characters to make it filesystem-safe + const safeName = key.replace(/[^a-zA-Z0-9-_:]/g, '_') + return path.join(this.cacheDir, `${safeName}.json`) + } + + /** + * Convert file path back to cache key + */ + private filePathToKey(filePath: string): CacheKey { + return path.basename(filePath, '.json') + } + + /** + * Recursively walk directory + */ + private async walkDir(dir: string, callback: (filePath: string) => Promise): Promise { + const entries = await fs.readdir(dir, { withFileTypes: true }) + + for (const entry of entries) { + const fullPath = path.join(dir, entry.name) + + if (entry.isDirectory()) { + await this.walkDir(fullPath, callback) + } else if (entry.isFile() && entry.name.endsWith('.json')) { + await callback(fullPath) + } + } + } +} diff --git a/lib/providers/cache/memory.ts b/lib/providers/cache/memory.ts new file mode 100644 index 0000000..6f57454 --- /dev/null +++ b/lib/providers/cache/memory.ts @@ -0,0 +1,96 @@ +import type { CacheProvider, CacheKey, CacheEntry } from './types' + +/** + * Memory Cache Provider (OSS Opt-in) + * + * Simple in-memory cache using a Map. + * Fast but not persistent across restarts. + * Good for development or single-instance deployments. + * + * WARNING: Not suitable for multi-instance deployments (cache will be per-instance). 
+ */ +export class MemoryCacheProvider implements CacheProvider { + private cache: Map = new Map() + private cleanupInterval: NodeJS.Timeout | null = null + + constructor() { + // Clean up expired entries every 60 seconds + this.cleanupInterval = setInterval(() => { + this.cleanupExpired() + }, 60000) + } + + async get(key: CacheKey): Promise { + const entry = this.cache.get(key) + + if (!entry) { + return null + } + + // Check if expired + if (entry.expiresAt && entry.expiresAt < Date.now()) { + this.cache.delete(key) + return null + } + + return entry.value as T + } + + async set(key: CacheKey, value: T, ttl?: number): Promise { + const entry: CacheEntry = { + value, + expiresAt: ttl ? Date.now() + ttl * 1000 : undefined, + } + + this.cache.set(key, entry) + } + + async delete(key: CacheKey): Promise { + this.cache.delete(key) + } + + async deletePattern(pattern: string): Promise { + // Convert glob pattern to regex + const regex = new RegExp('^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$') + + const keysToDelete: CacheKey[] = [] + for (const key of this.cache.keys()) { + if (regex.test(key)) { + keysToDelete.push(key) + } + } + + keysToDelete.forEach((key) => this.cache.delete(key)) + } + + isEnabled(): boolean { + return true + } + + async clear(): Promise { + this.cache.clear() + } + + private cleanupExpired(): void { + const now = Date.now() + const keysToDelete: CacheKey[] = [] + + for (const [key, entry] of this.cache.entries()) { + if (entry.expiresAt && entry.expiresAt < now) { + keysToDelete.push(key) + } + } + + keysToDelete.forEach((key) => this.cache.delete(key)) + } + + /** + * Clean up interval on shutdown + */ + destroy(): void { + if (this.cleanupInterval) { + clearInterval(this.cleanupInterval) + this.cleanupInterval = null + } + } +} diff --git a/lib/providers/cache/types.ts b/lib/providers/cache/types.ts new file mode 100644 index 0000000..e87dc88 --- /dev/null +++ b/lib/providers/cache/types.ts @@ -0,0 +1,75 @@ +/** + * Cache 
provider types + * + * Used for caching published content for fast queries without hitting the database. + * Example: When an item is published, cache the serialized version for public API access. + */ + +export type CacheKey = string + +export interface CacheEntry { + value: T + expiresAt?: number // Unix timestamp in milliseconds +} + +/** + * Cache provider interface + */ +export interface CacheProvider { + /** + * Get a value from cache + * Returns null if not found or expired + */ + get(key: CacheKey): Promise + + /** + * Set a value in cache + * @param key Cache key + * @param value Value to cache + * @param ttl Time to live in seconds (optional) + */ + set(key: CacheKey, value: T, ttl?: number): Promise + + /** + * Delete a value from cache + */ + delete(key: CacheKey): Promise + + /** + * Delete multiple keys matching a pattern + * Example: deletePattern('collection:blog:*') + */ + deletePattern(pattern: string): Promise + + /** + * Check if cache is enabled/available + */ + isEnabled(): boolean + + /** + * Clear all cache entries (use with caution) + */ + clear(): Promise +} + +/** + * Cache configuration + */ +export interface CacheConfig { + enabled: boolean + provider: 'disabled' | 'memory' | 'filesystem' | 'redis' | 'upstash' | 'vercel-kv' + + // Redis/Upstash configuration + redis?: { + url: string + token?: string + } + + // File system configuration + filesystem?: { + directory: string + } + + // Default TTL in seconds + defaultTtl?: number +} diff --git a/lib/providers/index.ts b/lib/providers/index.ts index 415567e..b67af61 100644 --- a/lib/providers/index.ts +++ b/lib/providers/index.ts @@ -2,18 +2,23 @@ import type { ContextProvider } from './context/types' import type { BillingProvider } from './billing/types' import type { AnalyticsProvider } from './analytics/types' import type { StorageProvider } from './storage/types' +import type { CacheProvider } from './cache/types' // OSS implementations import { UserContextProvider } from 
'./context/user-context' import { NoOpBillingProvider } from './billing/noop' import { NoOpAnalyticsProvider } from './analytics/noop' import { NoOpStorageProvider } from './storage/noop' +import { DisabledCacheProvider } from './cache/disabled' +import { MemoryCacheProvider } from './cache/memory' +import { FileSystemCacheProvider } from './cache/filesystem' // Global provider instances let contextProvider: ContextProvider | null = null let billingProvider: BillingProvider | null = null let analyticsProvider: AnalyticsProvider | null = null let storageProvider: StorageProvider | null = null +let cacheProvider: CacheProvider | null = null /** * Check if running in hosted mode @@ -53,6 +58,9 @@ export function initializeProviders() { } else { initializeOSSProviders() } + + // Initialize cache provider (opt-in for both OSS and hosted) + initializeCacheProvider() } /** @@ -69,6 +77,57 @@ function initializeOSSProviders() { console.log(' - No billing or limits') } +/** + * Initialize cache provider based on configuration + * Cache is opt-in for both OSS and hosted modes + */ +function initializeCacheProvider() { + const cacheType = process.env.CACHE_PROVIDER || 'disabled' + + switch (cacheType) { + case 'memory': + cacheProvider = new MemoryCacheProvider() + console.log(' - Cache: memory (in-process)') + break + + case 'filesystem': + const cacheDir = process.env.CACHE_FILESYSTEM_DIR || './.cache' + cacheProvider = new FileSystemCacheProvider(cacheDir) + console.log(` - Cache: filesystem (${cacheDir})`) + break + + case 'redis': + case 'upstash': + try { + const { RedisCacheProvider } = require('@/lib/hosted/cache/redis-provider') + const redisUrl = process.env.REDIS_URL || process.env.UPSTASH_REDIS_URL + const redisToken = process.env.UPSTASH_REDIS_TOKEN + + if (!redisUrl) { + throw new Error('REDIS_URL or UPSTASH_REDIS_URL is required') + } + + cacheProvider = new RedisCacheProvider({ + url: redisUrl, + token: redisToken, + defaultTtl: 
parseInt(process.env.CACHE_DEFAULT_TTL || '3600'), + }) + console.log(` - Cache: ${cacheType}`) + } catch (err) { + console.error('Failed to initialize Redis cache:', err) + console.log(' - Cache: disabled (fallback)') + cacheProvider = new DisabledCacheProvider() + } + break + + case 'disabled': + default: + cacheProvider = new DisabledCacheProvider() + console.log(' - Cache: disabled') + break + } +} + /** * Get the context provider instance */ @@ -109,8 +168,19 @@ export function getStorageProvider(): StorageProvider { return storageProvider! } +/** + * Get the cache provider instance + */ +export function getCacheProvider(): CacheProvider { + if (!cacheProvider) { + initializeProviders() + } + return cacheProvider! +} + // Re-export types for convenience export type { ContextProvider, AppContext } from './context/types' export type { BillingProvider, Subscription } from './billing/types' export type { AnalyticsProvider, AnalyticsEvent } from './analytics/types' export type { StorageProvider, StorageInfo } from './storage/types' +export type { CacheProvider, CacheKey } from './cache/types' From f05e441c57e1e4afbf280c3fab811ac4e4112a01 Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 12 Nov 2025 06:33:38 +0000 Subject: [PATCH 4/5] Refactor cache to use object storage (remove Redis) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace Redis/memory caching with object storage approach: User → Next.js API → Object Storage Cache → DB (if miss) Benefits: - App is the gateway (control URLs, auth, metrics) - Cache is transparent to users - CDN accelerates storage reads - Cheaper than Redis for static content Providers: - Supabase Storage (FREE for OSS, uses existing instance) - Cloudflare R2 ($0.015/GB, FREE egress, production) - Vercel Blob (simple, Vercel-integrated) - File System (dev/single-instance) Removed: - Redis/Upstash providers (wrong pattern for CMS caching) - Memory provider (not persistent) This architecture is 
better for a CMS: - Published content cached once, read many times - No need for in-memory cache or Redis - Object storage + CDN is cheaper and simpler - App maintains full control over access --- .env.example | 55 ++-- lib/hosted/cache/r2-provider.ts | 193 +++++++++++++ lib/hosted/cache/redis-provider.ts | 78 ------ lib/hosted/cache/vercel-blob-provider.ts | 148 ++++++++++ lib/providers/cache/README.md | 333 +++++++++++++++++++++++ lib/providers/cache/filesystem.ts | 31 +-- lib/providers/cache/memory.ts | 96 ------- lib/providers/cache/supabase-storage.ts | 151 ++++++++++ lib/providers/cache/types.ts | 55 ++-- lib/providers/index.ts | 59 ++-- 10 files changed, 928 insertions(+), 271 deletions(-) create mode 100644 lib/hosted/cache/r2-provider.ts delete mode 100644 lib/hosted/cache/redis-provider.ts create mode 100644 lib/hosted/cache/vercel-blob-provider.ts create mode 100644 lib/providers/cache/README.md delete mode 100644 lib/providers/cache/memory.ts create mode 100644 lib/providers/cache/supabase-storage.ts diff --git a/.env.example b/.env.example index 370e49f..9543e1b 100644 --- a/.env.example +++ b/.env.example @@ -49,31 +49,40 @@ SUPABASE_SERVICE_ROLE_KEY=your-service-role-key # ===================================================================== # CACHE (Optional - available for both OSS and Hosted) # ===================================================================== -# Content caching for fast queries (e.g., published items) -# Choose a cache provider or leave disabled - -# Options: disabled (default), memory, filesystem, redis, upstash +# Content caching using object storage (R2, Vercel Blob, Supabase Storage) +# +# Architecture: +# User → Next.js API → Object Storage Cache → DB (if miss) +# +# Your app is the gateway - users never access storage directly. +# This gives you control over URLs, metrics, rate limiting, auth. +# CDN accelerates the object storage reads. 
+ +# Options: disabled (default), filesystem, supabase-storage, r2, vercel-blob # CACHE_PROVIDER=disabled -# Memory cache (simple, in-process, not persistent) -# Good for development or single-instance deployments -# CACHE_PROVIDER=memory - -# File system cache (persistent, no external dependencies) -# Good for single-instance deployments with disk access +# File system cache (local disk, no external dependencies) +# Good for single-instance OSS deployments # CACHE_PROVIDER=filesystem # CACHE_FILESYSTEM_DIR=./.cache -# Redis cache (distributed, persistent, recommended for production) -# Requires ioredis package: npm install ioredis -# CACHE_PROVIDER=redis -# REDIS_URL=redis://localhost:6379 - -# Upstash Redis (serverless-friendly, recommended for hosted) -# Requires @upstash/redis package: npm install @upstash/redis -# CACHE_PROVIDER=upstash -# UPSTASH_REDIS_URL=your-upstash-redis-url -# UPSTASH_REDIS_TOKEN=your-upstash-redis-token - -# Default cache TTL in seconds (default: 3600 = 1 hour) -# CACHE_DEFAULT_TTL=3600 +# Supabase Storage cache (uses your existing Supabase instance) +# FREE for OSS users, persistent, globally distributed via CDN +# Requires: 'cache' bucket to be created (public, no RLS) +# CACHE_PROVIDER=supabase-storage +# CACHE_STORAGE_BUCKET=cache + +# Cloudflare R2 cache (cheap, fast, globally distributed) +# $0.015/GB storage, FREE egress - recommended for production/hosted +# Requires: @aws-sdk/client-s3 package (npm install @aws-sdk/client-s3) +# CACHE_PROVIDER=r2 +# R2_ACCOUNT_ID=your-cloudflare-account-id +# R2_ACCESS_KEY_ID=your-r2-access-key-id +# R2_SECRET_ACCESS_KEY=your-r2-secret-access-key +# R2_BUCKET_NAME=cache +# R2_CDN_URL=https://your-custom-domain.com (optional) + +# Vercel Blob cache (simple, integrated with Vercel deployments) +# Requires: @vercel/blob package (npm install @vercel/blob) +# CACHE_PROVIDER=vercel-blob +# BLOB_READ_WRITE_TOKEN=your-vercel-blob-token (set by Vercel) diff --git a/lib/hosted/cache/r2-provider.ts 
b/lib/hosted/cache/r2-provider.ts new file mode 100644 index 0000000..5ee00e7 --- /dev/null +++ b/lib/hosted/cache/r2-provider.ts @@ -0,0 +1,193 @@ +import type { CacheProvider, CacheKey } from '@/lib/providers/cache/types' + +/** + * Cloudflare R2 Cache Provider (Hosted) + * + * Stores cache as JSON blobs in Cloudflare R2. + * Cheap ($0.015/GB storage, FREE egress), fast, globally distributed. + * Perfect for production/hosted deployments. + * + * Requires: @cloudflare/workers-types or aws-sdk/client-s3 (S3-compatible API) + * + * Setup: + * 1. Create R2 bucket in Cloudflare dashboard + * 2. Get Access Key ID and Secret Access Key + * 3. Optional: Configure custom domain with CDN + */ +export class R2CacheProvider implements CacheProvider { + private s3Client: any + private bucketName: string + private cdnUrl?: string + + constructor(config: { + accountId: string + accessKeyId: string + secretAccessKey: string + bucketName: string + cdnUrl?: string // Optional: Custom domain for R2 bucket + }) { + this.bucketName = config.bucketName + this.cdnUrl = config.cdnUrl + + // Initialize S3-compatible client for R2 + // Requires: npm install @aws-sdk/client-s3 + // const { S3Client } = require('@aws-sdk/client-s3') + + // this.s3Client = new S3Client({ + // region: 'auto', + // endpoint: `https://${config.accountId}.r2.cloudflarestorage.com`, + // credentials: { + // accessKeyId: config.accessKeyId, + // secretAccessKey: config.secretAccessKey, + // }, + // }) + + throw new Error('R2 provider not yet implemented. 
Install @aws-sdk/client-s3 and uncomment.') + } + + async get(key: CacheKey): Promise { + try { + // const { GetObjectCommand } = require('@aws-sdk/client-s3') + // const filePath = this.getFilePath(key) + + // const command = new GetObjectCommand({ + // Bucket: this.bucketName, + // Key: filePath, + // }) + + // const response = await this.s3Client.send(command) + // const text = await response.Body.transformToString() + + // return JSON.parse(text) as T + + return null + } catch (err: any) { + if (err?.name === 'NoSuchKey') { + return null + } + console.error('R2 cache read error:', err) + return null + } + } + + async set(key: CacheKey, value: T): Promise { + try { + // const { PutObjectCommand } = require('@aws-sdk/client-s3') + // const filePath = this.getFilePath(key) + // const json = JSON.stringify(value) + + // const command = new PutObjectCommand({ + // Bucket: this.bucketName, + // Key: filePath, + // Body: json, + // ContentType: 'application/json', + // CacheControl: 'public, max-age=3600', // CDN cache for 1 hour + // }) + + // await this.s3Client.send(command) + } catch (err) { + console.error('R2 cache write error:', err) + throw err + } + } + + async delete(key: CacheKey): Promise { + try { + // const { DeleteObjectCommand } = require('@aws-sdk/client-s3') + // const filePath = this.getFilePath(key) + + // const command = new DeleteObjectCommand({ + // Bucket: this.bucketName, + // Key: filePath, + // }) + + // await this.s3Client.send(command) + } catch (err) { + console.error('R2 cache delete error:', err) + } + } + + async deletePattern(pattern: string): Promise { + try { + // const { ListObjectsV2Command, DeleteObjectsCommand } = require('@aws-sdk/client-s3') + + // // List all objects + // const listCommand = new ListObjectsV2Command({ + // Bucket: this.bucketName, + // }) + + // const response = await this.s3Client.send(listCommand) + // if (!response.Contents) return + + // // Convert glob pattern to regex + // const regex = new RegExp('^' + 
pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$') + + // // Find matching keys + // const toDelete = response.Contents + // .map((obj: any) => this.fileNameToKey(obj.Key)) + // .filter((key: string) => regex.test(key)) + // .map((key: string) => ({ Key: this.getFilePath(key) })) + + // if (toDelete.length > 0) { + // const deleteCommand = new DeleteObjectsCommand({ + // Bucket: this.bucketName, + // Delete: { Objects: toDelete }, + // }) + + // await this.s3Client.send(deleteCommand) + // } + } catch (err) { + console.error('R2 cache pattern delete error:', err) + } + } + + isEnabled(): boolean { + return true + } + + async clear(): Promise { + try { + // const { ListObjectsV2Command, DeleteObjectsCommand } = require('@aws-sdk/client-s3') + + // const listCommand = new ListObjectsV2Command({ + // Bucket: this.bucketName, + // }) + + // const response = await this.s3Client.send(listCommand) + // if (!response.Contents || response.Contents.length === 0) return + + // const toDelete = response.Contents.map((obj: any) => ({ Key: obj.Key })) + + // const deleteCommand = new DeleteObjectsCommand({ + // Bucket: this.bucketName, + // Delete: { Objects: toDelete }, + // }) + + // await this.s3Client.send(deleteCommand) + } catch (err) { + console.error('R2 cache clear error:', err) + } + } + + getPublicUrl(key: CacheKey): string | null { + if (!this.cdnUrl) return null + + const filePath = this.getFilePath(key) + return `${this.cdnUrl}/${filePath}` + } + + /** + * Convert cache key to file path + */ + private getFilePath(key: CacheKey): string { + const safeName = key.replace(/[^a-zA-Z0-9-_:]/g, '_') + return `${safeName}.json` + } + + /** + * Convert file name back to cache key + */ + private fileNameToKey(fileName: string): CacheKey { + return fileName.replace(/\.json$/, '') + } +} diff --git a/lib/hosted/cache/redis-provider.ts b/lib/hosted/cache/redis-provider.ts deleted file mode 100644 index 883ee98..0000000 --- a/lib/hosted/cache/redis-provider.ts +++ /dev/null 
@@ -1,78 +0,0 @@ -import type { CacheProvider, CacheKey } from '@/lib/providers/cache/types' - -/** - * Redis Cache Provider (Hosted) - * - * Uses Redis/Upstash/Vercel KV for fast, distributed caching. - * Suitable for multi-instance deployments and serverless environments. - * - * Install: npm install @upstash/redis - * Or: npm install ioredis - */ -export class RedisCacheProvider implements CacheProvider { - private redis: any // Redis client (Upstash or ioredis) - private defaultTtl: number - - constructor(config: { url: string; token?: string; defaultTtl?: number }) { - this.defaultTtl = config.defaultTtl || 3600 // 1 hour default - - // Initialize Redis client based on available packages - if (config.token) { - // Upstash Redis (serverless-friendly) - // const { Redis } = require('@upstash/redis') - // this.redis = new Redis({ url: config.url, token: config.token }) - throw new Error('Upstash Redis not yet implemented. Install @upstash/redis and uncomment.') - } else { - // Standard Redis (ioredis) - // const Redis = require('ioredis') - // this.redis = new Redis(config.url) - throw new Error('Redis not yet implemented. 
Install ioredis and uncomment.') - } - } - - async get(key: CacheKey): Promise { - const value = await this.redis.get(key) - - if (!value) { - return null - } - - try { - return JSON.parse(value) as T - } catch { - return value as T - } - } - - async set(key: CacheKey, value: T, ttl?: number): Promise { - const serialized = JSON.stringify(value) - const expiresIn = ttl || this.defaultTtl - - if (expiresIn > 0) { - await this.redis.setex(key, expiresIn, serialized) - } else { - await this.redis.set(key, serialized) - } - } - - async delete(key: CacheKey): Promise { - await this.redis.del(key) - } - - async deletePattern(pattern: string): Promise { - // Scan for keys matching pattern - const keys = await this.redis.keys(pattern) - - if (keys.length > 0) { - await this.redis.del(...keys) - } - } - - isEnabled(): boolean { - return true - } - - async clear(): Promise { - await this.redis.flushdb() - } -} diff --git a/lib/hosted/cache/vercel-blob-provider.ts b/lib/hosted/cache/vercel-blob-provider.ts new file mode 100644 index 0000000..56d6c38 --- /dev/null +++ b/lib/hosted/cache/vercel-blob-provider.ts @@ -0,0 +1,148 @@ +import type { CacheProvider, CacheKey } from '@/lib/providers/cache/types' + +/** + * Vercel Blob Cache Provider (Hosted) + * + * Stores cache as JSON blobs in Vercel Blob Storage. + * Simple, integrated with Vercel deployments, globally distributed. + * + * Requires: @vercel/blob + * + * Setup: + * 1. Enable Blob Storage in Vercel dashboard + * 2. Set BLOB_READ_WRITE_TOKEN in environment variables + * 3. Blobs are automatically served via Vercel CDN + */ +export class VercelBlobCacheProvider implements CacheProvider { + private blob: any + + constructor() { + // Requires: npm install @vercel/blob + // const { put, del, list, head } = require('@vercel/blob') + // this.blob = { put, del, list, head } + + throw new Error('Vercel Blob provider not yet implemented. 
Install @vercel/blob and uncomment.') + } + + async get(key: CacheKey): Promise { + try { + // const { head } = this.blob + // const pathname = this.getFilePath(key) + + // // Check if blob exists + // const blobHead = await head(pathname) + // if (!blobHead) return null + + // // Fetch the blob + // const response = await fetch(blobHead.url) + // if (!response.ok) return null + + // const text = await response.text() + // return JSON.parse(text) as T + + return null + } catch (err) { + console.error('Vercel Blob cache read error:', err) + return null + } + } + + async set(key: CacheKey, value: T): Promise { + try { + // const { put } = this.blob + // const pathname = this.getFilePath(key) + // const json = JSON.stringify(value) + + // await put(pathname, json, { + // access: 'public', + // contentType: 'application/json', + // cacheControlMaxAge: 3600, // Cache in CDN for 1 hour + // }) + } catch (err) { + console.error('Vercel Blob cache write error:', err) + throw err + } + } + + async delete(key: CacheKey): Promise { + try { + // const { del, head } = this.blob + // const pathname = this.getFilePath(key) + + // // Get blob URL first + // const blobHead = await head(pathname) + // if (blobHead) { + // await del(blobHead.url) + // } + } catch (err) { + console.error('Vercel Blob cache delete error:', err) + } + } + + async deletePattern(pattern: string): Promise { + try { + // const { list, del } = this.blob + + // // List all blobs + // const { blobs } = await list() + + // // Convert glob pattern to regex + // const regex = new RegExp('^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$') + + // // Find matching blobs + // const toDelete = blobs + // .map((blob: any) => this.fileNameToKey(blob.pathname)) + // .filter((key: string) => regex.test(key)) + + // // Delete each blob + // for (const key of toDelete) { + // const pathname = this.getFilePath(key) + // const blobHead = await head(pathname) + // if (blobHead) { + // await del(blobHead.url) + // } + 
// } + } catch (err) { + console.error('Vercel Blob cache pattern delete error:', err) + } + } + + isEnabled(): boolean { + return true + } + + async clear(): Promise { + try { + // const { list, del } = this.blob + + // const { blobs } = await list() + + // for (const blob of blobs) { + // await del(blob.url) + // } + } catch (err) { + console.error('Vercel Blob cache clear error:', err) + } + } + + getPublicUrl(key: CacheKey): string | null { + // Vercel Blob URLs are dynamic and need to be fetched via head() + // Not recommended for direct access - use app as gateway + return null + } + + /** + * Convert cache key to file path + */ + private getFilePath(key: CacheKey): string { + const safeName = key.replace(/[^a-zA-Z0-9-_:]/g, '_') + return `cache/${safeName}.json` + } + + /** + * Convert file name back to cache key + */ + private fileNameToKey(pathname: string): CacheKey { + return pathname.replace(/^cache\//, '').replace(/\.json$/, '') + } +} diff --git a/lib/providers/cache/README.md b/lib/providers/cache/README.md new file mode 100644 index 0000000..b63df26 --- /dev/null +++ b/lib/providers/cache/README.md @@ -0,0 +1,333 @@ +# Cache Provider - Usage Guide + +## Architecture + +``` +User Request + ↓ +Next.js API Route + ↓ +Cache Provider (check cache) + ├─ Cache HIT → Return cached JSON + └─ Cache MISS → Query DB → Cache result → Return +``` + +**Key Points:** +- All requests go through your Next.js app (not direct CDN access) +- You control URLs, routing, authentication, rate limiting +- Cache is transparent to users +- CDN accelerates object storage reads + +## Usage Example + +### 1. 
Publishing Flow (Write to Cache) + +```typescript +// app/api/collections/[slug]/items/[itemSlug]/publish/route.ts +import { getCacheProvider } from '@/lib/providers' +import { getAnalyticsProvider } from '@/lib/providers' + +export async function POST( + req: Request, + { params }: { params: { slug: string; itemSlug: string } } +) { + const cache = getCacheProvider() + const analytics = getAnalyticsProvider() + + // 1. Update database + const { data: item } = await supabase + .from('items') + .update({ published_at: new Date().toISOString() }) + .eq('slug', params.itemSlug) + .select() + .single() + + // 2. Cache the published item + const cacheKey = `item:${params.slug}:${params.itemSlug}` + await cache.set(cacheKey, item) + + // 3. Track analytics + await analytics.track({ + event: 'item_published', + properties: { collection: params.slug, item: params.itemSlug }, + }) + + return Response.json({ success: true }) +} +``` + +### 2. Reading Flow (Read from Cache) + +```typescript +// app/api/public/collections/[slug]/items/[itemSlug]/route.ts +import { getCacheProvider } from '@/lib/providers' + +export async function GET( + req: Request, + { params }: { params: { slug: string; itemSlug: string } } +) { + const cache = getCacheProvider() + const cacheKey = `item:${params.slug}:${params.itemSlug}` + + // Try cache first + const cached = await cache.get(cacheKey) + if (cached) { + return Response.json(cached, { + headers: { + 'X-Cache': 'HIT', + 'Cache-Control': 'public, max-age=60', // Browser cache for 1 min + }, + }) + } + + // Cache miss - query database + const { data: item } = await supabase + .from('items') + .select('*') + .eq('slug', params.itemSlug) + .not('published_at', 'is', null) // Only published items + .single() + + if (!item) { + return Response.json({ error: 'Not found' }, { status: 404 }) + } + + // Cache for next time + await cache.set(cacheKey, item) + + return Response.json(item, { + headers: { + 'X-Cache': 'MISS', + 'Cache-Control': 'public, 
max-age=60', + }, + }) +} +``` + +### 3. Invalidation Flow (Update/Unpublish) + +```typescript +// app/api/collections/[slug]/items/[itemSlug]/route.ts +import { getCacheProvider } from '@/lib/providers' + +export async function PATCH( + req: Request, + { params }: { params: { slug: string; itemSlug: string } } +) { + const cache = getCacheProvider() + const body = await req.json() + + // Update database + const { data: item } = await supabase + .from('items') + .update(body) + .eq('slug', params.itemSlug) + .select() + .single() + + // Invalidate cache + const cacheKey = `item:${params.slug}:${params.itemSlug}` + await cache.delete(cacheKey) + + // If still published, re-cache + if (item.published_at) { + await cache.set(cacheKey, item) + } + + return Response.json(item) +} +``` + +### 4. Bulk Invalidation (Collection Updates) + +```typescript +// app/api/collections/[slug]/route.ts +import { getCacheProvider } from '@/lib/providers' + +export async function PATCH( + req: Request, + { params }: { params: { slug: string } } +) { + const cache = getCacheProvider() + + // Update collection metadata + await supabase.from('collections').update(...).eq('slug', params.slug) + + // Invalidate all cached items in this collection + await cache.deletePattern(`item:${params.slug}:*`) + + return Response.json({ success: true }) +} +``` + +## Cache Providers + +### Supabase Storage (Free for OSS) + +```bash +CACHE_PROVIDER=supabase-storage +CACHE_STORAGE_BUCKET=cache +``` + +**Setup:** +1. Create a public bucket named 'cache' in Supabase dashboard +2. Disable RLS on the bucket (or set permissive policies) +3. 
Optionally configure CDN in Supabase settings + +**Pros:** +- FREE (included with Supabase) +- Globally distributed via CDN +- No extra dependencies + +**Cons:** +- Requires Supabase instance +- May have rate limits on free tier + +### Cloudflare R2 (Cheap for Production) + +```bash +CACHE_PROVIDER=r2 +R2_ACCOUNT_ID=your-account-id +R2_ACCESS_KEY_ID=your-access-key +R2_SECRET_ACCESS_KEY=your-secret +R2_BUCKET_NAME=cache +R2_CDN_URL=https://cache.yourdomain.com (optional) +``` + +**Setup:** +1. Install: `npm install @aws-sdk/client-s3` +2. Create R2 bucket in Cloudflare dashboard +3. Generate API tokens (Access Key ID + Secret) +4. Optional: Configure custom domain with CDN + +**Pros:** +- Cheapest ($0.015/GB storage, FREE egress) +- Fast globally (Cloudflare network) +- No rate limits + +**Cons:** +- Requires AWS SDK dependency +- Cloudflare account needed + +### Vercel Blob (Simple for Vercel) + +```bash +CACHE_PROVIDER=vercel-blob +BLOB_READ_WRITE_TOKEN=vercel_blob_... (auto-set by Vercel) +``` + +**Setup:** +1. Install: `npm install @vercel/blob` +2. Enable Blob Storage in Vercel dashboard +3. Token is automatically set in production + +**Pros:** +- Dead simple on Vercel +- Globally distributed +- No config needed + +**Cons:** +- Only works on Vercel +- More expensive than R2 +- Requires Vercel account + +### File System (Dev/Testing) + +```bash +CACHE_PROVIDER=filesystem +CACHE_FILESYSTEM_DIR=./.cache +``` + +**Pros:** +- No external dependencies +- Works anywhere +- Good for development + +**Cons:** +- Not shared across instances +- Lost on serverless deployments +- No CDN acceleration + +## Metrics & Monitoring + +Track cache performance: + +```typescript +import { getCacheProvider, getAnalyticsProvider } from '@/lib/providers' + +export async function GET(req: Request) { + const cache = getCacheProvider() + const analytics = getAnalyticsProvider() + + const cacheKey = 'item:...' 
+ const cached = await cache.get(cacheKey) + + // Track cache hit/miss + await analytics.track({ + event: cached ? 'cache_hit' : 'cache_miss', + properties: { key: cacheKey }, + }) + + // ... rest of handler +} +``` + +## Best Practices + +1. **Cache published content only** - Don't cache draft/private content +2. **Use descriptive keys** - `item:blog:my-post`, not `abc123` +3. **Invalidate on updates** - Delete cache when content changes +4. **Set browser cache** - Add `Cache-Control` headers for CDN +5. **Monitor hit rate** - Track cache effectiveness with analytics +6. **Graceful degradation** - If cache fails, fall back to DB +7. **Use patterns for bulk ops** - `deletePattern('item:blog:*')` + +## Security + +**Important:** Cache is for PUBLIC content only. + +- Never cache authenticated/private data +- Never cache user-specific data +- Cache storage should be public (object storage) +- Your app enforces auth before serving cached data + +Example: + +```typescript +export async function GET(req: Request) { + // 1. Check authentication FIRST + const user = await getUser(req) + if (!user) { + return Response.json({ error: 'Unauthorized' }, { status: 401 }) + } + + // 2. Then check cache for PUBLIC data + const cache = getCacheProvider() + const cached = await cache.get('item:...') + + // Your app controls access, not the cache +} +``` + +## Troubleshooting + +### Cache not working? + +1. Check `cache.isEnabled()` returns true +2. Verify env vars are set correctly +3. Check bucket/storage exists and is accessible +4. Look for errors in console logs + +### High cache miss rate? + +1. Verify cache keys are consistent +2. Check if cache is being invalidated too often +3. Ensure cache writes are completing successfully +4. Monitor object storage for actual files + +### Slow cache reads? + +1. Enable CDN for your object storage +2. Consider geographic distribution +3. Check network latency to storage provider +4. 
May need to upgrade storage tier diff --git a/lib/providers/cache/filesystem.ts b/lib/providers/cache/filesystem.ts index 5485310..74010ac 100644 --- a/lib/providers/cache/filesystem.ts +++ b/lib/providers/cache/filesystem.ts @@ -1,15 +1,15 @@ -import type { CacheProvider, CacheKey, CacheEntry } from './types' +import type { CacheProvider, CacheKey } from './types' import { promises as fs } from 'fs' import path from 'path' /** - * File System Cache Provider (OSS Opt-in) + * File System Cache Provider (OSS) * - * Stores cache entries as JSON files on disk. - * Persistent across restarts, no external dependencies. - * Good for single-instance deployments with disk access. + * Stores cache as JSON files on disk. + * Persistent, no external dependencies. + * Good for single-instance OSS deployments. * - * WARNING: Not suitable for serverless environments or multi-instance deployments. + * WARNING: Not suitable for serverless or multi-instance deployments. */ export class FileSystemCacheProvider implements CacheProvider { private cacheDir: string @@ -22,27 +22,14 @@ export class FileSystemCacheProvider implements CacheProvider { try { const filePath = this.getFilePath(key) const data = await fs.readFile(filePath, 'utf-8') - const entry: CacheEntry = JSON.parse(data) - - // Check if expired - if (entry.expiresAt && entry.expiresAt < Date.now()) { - await this.delete(key) - return null - } - - return entry.value + return JSON.parse(data) as T } catch (err) { // File doesn't exist or read error return null } } - async set(key: CacheKey, value: T, ttl?: number): Promise { - const entry: CacheEntry = { - value, - expiresAt: ttl ? 
Date.now() + ttl * 1000 : undefined, - } - + async set(key: CacheKey, value: T): Promise { const filePath = this.getFilePath(key) // Ensure directory exists @@ -50,7 +37,7 @@ export class FileSystemCacheProvider implements CacheProvider { // Write to temp file then rename (atomic operation) const tempPath = `${filePath}.tmp` - await fs.writeFile(tempPath, JSON.stringify(entry), 'utf-8') + await fs.writeFile(tempPath, JSON.stringify(value), 'utf-8') await fs.rename(tempPath, filePath) } diff --git a/lib/providers/cache/memory.ts b/lib/providers/cache/memory.ts deleted file mode 100644 index 6f57454..0000000 --- a/lib/providers/cache/memory.ts +++ /dev/null @@ -1,96 +0,0 @@ -import type { CacheProvider, CacheKey, CacheEntry } from './types' - -/** - * Memory Cache Provider (OSS Opt-in) - * - * Simple in-memory cache using a Map. - * Fast but not persistent across restarts. - * Good for development or single-instance deployments. - * - * WARNING: Not suitable for multi-instance deployments (cache will be per-instance). - */ -export class MemoryCacheProvider implements CacheProvider { - private cache: Map = new Map() - private cleanupInterval: NodeJS.Timeout | null = null - - constructor() { - // Clean up expired entries every 60 seconds - this.cleanupInterval = setInterval(() => { - this.cleanupExpired() - }, 60000) - } - - async get(key: CacheKey): Promise { - const entry = this.cache.get(key) - - if (!entry) { - return null - } - - // Check if expired - if (entry.expiresAt && entry.expiresAt < Date.now()) { - this.cache.delete(key) - return null - } - - return entry.value as T - } - - async set(key: CacheKey, value: T, ttl?: number): Promise { - const entry: CacheEntry = { - value, - expiresAt: ttl ? 
Date.now() + ttl * 1000 : undefined, - } - - this.cache.set(key, entry) - } - - async delete(key: CacheKey): Promise { - this.cache.delete(key) - } - - async deletePattern(pattern: string): Promise { - // Convert glob pattern to regex - const regex = new RegExp('^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$') - - const keysToDelete: CacheKey[] = [] - for (const key of this.cache.keys()) { - if (regex.test(key)) { - keysToDelete.push(key) - } - } - - keysToDelete.forEach((key) => this.cache.delete(key)) - } - - isEnabled(): boolean { - return true - } - - async clear(): Promise { - this.cache.clear() - } - - private cleanupExpired(): void { - const now = Date.now() - const keysToDelete: CacheKey[] = [] - - for (const [key, entry] of this.cache.entries()) { - if (entry.expiresAt && entry.expiresAt < now) { - keysToDelete.push(key) - } - } - - keysToDelete.forEach((key) => this.cache.delete(key)) - } - - /** - * Clean up interval on shutdown - */ - destroy(): void { - if (this.cleanupInterval) { - clearInterval(this.cleanupInterval) - this.cleanupInterval = null - } - } -} diff --git a/lib/providers/cache/supabase-storage.ts b/lib/providers/cache/supabase-storage.ts new file mode 100644 index 0000000..b1ebbaa --- /dev/null +++ b/lib/providers/cache/supabase-storage.ts @@ -0,0 +1,151 @@ +import type { CacheProvider, CacheKey } from './types' +import { createClient } from '@/lib/supabase/server' + +/** + * Supabase Storage Cache Provider (OSS) + * + * Stores cache as JSON blobs in Supabase Storage. + * Free for OSS users (already have Supabase). + * Persistent, globally distributed via CDN. + * + * Storage bucket: 'cache' (public, with CDN) + * Path format: cache/{key}.json + * + * Your app fetches from storage, users never access directly. 
+ */ +export class SupabaseStorageCacheProvider implements CacheProvider { + private bucketName: string + + constructor(bucketName: string = 'cache') { + this.bucketName = bucketName + } + + async get(key: CacheKey): Promise { + try { + const supabase = await createClient() + const filePath = this.getFilePath(key) + + const { data, error } = await supabase.storage + .from(this.bucketName) + .download(filePath) + + if (error || !data) { + return null + } + + const text = await data.text() + return JSON.parse(text) as T + } catch (err) { + console.error('Cache read error:', err) + return null + } + } + + async set(key: CacheKey, value: T): Promise { + try { + const supabase = await createClient() + const filePath = this.getFilePath(key) + const json = JSON.stringify(value) + + // Upload (upsert if exists) + const { error } = await supabase.storage + .from(this.bucketName) + .upload(filePath, json, { + contentType: 'application/json', + upsert: true, + cacheControl: '3600', // Cache in CDN for 1 hour + }) + + if (error) { + throw error + } + } catch (err) { + console.error('Cache write error:', err) + throw err + } + } + + async delete(key: CacheKey): Promise { + try { + const supabase = await createClient() + const filePath = this.getFilePath(key) + + await supabase.storage.from(this.bucketName).remove([filePath]) + } catch (err) { + console.error('Cache delete error:', err) + } + } + + async deletePattern(pattern: string): Promise { + try { + const supabase = await createClient() + + // List all files + const { data: files } = await supabase.storage.from(this.bucketName).list() + + if (!files) return + + // Convert glob pattern to regex + const regex = new RegExp('^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$') + + // Find matching files + const toDelete = files + .map((f) => this.fileNameToKey(f.name)) + .filter((key) => regex.test(key)) + .map((key) => this.getFilePath(key)) + + if (toDelete.length > 0) { + await 
supabase.storage.from(this.bucketName).remove(toDelete) + } + } catch (err) { + console.error('Cache pattern delete error:', err) + } + } + + isEnabled(): boolean { + return true + } + + async clear(): Promise { + try { + const supabase = await createClient() + + // List all files + const { data: files } = await supabase.storage.from(this.bucketName).list() + + if (!files || files.length === 0) return + + const filePaths = files.map((f) => f.name) + await supabase.storage.from(this.bucketName).remove(filePaths) + } catch (err) { + console.error('Cache clear error:', err) + } + } + + getPublicUrl(key: CacheKey): string | null { + // Note: This returns the CDN URL, but you probably won't use it directly + // Your app should be the gateway + const supabase = createClient() + const filePath = this.getFilePath(key) + + const { data } = supabase.storage.from(this.bucketName).getPublicUrl(filePath) + + return data?.publicUrl || null + } + + /** + * Convert cache key to file path + */ + private getFilePath(key: CacheKey): string { + // Replace special characters to make it filesystem-safe + const safeName = key.replace(/[^a-zA-Z0-9-_:]/g, '_') + return `${safeName}.json` + } + + /** + * Convert file name back to cache key + */ + private fileNameToKey(fileName: string): CacheKey { + return fileName.replace(/\.json$/, '') + } +} diff --git a/lib/providers/cache/types.ts b/lib/providers/cache/types.ts index e87dc88..b5f5511 100644 --- a/lib/providers/cache/types.ts +++ b/lib/providers/cache/types.ts @@ -1,34 +1,39 @@ /** * Cache provider types * - * Used for caching published content for fast queries without hitting the database. - * Example: When an item is published, cache the serialized version for public API access. + * Used for caching published content in object storage for fast queries without DB hits. 
+ * + * Architecture: + * User → Next.js API → Object Storage Cache → DB (if miss) + * + * All requests go through your app (not direct CDN access): + * - Control URLs, routing, auth + * - Track metrics and analytics + * - Cache is transparent to users + * - CDN accelerates object storage reads */ export type CacheKey = string -export interface CacheEntry { - value: T - expiresAt?: number // Unix timestamp in milliseconds -} - /** * Cache provider interface + * + * Stores JSON blobs in object storage (R2, Vercel Blob, Supabase Storage) + * App fetches from cache, falls back to DB */ export interface CacheProvider { /** * Get a value from cache - * Returns null if not found or expired + * Returns null if not found */ get(key: CacheKey): Promise /** * Set a value in cache - * @param key Cache key - * @param value Value to cache - * @param ttl Time to live in seconds (optional) + * @param key Cache key (e.g., "collection:blog:my-post") + * @param value Value to cache (will be JSON stringified) */ - set(key: CacheKey, value: T, ttl?: number): Promise + set(key: CacheKey, value: T): Promise /** * Delete a value from cache @@ -50,26 +55,10 @@ export interface CacheProvider { * Clear all cache entries (use with caution) */ clear(): Promise -} -/** - * Cache configuration - */ -export interface CacheConfig { - enabled: boolean - provider: 'disabled' | 'memory' | 'filesystem' | 'redis' | 'upstash' | 'vercel-kv' - - // Redis/Upstash configuration - redis?: { - url: string - token?: string - } - - // File system configuration - filesystem?: { - directory: string - } - - // Default TTL in seconds - defaultTtl?: number + /** + * Get a public URL for direct access (optional) + * Most implementations won't use this - app is the gateway + */ + getPublicUrl?(key: CacheKey): string | null } diff --git a/lib/providers/index.ts b/lib/providers/index.ts index b67af61..f22d077 100644 --- a/lib/providers/index.ts +++ b/lib/providers/index.ts @@ -10,8 +10,8 @@ import { NoOpBillingProvider 
} from './billing/noop' import { NoOpAnalyticsProvider } from './analytics/noop' import { NoOpStorageProvider } from './storage/noop' import { DisabledCacheProvider } from './cache/disabled' -import { MemoryCacheProvider } from './cache/memory' import { FileSystemCacheProvider } from './cache/filesystem' +import { SupabaseStorageCacheProvider } from './cache/supabase-storage' // Global provider instances let contextProvider: ContextProvider | null = null @@ -80,41 +80,62 @@ function initializeOSSProviders() { /** * Initialize cache provider based on configuration * Cache is opt-in for both OSS and hosted modes + * + * Object storage options store JSON blobs in cloud storage. + * App fetches from cache, falls back to DB - users never access storage directly. */ function initializeCacheProvider() { const cacheType = process.env.CACHE_PROVIDER || 'disabled' switch (cacheType) { - case 'memory': - cacheProvider = new MemoryCacheProvider() - console.log(' - Cache: memory (in-process)') - break - case 'filesystem': const cacheDir = process.env.CACHE_FILESYSTEM_DIR || './.cache' cacheProvider = new FileSystemCacheProvider(cacheDir) console.log(` - Cache: filesystem (${cacheDir})`) break - case 'redis': - case 'upstash': + case 'supabase-storage': + const bucketName = process.env.CACHE_STORAGE_BUCKET || 'cache' + cacheProvider = new SupabaseStorageCacheProvider(bucketName) + console.log(` - Cache: Supabase Storage (bucket: ${bucketName})`) + break + + case 'r2': try { - const { RedisCacheProvider } = require('@/lib/hosted/cache/redis-provider') - const redisUrl = process.env.REDIS_URL || process.env.UPSTASH_REDIS_URL - const redisToken = process.env.UPSTASH_REDIS_TOKEN + const { R2CacheProvider } = require('@/lib/hosted/cache/r2-provider') + + const accountId = process.env.R2_ACCOUNT_ID + const accessKeyId = process.env.R2_ACCESS_KEY_ID + const secretAccessKey = process.env.R2_SECRET_ACCESS_KEY + const bucketName = process.env.R2_BUCKET_NAME || 'cache' + const cdnUrl = 
process.env.R2_CDN_URL - if (!redisUrl) { - throw new Error('REDIS_URL or UPSTASH_REDIS_URL is required') + if (!accountId || !accessKeyId || !secretAccessKey) { + throw new Error('R2_ACCOUNT_ID, R2_ACCESS_KEY_ID, and R2_SECRET_ACCESS_KEY are required') } - cacheProvider = new RedisCacheProvider({ - url: redisUrl, - token: redisToken, - defaultTtl: parseInt(process.env.CACHE_DEFAULT_TTL || '3600'), + cacheProvider = new R2CacheProvider({ + accountId, + accessKeyId, + secretAccessKey, + bucketName, + cdnUrl, }) - console.log(` - Cache: ${cacheType}`) + console.log(` - Cache: Cloudflare R2 (bucket: ${bucketName})`) + } catch (err) { + console.error('Failed to initialize R2 cache:', err) + console.log(' - Cache: disabled (fallback)') + cacheProvider = new DisabledCacheProvider() + } + break + + case 'vercel-blob': + try { + const { VercelBlobCacheProvider } = require('@/lib/hosted/cache/vercel-blob-provider') + cacheProvider = new VercelBlobCacheProvider() + console.log(' - Cache: Vercel Blob') } catch (err) { - console.error('Failed to initialize Redis cache:', err) + console.error('Failed to initialize Vercel Blob cache:', err) console.log(' - Cache: disabled (fallback)') cacheProvider = new DisabledCacheProvider() } From 0f94b0f4c4cef2313b08c9f031780edd31582526 Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 12 Nov 2025 06:51:14 +0000 Subject: [PATCH 5/5] Add storage provider architecture (media + video) Creates provider pattern for media and video storage. Not yet integrated with existing code - just the architecture. 
Media Storage (images/files): - Supabase Storage provider (OSS default) - R2 provider (hosted option, stub) - Interface: uploadImage(), uploadFile(), delete() Video Storage (separate - different needs): - Disabled provider (default) - Cloudflare Stream provider (transcoding, HLS, thumbnails) - Bunny.net Stream provider (cheapest option) - Interface: uploadVideo(), getVideoInfo(), playback URLs Design decisions: - Separate media vs video (different requirements) - OSS default: Supabase Storage (already have it, free) - Hosted options: R2 for media, CF Stream/Bunny for video - Video is opt-in (most CMSes don't need it) Not yet implemented: - Provider registry initialization - Integration with existing upload code - Video upload UI - Usage tracking/limits --- .env.example | 49 +++++ lib/hosted/media/r2-provider.ts | 124 ++++++++++++ lib/hosted/video/bunny-provider.ts | 171 ++++++++++++++++ .../video/cloudflare-stream-provider.ts | 161 +++++++++++++++ lib/providers/STORAGE.md | 188 ++++++++++++++++++ lib/providers/media/supabase-storage.ts | 102 ++++++++++ lib/providers/media/types.ts | 83 ++++++++ lib/providers/video/disabled.ts | 35 ++++ lib/providers/video/types.ts | 109 ++++++++++ 9 files changed, 1022 insertions(+) create mode 100644 lib/hosted/media/r2-provider.ts create mode 100644 lib/hosted/video/bunny-provider.ts create mode 100644 lib/hosted/video/cloudflare-stream-provider.ts create mode 100644 lib/providers/STORAGE.md create mode 100644 lib/providers/media/supabase-storage.ts create mode 100644 lib/providers/media/types.ts create mode 100644 lib/providers/video/disabled.ts create mode 100644 lib/providers/video/types.ts diff --git a/.env.example b/.env.example index 9543e1b..6739dc3 100644 --- a/.env.example +++ b/.env.example @@ -86,3 +86,52 @@ SUPABASE_SERVICE_ROLE_KEY=your-service-role-key # Requires: @vercel/blob package (npm install @vercel/blob) # CACHE_PROVIDER=vercel-blob # BLOB_READ_WRITE_TOKEN=your-vercel-blob-token (set by Vercel) + +# 
===================================================================== +# MEDIA STORAGE (Images & Files) +# ===================================================================== +# Storage for user-uploaded images and files +# Default: Supabase Storage (OSS) + +# Options: supabase-storage (default), r2 +# MEDIA_STORAGE_PROVIDER=supabase-storage + +# Supabase Storage (default for OSS) +# Uses your existing Supabase instance +# Free tier: 1GB storage, 2GB bandwidth/month +# MEDIA_STORAGE_PROVIDER=supabase-storage +# MEDIA_STORAGE_BUCKET=media + +# Cloudflare R2 (hosted option) +# Cheap ($0.015/GB storage, FREE egress) +# Requires: @aws-sdk/client-s3 +# MEDIA_STORAGE_PROVIDER=r2 +# MEDIA_R2_ACCOUNT_ID=your-cloudflare-account-id +# MEDIA_R2_ACCESS_KEY_ID=your-r2-access-key +# MEDIA_R2_SECRET_ACCESS_KEY=your-r2-secret +# MEDIA_R2_BUCKET_NAME=media +# MEDIA_R2_CDN_URL=https://media.yourdomain.com (optional) + +# ===================================================================== +# VIDEO STORAGE (Optional) +# ===================================================================== +# Video hosting with transcoding and adaptive streaming +# Disabled by default - enable if you need video support + +# Options: disabled (default), cloudflare-stream, bunny +# VIDEO_PROVIDER=disabled + +# Cloudflare Stream +# $5/1000 min stored, $1/1000 min delivered +# Good for small-medium video usage +# VIDEO_PROVIDER=cloudflare-stream +# CLOUDFLARE_STREAM_API_TOKEN=your-api-token +# CLOUDFLARE_STREAM_ACCOUNT_ID=your-account-id + +# Bunny.net Stream (cheapest!) 
+# $0.005/GB stored, $0.01/GB delivered +# Best value for money +# VIDEO_PROVIDER=bunny +# BUNNY_STREAM_API_KEY=your-api-key +# BUNNY_STREAM_LIBRARY_ID=your-library-id +# BUNNY_STREAM_CDN_HOSTNAME=your-cdn-hostname.b-cdn.net diff --git a/lib/hosted/media/r2-provider.ts b/lib/hosted/media/r2-provider.ts new file mode 100644 index 0000000..23d7d4e --- /dev/null +++ b/lib/hosted/media/r2-provider.ts @@ -0,0 +1,124 @@ +import type { MediaStorageProvider, UploadResult, UploadOptions, StorageInfo } from '@/lib/providers/media/types' + +/** + * Cloudflare R2 Media Provider (Hosted) + * + * Uses Cloudflare R2 for images and files. + * Cheap ($0.015/GB), free egress, fast globally. + * Good for production/hosted deployments. + * + * Requires: @aws-sdk/client-s3 + */ +export class R2MediaProvider implements MediaStorageProvider { + private s3Client: any + private bucketName: string + private cdnUrl?: string + + constructor(config: { + accountId: string + accessKeyId: string + secretAccessKey: string + bucketName: string + cdnUrl?: string + }) { + this.bucketName = config.bucketName + this.cdnUrl = config.cdnUrl + + // Requires: npm install @aws-sdk/client-s3 + // const { S3Client } = require('@aws-sdk/client-s3') + + // this.s3Client = new S3Client({ + // region: 'auto', + // endpoint: `https://${config.accountId}.r2.cloudflarestorage.com`, + // credentials: { + // accessKeyId: config.accessKeyId, + // secretAccessKey: config.secretAccessKey, + // }, + // }) + + throw new Error('R2 Media Provider not yet implemented. 
Install @aws-sdk/client-s3.') + } + + async uploadImage(file: File, options?: UploadOptions): Promise { + return this.upload(file, options) + } + + async uploadFile(file: File, options?: UploadOptions): Promise { + return this.upload(file, options) + } + + private async upload(file: File, options?: UploadOptions): Promise { + // const { PutObjectCommand } = require('@aws-sdk/client-s3') + + // Generate filename + // const timestamp = Date.now() + // const random = Math.random().toString(36).substring(7) + // const ext = file.name.split('.').pop() + // const filename = options?.filename || `${timestamp}-${random}.${ext}` + + // Build path + // const path = options?.path ? `${options.path}/${filename}` : filename + + // Upload + // const buffer = await file.arrayBuffer() + // const command = new PutObjectCommand({ + // Bucket: this.bucketName, + // Key: path, + // Body: new Uint8Array(buffer), + // ContentType: options?.contentType || file.type, + // CacheControl: 'public, max-age=31536000', // Cache for 1 year + // }) + + // await this.s3Client.send(command) + + // const url = this.cdnUrl + // ?
`${this.cdnUrl}/${path}` + // : `https://${this.bucketName}.r2.cloudflarestorage.com/${path}` + + // return { + // url, + // size: file.size, + // contentType: file.type, + // id: path, + // cdnUrl: url, + // } + + throw new Error('Not implemented') + } + + async delete(urlOrId: string): Promise { + // const { DeleteObjectCommand } = require('@aws-sdk/client-s3') + + // Extract path from URL if needed + // let path = urlOrId + // if (urlOrId.startsWith('http')) { + // const url = new URL(urlOrId) + // path = url.pathname.substring(1) // Remove leading / + // } + + // const command = new DeleteObjectCommand({ + // Bucket: this.bucketName, + // Key: path, + // }) + + // await this.s3Client.send(command) + + throw new Error('Not implemented') + } + + async getStorageInfo(): Promise { + // Would need to list all objects and sum sizes + // Or use Cloudflare Analytics API + return { + usedBytes: 0, + limitBytes: null, // No limit on R2 + withinLimits: true, + usedGb: 0, + limitGb: null, + } + } + + isEnabled(): boolean { + return true + } +} diff --git a/lib/hosted/video/bunny-provider.ts b/lib/hosted/video/bunny-provider.ts new file mode 100644 index 0000000..121ba0a --- /dev/null +++ b/lib/hosted/video/bunny-provider.ts @@ -0,0 +1,171 @@ +import type { VideoStorageProvider, VideoUploadResult, VideoInfo, VideoUploadOptions, VideoStorageInfo } from '@/lib/providers/video/types' + +/** + * Bunny.net Stream Video Provider (Hosted) + * + * Uses Bunny.net Stream for video hosting. + * Pricing: $0.005/GB stored, $0.01/GB delivered (cheapest option!) 
+ * Includes: Transcoding, adaptive streaming, thumbnails + * + * Requires: Bunny.net account with Stream enabled + */ +export class BunnyStreamProvider implements VideoStorageProvider { + private apiKey: string + private libraryId: string + private cdnHostname: string + + constructor(config: { apiKey: string; libraryId: string; cdnHostname: string }) { + this.apiKey = config.apiKey + this.libraryId = config.libraryId + this.cdnHostname = config.cdnHostname + } + + async uploadVideo(file: File, options?: VideoUploadOptions): Promise { + // Create video object first + const createResponse = await fetch( + `https://video.bunnycdn.com/library/${this.libraryId}/videos`, + { + method: 'POST', + headers: { + AccessKey: this.apiKey, + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + title: options?.title || file.name, + }), + } + ) + + if (!createResponse.ok) { + throw new Error(`Failed to create video: ${createResponse.statusText}`) + } + + const createData = await createResponse.json() + const videoId = createData.guid + + // Upload file + const uploadResponse = await fetch( + `https://video.bunnycdn.com/library/${this.libraryId}/videos/${videoId}`, + { + method: 'PUT', + headers: { + AccessKey: this.apiKey, + }, + body: file, + } + ) + + if (!uploadResponse.ok) { + throw new Error(`Failed to upload video: ${uploadResponse.statusText}`) + } + + return { + id: videoId, + status: 'processing', + thumbnailUrl: `https://${this.cdnHostname}/${videoId}/thumbnail.jpg`, + size: file.size, + progress: 25, + } + } + + async getVideoInfo(videoId: string): Promise { + const response = await fetch( + `https://video.bunnycdn.com/library/${this.libraryId}/videos/${videoId}`, + { + headers: { + AccessKey: this.apiKey, + }, + } + ) + + if (!response.ok) { + throw new Error(`Failed to get video info: ${response.statusText}`) + } + + const video = await response.json() + + return { + id: video.guid, + status: this.mapStatus(video.status), + playbackUrl: + video.status 
=== 4 ? `https://${this.cdnHostname}/${video.guid}/playlist.m3u8` : undefined, + thumbnailUrl: `https://${this.cdnHostname}/${video.guid}/thumbnail.jpg`, + duration: video.length, + createdAt: new Date(video.dateUploaded), + } + } + + async delete(videoId: string): Promise { + const response = await fetch( + `https://video.bunnycdn.com/library/${this.libraryId}/videos/${videoId}`, + { + method: 'DELETE', + headers: { + AccessKey: this.apiKey, + }, + } + ) + + if (!response.ok) { + throw new Error(`Failed to delete video: ${response.statusText}`) + } + } + + async getStorageInfo(): Promise { + // Would need to get statistics from Bunny API + // https://docs.bunny.net/reference/videolibrarypublic_getvideolibrary + const response = await fetch(`https://video.bunnycdn.com/library/${this.libraryId}/statistics`, { + headers: { + AccessKey: this.apiKey, + }, + }) + + if (!response.ok) { + return { + minutesStored: 0, + minutesDelivered: 0, + storageLimitMinutes: null, + deliveryLimitMinutes: null, + withinLimits: true, + } + } + + const stats = await response.json() + + return { + minutesStored: Math.round((stats.storageUsed || 0) / (1024 * 1024 * 1024)), // Rough estimate + minutesDelivered: Math.round((stats.bandwidth || 0) / (1024 * 1024 * 1024)), + storageLimitMinutes: null, + deliveryLimitMinutes: null, + withinLimits: true, + } + } + + isEnabled(): boolean { + return true + } + + private mapStatus(bunnyStatus: number): 'uploading' | 'processing' | 'ready' | 'error' { + // Bunny status codes: + // 0 = Queued + // 1 = Processing + // 2 = Encoding + // 3 = Finished + // 4 = Resolution finished + // 5 = Error + switch (bunnyStatus) { + case 0: + return 'uploading' + case 1: + case 2: + case 3: + return 'processing' + case 4: + return 'ready' + case 5: + return 'error' + default: + return 'uploading' + } + } +} diff --git a/lib/hosted/video/cloudflare-stream-provider.ts b/lib/hosted/video/cloudflare-stream-provider.ts new file mode 100644 index 0000000..8f2adde --- 
/dev/null +++ b/lib/hosted/video/cloudflare-stream-provider.ts @@ -0,0 +1,161 @@ +import type { VideoStorageProvider, VideoUploadResult, VideoInfo, VideoUploadOptions, VideoStorageInfo } from '@/lib/providers/video/types' + +/** + * Cloudflare Stream Video Provider (Hosted) + * + * Uses Cloudflare Stream for video hosting. + * Pricing: $5/1000 min stored, $1/1000 min delivered + * Includes: Transcoding, adaptive streaming, thumbnails + * + * Requires: Cloudflare account with Stream enabled + */ +export class CloudflareStreamProvider implements VideoStorageProvider { + private apiToken: string + private accountId: string + + constructor(config: { apiToken: string; accountId: string }) { + this.apiToken = config.apiToken + this.accountId = config.accountId + } + + async uploadVideo(file: File, options?: VideoUploadOptions): Promise { + // Upload to Cloudflare Stream + // https://developers.cloudflare.com/stream/uploading-videos/direct-creator-uploads/ + + const formData = new FormData() + formData.append('file', file) + + if (options?.title) { + formData.append('meta', JSON.stringify({ name: options.title })) + } + + if (options?.thumbnailTime) { + formData.append('thumbnailTimestampPct', String(options.thumbnailTime)) + } + + if (options?.webhookUrl) { + formData.append('webhookUrl', options.webhookUrl) + } + + const response = await fetch( + `https://api.cloudflare.com/client/v4/accounts/${this.accountId}/stream`, + { + method: 'POST', + headers: { + Authorization: `Bearer ${this.apiToken}`, + }, + body: formData, + } + ) + + if (!response.ok) { + const error = await response.json() + throw new Error(`Upload failed: ${error.errors?.[0]?.message || response.statusText}`) + } + + const data = await response.json() + const video = data.result + + return { + id: video.uid, + status: this.mapStatus(video.status?.state), + playbackUrl: video.status?.state === 'ready' ? 
this.getPlaybackUrl(video.uid) : undefined, + thumbnailUrl: video.thumbnail, + duration: video.duration, + size: file.size, + progress: this.getProgress(video.status?.state), + } + } + + async getVideoInfo(videoId: string): Promise { + const response = await fetch( + `https://api.cloudflare.com/client/v4/accounts/${this.accountId}/stream/${videoId}`, + { + headers: { + Authorization: `Bearer ${this.apiToken}`, + }, + } + ) + + if (!response.ok) { + throw new Error(`Failed to get video info: ${response.statusText}`) + } + + const data = await response.json() + const video = data.result + + return { + id: video.uid, + status: this.mapStatus(video.status?.state), + playbackUrl: video.status?.state === 'ready' ? this.getPlaybackUrl(video.uid) : undefined, + thumbnailUrl: video.thumbnail, + duration: video.duration, + createdAt: new Date(video.created), + } + } + + async delete(videoId: string): Promise { + const response = await fetch( + `https://api.cloudflare.com/client/v4/accounts/${this.accountId}/stream/${videoId}`, + { + method: 'DELETE', + headers: { + Authorization: `Bearer ${this.apiToken}`, + }, + } + ) + + if (!response.ok) { + throw new Error(`Failed to delete video: ${response.statusText}`) + } + } + + async getStorageInfo(): Promise { + // Cloudflare doesn't provide easy usage stats via API + // Would need to list all videos and calculate, or use Analytics API + return { + minutesStored: 0, + minutesDelivered: 0, + storageLimitMinutes: null, + deliveryLimitMinutes: null, + withinLimits: true, + } + } + + isEnabled(): boolean { + return true + } + + private getPlaybackUrl(videoId: string): string { + return `https://customer-${this.accountId}.cloudflarestream.com/${videoId}/manifest/video.m3u8` + } + + private mapStatus(cfStatus?: string): 'uploading' | 'processing' | 'ready' | 'error' { + switch (cfStatus) { + case 'queued': + case 'inprogress': + return 'processing' + case 'ready': + return 'ready' + case 'error': + return 'error' + default: + return 
'uploading' + } + } + + private getProgress(cfStatus?: string): number { + switch (cfStatus) { + case 'queued': + return 25 + case 'inprogress': + return 50 + case 'ready': + return 100 + case 'error': + return 0 + default: + return 0 + } + } +} diff --git a/lib/providers/STORAGE.md b/lib/providers/STORAGE.md new file mode 100644 index 0000000..30442b5 --- /dev/null +++ b/lib/providers/STORAGE.md @@ -0,0 +1,188 @@ +# Storage Providers + +Architecture for media (images/files) and video storage. + +## Overview + +Two separate provider systems: + +1. **Media Storage** - Images, PDFs, documents, etc. +2. **Video Storage** - Video files with transcoding and streaming + +They're separate because video has fundamentally different requirements (transcoding, adaptive streaming, thumbnails). + +## Media Storage (Images & Files) + +### Providers + +**Supabase Storage** (OSS Default) +- Uses existing Supabase instance +- FREE tier: 1GB storage, 2GB bandwidth/month +- Good for: OSS users, small-medium sites +- Setup: Create 'media' bucket in Supabase dashboard + +**Cloudflare R2** (Hosted) +- $0.015/GB storage, FREE egress +- Best for: High-traffic hosted deployments +- Setup: Requires @aws-sdk/client-s3, Cloudflare account + +### Usage (Not Yet Implemented) + +```typescript +import { getMediaStorageProvider } from '@/lib/providers' + +const storage = getMediaStorageProvider() + +// Upload image +const result = await storage.uploadImage(file, { + path: 'avatars', + public: true, +}) +console.log(result.url) // Public URL + +// Upload file +const result = await storage.uploadFile(pdfFile, { + path: 'documents', +}) + +// Delete +await storage.delete(result.url) + +// Get usage +const info = await storage.getStorageInfo() +console.log(`${info.usedGb} GB used of ${info.limitGb} GB`) +``` + +## Video Storage + +### Providers + +**Disabled** (Default) +- Video support is opt-in +- Most CMSes don't need video + +**Cloudflare Stream** (Hosted) +- $5 per 1,000 minutes stored +- $1 per 
1,000 minutes delivered +- Includes: Transcoding, thumbnails, adaptive streaming (HLS) +- Good for: Small-medium video usage + +**Bunny.net Stream** (Hosted - Cheapest!) +- $0.005/GB stored (~$0.15/hour of video) +- $0.01/GB delivered +- Same features as Cloudflare Stream +- Good for: Cost-conscious deployments, high traffic + +### Usage (Not Yet Implemented) + +```typescript +import { getVideoStorageProvider } from '@/lib/providers' + +const video = getVideoStorageProvider() + +// Upload video +const result = await video.uploadVideo(videoFile, { + title: 'My Video', + public: true, +}) +console.log(result.id) // Video ID +console.log(result.status) // 'processing' + +// Check status +const info = await video.getVideoInfo(result.id) +if (info.status === 'ready') { + console.log(info.playbackUrl) // HLS URL for player + console.log(info.thumbnailUrl) // Poster image +} + +// Delete +await video.delete(result.id) +``` + +## Implementation Status + +✅ **Created:** +- Provider interfaces and types +- Supabase Storage media provider +- R2 media provider (stub) +- Cloudflare Stream video provider +- Bunny.net Stream video provider +- Disabled video provider (default) + +❌ **Not Yet Implemented:** +- Integration with existing upload code +- Provider registry initialization +- Frontend UI for provider selection +- Migration from current Supabase Storage usage + +## Next Steps + +To integrate these providers: + +1. **Add to provider registry** (`lib/providers/index.ts`) + - Initialize media storage provider based on env vars + - Initialize video storage provider based on env vars + +2. **Update existing upload logic** to use providers + - Currently uses Supabase Storage directly + - Replace with `getMediaStorageProvider().uploadImage()` + +3. **Add video upload UI** (optional) + - New field type: "Video" + - Upload component with progress tracking + - Video player for previewing + +4. 
**Add usage tracking** (hosted) + - Track storage bytes per project + - Track video minutes per project + - Enforce limits based on plan + +## Cost Comparison + +### 100 hours of video storage + 10,000 views + +**Cloudflare Stream:** +- Storage: 100 hours = 6,000 min × $5/1,000 min = $30 +- Delivery: 10,000 views × ~100 min watched = 1,000,000 min × $1/1,000 min = $1,000 +- **Total: $1,030/month** + +**Bunny.net Stream:** +- Storage: 100 hours × 3GB/hour = 300GB × $0.005 = $1.50 +- Delivery: 1,000,000 min × 3GB/60min = 50TB × $0.01/GB = $500 +- **Total: $501.50/month** + +(Delivery dominates at this scale - Bunny is cheaper on both storage and delivery) + +### 10 hours of video storage + 1,000 views + +**Cloudflare Stream:** +- Storage: 600 min × $5/1,000 min = $3 +- Delivery: 1,000 views × ~10 min watched = 10,000 min × $1/1,000 min = $10 +- **Total: $13/month** + +**Bunny.net Stream:** +- Storage: $0.15 +- Delivery: 10,000 min × 3GB/60min = 500GB × $0.01/GB = $5 +- **Total: $5.15/month** + +For your $10/mo hosted plan (on Bunny pricing), you could support: +- ~10 hours of video per user +- ~1,000 views per month +- More if you impose limits or charge extra for video + +## Recommendations + +**For Now:** +- Keep Supabase Storage for images/files (already works) +- Add provider pattern but don't switch yet +- Disable video by default + +**For Hosted (When Ready):** +- Media: Switch to R2 (cheaper egress) +- Video: Start with Cloudflare Stream (simpler) +- Consider Bunny if costs grow + +**For Future:** +- Add video limits (e.g., 5 videos per user, 100 views/month) +- Charge extra for more video usage +- Or make video a premium feature only diff --git a/lib/providers/media/supabase-storage.ts b/lib/providers/media/supabase-storage.ts new file mode 100644 index 0000000..1840775 --- /dev/null +++ b/lib/providers/media/supabase-storage.ts @@ -0,0 +1,102 @@ +import type { MediaStorageProvider, UploadResult, UploadOptions, StorageInfo } from './types' +import { createClient } from '@/lib/supabase/server' + +/** + * Supabase Storage Media Provider (OSS Default) + * + * Uses Supabase Storage for images and files.
+ * Free tier: 1GB storage, 2GB bandwidth/month + * Good for OSS users who already have Supabase. + */ +export class SupabaseStorageMediaProvider implements MediaStorageProvider { + private bucketName: string + + constructor(bucketName: string = 'media') { + this.bucketName = bucketName + } + + async uploadImage(file: File, options?: UploadOptions): Promise { + return this.upload(file, options) + } + + async uploadFile(file: File, options?: UploadOptions): Promise { + return this.upload(file, options) + } + + private async upload(file: File, options?: UploadOptions): Promise { + const supabase = await createClient() + + // Generate filename + const timestamp = Date.now() + const random = Math.random().toString(36).substring(7) + const ext = file.name.split('.').pop() + const filename = options?.filename || `${timestamp}-${random}.${ext}` + + // Build path + const path = options?.path ? `${options.path}/${filename}` : filename + + // Upload + const { data, error } = await supabase.storage + .from(this.bucketName) + .upload(path, file, { + contentType: options?.contentType || file.type, + upsert: false, + }) + + if (error) { + throw new Error(`Upload failed: ${error.message}`) + } + + // Get public URL + const { data: urlData } = supabase.storage + .from(this.bucketName) + .getPublicUrl(data.path) + + return { + url: urlData.publicUrl, + size: file.size, + contentType: file.type, + id: data.path, + cdnUrl: urlData.publicUrl, // Supabase CDN URL + } + } + + async delete(urlOrId: string): Promise { + const supabase = await createClient() + + // Extract path from URL if it's a full URL + let path = urlOrId + if (urlOrId.startsWith('http')) { + const url = new URL(urlOrId) + const pathMatch = url.pathname.match(/\/storage\/v1\/object\/public\/[^/]+\/(.+)/) + if (pathMatch) { + path = pathMatch[1] + } + } + + const { error } = await supabase.storage + .from(this.bucketName) + .remove([path]) + + if (error) { + throw new Error(`Delete failed: ${error.message}`) + } + } + +
async getStorageInfo(): Promise { + // Note: Supabase doesn't provide easy way to get bucket usage + // Would need to list all files and sum sizes, or use custom function + // For now, return placeholder + return { + usedBytes: 0, + limitBytes: 1024 * 1024 * 1024, // 1GB free tier + withinLimits: true, + usedGb: 0, + limitGb: 1, + } + } + + isEnabled(): boolean { + return true + } +} diff --git a/lib/providers/media/types.ts b/lib/providers/media/types.ts new file mode 100644 index 0000000..f650fd4 --- /dev/null +++ b/lib/providers/media/types.ts @@ -0,0 +1,83 @@ +/** + * Media storage provider types + * + * For images, files, and other static media (not video). + * Video has different requirements (transcoding, streaming) and uses VideoStorageProvider. + */ + +export interface UploadResult { + /** Public URL to access the uploaded file */ + url: string + + /** File size in bytes */ + size: number + + /** MIME type */ + contentType: string + + /** Optional: Storage provider's internal ID */ + id?: string + + /** Optional: CDN URL if different from storage URL */ + cdnUrl?: string +} + +export interface MediaStorageProvider { + /** + * Upload an image + * May perform optimization (compression, format conversion) + */ + uploadImage(file: File, options?: UploadOptions): Promise + + /** + * Upload a file (PDF, document, etc.) + */ + uploadFile(file: File, options?: UploadOptions): Promise + + /** + * Delete a file by URL or ID + */ + delete(urlOrId: string): Promise + + /** + * Get storage info (used bytes, limits, etc.) 
+ */ + getStorageInfo(): Promise + + /** + * Check if provider is enabled + */ + isEnabled(): boolean +} + +export interface UploadOptions { + /** Directory/path prefix (e.g., "avatars", "posts/123") */ + path?: string + + /** Whether file is publicly accessible */ + public?: boolean + + /** Custom filename (otherwise generates one) */ + filename?: string + + /** Content type override */ + contentType?: string + + /** Max file size in bytes (provider may have lower limit) */ + maxSize?: number +} + +export interface StorageInfo { + /** Bytes used */ + usedBytes: number + + /** Storage limit in bytes (null = unlimited) */ + limitBytes: number | null + + /** Within limits? */ + withinLimits: boolean + + /** Human-readable values */ + usedGb: number + limitGb: number | null +} diff --git a/lib/providers/video/disabled.ts b/lib/providers/video/disabled.ts new file mode 100644 index 0000000..78e603f --- /dev/null +++ b/lib/providers/video/disabled.ts @@ -0,0 +1,35 @@ +import type { VideoStorageProvider, VideoUploadResult, VideoInfo, VideoUploadOptions, VideoStorageInfo } from './types' + +/** + * Disabled Video Provider (Default) + * + * Video storage is disabled by default. + * Enable by configuring a video provider (Cloudflare Stream, Bunny, Mux). + */ +export class DisabledVideoProvider implements VideoStorageProvider { + async uploadVideo(_file: File, _options?: VideoUploadOptions): Promise { + throw new Error('Video storage is not enabled. 
Configure VIDEO_PROVIDER environment variable.') + } + + async getVideoInfo(_videoId: string): Promise { + throw new Error('Video storage is not enabled.') + } + + async delete(_videoId: string): Promise { + throw new Error('Video storage is not enabled.') + } + + async getStorageInfo(): Promise { + return { + minutesStored: 0, + minutesDelivered: 0, + storageLimitMinutes: null, + deliveryLimitMinutes: null, + withinLimits: true, + } + } + + isEnabled(): boolean { + return false + } +} diff --git a/lib/providers/video/types.ts b/lib/providers/video/types.ts new file mode 100644 index 0000000..b27216a --- /dev/null +++ b/lib/providers/video/types.ts @@ -0,0 +1,109 @@ +/** + * Video storage provider types + * + * For video content that requires transcoding, adaptive streaming, and thumbnails. + * Separate from MediaStorageProvider because video has different requirements. + */ + +export type VideoStatus = 'uploading' | 'processing' | 'ready' | 'error' + +export interface VideoUploadResult { + /** Unique video ID from provider */ + id: string + + /** Video status */ + status: VideoStatus + + /** Playback URL (HLS/DASH) - available when status is 'ready' */ + playbackUrl?: string + + /** Thumbnail/poster image URL */ + thumbnailUrl?: string + + /** Duration in seconds (available after processing) */ + duration?: number + + /** Original file size in bytes */ + size: number + + /** Progress percentage (0-100) */ + progress?: number +} + +export interface VideoInfo { + id: string + status: VideoStatus + playbackUrl?: string + thumbnailUrl?: string + duration?: number + createdAt: Date + + /** Optional: Different quality variants */ + qualities?: Array<{ + quality: string // '1080p', '720p', '480p', etc. 
+ url: string + }> +} + +export interface VideoStorageProvider { + /** + * Upload a video file + * Returns immediately with 'uploading' or 'processing' status + * Use getVideoInfo() to poll for completion + */ + uploadVideo(file: File, options?: VideoUploadOptions): Promise + + /** + * Get video info and status + */ + getVideoInfo(videoId: string): Promise + + /** + * Delete a video + */ + delete(videoId: string): Promise + + /** + * Get storage/usage info + */ + getStorageInfo(): Promise + + /** + * Check if provider is enabled + */ + isEnabled(): boolean +} + +export interface VideoUploadOptions { + /** Video title/name */ + title?: string + + /** Whether video is publicly accessible */ + public?: boolean + + /** Custom thumbnail (otherwise auto-generated) */ + thumbnailTime?: number // Seconds into video for thumbnail + + /** Max file size in bytes */ + maxSize?: number + + /** Webhook URL for processing completion */ + webhookUrl?: string +} + +export interface VideoStorageInfo { + /** Total minutes of video stored */ + minutesStored: number + + /** Total minutes delivered (watched) this month */ + minutesDelivered: number + + /** Storage limit in minutes (null = unlimited) */ + storageLimitMinutes: number | null + + /** Delivery limit in minutes (null = unlimited) */ + deliveryLimitMinutes: number | null + + /** Within limits? */ + withinLimits: boolean +}