Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions packages/app/server/src/env.ts
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ export const env = createEnv({
GROQ_API_KEY: z.string().optional(),
XAI_API_KEY: z.string().optional(),
OPENROUTER_API_KEY: z.string().optional(),
VERCEL_API_KEY: z.string().optional(),
TAVILY_API_KEY: z.string().optional(),
E2B_API_KEY: z.string().optional(),
GOOGLE_SERVICE_ACCOUNT_KEY_ENCODED: z.string().optional(),
Expand Down
6 changes: 6 additions & 0 deletions packages/app/server/src/providers/ProviderFactory.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import { OpenAIImageProvider } from './OpenAIImageProvider';
import { OpenAIResponsesProvider } from './OpenAIResponsesProvider';
import { OpenRouterProvider } from './OpenRouterProvider';
import { ProviderType } from './ProviderType';
import { VercelAIGatewayProvider } from './VercelAIGatewayProvider';
import { XAIProvider } from './XAIProvider';
import {
VertexAIProvider,
Expand Down Expand Up @@ -58,6 +59,9 @@ const createChatModelToProviderMapping = (): Record<string, ProviderType> => {
case 'Xai':
mapping[modelConfig.model_id] = ProviderType.XAI;
break;
case 'Vercel':
mapping[modelConfig.model_id] = ProviderType.VERCEL_AI_GATEWAY;
break;
// Add other providers as needed
default:
// Skip models with unsupported providers
Expand Down Expand Up @@ -192,6 +196,8 @@ export const getProvider = (
return new GroqProvider(stream, model);
case ProviderType.XAI:
return new XAIProvider(stream, model);
case ProviderType.VERCEL_AI_GATEWAY:
return new VercelAIGatewayProvider(stream, model);
default:
throw new Error(`Unknown provider type: ${type}`);
}
Expand Down
1 change: 1 addition & 0 deletions packages/app/server/src/providers/ProviderType.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,4 +12,5 @@ export enum ProviderType {
OPENAI_VIDEOS = 'OPENAI_VIDEOS',
GROQ = 'GROQ',
XAI = 'XAI',
VERCEL_AI_GATEWAY = 'VERCEL_AI_GATEWAY',
}
82 changes: 82 additions & 0 deletions packages/app/server/src/providers/VercelAIGatewayProvider.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
import { LlmTransactionMetadata, Transaction } from '../types';
import { getCostPerToken } from '../services/AccountingService';
import { BaseProvider } from './BaseProvider';
import { ProviderType } from './ProviderType';
import { CompletionStateBody, parseSSEGPTFormat } from './GPTProvider';
import logger from '../logger';
import { env } from '../env';

export class VercelAIGatewayProvider extends BaseProvider {
  // OpenAI-compatible base endpoint for the Vercel AI Gateway.
  private readonly VERCEL_AI_GATEWAY_BASE_URL =
    'https://ai-gateway.vercel.sh/v1';

  getType(): ProviderType {
    return ProviderType.VERCEL_AI_GATEWAY;
  }

  getBaseUrl(): string {
    return this.VERCEL_AI_GATEWAY_BASE_URL;
  }

  getApiKey(): string | undefined {
    return env.VERCEL_API_KEY;
  }

  override supportsStream(): boolean {
    return true;
  }

  /**
   * Convert a finished response body into a billing Transaction:
   * accumulate token usage, price it via the accounting service, and
   * attach provider metadata.
   *
   * @param data Raw response body — full SSE text when streaming,
   *             otherwise a single JSON completion object.
   * @returns Transaction carrying the computed cost and usage metadata.
   * @throws Re-throws any parse error after logging it.
   */
  async handleBody(data: string): Promise<Transaction> {
    try {
      let prompt_tokens = 0;
      let completion_tokens = 0;
      let total_tokens = 0;
      let providerId = 'null';

      if (this.getIsStream()) {
        const chunks = parseSSEGPTFormat(data);

        for (const chunk of chunks) {
          // `!= null` guards both null AND undefined — presumably only the
          // final SSE chunk carries usage; a strict `!== null` check would
          // throw if a chunk's usage field were undefined.
          if (chunk.usage != null) {
            prompt_tokens += chunk.usage.prompt_tokens;
            completion_tokens += chunk.usage.completion_tokens;
            total_tokens += chunk.usage.total_tokens;
          }
          // Keep the last non-empty id; don't clobber a known id with
          // 'null' if a trailing chunk omits it.
          if (chunk.id) {
            providerId = chunk.id;
          }
        }
      } else {
        const parsed = JSON.parse(data) as CompletionStateBody;
        prompt_tokens += parsed.usage.prompt_tokens;
        completion_tokens += parsed.usage.completion_tokens;
        total_tokens += parsed.usage.total_tokens;
        providerId = parsed.id || 'null';
      }

      const cost = getCostPerToken(
        this.getModel(),
        prompt_tokens,
        completion_tokens
      );

      const metadata: LlmTransactionMetadata = {
        providerId: providerId,
        provider: this.getType(),
        model: this.getModel(),
        inputTokens: prompt_tokens,
        outputTokens: completion_tokens,
        totalTokens: total_tokens,
      };

      const transaction: Transaction = {
        rawTransactionCost: cost,
        metadata: metadata,
        status: 'success',
      };

      return transaction;
    } catch (error) {
      logger.error(`Error processing data: ${error}`);
      throw error;
    }
  }
}
2 changes: 2 additions & 0 deletions packages/app/server/src/services/AccountingService.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import {
SupportedImageModel,
SupportedVideoModel,
XAIModels,
VercelModels,
} from '@merit-systems/echo-typescript-sdk';

import { Decimal } from '@prisma/client/runtime/library';
Expand All @@ -30,6 +31,7 @@ export const ALL_SUPPORTED_MODELS: SupportedModel[] = [
...OpenRouterModels,
...GroqModels,
...XAIModels,
...VercelModels,
];

// Handle image models separately since they have different pricing structure
Expand Down
5 changes: 3 additions & 2 deletions packages/sdk/ts/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@
"update-models:gemini": "tsx scripts/update-gemini-models.ts",
"update-models:openrouter": "tsx scripts/update-openrouter-models.ts",
"update-models:groq": "tsx scripts/update-groq-models.ts",
"update-all-models": "pnpm run update-models:openai && pnpm run update-models:anthropic && pnpm run update-models:gemini && pnpm run update-models:openrouter && pnpm run update-models:groq",
"update-models:vercel": "tsx scripts/update-vercel-models.ts",
"update-all-models": "pnpm run update-models:openai && pnpm run update-models:anthropic && pnpm run update-models:gemini && pnpm run update-models:openrouter && pnpm run update-models:groq && pnpm run update-models:vercel",
"prepublishOnly": "pnpm run build"
},
"keywords": [
Expand All @@ -41,7 +42,6 @@
"author": "Merit Systems",
"license": "MIT",
"devDependencies": {
"@ai-sdk/gateway": "^1.0.12",
"@types/node": "^24.3.1",
"@typescript-eslint/eslint-plugin": "^8.34.1",
"@typescript-eslint/parser": "^8.34.1",
Expand All @@ -58,6 +58,7 @@
],
"dependencies": {
"@ai-sdk/anthropic": "2.0.17",
"@ai-sdk/gateway": "^1.0.12",
"@ai-sdk/google": "2.0.14",
"@ai-sdk/groq": "2.0.17",
"@ai-sdk/openai": "2.0.32",
Expand Down
171 changes: 171 additions & 0 deletions packages/sdk/ts/scripts/update-vercel-models.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,171 @@
#!/usr/bin/env node

// -> Fetch all available models from the Vercel AI Gateway pricing API
// Uses the @ai-sdk/gateway package to get model pricing
// Write to a static file in the src/supported-models/chat/vercel.ts file

import { gateway } from '@ai-sdk/gateway';
import { writeFileSync } from 'fs';
import { join } from 'path';
import { config } from 'dotenv';
import type { SupportedModel } from './update-models';

config();

/**
 * Fetch the model catalog from the Vercel AI Gateway and convert every
 * priced language model into the SDK's SupportedModel shape.
 *
 * @returns Language models with valid, non-zero per-token pricing.
 * @throws Re-throws any gateway/API failure after logging it.
 */
async function fetchVercelGatewayModels(): Promise<SupportedModel[]> {
  try {
    console.log('Fetching models from Vercel AI Gateway...');

    const catalog = await gateway.getAvailableModels();

    console.log(
      `Found ${catalog.models.length} total models from Vercel AI Gateway`
    );

    const results: SupportedModel[] = [];

    for (const entry of catalog.models) {
      // Language models only — embeddings/image/etc. are not supported here.
      if (entry.modelType !== 'language') {
        continue;
      }

      const rawInput = entry.pricing?.input;
      const rawOutput = entry.pricing?.output;

      // Both sides of the pricing must be present to bill correctly.
      if (rawInput == null || rawOutput == null) {
        console.warn(`Skipping ${entry.id} - missing pricing data`);
        continue;
      }

      const inputCost = Number(rawInput);
      const outputCost = Number(rawOutput);

      // Zero or non-numeric prices cannot be billed — drop the model.
      if (
        isNaN(inputCost) ||
        isNaN(outputCost) ||
        inputCost === 0 ||
        outputCost === 0
      ) {
        console.warn(`Skipping ${entry.id} - invalid pricing data`);
        continue;
      }

      // Use the gateway model ID with @ prefix for the provider portion
      // e.g., "openai/gpt-4o" becomes "@openai/gpt-4o"
      const modelId = `@${entry.id}`;

      results.push({
        model_id: modelId,
        input_cost_per_token: inputCost,
        output_cost_per_token: outputCost,
        provider: 'Vercel',
      });

      console.log(
        `  ${modelId} - Input: $${inputCost}/token, Output: $${outputCost}/token`
      );
    }

    console.log(
      `\nProcessed ${results.length} compatible language models`
    );
    return results;
  } catch (error) {
    console.error(
      'Error fetching models from Vercel AI Gateway:',
      error
    );
    throw error;
  }
}

/**
 * Render the generated `vercel.ts` source: a union type of all model ids
 * plus a `VercelModels` constant carrying per-token pricing.
 *
 * Sorting is performed on a copy so the caller's array is not mutated
 * (Array.prototype.sort sorts in place).
 *
 * @param models Models fetched from the gateway, in any order.
 * @returns Complete TypeScript source text for src/supported-models/chat/vercel.ts.
 */
function generateVercelModelFile(models: SupportedModel[]): string {
  // Copy before sorting — .sort() would otherwise mutate the input.
  const sortedModels = [...models].sort((a, b) =>
    a.model_id.localeCompare(b.model_id)
  );

  // Generate union type
  const unionType = sortedModels
    .map(model => `  | "${model.model_id}"`)
    .join('\n');

  // Generate model objects
  const modelObjects = sortedModels
    .map(model => {
      return `  {
    model_id: "${model.model_id}",
    input_cost_per_token: ${model.input_cost_per_token},
    output_cost_per_token: ${model.output_cost_per_token},
    provider: "${model.provider}",
  }`;
    })
    .join(',\n');

  return `import { SupportedModel } from "../types";

// Union type of all valid Vercel AI Gateway model IDs
// Models are prefixed with their upstream provider as used by the Vercel AI Gateway
// Pricing sourced from: https://vercel.com/ai-gateway via @ai-sdk/gateway API
export type VercelModel =
${unionType};

export const VercelModels: SupportedModel[] = [
${modelObjects}
];

`;
}

/**
 * End-to-end update: fetch the gateway catalog, render the vercel.ts
 * source file, write it to disk, and print a short summary.
 * Exits the process with code 1 on any failure.
 */
async function updateVercelModels() {
  try {
    console.log('Starting Vercel AI Gateway model update process...\n');

    // Pull the current catalog + pricing from the gateway API.
    const models = await fetchVercelGatewayModels();

    if (models.length === 0) {
      console.log('No compatible models found');
      return;
    }

    // Render and persist the generated TypeScript source.
    const outputPath = join(
      process.cwd(),
      'src/supported-models/chat/vercel.ts'
    );
    writeFileSync(outputPath, generateVercelModelFile(models), 'utf8');

    console.log(
      `\nSuccessfully updated vercel.ts with ${models.length} models`
    );
    console.log(`Models included:`);

    // Echo the first few entries so the run is easy to eyeball.
    for (const model of models.slice(0, 10)) {
      console.log(`  - ${model.model_id}`);
    }

    if (models.length > 10) {
      console.log(`  ... and ${models.length - 10} more models`);
    }
  } catch (error) {
    console.error('Error updating Vercel models:', error);
    process.exit(1);
  }
}

// Entry point: run the update and surface any unhandled failure as a
// non-zero exit so CI notices.
void (async () => {
  try {
    await updateVercelModels();
  } catch (error) {
    console.error('Unexpected error:', error);
    process.exit(1);
  }
})();
3 changes: 3 additions & 0 deletions packages/sdk/ts/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ export * from './utils/error-handling';
export * from './utils/validation';
export * from './providers';
export { createEchoXAI } from './providers/xai';
export { createEchoVercelAIGateway } from './providers/vercel';

// Export tool-related types and utilities
export type {
Expand Down Expand Up @@ -49,6 +50,8 @@ export { GroqModels } from './supported-models/chat/groq';
export type { GroqModel } from './supported-models/chat/groq';
export { XAIModels } from './supported-models/chat/xai';
export type { XAIModel } from './supported-models/chat/xai';
export { VercelModels } from './supported-models/chat/vercel';
export type { VercelModel } from './supported-models/chat/vercel';
export { OpenAIImageModels } from './supported-models/image/openai';
export type { OpenAIImageModel } from './supported-models/image/openai';
export { GeminiVideoModels } from './supported-models/video/gemini';
Expand Down
2 changes: 2 additions & 0 deletions packages/sdk/ts/src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ export * from './groq';
export * from './xai';
export * from './openai';
export * from './openrouter';
export * from './vercel';

export function echoFetch(
originalFetch: typeof fetch,
Expand Down Expand Up @@ -63,3 +64,4 @@ export { type GroqProvider } from '@ai-sdk/groq';
export { type OpenAIProvider } from '@ai-sdk/openai';
export { type OpenRouterProvider } from '@openrouter/ai-sdk-provider';
export { type XaiProvider } from '@ai-sdk/xai';
export { type GatewayProvider as VercelAIGatewayProvider } from '@ai-sdk/gateway';
26 changes: 26 additions & 0 deletions packages/sdk/ts/src/providers/vercel.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import {
createGateway,
type GatewayProvider,
} from '@ai-sdk/gateway';
import { ROUTER_BASE_URL } from 'config';
import { EchoConfig } from '../types';
import { validateAppId } from '../utils/validation';
import { echoFetch } from './index';

/**
 * Build a Vercel AI Gateway provider whose requests are routed through the
 * Echo router and authenticated per-request with an Echo token rather than
 * a static gateway API key.
 *
 * @param config Echo app configuration; baseRouterUrl defaults to ROUTER_BASE_URL.
 * @param getTokenFn Resolves the current Echo token for the given app id.
 * @param onInsufficientFunds Invoked when a request fails for lack of funds.
 * @returns A configured GatewayProvider.
 */
export function createEchoVercelAIGateway(
  { appId, baseRouterUrl = ROUTER_BASE_URL }: EchoConfig,
  getTokenFn: (appId: string) => Promise<string | null>,
  onInsufficientFunds?: () => void
): GatewayProvider {
  validateAppId(appId, 'createEchoVercelAIGateway');

  // The real credential is injected on every request by echoFetch; the
  // static apiKey below is a required placeholder and is never used as-is.
  const fetchWithEchoAuth = echoFetch(
    fetch,
    () => getTokenFn(appId),
    onInsufficientFunds
  );

  return createGateway({
    baseURL: baseRouterUrl,
    apiKey: 'placeholder_replaced_by_echoFetch',
    fetch: fetchWithEchoAuth,
  });
}
Loading