diff --git a/package.json b/package.json index af748f9..0da9346 100644 --- a/package.json +++ b/package.json @@ -11,7 +11,10 @@ "express": "^4.18.2", "cors": "^2.8.5", "dotenv": "^16.3.1", - "openai": "^4.24.1" + "openai": "^4.24.1", + "@anthropic-ai/sdk": "^0.14.1", + "cohere-ai": "^5.0.0", + "langchain": "^0.1.0" }, "devDependencies": { "nodemon": "^3.0.2" diff --git a/server.js b/server.js index f78964f..30ab34c 100644 --- a/server.js +++ b/server.js @@ -1,51 +1,57 @@ require('dotenv').config() const express = require('express') const cors = require('cors') -const OpenAI = require('openai') +const path = require('path') +const providers = require('./src/llms/providers') const app = express() const port = process.env.PORT || 3000 -// Initialize OpenAI client -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY -}) - // Middleware app.use(cors()) app.use(express.json()) app.use(express.static('public')) +// Get available providers and models +app.get('/api/providers', (req, res) => { + const providerInfo = Object.entries(providers).map(([id, provider]) => ({ + id, + name: provider.name, + models: provider.models + })) + res.json(providerInfo) +}) + // Generate article endpoint app.post('/api/generate-article', async (req, res) => { try { - const { topic } = req.body + const { topic, provider: providerId, model } = req.body if (!topic) { return res.status(400).json({ error: 'Topic is required' }) } - const completion = await openai.chat.completions.create({ - model: "gpt-3.5-turbo", - messages: [ - { - role: "system", - content: "You are a helpful assistant that generates short, informative articles." - }, - { - role: "user", - content: `Write a short, informative article about ${topic}. 
The article should be between 200-300 words.` - } - ], - temperature: 0.7, - max_tokens: 500 - }) - - const article = completion.choices[0].message.content - res.json({ article }) + if (!providerId || !providers[providerId]) { + return res.status(400).json({ error: 'Invalid provider' }) + } + + if (!model || !providers[providerId].models.includes(model)) { + return res.status(400).json({ error: 'Invalid model for provider' }) + } + + const provider = providers[providerId] + const prompt = `Write a short, informative article about ${topic}. The article should be between 200-300 words.` + + try { + const article = await provider.generate(provider.client, model, prompt) + res.json({ article, provider: provider.name, model }) + } catch (error) { + console.error(`Error with ${provider.name}:`, error) + res.status(500).json({ error: `Failed to generate article using ${provider.name}` }) + } } catch (error) { console.error('Error:', error) - res.status(500).json({ error: 'Failed to generate article' }) + res.status(500).json({ error: 'Failed to process request' }) } }) diff --git a/src/llms/providers.js b/src/llms/providers.js new file mode 100644 index 0000000..a1bccc9 --- /dev/null +++ b/src/llms/providers.js @@ -0,0 +1,68 @@ +const OpenAI = require('openai') +const Anthropic = require('@anthropic-ai/sdk') +const cohere = require('cohere-ai') + +const providers = { + openai: { + name: 'OpenAI', + client: new OpenAI({ + apiKey: process.env.OPENAI_API_KEY + }), + models: ['gpt-3.5-turbo', 'gpt-4'], + generate: async (client, model, prompt) => { + const completion = await client.chat.completions.create({ + model, + messages: [ + { + role: "system", + content: "You are a helpful assistant that generates short, informative articles." 
+ }, + { + role: "user", + content: prompt + } + ], + temperature: 0.7, + max_tokens: 500 + }) + return completion.choices[0].message.content + } + }, + anthropic: { + name: 'Anthropic', + client: new Anthropic({ + apiKey: process.env.ANTHROPIC_API_KEY + }), + models: ['claude-2', 'claude-instant-1'], + generate: async (client, model, prompt) => { + const completion = await client.messages.create({ + model, + max_tokens: 500, + messages: [ + { + role: "user", + content: prompt + } + ] + }) + return completion.content[0].text + } + }, + cohere: { + name: 'Cohere', + client: cohere, + models: ['command', 'command-light'], + generate: async (client, model, prompt) => { + client.init(process.env.COHERE_API_KEY) + const response = await client.generate({ + model, + prompt, + max_tokens: 500, + temperature: 0.7 + }) + return response.body.generations[0].text + } + } +} + +module.exports = providers