5 changes: 4 additions & 1 deletion package.json
@@ -11,7 +11,10 @@
"express": "^4.18.2",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"openai": "^4.24.1"
"openai": "^4.24.1",
"@anthropic-ai/sdk": "^0.8.1",
"cohere-ai": "^5.0.0",
"langchain": "^0.1.0"
},
"devDependencies": {
"nodemon": "^3.0.2"
58 changes: 32 additions & 26 deletions server.js
@@ -1,51 +1,57 @@
 require('dotenv').config()
 const express = require('express')
 const cors = require('cors')
-const OpenAI = require('openai')
 const path = require('path')
+const providers = require('./src/llm/providers')
logic: Path './src/llm/providers' doesn't match actual file location './src/llms/providers'
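A matching suggestion, assuming the module stays at the src/llms/providers.js location this PR adds:

Suggested change
-const providers = require('./src/llm/providers')
+const providers = require('./src/llms/providers')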


 const app = express()
 const port = process.env.PORT || 3000
 
-// Initialize OpenAI client
-const openai = new OpenAI({
-  apiKey: process.env.OPENAI_API_KEY
-})
-
 // Middleware
 app.use(cors())
 app.use(express.json())
 app.use(express.static('public'))
 
+// Get available providers and models
+app.get('/api/providers', (req, res) => {
+  const providerInfo = Object.entries(providers).map(([id, provider]) => ({
+    id,
+    name: provider.name,
+    models: provider.models
+  }))
+  res.json(providerInfo)
+})
+
 // Generate article endpoint
 app.post('/api/generate-article', async (req, res) => {
   try {
-    const { topic } = req.body
+    const { topic, provider: providerId, model } = req.body
 
     if (!topic) {
       return res.status(400).json({ error: 'Topic is required' })
     }
 
-    const completion = await openai.chat.completions.create({
-      model: "gpt-3.5-turbo",
-      messages: [
-        {
-          role: "system",
-          content: "You are a helpful assistant that generates short, informative articles."
-        },
-        {
-          role: "user",
-          content: `Write a short, informative article about ${topic}. The article should be between 200-300 words.`
-        }
-      ],
-      temperature: 0.7,
-      max_tokens: 500
-    })
-
-    const article = completion.choices[0].message.content
-    res.json({ article })
+    if (!providerId || !providers[providerId]) {
+      return res.status(400).json({ error: 'Invalid provider' })
+    }
+
+    if (!model || !providers[providerId].models.includes(model)) {
+      return res.status(400).json({ error: 'Invalid model for provider' })
+    }
+
+    const provider = providers[providerId]
+    const prompt = `Write a short, informative article about ${topic}. The article should be between 200-300 words.`
+
+    try {
+      const article = await provider.generate(provider.client, model, prompt)
+      res.json({ article, provider: provider.name, model })
+    } catch (error) {
+      console.error(`Error with ${provider.name}:`, error)
+      res.status(500).json({ error: `Failed to generate article using ${provider.name}` })
+    }
   } catch (error) {
     console.error('Error:', error)
-    res.status(500).json({ error: 'Failed to generate article' })
+    res.status(500).json({ error: 'Failed to process request' })
   }
 })

68 changes: 68 additions & 0 deletions src/llms/providers.js
@@ -0,0 +1,68 @@
+const OpenAI = require('openai')
+const Anthropic = require('@anthropic-ai/sdk')
+const cohere = require('cohere-ai')
+
+const providers = {
+  openai: {
+    name: 'OpenAI',
+    client: new OpenAI({
+      apiKey: process.env.OPENAI_API_KEY
+    }),
+    models: ['gpt-3.5-turbo', 'gpt-4'],
+    generate: async (client, model, prompt) => {
+      const completion = await client.chat.completions.create({
+        model,
+        messages: [
+          {
+            role: "system",
+            content: "You are a helpful assistant that generates short, informative articles."
+          },
+          {
+            role: "user",
+            content: prompt
+          }
+        ],
+        temperature: 0.7,
+        max_tokens: 500
+      })
+      return completion.choices[0].message.content
+    }
+  },
+  anthropic: {
+    name: 'Anthropic',
+    client: new Anthropic({
+      apiKey: process.env.ANTHROPIC_API_KEY
+    }),
+    models: ['claude-2', 'claude-instant-1'],
+    generate: async (client, model, prompt) => {
+      const completion = await client.messages.create({
+        model,
+        max_tokens: 500,
+        messages: [
+          {
+            role: "user",
+            content: prompt
+          }
+        ]
+      })
Comment on lines +38 to +47
style: For behavior consistent with the other providers, this call should also pass the shared system prompt (Anthropic's Messages API takes it as a top-level 'system' parameter rather than a message role) and set a temperature.
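A sketch of one way to do that; the top-level 'system' parameter and the 0.7 temperature mirror the OpenAI branch and are assumptions about the intended shared behavior, not part of this diff:

generate: async (client, model, prompt) => {
  const completion = await client.messages.create({
    model,
    max_tokens: 500,
    temperature: 0.7, // align sampling with the OpenAI provider
    system: "You are a helpful assistant that generates short, informative articles.",
    messages: [
      { role: "user", content: prompt }
    ]
  })
  return completion.content[0].text // content is an array of blocks
}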

+      return completion.content[0].text
logic: completion.content in Anthropic's Messages API is an array of content blocks, so completion.content[0].text is the correct accessor here; plain completion.content would return the array rather than the article text.
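If extra robustness is wanted here, a purely illustrative sketch that concatenates every text block instead of assuming exactly one:

const article = completion.content
  .filter((block) => block.type === 'text') // keep only text blocks
  .map((block) => block.text)
  .join('')
return article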

+    }
+  },
+  cohere: {
+    name: 'Cohere',
+    client: cohere,
+    models: ['command', 'command-light'],
+    generate: async (client, model, prompt) => {
+      client.init(process.env.COHERE_API_KEY)
style: Cohere client initialization should be moved outside the generate function to avoid reinitializing on every call

Suggested change
-      client.init(process.env.COHERE_API_KEY)
+      // Client should already be initialized
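The corresponding one-time setup at module load, sketched against the cohere-ai v5 init call the diff already uses:

const cohere = require('cohere-ai')
cohere.init(process.env.COHERE_API_KEY) // initialize once when the module is required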

+      const response = await client.generate({
+        model,
+        prompt,
+        max_tokens: 500,
+        temperature: 0.7
+      })
Comment on lines +57 to +62
logic: Cohere's generate API requires a 'num_generations' parameter; otherwise it may return empty results.

Suggested change
-      const response = await client.generate({
-        model,
-        prompt,
-        max_tokens: 500,
-        temperature: 0.7
-      })
+      const response = await client.generate({
+        model,
+        prompt,
+        max_tokens: 500,
+        temperature: 0.7,
+        num_generations: 1
+      })

+      return response.body.generations[0].text
+    }
+  }
+}
+
+module.exports = providers