5 changes: 4 additions & 1 deletion package.json
@@ -11,7 +11,10 @@
"express": "^4.18.2",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"openai": "^4.24.1"
"openai": "^4.24.1",
"@anthropic-ai/sdk": "^0.8.1",
"cohere-ai": "^5.0.0",
"langchain": "^0.1.0"
logic: langchain dependency is not used anywhere in the codebase. Remove if not needed.

   },
   "devDependencies": {
     "nodemon": "^3.0.2"
58 changes: 32 additions & 26 deletions server.js
@@ -1,51 +1,57 @@
 require('dotenv').config()
 const express = require('express')
 const cors = require('cors')
-const OpenAI = require('openai')
 const path = require('path')
+const providers = require('./src/llm/providers')
logic: Path './src/llm/providers' doesn't match actual file location './src/llms/providers'
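
A minimal fix, assuming the file keeps the src/llms/ location created later in this diff:

    const providers = require('./src/llms/providers')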


 const app = express()
 const port = process.env.PORT || 3000

-// Initialize OpenAI client
-const openai = new OpenAI({
-  apiKey: process.env.OPENAI_API_KEY
-})
-
 // Middleware
 app.use(cors())
 app.use(express.json())
 app.use(express.static('public'))

+// Get available providers and models
+app.get('/api/providers', (req, res) => {
+  const providerInfo = Object.entries(providers).map(([id, provider]) => ({
+    id,
+    name: provider.name,
+    models: provider.models
+  }))
+  res.json(providerInfo)
+})

 // Generate article endpoint
 app.post('/api/generate-article', async (req, res) => {
   try {
-    const { topic } = req.body
+    const { topic, provider: providerId, model } = req.body

     if (!topic) {
       return res.status(400).json({ error: 'Topic is required' })
     }

-    const completion = await openai.chat.completions.create({
-      model: "gpt-3.5-turbo",
-      messages: [
-        {
-          role: "system",
-          content: "You are a helpful assistant that generates short, informative articles."
-        },
-        {
-          role: "user",
-          content: `Write a short, informative article about ${topic}. The article should be between 200-300 words.`
-        }
-      ],
-      temperature: 0.7,
-      max_tokens: 500
-    })
-
-    const article = completion.choices[0].message.content
-    res.json({ article })
+    if (!providerId || !providers[providerId]) {
+      return res.status(400).json({ error: 'Invalid provider' })
+    }
+
+    if (!model || !providers[providerId].models.includes(model)) {
+      return res.status(400).json({ error: 'Invalid model for provider' })
+    }
+
+    const provider = providers[providerId]
+    const prompt = `Write a short, informative article about ${topic}. The article should be between 200-300 words.`
+
+    try {
+      const article = await provider.generate(provider.client, model, prompt)
+      res.json({ article, provider: provider.name, model })
+    } catch (error) {
+      console.error(`Error with ${provider.name}:`, error)
+      res.status(500).json({ error: `Failed to generate article using ${provider.name}` })
+    }
   } catch (error) {
     console.error('Error:', error)
-    res.status(500).json({ error: 'Failed to generate article' })
+    res.status(500).json({ error: 'Failed to process request' })
   }
 })

68 changes: 68 additions & 0 deletions src/llms/providers.js
@@ -0,0 +1,68 @@
+const OpenAI = require('openai')
+const Anthropic = require('@anthropic-ai/sdk')
+const cohere = require('cohere-ai')
+
+const providers = {
+  openai: {
+    name: 'OpenAI',
+    client: new OpenAI({
+      apiKey: process.env.OPENAI_API_KEY
+    }),
+    models: ['gpt-3.5-turbo', 'gpt-4'],
+    generate: async (client, model, prompt) => {
+      const completion = await client.chat.completions.create({
+        model,
+        messages: [
+          {
+            role: "system",
+            content: "You are a helpful assistant that generates short, informative articles."
+          },
+          {
+            role: "user",
+            content: prompt
+          }
+        ],
+        temperature: 0.7,
+        max_tokens: 500
+      })
Comment on lines +13 to +27
style: Consider extracting common configuration (max_tokens, temperature) into shared constants
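
One way this could look, assuming a new shared module (the src/llms/config.js path and names below are illustrative, not part of this PR):

    // src/llms/config.js (hypothetical) -- generation settings shared by all providers
    const GENERATION_DEFAULTS = {
      temperature: 0.7,
      max_tokens: 500
    }

    const SYSTEM_PROMPT =
      'You are a helpful assistant that generates short, informative articles.'

    module.exports = { GENERATION_DEFAULTS, SYSTEM_PROMPT }

Each provider's generate function could then spread GENERATION_DEFAULTS into its request options, so tuning changes happen in one place.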

+      return completion.choices[0].message.content
+    }
+  },
+  anthropic: {
+    name: 'Anthropic',
+    client: new Anthropic({
+      apiKey: process.env.ANTHROPIC_API_KEY
+    }),
+    models: ['claude-2', 'claude-instant-1'],
+    generate: async (client, model, prompt) => {
+      const completion = await client.messages.create({
+        model,
+        max_tokens: 500,
+        messages: [
+          {
+            role: "user",
+            content: prompt
+          }
+        ]
+      })
Comment on lines +38 to +47
style: Missing system prompt and temperature settings that other providers use. Could lead to inconsistent article generation.
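
A sketch of the same call with those settings added, assuming this SDK version accepts the top-level system and temperature fields that the Messages API documents:

    const completion = await client.messages.create({
      model,
      max_tokens: 500,
      temperature: 0.7, // matches the OpenAI and Cohere paths
      // Anthropic takes the system prompt as a top-level field, not a message
      system: "You are a helpful assistant that generates short, informative articles.",
      messages: [
        {
          role: "user",
          content: prompt
        }
      ]
    })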

+      return completion.content[0].text
logic: Potential undefined error if completion.content is empty array. Add null check or default value.

Suggested change
-      return completion.content[0].text
+      return completion.content?.[0]?.text || ''

+    }
+  },
+  cohere: {
+    name: 'Cohere',
+    client: cohere,
+    models: ['command', 'command-light'],
+    generate: async (client, model, prompt) => {
+      client.init(process.env.COHERE_API_KEY)
logic: Initializing Cohere client on every request is inefficient. Move client.init() to where other clients are initialized.
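
A sketch of that change, assuming cohere-ai's init only needs to run once per process; initialization moves next to the other client setup at the top of the module (other providers omitted for brevity):

    const cohere = require('cohere-ai')
    // Initialize once at module load, alongside the OpenAI/Anthropic constructors
    cohere.init(process.env.COHERE_API_KEY)

    const providers = {
      cohere: {
        name: 'Cohere',
        client: cohere,
        models: ['command', 'command-light'],
        generate: async (client, model, prompt) => {
          // no per-request client.init() here
          const response = await client.generate({
            model,
            prompt,
            max_tokens: 500,
            temperature: 0.7
          })
          return response.body.generations[0].text
        }
      }
    }

    module.exports = providers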

+      const response = await client.generate({
+        model,
+        prompt,
+        max_tokens: 500,
+        temperature: 0.7
+      })
+      return response.body.generations[0].text
+    }
+  }
+}

+module.exports = providers