Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions LocalMind-Backend/.env.example
Original file line number Diff line number Diff line change
Expand Up @@ -43,5 +43,6 @@ BACKEND_URL=http://localhost:5000

# Model Api keys
OPENAI_API_KEY=your_openai_api_key_here
OPENAI_MODEL=gpt-4o-mini
GOOGLE_API_KEY=your_google_api_key_here
GROQ_API_KEY=your_groq_api_key_here
2 changes: 1 addition & 1 deletion LocalMind-Backend/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -50,12 +50,12 @@
"@langchain/google-genai": "^0.2.18",
"@langchain/groq": "^0.2.4",
"@langchain/ollama": "^0.2.4",
"@langchain/openai": "^1.2.0",
"@types/cookie-parser": "^1.4.9",
"@types/jsonwebtoken": "^9.0.10",
"@types/mongoose": "^5.11.97",
"@types/morgan": "^1.9.10",
"argon2": "^0.44.0",
"bcrypt": "^5.1.1",
"axios": "^1.12.2",
"bcrypt": "^6.0.0",
"chalk": "^5.6.2",
Expand Down
777 changes: 736 additions & 41 deletions LocalMind-Backend/pnpm-lock.yaml

Large diffs are not rendered by default.

173 changes: 173 additions & 0 deletions LocalMind-Backend/src/api/v1/LangChain/langchain.controller.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,173 @@
import { Request, Response } from 'express'
import { SendResponse } from '../utils/SendResponse.utils'
import langchainService from '../services/langchain.service'

/**
* LangChain Controller
*
* Handles HTTP requests for LangChain-powered AI operations.
* Provides endpoints for:
* - Simple chat with system + user prompts
* - User-only prompts
* - Custom template execution
* - Streaming responses
*/
class LangChainController {
  /**
   * Extracts a human-readable message from an unknown thrown value.
   *
   * Handlers catch `unknown` (strict-mode `useUnknownInCatchVariables`);
   * non-Error throws (strings, plain objects) are stringified instead of
   * producing `undefined` in the error payload.
   */
  private static errorMessage(error: unknown): string {
    return error instanceof Error ? error.message : String(error)
  }

  /**
   * Simple chat endpoint with system and user prompts.
   *
   * POST /api/v1/langchain/chat
   * Body: { systemPrompt?: string, userPrompt: string }
   *
   * Falls back to a default system prompt when none is supplied.
   */
  async chat(req: Request, res: Response) {
    try {
      const { systemPrompt, userPrompt } = req.body

      if (!userPrompt) {
        return SendResponse.error(res, 'userPrompt is required', 400)
      }

      const defaultSystemPrompt =
        systemPrompt || 'You are a helpful AI assistant powered by LocalMind.'

      const response = await langchainService.runSimplePrompt(defaultSystemPrompt, userPrompt)

      SendResponse.success(
        res,
        'AI response generated successfully',
        {
          response,
          systemPrompt: defaultSystemPrompt,
          userPrompt,
        },
        200
      )
    } catch (error: unknown) {
      console.error('LangChain chat error:', error)
      SendResponse.error(res, 'Failed to generate AI response', 500, {
        error: LangChainController.errorMessage(error),
      })
    }
  }

  /**
   * User prompt only (no system message).
   *
   * POST /api/v1/langchain/prompt
   * Body: { prompt: string }
   */
  async prompt(req: Request, res: Response) {
    try {
      const { prompt } = req.body

      if (!prompt) {
        return SendResponse.error(res, 'prompt is required', 400)
      }

      const response = await langchainService.runUserPrompt(prompt)

      SendResponse.success(
        res,
        'AI response generated successfully',
        {
          response,
          prompt,
        },
        200
      )
    } catch (error: unknown) {
      console.error('LangChain prompt error:', error)
      SendResponse.error(res, 'Failed to generate AI response', 500, {
        error: LangChainController.errorMessage(error),
      })
    }
  }

  /**
   * Custom template with variables.
   *
   * POST /api/v1/langchain/template
   * Body: { template: string, variables: object }
   */
  async customTemplate(req: Request, res: Response) {
    try {
      const { template, variables } = req.body

      if (!template) {
        return SendResponse.error(res, 'template is required', 400)
      }

      if (!variables || typeof variables !== 'object') {
        return SendResponse.error(res, 'variables must be an object', 400)
      }

      const response = await langchainService.runCustomTemplate(template, variables)

      SendResponse.success(
        res,
        'Template executed successfully',
        {
          response,
          template,
          variables,
        },
        200
      )
    } catch (error: unknown) {
      console.error('LangChain template error:', error)
      SendResponse.error(res, 'Failed to execute template', 500, {
        error: LangChainController.errorMessage(error),
      })
    }
  }

  /**
   * Health check endpoint to verify LangChain is configured.
   *
   * GET /api/v1/langchain/health
   *
   * Reports the configured model name, temperature, and token limit.
   */
  async healthCheck(req: Request, res: Response) {
    try {
      const model = langchainService.getChatModel()

      SendResponse.success(
        res,
        'LangChain is configured and ready',
        {
          status: 'operational',
          model: model.modelName,
          temperature: model.temperature,
          maxTokens: model.maxTokens,
        },
        200
      )
    } catch (error: unknown) {
      SendResponse.error(res, 'LangChain is not properly configured', 500, {
        error: LangChainController.errorMessage(error),
      })
    }
  }

  /**
   * Test endpoint with a simple query.
   *
   * GET /api/v1/langchain/test
   *
   * Runs a fixed round-trip prompt through the service to confirm
   * end-to-end connectivity with the model provider.
   */
  async test(req: Request, res: Response) {
    try {
      const testPrompt = 'Say hello in one sentence and confirm you are working correctly.'

      const response = await langchainService.runSimplePrompt(
        'You are a helpful AI assistant.',
        testPrompt
      )

      SendResponse.success(
        res,
        'LangChain test successful',
        {
          testPrompt,
          response,
          timestamp: new Date().toISOString(),
        },
        200
      )
    } catch (error: unknown) {
      SendResponse.error(res, 'LangChain test failed', 500, {
        error: LangChainController.errorMessage(error),
      })
    }
  }
}

export default new LangChainController()
27 changes: 27 additions & 0 deletions LocalMind-Backend/src/api/v1/LangChain/langchain.routes.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import { Router } from 'express'
import langchainController from './langchain.controller'

const router: Router = Router()

/**
 * LangChain Routes
 *
 * Registers the LangChain-powered AI endpoints on a shared base path.
 */
const base = '/v1/langchain'

// Diagnostics: configuration health check and end-to-end round-trip test
router.get(`${base}/health`, langchainController.healthCheck)
router.get(`${base}/test`, langchainController.test)

// Prompt execution: system+user chat, user-only prompt, and custom template
router.post(`${base}/chat`, langchainController.chat)
router.post(`${base}/prompt`, langchainController.prompt)
router.post(`${base}/template`, langchainController.customTemplate)

export { router as LangChainRouter }
3 changes: 2 additions & 1 deletion LocalMind-Backend/src/routes/app.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import { DataSetRoutes } from '../api/v1/DataSet/v1/DataSet.routes'
import { userRoutes } from '../api/v1/user/user.routes'
import { OllamaRouter } from '../api/v1/Ai-model/Ollama/Ollama.routes'
import { GroqRouter } from '../api/v1/Ai-model/Groq/Groq.routes'
import { LangChainRouter } from '../api/v1/LangChain/langchain.routes'


logger.token('time', () => new Date().toLocaleString())
Expand All @@ -19,7 +20,7 @@ app.use(express.json())
app.use(express.urlencoded({ extended: true }))

// API routes
app.use('/api', GoogleRoutes, userRoutes, DataSetRoutes, OllamaRouter, GroqRouter)
app.use('/api', GoogleRoutes, userRoutes, DataSetRoutes, OllamaRouter, GroqRouter, LangChainRouter)

// Serve static files from public directory (for frontend in production)
const publicPath = path.join(__dirname, '../../public')
Expand Down
Loading