From 4c377db7bbe478dc24792e1a4b07c40cc83cb020 Mon Sep 17 00:00:00 2001
From: maskeen <120341809+mxskeen@users.noreply.github.com>
Date: Sun, 27 Jul 2025 12:46:01 +0530
Subject: [PATCH] Add configurable LLM provider settings (needs review)
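
Add per-user LLM provider settings (base URL, API key, model, temperature,
top_p): new optional columns on the User model, input fields on the Settings
page, exposure via getUserSettings, and pass-through from Problem/ChatWindow
to the /api/openai handler, which now instantiates the OpenAI client with the
configured baseURL and falls back to the existing apiKey when no LLM key is
set. ChatWindowEdit.tsx is added as a copy of the updated ChatWindow.tsx.

The schema change needs a migration, e.g.:

    npx prisma migrate dev --name add_llm_settings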
---
prisma/schema.prisma | 5 +
src/components/app/ChatWindow.tsx | 34 +-
src/components/app/ChatWindowEdit.tsx | 483 ++++++++++++++++++++++++++
src/components/app/Problem.tsx | 5 +
src/components/app/Settings.tsx | 79 ++++-
src/pages/api/getUserSettings.ts | 5 +
src/pages/api/openai.ts | 24 +-
7 files changed, 624 insertions(+), 11 deletions(-)
create mode 100644 src/components/app/ChatWindowEdit.tsx
diff --git a/prisma/schema.prisma b/prisma/schema.prisma
index fcf6c31..e7b86d5 100644
--- a/prisma/schema.prisma
+++ b/prisma/schema.prisma
@@ -18,6 +18,11 @@ model User {
subscriptionStart DateTime?
subscriptionEnd DateTime?
apiKey String? @default("null")
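+  // Optional per-user LLM provider overrides (OpenAI-compatible defaults)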
+ baseUrl String? @default("https://api.openai.com/v1")
+ llmApiKey String? @default("null")
+ llmModel String? @default("gpt-4o")
+ llmTemperature Float? @default(0.7)
+ llmTopP Float? @default(1.0)
collections Collection[]
learnSteps String @default("10m 1d")
relearnSteps String @default("10m")
diff --git a/src/components/app/ChatWindow.tsx b/src/components/app/ChatWindow.tsx
index 136707e..efba4d4 100644
--- a/src/components/app/ChatWindow.tsx
+++ b/src/components/app/ChatWindow.tsx
@@ -2,10 +2,26 @@ import React, { useState, useEffect, useRef } from 'react';
import hljs from 'highlight.js';
import 'highlight.js/styles/atom-one-dark-reasonable.css';
-const ChatWindow = ({ problem, editorContent, apiKey, onClose, buttonPosition }: {
+const ChatWindow = ({
+ problem,
+ editorContent,
+ apiKey,
+ baseUrl,
+ llmApiKey,
+ llmModel,
+ llmTemperature,
+ llmTopP,
+ onClose,
+ buttonPosition
+}: {
problem: any,
editorContent: string,
- apiKey: any,
+ apiKey: any,
+ baseUrl?: string,
+ llmApiKey?: string,
+ llmModel?: string,
+ llmTemperature?: number,
+ llmTopP?: number,
onClose: () => void,
buttonPosition: { x: number, y: number } | null
}) => {
@@ -32,6 +48,11 @@ const ChatWindow = ({ problem, editorContent, apiKey, onClose, buttonPosition }:
userSolution: editorContent,
userMessage: "analyze", // Special flag to just analyze the code
apiKey: apiKey,
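+          // User-configured LLM settings; /api/openai falls back to apiKey and defaults when unset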
+ baseUrl: baseUrl,
+ llmApiKey: llmApiKey,
+ llmModel: llmModel,
+ llmTemperature: llmTemperature,
+ llmTopP: llmTopP,
mode: "analyze" // Tell the API we're just loading context
}),
});
@@ -59,7 +80,7 @@ const ChatWindow = ({ problem, editorContent, apiKey, onClose, buttonPosition }:
};
analyzeCode();
- }, [problem, editorContent, apiKey]);
+ }, [problem, editorContent, apiKey, baseUrl, llmApiKey, llmModel, llmTemperature, llmTopP]);
useEffect(() => {
hljs.highlightAll();
@@ -101,6 +122,11 @@ const ChatWindow = ({ problem, editorContent, apiKey, onClose, buttonPosition }:
userSolution: editorContent,
userMessage: initialMessage || input,
apiKey: apiKey,
+ baseUrl: baseUrl,
+ llmApiKey: llmApiKey,
+ llmModel: llmModel,
+ llmTemperature: llmTemperature,
+ llmTopP: llmTopP,
mode: "chat" // Specify we're in chat mode now
}),
});
@@ -454,4 +480,4 @@ const ChatWindow = ({ problem, editorContent, apiKey, onClose, buttonPosition }:
);
};
-export default ChatWindow;
\ No newline at end of file
+export default ChatWindow;
diff --git a/src/components/app/ChatWindowEdit.tsx b/src/components/app/ChatWindowEdit.tsx
new file mode 100644
index 0000000..efba4d4
--- /dev/null
+++ b/src/components/app/ChatWindowEdit.tsx
@@ -0,0 +1,483 @@
+import React, { useState, useEffect, useRef } from 'react';
+import hljs from 'highlight.js';
+import 'highlight.js/styles/atom-one-dark-reasonable.css';
+
+const ChatWindow = ({
+ problem,
+ editorContent,
+ apiKey,
+ baseUrl,
+ llmApiKey,
+ llmModel,
+ llmTemperature,
+ llmTopP,
+ onClose,
+ buttonPosition
+}: {
+ problem: any,
+ editorContent: string,
+ apiKey: any,
+ baseUrl?: string,
+ llmApiKey?: string,
+ llmModel?: string,
+ llmTemperature?: number,
+ llmTopP?: number,
+ onClose: () => void,
+ buttonPosition: { x: number, y: number } | null
+}) => {
+  const [messages, setMessages] = useState<Array<{ text: string, sender: string }>>([]);
+  const [input, setInput] = useState("");
+  const [isAnalyzing, setIsAnalyzing] = useState(true);
+  const [isTyping, setIsTyping] = useState(false);
+  const [showQuickQuestions, setShowQuickQuestions] = useState(true);
+  const messagesEndRef = useRef<HTMLDivElement>(null);
+  const chatContainerRef = useRef<HTMLDivElement>(null);
+
+ // Initial analysis when component mounts
+ useEffect(() => {
+ const analyzeCode = async () => {
+ try {
+ const response = await fetch('/api/openai', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({
+ question: problem.question,
+ solution: problem.solution,
+ userSolution: editorContent,
+ userMessage: "analyze", // Special flag to just analyze the code
+ apiKey: apiKey,
+ baseUrl: baseUrl,
+ llmApiKey: llmApiKey,
+ llmModel: llmModel,
+ llmTemperature: llmTemperature,
+ llmTopP: llmTopP,
+ mode: "analyze" // Tell the API we're just loading context
+ }),
+ });
+
+ setIsAnalyzing(false);
+
+ if (response.ok) {
+ // After analysis is complete, show the greeting message
+ setMessages([{ text: "How can I help you with this problem?", sender: "ai" }]);
+ } else {
+ setMessages([{
+ text: "Failed to analyze your code. Please make sure you have entered a valid API Key in the Settings page.",
+ sender: "ai"
+ }]);
+ setShowQuickQuestions(false);
+ }
+ } catch (error) {
+ setIsAnalyzing(false);
+ setMessages([{
+ text: "Failed to analyze your code. Please make sure you have entered a valid API Key in the Settings page.",
+ sender: "ai"
+ }]);
+ setShowQuickQuestions(false);
+ }
+ };
+
+ analyzeCode();
+ }, [problem, editorContent, apiKey, baseUrl, llmApiKey, llmModel, llmTemperature, llmTopP]);
+
+ useEffect(() => {
+ hljs.highlightAll();
+ messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
+ }, [messages]);
+
+ // Handle escape key to close chat
+ useEffect(() => {
+ const handleEscape = (e: KeyboardEvent) => {
+ if (e.key === 'Escape') onClose();
+ };
+ window.addEventListener('keydown', handleEscape);
+ return () => window.removeEventListener('keydown', handleEscape);
+ }, [onClose]);
+
+ // Hide quick questions after first user message
+ useEffect(() => {
+ if (messages.length > 1 && showQuickQuestions) {
+ setShowQuickQuestions(false);
+ }
+ }, [messages.length, showQuickQuestions]);
+
+ const sendMessage = async (initialMessage = "") => {
+ const userMessage = { text: initialMessage || input, sender: "user" };
+ if (initialMessage === "") setMessages(prev => [...prev, userMessage]);
+ setInput("");
+ setIsTyping(true);
+ setShowQuickQuestions(false); // Hide quick questions once a message is sent
+
+ try {
+ const response = await fetch('/api/openai', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({
+ question: problem.question,
+ solution: problem.solution,
+ userSolution: editorContent,
+ userMessage: initialMessage || input,
+ apiKey: apiKey,
+ baseUrl: baseUrl,
+ llmApiKey: llmApiKey,
+ llmModel: llmModel,
+ llmTemperature: llmTemperature,
+ llmTopP: llmTopP,
+ mode: "chat" // Specify we're in chat mode now
+ }),
+ });
+
+ setIsTyping(false);
+
+ if (response.ok) {
+ const data = await response.json();
+ const aiMessage = { text: data.message, sender: "ai" };
+ setMessages(prevMessages => initialMessage ? [...prevMessages, userMessage, aiMessage] : [...prevMessages, aiMessage]);
+ } else {
+ const errorMessage = { text: "Failed to get response from AI. Please make sure you have entered a valid API Key in the Settings page, and that you have credits in your OpenAI account.", sender: "ai" };
+ setMessages(prevMessages => initialMessage ? [...prevMessages, userMessage, errorMessage] : [...prevMessages, errorMessage]);
+ }
+ } catch (error) {
+ setIsTyping(false);
+ const errorMessage = { text: "Failed to get response from AI. Please make sure you have entered a valid API Key in the Settings page, and that you have credits in your OpenAI account.", sender: "ai" };
+ setMessages(prevMessages => initialMessage ? [...prevMessages, userMessage, errorMessage] : [...prevMessages, errorMessage]);
+ }
+ };
+
+  const renderMessage = (msg: { text: string, sender: string }, index: number) => {
+    const codeRegex = /```(\w+)?\n([\s\S]*?)```/g;
+    // split() with two capture groups yields [text, language, code, text, ...]
+    const parts = msg.text.split(codeRegex);
+
+    return (
+      // wrapper markup reconstructed minimally; original classNames were lost
+      <div key={index} className={`message ${msg.sender}`}>
+        {parts.map((part, i) => {
+          if (i % 3 === 1) return null; // language token, consumed below
+          if (i % 3 === 2) {
+            const language = parts[i - 1] || 'plaintext';
+            return (
+              <pre key={i}>
+                <code className={`language-${language}`}>{part}</code>
+              </pre>
+            );
+          }
+          return part && <p key={i}>{part}</p>;
+        })}
+      </div>
+    );
+  };
+
+ const handleSubmit = (e: React.FormEvent) => {
+ e.preventDefault();
+ if (input.trim()) {
+ sendMessage();
+ }
+ };
+
+ // Quick questions to show as suggestion buttons
+ const quickQuestions = [
+ "Is my solution correct?",
+ "Are there edge cases my code is missing?"
+ ];
+
+ // Calculate position based on button location
+ const chatStyle = buttonPosition ? {
+ bottom: `calc(100vh - ${buttonPosition.y}px + 16px)`,
+ right: '16px',
+ transform: 'translateY(0)',
+ opacity: 1
+ } : {};
+
+  return (
+    // markup reconstructed minimally; original classNames/styles were lost
+    <div className="chat-window" style={chatStyle}>
+      {/* Chat header */}
+      <div className="chat-header">
+        <span className="material-icons">auto_awesome</span>
+        <span>AI Assistant</span>
+        <button onClick={onClose} aria-label="Close chat">&times;</button>
+      </div>
+
+      {/* Chat messages */}
+      <div className="chat-messages" ref={chatContainerRef}>
+        {isAnalyzing ? (
+          <div>
+            <p>Analyzing your code...</p>
+            <p>Preparing AI assistant</p>
+          </div>
+        ) : (
+          <>
+            {messages.map((msg, index) => renderMessage(msg, index))}
+
+            {/* Quick question buttons */}
+            {showQuickQuestions && messages.length === 1 && messages[0].sender === 'ai' && (
+              <div className="quick-questions">
+                {quickQuestions.map((question, idx) => (
+                  <button key={idx} onClick={() => sendMessage(question)}>
+                    {question}
+                  </button>
+                ))}
+              </div>
+            )}
+
+            {isTyping && (
+              <div className="typing-indicator">
+                <span /><span /><span />
+                <p>AI is thinking...</p>
+              </div>
+            )}
+          </>
+        )}
+        <div ref={messagesEndRef} />
+      </div>
+
+      {/* Chat input */}
+      <form onSubmit={handleSubmit}>
+        <input
+          value={input}
+          onChange={(e) => setInput(e.target.value)}
+          placeholder="Ask a question about your solution..."
+        />
+        <button type="submit">Send</button>
+      </form>
+
+      {/* CSS for typing indicator and animations: original rules not recoverable */}
+    </div>
+  );
+};
+
+export default ChatWindow;
diff --git a/src/components/app/Problem.tsx b/src/components/app/Problem.tsx
index 873f1b7..ad46e74 100644
--- a/src/components/app/Problem.tsx
+++ b/src/components/app/Problem.tsx
@@ -427,6 +427,11 @@ const Problem = ({ problem, contentActive, setContentActive, editorContent, setE
problem={problem}
editorContent={editorContent}
apiKey={data?.apiKey}
+ baseUrl={data?.baseUrl}
+ llmApiKey={data?.llmApiKey}
+ llmModel={data?.llmModel}
+ llmTemperature={data?.llmTemperature}
+ llmTopP={data?.llmTopP}
onClose={() => setShowChat(false)}
buttonPosition={buttonPosition}
/>
diff --git a/src/components/app/Settings.tsx b/src/components/app/Settings.tsx
index 565a7ed..70afe74 100644
--- a/src/components/app/Settings.tsx
+++ b/src/components/app/Settings.tsx
@@ -9,7 +9,8 @@ import {
Key as KeyIcon,
Cog as CogIcon,
Brain as BrainIcon,
- RefreshCw as RefreshIcon
+ RefreshCw as RefreshIcon,
+ Globe as GlobeIcon
} from 'lucide-react';
import { motion } from 'framer-motion';
@@ -119,6 +120,16 @@ const Settings = () => {
),
// API Settings
apiKey: (document.getElementById('apiKey') as HTMLInputElement)?.value,
+ // LLM API Settings
+ baseUrl: (document.getElementById('baseUrl') as HTMLInputElement)?.value,
+ llmApiKey: (document.getElementById('llmApiKey') as HTMLInputElement)?.value,
+ llmModel: (document.getElementById('llmModel') as HTMLInputElement)?.value,
+ llmTemperature: parseFloat(
+ (document.getElementById('llmTemperature') as HTMLInputElement)?.value
+ ),
+ llmTopP: parseFloat(
+ (document.getElementById('llmTopP') as HTMLInputElement)?.value
+ ),
};
// Validations (same as before)
@@ -140,9 +151,11 @@ const Settings = () => {
if (
!validateDecimal(newSettings.startingEase) ||
!validateDecimal(newSettings.easyBonus) ||
- !validateDecimal(newSettings.intervalModifier)
+ !validateDecimal(newSettings.intervalModifier) ||
+ isNaN(newSettings.llmTemperature) ||
+ isNaN(newSettings.llmTopP)
) {
- showToast("Starting Ease, Easy Bonus, and Interval Modifier must be decimal numbers.");
+ showToast("Starting Ease, Easy Bonus, Interval Modifier, LLM Temperature, and LLM Top P must be valid numbers.");
return;
}
@@ -375,6 +388,66 @@ const Settings = () => {
Enter your OpenAI API key to enable AI-based features.
+
+      {/* New LLM API Settings Fields */}
+      {/* Markup reconstructed minimally; input ids must match the getElementById lookups in handleSave */}
+      <div>
+        <div>
+          <GlobeIcon />
+          <h3>LLM API Settings</h3>
+        </div>
+
+        <label htmlFor="baseUrl">Base URL</label>
+        <input id="baseUrl" type="text" />
+        <p>The base URL for the LLM API (e.g., https://api.openai.com/v1).</p>
+
+        <label htmlFor="llmApiKey">LLM API Key</label>
+        <input id="llmApiKey" type="password" />
+        <p>Your API key for the LLM service.</p>
+
+        <label htmlFor="llmModel">Model</label>
+        <input id="llmModel" type="text" />
+        <p>The LLM model to use (e.g., gpt-4o).</p>
+
+        <label htmlFor="llmTemperature">Temperature</label>
+        <input id="llmTemperature" type="text" />
+        <p>Controls the randomness of the output; higher values mean more randomness.</p>
+
+        <label htmlFor="llmTopP">Top P</label>
+        <input id="llmTopP" type="text" />
+        <p>Controls the diversity of the output: 0.9 means only tokens within the top 90% of probability mass are considered.</p>
+      </div>
diff --git a/src/pages/api/getUserSettings.ts b/src/pages/api/getUserSettings.ts
index 32c8f61..ce2caf0 100644
--- a/src/pages/api/getUserSettings.ts
+++ b/src/pages/api/getUserSettings.ts
@@ -28,6 +28,11 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
maximumInterval: true,
maximumNewPerDay: true,
apiKey: true,
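+      // LLM provider settings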
+ baseUrl: true,
+ llmApiKey: true,
+ llmModel: true,
+ llmTemperature: true,
+ llmTopP: true,
contributionHistory: true,
},
});
diff --git a/src/pages/api/openai.ts b/src/pages/api/openai.ts
index 07cd06b..489a6b4 100644
--- a/src/pages/api/openai.ts
+++ b/src/pages/api/openai.ts
@@ -2,10 +2,25 @@ import { NextApiRequest, NextApiResponse } from 'next';
import OpenAI from 'openai';
export default async (req: NextApiRequest, res: NextApiResponse) => {
- const { question, solution, userSolution, userMessage, apiKey, mode = "chat" } = req.body;
+ const {
+ question,
+ solution,
+ userSolution,
+ userMessage,
+ apiKey,
+ baseUrl = "https://api.openai.com/v1",
+ llmApiKey,
+ llmModel = "gpt-4o",
+    llmTemperature = 0.7,
+    llmTopP = 1.0,
+    mode = "chat"
+ } = req.body;
+
+ // Use LLM API key if provided, otherwise fall back to regular API key
+ const finalApiKey = llmApiKey || apiKey;
const openai = new OpenAI({
- apiKey: apiKey,
+ apiKey: finalApiKey,
+ baseURL: baseUrl
});
let messages: any = [];
@@ -45,9 +60,10 @@ export default async (req: NextApiRequest, res: NextApiResponse) => {
}
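+  // Use the user-configured model and sampling parameters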
const completion = await openai.chat.completions.create({
- model: "gpt-4o",
+ model: llmModel,
messages,
- max_tokens: 300,
+      max_tokens: 300,
+      temperature: llmTemperature,
+      top_p: llmTopP
});
if (completion.choices && completion.choices.length > 0) {