99 changes: 59 additions & 40 deletions app/learn/chat/page.tsx
@@ -24,11 +24,18 @@ import remarkMath from "remark-math";
import rehypeKatex from "rehype-katex";
import "katex/dist/katex.min.css";

// Define message structure for chat
interface Message {
id: number;
content: string; // The actual message text
sender: "user" | "ai";
role: 'user' | 'assistant';
content: string;
options?: string[];
}

interface AgentResponse {
status: string;
explanation: string;
subtopics: string[];
prerequisites: string[];
summary: string;
}

const Chat: React.FC = () => {
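For reference, the reworked Message interface drops the numeric id and the sender field in favour of a role plus an optional list of follow-up options, while AgentResponse models the payload coming back from the backend. A minimal sketch of how the two might connect (the helper below is illustrative, not part of the PR):

// Hypothetical helper: map an AgentResponse into the new Message shape,
// using subtopics as clickable options. The content fallbacks mirror the
// handleSubmit logic later in this file.
const toAssistantMessage = (res: AgentResponse): Message => ({
  role: "assistant",
  content: res.explanation || res.summary || "No content available",
  options: res.subtopics,
});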
@@ -72,7 +79,7 @@ const Chat: React.FC = () => {
parsedResponse.response ||
parsedResponse.summary ||
"No content available";
setMessages([{ id: Date.now(), content: moduleContent, sender: "ai" }]);
setMessages([{ role: "assistant", content: moduleContent }]);
localStorage.removeItem("chatResponse");
}
}, []);
@@ -84,9 +91,8 @@ const Chat: React.FC = () => {
if (!input.trim()) return;

const userMessage: Message = {
id: Date.now(),
role: "user",
content: input,
sender: "user",
};

setMessages((prev) => [...prev, userMessage]);
@@ -96,10 +102,9 @@
try {
// TODO: Add error handling for network issues
const response = await axios.post(
"http://127.0.0.1:5000/process-content",
"http://127.0.0.1:5000/process-interaction",
{
notes: input,
files: [], // Future enhancement: add file upload support
input: input,
}
);

@@ -108,22 +113,22 @@
response.data.response ||
response.data.summary ||
response.data.learning_plan ||
response.data.feedback ||
"Sorry, I couldn't generate a response";

const aiMessage: Message = {
id: Date.now() + 1,
role: "assistant",
content: aiContent,
sender: "ai",
options: response.data.subtopics
};

setMessages((prev) => [...prev, aiMessage]);
} catch (error) {
console.error("Error:", error);

const errorMessage: Message = {
id: Date.now() + 1,
role: "assistant",
content: "Oops! Something went wrong. Please try again.",
sender: "ai",
};

setMessages((prev) => [...prev, errorMessage]);
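Worth noting for the handlers below as well: every request in this file now posts a single input string to /process-interaction instead of the old { notes, files } body sent to /process-content, and the reply text is picked by falling back through several optional fields. A rough client-side contract inferred from this diff (which fields are present presumably depends on the agent that answered; the optionality is an assumption):

// Assumed request/response shapes for /process-interaction, inferred from the
// fallback chain in this diff; not an authoritative API definition.
interface InteractionRequest {
  input: string;
}

interface InteractionResponse {
  explanation?: string;
  response?: string;
  summary?: string;
  learning_plan?: string;
  feedback?: string;
  subtopics?: string[]; // rendered as clickable option buttons when present
}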
@@ -236,10 +241,9 @@ const Chat: React.FC = () => {
setIsLoading(true);
try {
const response = await axios.post(
"http://127.0.0.1:5000/process-content",
"http://127.0.0.1:5000/process-interaction",
{
notes: `Please explain this more deeply: ${content}`,
files: [],
input: `Please explain this more deeply: ${content}`,
}
);

@@ -249,9 +253,9 @@
"I couldn't generate a deeper explanation";

const aiMessage: Message = {
id: Date.now(),
role: "assistant",
content: aiContent,
sender: "ai",
options: response.data.subtopics
};

setMessages((prev) => [...prev, aiMessage]);
@@ -260,9 +264,8 @@
setMessages((prev) => [
...prev,
{
id: Date.now(),
role: "assistant",
content: "Sorry, I couldn't generate a deeper explanation.",
sender: "ai",
},
]);
} finally {
@@ -274,10 +277,9 @@
setIsLoading(true);
try {
const response = await axios.post(
"http://127.0.0.1:5000/process-content",
"http://127.0.0.1:5000/process-interaction",
{
notes: `Please create a quick test to verify understanding of this content: ${content}`,
files: [],
input: `Please create a quick test to verify understanding of this content: ${content}`,
}
);

@@ -287,9 +289,9 @@
"I couldn't generate a test";

const aiMessage: Message = {
id: Date.now(),
role: "assistant",
content: aiContent,
sender: "ai",
options: response.data.subtopics
};

setMessages((prev) => [...prev, aiMessage]);
@@ -298,24 +300,29 @@
setMessages((prev) => [
...prev,
{
id: Date.now(),
role: "assistant",
content: "Sorry, I couldn't generate a test.",
sender: "ai",
},
]);
} finally {
setIsLoading(false);
}
};

const handleOptionClick = async (option: string) => {
setInput(option);
const fakeEvent = { preventDefault: () => {} } as React.FormEvent;
await handleSubmit(fakeEvent);
};
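One caveat with handleOptionClick as written: React state updates are asynchronous, so the immediate call to handleSubmit may still read the previous value of input. A possible variant that skips the synthetic event and submits the option text directly (sendMessage is a hypothetical helper extracted from handleSubmit, not part of this PR):

// Possible variant, not in the PR: submit the clicked option directly instead
// of routing it through setInput, which has not yet updated when handleSubmit runs.
const handleOptionClickDirect = async (option: string) => {
  setInput("");              // keep the text box clear rather than pre-filling it
  await sendMessage(option); // hypothetical helper that posts `option` as the user message
};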

const handleInteractiveQuestions = async (content: string) => {
setIsLoading(true);
console.log("handling interactive question");
try {
const response = await axios.post(
"http://127.0.0.1:5000/process-content",
"http://127.0.0.1:5000/process-interaction",
{
notes: `Please create interactive questions to verify understanding of this content: ${content}`,
files: [],
input: `Generate a quiz about the following content:\n\n${content}`
}
);

@@ -325,9 +332,9 @@
"I couldn't generate interactive questions";

const aiMessage: Message = {
id: Date.now(),
role: "assistant",
content: aiContent,
sender: "ai",
options: response.data.subtopics
};

setMessages((prev) => [...prev, aiMessage]);
@@ -336,9 +343,8 @@
setMessages((prev) => [
...prev,
{
id: Date.now(),
role: "assistant",
content: "Sorry, I couldn't generate interactive questions.",
sender: "ai",
},
]);
} finally {
@@ -363,14 +369,13 @@
<AnimatePresence mode="popLayout">
{messages.map((message) => (
<motion.div
key={message.id}
className={`flex ${
message.sender === "user" ? "justify-end" : "justify-start"
message.role === "user" ? "justify-end" : "justify-start"
}`}
initial={{
opacity: 0,
y: 20,
x: message.sender === "user" ? 20 : -20,
x: message.role === "user" ? 20 : -20,
}}
animate={{ opacity: 1, y: 0, x: 0 }}
exit={{
@@ -383,12 +388,12 @@
>
<div
className={`max-w-[70%] p-4 rounded-xl ${
message.sender === "user"
message.role === "user"
? "bg-blue-600 text-white"
: "bg-gray-100 text-gray-900"
}`}
>
{message.sender === "ai" ? (
{message.role === "assistant" ? (
<>
<ReactMarkdown
remarkPlugins={[remarkMath]}
@@ -416,6 +421,20 @@
>
{message.content}
</ReactMarkdown>
{message.options && message.options.length > 0 && (
<div className="mt-4 space-y-2">
{message.options.map((option, i) => (
<Button
key={i}
variant="outline"
className="w-full text-left justify-start"
onClick={() => handleOptionClick(option)}
>
{option}
</Button>
))}
</div>
)}
<div className="flex gap-2 mt-3">
<Button
variant="outline"
2 changes: 2 additions & 0 deletions backend/agents/agent_instructions.py
@@ -33,6 +33,7 @@
3. Prioritize user needs
4. Maintain learning flow
5. Be decisive in selection
6. If the latest context summary is undefined, always choose the exploration agent.

INPUT FORMAT:
{
@@ -240,6 +241,7 @@
3. Note areas for review
4. Suggest improvements
5. Maintain clarity
6. End the summary by noting how the user responded to the last agent, and which agent it was.

INPUT FORMAT:
{
10 changes: 6 additions & 4 deletions backend/agents/agent_service.py
@@ -85,10 +85,10 @@ def _handle_answer_evaluation(self, topic: str) -> None:
self.learning_state.awaiting_answer = False
answer_eval_input = AnswerEvalAgentInput("", self.learning_state.active_subtopic, self.learning_state.current_topic, self.learning_state.last_question, topic)
answer_eval = handle_answer_eval(self.model, answer_eval_input, self._call_agent)
return {
"is_correct": answer_eval.is_correct,
"feedback": answer_eval.feedback
}
return AnswerEvalAgentOutput(
is_correct=answer_eval.is_correct,
feedback=answer_eval.feedback
)
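On the frontend this change surfaces through the feedback fallback added to handleSubmit. Assuming the Flask route serialises AnswerEvalAgentOutput to JSON as-is (an assumption; only feedback is read by the chat UI), the client would see roughly:

// Assumed JSON shape of an answer-evaluation reply; the field names come from
// the AnswerEvalAgentOutput constructed above, the serialisation is an assumption.
interface AnswerEvalResponse {
  is_correct: boolean;
  feedback: string;
}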

def _call_agent(self, instructions: str, input_data: Any) -> Any:
"""Handle communication with the AI model."""
@@ -251,6 +251,8 @@ def start_new_topic(self, topic: str, user_background: Optional[str] = None, cur
entry['content'] for entry in self.learning_state.session_history
)

print("Agent: ", classification.next_agent)

match classification.next_agent:
case 'exploration':
input_data = ExplorationAgentInput(