diff --git a/AGENTS.md b/AGENTS.md index 7d55e7e..630ccc0 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -2,15 +2,46 @@ ## Project Overview -Wordpan is a full-stack AI-powered web application template demonstrating modern AI agent integration with CrewAI, Supabase, and comprehensive observability using Arize Phoenix. +Wordpan is a full-stack AI-powered language learning application demonstrating modern AI agent integration with CrewAI, Supabase, and comprehensive observability using Arize Phoenix. ### Architecture -- **Frontend**: React 19.1 + TypeScript 5.9 + Vite 7 + Tailwind CSS 4 +- **Frontend**: React 19.1 + TypeScript 5.9 + Vite 7 + Tailwind CSS 4 + Radix UI - **AI Backend**: Python 3.13 + Flask + CrewAI 0.201 + LiteLLM - **Database**: PostgreSQL via Supabase with Row-Level Security (RLS) - **Observability**: Arize Phoenix 12.4.0 with OpenTelemetry tracing - **Containerization**: Docker Compose with hot reload +### Key Features +1. **Random Phrase Generator**: Bilingual phrase generation using AI +2. **Pronunciation Tips**: AI-powered pronunciation guidance with caching +3. **Smart Tutor Chat**: Multi-agent conversational AI for language learning +4. **Language Selection**: Target language preferences (Polish, Belarusian, Italian) +5. **Flashcard System**: Save vocabulary pairs for later review + +### Project Structure +``` +wordpan/ +├── ai/ # Python AI backend +│ ├── src/ +│ │ ├── crews/ # CrewAI agent definitions +│ │ │ ├── base/ # Shared LLM configuration +│ │ │ ├── random_phrase_crew/ +│ │ │ ├── pronunciation_tips_crew/ +│ │ │ └── chat_tutor_crew/ +│ │ └── lib/ # Shared utilities (tracer) +│ ├── run.py # Flask API server +│ └── pyproject.toml # Python dependencies +├── web/ # React frontend +│ ├── src/ +│ │ ├── pages/ # Page components +│ │ ├── components/ # UI components +│ │ ├── hooks/ # Custom React hooks +│ │ └── lib/ # Utilities and services +│ └── package.json +└── supabase/ # Database configuration + └── migrations/ # SQL migration files +``` + ## Database Operations **CRITICAL: Never run database migration commands in this project.** @@ -47,16 +78,15 @@ Only create migration files. Let the developer apply them manually. mkdir -p ai/src/crews/my_new_crew/config ``` -2. Define agents in `agents.yaml`: +2. Define agents in `config/agents.yaml`: ```yaml my_agent: role: "Agent Role" goal: "What the agent should achieve" backstory: "Agent's background and expertise" - model: "groq/llama-3.3-70b-versatile" ``` -3. Define tasks in `tasks.yaml`: +3. Define tasks in `config/tasks.yaml`: ```yaml my_task: description: "Task description with {variable}" @@ -64,32 +94,118 @@ Only create migration files. Let the developer apply them manually. agent: my_agent ``` -4. Create crew class in `crew.py`: +4. Create schemas in `schemas.py`: + ```python + from pydantic import BaseModel, Field + + class MyOutput(BaseModel): + field_name: str = Field(description="Field description") + ``` + +5. 
Create crew class in `crew.py` using `@CrewBase` decorator: ```python from crewai import Agent, Crew, Task, Process + from crewai.project import CrewBase, agent, crew, task + from src.crews.base.llm import DEFAULT_LLM + + @CrewBase + class MyNewCrew(): + agents: list[Agent] + tasks: list[Task] + + @agent + def my_agent(self) -> Agent: + return Agent( + config=self.agents_config['my_agent'], + llm=DEFAULT_LLM + ) - class MyNewCrew: - def __init__(self): - # Load configs, initialize agents and tasks - pass + @task + def my_task(self) -> Task: + return Task( + config=self.tasks_config['my_task'], + output_pydantic=MyOutput + ) - def run(self, inputs: dict): - crew = Crew( - agents=[self.agent], - tasks=[self.task], + @crew + def crew(self) -> Crew: + return Crew( + agents=self.agents, + tasks=self.tasks, process=Process.sequential ) - return crew.kickoff(inputs=inputs) ``` -5. Add endpoint in `ai/run.py`: +6. Add endpoint in `ai/run.py`: ```python + from src.crews.my_new_crew.crew import MyNewCrew + from src.crews.my_new_crew.schemas import MyOutput + @app.route('/api/my-endpoint', methods=['POST']) + @require_auth async def my_endpoint(): - # Initialize crew, run, return result - pass + # Get user context + user_id = request.user.id + user_context, target_language = await get_user_context(user_id) + + # Run crew + inputs = {'user_context': user_context, 'target_language': target_language} + result = await MyNewCrew().crew().kickoff_async(inputs=inputs) + + if hasattr(result, 'pydantic'): + return jsonify(result.pydantic.model_dump()), 200 + + return jsonify({"error": "Unexpected result format"}), 500 ``` +### Agent Configuration + +**LLM Provider** (`ai/src/crews/base/llm.py`): +- Default: Groq Llama 3.3 70B Versatile +- Configurable via environment variables +- LiteLLM abstraction allows easy provider switching + +**Agent Patterns**: +- Use descriptive roles and goals +- Provide detailed backstory for context +- Include expected output format in task descriptions +- Use Pydantic models for structured outputs + +**Tool Calling**: +- Define tools as Python functions +- Use CrewAI's @tool decorator +- Pass tools to agents via `tools` parameter +- Tool results are automatically integrated into agent context + +### Existing Crews + +**RandomPhraseCrew** (`ai/src/crews/random_phrase_crew/`) +- Single agent: `phrase_creator` +- Creates bilingual phrases from random word lists +- Supports Polish, Belarusian, and Italian translations +- Output: `PhraseOutput` (phrase, phrase_target_lang, target_language, words_used) + +**PronunciationTipsCrew** (`ai/src/crews/pronunciation_tips_crew/`) +- Single agent: `pronunciation_guide` +- Provides IPA transcriptions, syllable breakdowns, memory aids +- Uses session-based caching via Supabase +- Output: `PronunciationTipsOutput` (word, phonetic_transcription, syllables, pronunciation_tips, memory_aids, common_mistakes) + +**ChatTutorCrew** (`ai/src/crews/chat_tutor_crew/`) +- Three agents: `router_agent`, `translation_agent`, `vocabulary_agent` +- Routes requests to appropriate specialists +- Supports tool calling for saving vocabulary pairs +- Enforces domain boundaries (language learning only) +- Output: `TutorResponse` (response_type, content, data, tool_calls) + +### Tracing + +All crew operations are automatically traced via OpenTelemetry: +- View traces in Phoenix UI: http://localhost:6006 +- See agent reasoning chains +- Monitor LLM calls and latency +- Track token usage + ### Environment Configuration **Frontend** (`web/.env.local`): diff --git 
a/CLAUDE.md b/CLAUDE.md index f911f7e..c798c1d 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -2,15 +2,56 @@ ## Project Overview -Wordpan is a full-stack AI-powered web application template demonstrating modern AI agent integration with CrewAI, Supabase, and comprehensive observability using Arize Phoenix. +Wordpan is a full-stack AI-powered language learning application demonstrating modern AI agent integration with CrewAI, Supabase, and comprehensive observability using Arize Phoenix. ### Architecture -- **Frontend**: React 19.1 + TypeScript 5.9 + Vite 7 + Tailwind CSS 4 +- **Frontend**: React 19.1 + TypeScript 5.9 + Vite 7 + Tailwind CSS 4 + Radix UI - **AI Backend**: Python 3.13 + Flask + CrewAI 0.201 + LiteLLM - **Database**: PostgreSQL via Supabase with Row-Level Security (RLS) - **Observability**: Arize Phoenix 12.4.0 with OpenTelemetry tracing - **Containerization**: Docker Compose with hot reload +### Key Features +1. **Random Phrase Generator**: Bilingual phrase generation using AI +2. **Pronunciation Tips**: AI-powered pronunciation guidance with caching +3. **Smart Tutor Chat**: Multi-agent conversational AI for language learning +4. **Language Selection**: Target language preferences (Polish, Belarusian, Italian) +5. **Flashcard System**: Save vocabulary pairs for later review + +### Project Structure +``` +wordpan/ +├── README.md +├── CLAUDE.md # This file +├── AGENTS.md # Agent-specific instructions +├── docker-compose.yml # Service orchestration +├── .vscode/ # VS Code settings +├── ai/ # Python AI backend +│ ├── src/ +│ │ ├── crews/ # CrewAI agent definitions +│ │ │ ├── base/ # Shared LLM configuration +│ │ │ ├── random_phrase_crew/ +│ │ │ ├── pronunciation_tips_crew/ +│ │ │ └── chat_tutor_crew/ +│ │ └── lib/ # Shared utilities (tracer) +│ ├── run.py # Flask API server +│ ├── Dockerfile +│ └── pyproject.toml # Python dependencies +├── web/ # React frontend +│ ├── src/ +│ │ ├── pages/ # Page components +│ │ ├── components/ # UI components +│ │ ├── hooks/ # Custom React hooks +│ │ ├── lib/ # Utilities and services +│ │ ├── contexts/ # React contexts +│ │ └── layouts/ # Layout components +│ ├── Dockerfile +│ └── package.json +├── supabase/ # Database configuration +│ └── migrations/ # SQL migration files +└── phoenix/ # Observability configuration +``` + ## Database Operations **CRITICAL: Never run database migration commands in this project.** @@ -40,6 +81,64 @@ Only create migration files. Let the developer apply them manually. - Backend validates JWT before processing requests - RLS policies enforce data access control +### API Endpoints + +**Phrase Generation** +- `POST /api/random-phrase` - Generate random phrase from words + - Body: `{ "words": ["word1", "word2", ...] 
}` + - Returns: `{ "phrase", "phrase_target_lang", "target_language", "words_used" }` + +**Pronunciation Tips** +- `POST /api/pronunciation-tips` - Get pronunciation guidance + - Body: `{ "word": "example" }` + - Returns: `{ "word", "phonetic_transcription", "syllables", "pronunciation_tips", "memory_aids", "common_mistakes" }` + +**Chat System** +- `POST /api/chat/message` - Send message and get AI response + - Body: `{ "chat_id": "uuid" | null, "message": "text" }` + - Returns: ChatMessage with structured content +- `GET /api/chats` - List all user chats +- `POST /api/chats` - Create new chat +- `DELETE /api/chats/:id` - Delete chat +- `GET /api/chats/:id/messages` - Get chat history + +**User Profile** +- `PUT /api/profile/target-language` - Update language preference + - Body: `{ "target_language": "polish" | "belarusian" | "italian" }` + +### CrewAI Agents + +**RandomPhraseCrew** (`ai/src/crews/random_phrase_crew/`) +- `phrase_creator` - Creates bilingual phrases from random words +- Output: `PhraseOutput` with phrase, translation, and words used + +**PronunciationTipsCrew** (`ai/src/crews/pronunciation_tips_crew/`) +- `pronunciation_guide` - Provides pronunciation guidance +- Output: `PronunciationTipsOutput` with IPA, syllables, tips, and memory aids +- Uses session caching via Supabase for performance + +**ChatTutorCrew** (`ai/src/crews/chat_tutor_crew/`) +- `router_agent` - Routes requests to appropriate specialists +- `translation_agent` - Handles translation requests +- `vocabulary_agent` - Suggests new vocabulary +- Tools: `save_word_pair` - Saves vocabulary to flashcard deck +- Output: `TutorResponse` with response_type, content, and optional tool_calls + +### Database Schema + +**Tables** +- `words` - Random word pool for phrase generation +- `profiles` - User profiles with context and target_language +- `pronunciation_tips_cache` - Cached pronunciation data per user +- `chats` - Chat session metadata (title, created_at, updated_at) +- `chat_messages` - Individual messages with JSONB content +- `word_pairs` - Saved vocabulary pairs with example sentences + +**Row-Level Security (RLS)** +- All tables have RLS enabled +- Users can only access their own data +- Service role client used for profile operations to bypass RLS when needed + ### Adding New CrewAI Crews 1. Create crew directory structure: @@ -53,7 +152,6 @@ Only create migration files. Let the developer apply them manually. role: "Agent Role" goal: "What the agent should achieve" backstory: "Agent's background and expertise" - model: "groq/llama-3.3-70b-versatile" ``` 3. Define tasks in `tasks.yaml`: @@ -64,49 +162,174 @@ Only create migration files. Let the developer apply them manually. agent: my_agent ``` -4. Create crew class in `crew.py`: +4. Create schemas in `schemas.py`: + ```python + from pydantic import BaseModel, Field + + class MyOutput(BaseModel): + field_name: str = Field(description="Field description") + ``` + +5. 
Create crew class in `crew.py`: ```python from crewai import Agent, Crew, Task, Process + from crewai.project import CrewBase, agent, crew, task + from src.crews.base.llm import DEFAULT_LLM + + @CrewBase + class MyNewCrew(): + agents: list[Agent] + tasks: list[Task] + + @agent + def my_agent(self) -> Agent: + return Agent( + config=self.agents_config['my_agent'], + llm=DEFAULT_LLM + ) - class MyNewCrew: - def __init__(self): - # Load configs, initialize agents and tasks - pass + @task + def my_task(self) -> Task: + return Task( + config=self.tasks_config['my_task'], + output_pydantic=MyOutput + ) - def run(self, inputs: dict): - crew = Crew( - agents=[self.agent], - tasks=[self.task], + @crew + def crew(self) -> Crew: + return Crew( + agents=self.agents, + tasks=self.tasks, process=Process.sequential ) - return crew.kickoff(inputs=inputs) ``` -5. Add endpoint in `ai/run.py`: +6. Add endpoint in `ai/run.py`: ```python + from src.crews.my_new_crew.crew import MyNewCrew + from src.crews.my_new_crew.schemas import MyOutput + @app.route('/api/my-endpoint', methods=['POST']) + @require_auth async def my_endpoint(): - # Initialize crew, run, return result - pass + # Get user context + user_id = request.user.id + user_context, target_language = await get_user_context(user_id) + + # Run crew + inputs = {'user_context': user_context, 'target_language': target_language} + result = await MyNewCrew().crew().kickoff_async(inputs=inputs) + + if hasattr(result, 'pydantic'): + return jsonify(result.pydantic.model_dump()), 200 + + return jsonify({"error": "Unexpected result format"}), 500 ``` ### Environment Configuration **Frontend** (`web/.env.local`): -- `VITE_SUPABASE_URL` - Supabase project URL -- `VITE_SUPABASE_ANON_KEY` - Supabase anonymous key -- `VITE_AI_SERVICE_URL` - AI backend base URL +```bash +VITE_SUPABASE_URL=http://127.0.0.1:54321 +VITE_SUPABASE_ANON_KEY= +VITE_AI_SERVICE_URL=http://localhost:8000 +``` **Backend** (`ai/.env`): -- `GROQ_API_KEY` (or `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`) - LLM provider key -- `PHOENIX_PROJECT_NAME` - Project name in Phoenix -- `PHOENIX_COLLECTOR_ENDPOINT` - Phoenix OTLP endpoint -- `SUPABASE_URL` - Supabase URL (use `http://host.docker.internal:54321` in Docker) -- `SUPABASE_ANON_KEY` - Supabase anonymous key +```bash +GROQ_API_KEY= +PHOENIX_PROJECT_NAME=GOMANAI_WORKSHOP +PHOENIX_COLLECTOR_ENDPOINT=http://phoenix:4317/v1/traces +SUPABASE_URL=http://host.docker.internal:54321 +SUPABASE_ANON_KEY= +SUPABASE_SERVICE_ROLE_KEY= +PORT=8000 +``` **Phoenix** (`phoenix/.env`): -- `POSTGRES_HOST`, `POSTGRES_USER`, `POSTGRES_DB`, `POSTGRES_PASSWORD` -- `PHOENIX_SQL_DATABASE_URL` - Phoenix database connection string +```bash +POSTGRES_HOST=phoenix-db +POSTGRES_USER=postgres +POSTGRES_DB=postgres +POSTGRES_PASSWORD=postgres +PHOENIX_SQL_DATABASE_URL=postgresql+psycopg://postgres:postgres@phoenix-db/postgres +``` + +### Key Dependencies + +**Frontend** (package.json): +```json +{ + "react": "^19.1.1", + "typescript": "~5.9.3", + "vite": "^7.1.7", + "@supabase/supabase-js": "^2.75.0", + "@tanstack/react-query": "^5.90.21", + "react-router-dom": "^7.6.0", + "tailwindcss": "^4.1.14", + "@radix-ui/react-*": "UI component library" +} +``` + +**Backend** (pyproject.toml): +```toml +dependencies = [ + "flask[async]>=3.1.0", + "crewai==0.201.0", + "litellm", + "arize-phoenix==12.4.0", + "supabase>=2.22.0", + "openinference-instrumentation-crewai", + "openinference-instrumentation-litellm" +] +``` + +### Frontend Components + +**Pages** (`web/src/pages/`) +- `dashboard.tsx` - Main 
dashboard with quick actions +- `login.tsx` - User login +- `signup.tsx` - User registration +- `words.tsx` - Word list with pagination +- `random-phrase.tsx` - Random phrase generator +- `chat.tsx` - AI chat tutor interface +- `settings.tsx` - User settings + +**UI Components** (`web/src/components/`) +- `ui/` - Radix UI components (button, card, input, etc.) +- `chat/` - Chat-specific components +- `app-sidebar.tsx` - Main navigation sidebar +- `nav-main.tsx` - Navigation items +- `PronunciationTipsModal.tsx` - Pronunciation tips display + +**Hooks** (`web/src/hooks/`) +- `use-words.ts` - Word list pagination +- `use-random-phrase.ts` - Random phrase generation +- `use-chat.ts` - Chat state management +- `use-mobile-detection.ts` - Mobile device detection + +### Observability + +**Phoenix Integration** +- All AI operations traced via OpenTelemetry +- Automatic instrumentation for CrewAI and LiteLLM +- Session-based span correlation +- Performance metrics dashboard at http://localhost:6006 + +**Traced Operations** +- Random phrase generation +- Pronunciation tips generation +- Chat message processing +- Tool invocations (save_word_pair) + +**Viewing Traces** +1. Open Phoenix UI: http://localhost:6006 +2. Select a trace to view: + - Input parameters + - Agent reasoning + - LLM calls with latency + - Token usage + - Output results ## Common Tasks @@ -140,6 +363,71 @@ open http://127.0.0.1:54323 - **AI Observability**: Phoenix UI at http://localhost:6006 - **Database**: Supabase Studio at http://127.0.0.1:54323 +### Running Services + +**All Services (Recommended)** +```bash +docker compose up --build +``` + +**Individual Services** +```bash +# Frontend only +cd web && npm run dev + +# AI backend only +cd ai && uv run python run.py + +# Supabase (database & auth) +supabase start + +# Observability +docker compose up phoenix phoenix-db +``` + +### Database Operations +```bash +# Create migration (ONLY command allowed) +supabase migration new + +# Access database directly +supabase db psql + +# Open Supabase Studio (GUI) +open http://127.0.0.1:54323 + +# View database logs +supabase logs +``` + +### Debugging + +**Frontend** +- React DevTools browser extension +- Vite terminal logs for HMR updates +- Network tab for API calls +- Console for JavaScript errors +- Source maps enabled in development + +**Backend** +- VS Code debugging: Press F5 with `.vscode/launch.json` configuration +- debugpy on port 5678 +- Flask logs in terminal with traceback +- Use `logger.info()`, `logger.error()` for custom logging + +**AI Observability** +- Phoenix UI: http://localhost:6006 +- View traces for all AI operations +- Monitor LLM latency and token usage +- Inspect agent reasoning chains + +**Database** +- Supabase Studio: http://127.0.0.1:54323 +- Table editor for data inspection +- SQL editor for custom queries +- Real-time subscription monitoring +- RLS policy tester + ## Important Constraints 1. **Database Migrations**: Never run `supabase db reset` or `supabase db push` @@ -149,6 +437,28 @@ open http://127.0.0.1:54323 5. **User Context**: AI crews should incorporate user profile data 6. **LiteLLM**: Use LiteLLM for LLM provider abstraction 7. **Hot Reload**: Docker setup supports hot reload for development +8. 
**Order By**: Supabase Python client uses keyword-only ordering: `.order(column, desc=True)` (ascending is the default)
+
+## Common Issues & Solutions
+
+**Issue**: "BaseSelectRequestBuilder.order() takes 2 positional arguments but 3 were given"
+- **Solution**: Pass the direction as a keyword: `.order("column", desc=True)`, or `.order("column")` for ascending (the default)
+
+**Issue**: CORS errors when calling API
+- **Solution**: Ensure Authorization header is sent with JWT token
+- Check `VITE_AI_SERVICE_URL` in `.env.local`
+
+**Issue**: "Profile not found" errors
+- **Solution**: Use `supabase_admin` (service role) for profile operations
+- Profile auto-creation trigger may not have fired
+
+**Issue**: Chat messages not saving
+- **Solution**: Verify `chat_id` is passed correctly
+- Check RLS policies on `chat_messages` table
+
+**Issue**: Phoenix traces not appearing
+- **Solution**: Check `PHOENIX_COLLECTOR_ENDPOINT` uses `http://phoenix:4317` in Docker
+- Verify Phoenix service is running: `docker compose ps`
 
 ## Security Notes
 
@@ -156,3 +466,23 @@ open http://127.0.0.1:54323
 - JWT tokens stored in Supabase client (localStorage)
 - Backend validates all tokens with Supabase before processing
 - Auto-created profiles via database triggers
+- Service role client only used when necessary (profile operations)
+- All API endpoints require valid JWT except `/health`
+
+## Performance Considerations
+
+**Caching**
+- Pronunciation tips cached per user session
+- React Query caches API responses (5min staleTime, 30min gcTime)
+- Consider cache invalidation strategies for user data updates
+
+**AI Operations**
+- CrewAI agents use sequential process by default
+- LLM calls are the bottleneck - monitor via Phoenix
+- Consider streaming responses for long-running operations
+- Use `@traceable` decorator for custom span creation
+
+**Database**
+- RLS policies add query overhead
+- Index on commonly filtered columns (user_id, chat_id)
+- Use Supabase PostgREST for direct data access when possible
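The `order()` pitfall above is worth pinning down with a concrete call. A minimal sketch of the corrected pattern (table, column, and key values are illustrative):

```python
from supabase import create_client

supabase = create_client("http://127.0.0.1:54321", "<anon_key>")  # illustrative

# supabase.table("chat_messages").select("*").order("created_at", True)
# would raise the TypeError quoted above: desc is keyword-only in supabase-py v2.
rows = (
    supabase.table("chat_messages")
    .select("*")
    .eq("chat_id", "some-chat-id")       # illustrative filter
    .order("created_at", desc=True)      # newest first
    .limit(20)
    .execute()
)
messages = list(reversed(rows.data))     # back to chronological order
```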
diff --git a/ai/run.py b/ai/run.py
index d6cff3c..5fd9a0f 100644
--- a/ai/run.py
+++ b/ai/run.py
@@ -9,12 +9,14 @@ from flask_cors import CORS
 from supabase import create_client, Client
 
-from crews.random_phrase_crew.crew import RandomPhraseCrew
-from crews.random_phrase_crew.schemas import PhraseOutput
-from crews.pronunciation_tips_crew.crew import PronunciationTipsCrew
-from crews.pronunciation_tips_crew.schemas import PronunciationTipsOutput
+from src.crews.random_phrase_crew.crew import RandomPhraseCrew
+from src.crews.random_phrase_crew.schemas import PhraseOutput
+from src.crews.pronunciation_tips_crew.crew import PronunciationTipsCrew
+from src.crews.pronunciation_tips_crew.schemas import PronunciationTipsOutput
+from src.crews.chat_tutor_crew.crew import ChatTutorCrew
+from src.crews.chat_tutor_crew.schemas import TutorResponse, RoutingDecision
 
-from lib.tracer import traceable
+from src.lib.tracer import traceable
 
 # Configure logging
 logging.basicConfig(
@@ -398,6 +400,500 @@ async def get_pronunciation_tips():
         return jsonify({"error": f"An error occurred: {str(e)}"}), 500
 
 
+async def handle_tool_call(tool_name: str, arguments: dict, user_id: str) -> str:
+    """
+    Handle tool invocations from the AI agent.
+
+    Args:
+        tool_name: Name of the tool to call
+        arguments: Arguments to pass to the tool
+        user_id: The user's UUID
+
+    Returns:
+        Result message from the tool execution
+    """
+    if tool_name == "save_word_pair":
+        # Always inject the authenticated user_id, overriding anything the agent
+        # supplied. This handles both manual tool calls and CrewAI agent tool calls.
+        arguments_with_user = {**arguments, "user_id": user_id}
+
+        source_word = arguments_with_user.get("source_word")
+        translated_word = arguments_with_user.get("translated_word")
+        context_sentence = arguments_with_user.get("context_sentence")
+
+        if not source_word or not translated_word:
+            return "Error: Both source_word and translated_word are required to save a word pair."
+
+        # Check for duplicates
+        existing = supabase_admin.table("word_pairs").select("*") \
+            .eq("user_id", user_id) \
+            .eq("source_word", source_word) \
+            .eq("translated_word", translated_word) \
+            .execute()
+
+        if existing.data:
+            return f"'{source_word}' is already in your flashcard deck!"
+
+        # Insert new word pair
+        word_pair_data = {
+            "user_id": user_id,
+            "source_word": source_word,
+            "translated_word": translated_word,
+        }
+
+        if context_sentence:
+            word_pair_data["context_sentence"] = context_sentence
+
+        supabase_admin.table("word_pairs").insert(word_pair_data).execute()
+
+        # Return user-friendly confirmation
+        if context_sentence:
+            return f"Done! I've added '{source_word} → {translated_word}' to your flashcard deck. Example: {context_sentence}"
+        else:
+            return f"Done! I've added '{source_word} → {translated_word}' to your flashcard deck."
+
+    return f"Unknown tool: {tool_name}"
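To make the wiring concrete, here's a minimal sketch of how a specialist's structured reply flows through this handler (`run_tool_calls` is a hypothetical helper, not part of the diff):

```python
from src.crews.chat_tutor_crew.schemas import TutorResponse

async def run_tool_calls(response: TutorResponse, user_id: str) -> list[str]:
    """Dispatch every tool call on a TutorResponse through handle_tool_call."""
    results: list[str] = []
    for call in response.tool_calls or []:
        # user_id comes from the authenticated request, never from the agent
        results.append(await handle_tool_call(call.name, call.arguments, user_id))
    return results
```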
+@app.route("/api/chat/message", methods=["POST"])
+@require_auth
+async def send_chat_message():
+    """
+    Send a message to the chat tutor and get an AI response.
+
+    Request body:
+    {
+        "chat_id": "uuid" | null,
+        "message": "user message"
+    }
+
+    Headers:
+        Authorization: Bearer <jwt_token>
+
+    Response:
+    {
+        "id": "message_id",
+        "chat_id": "chat_id",
+        "role": "assistant",
+        "content": {...},
+        "created_at": "timestamp"
+    }
+    """
+    try:
+        data = request.get_json()
+
+        if not data or "message" not in data:
+            return jsonify({"error": "Request body must include 'message' field"}), 400
+
+        message = data.get("message", "").strip()
+        chat_id = data.get("chat_id")
+
+        if not message:
+            return jsonify({"error": "'message' cannot be empty"}), 400
+
+        user_id = request.user.id
+
+        # Fetch conversation history if chat_id is provided
+        conversation_history = []
+        if chat_id:
+            messages_response = supabase_admin.table("chat_messages").select("*") \
+                .eq("chat_id", chat_id) \
+                .order("created_at", desc=True) \
+                .limit(20) \
+                .execute()
+
+            # Reverse to get chronological order
+            conversation_history = list(reversed(messages_response.data))
+
+        # Fetch user context
+        user_context, target_language = await get_user_context(user_id)
+
+        # Format conversation history for the AI
+        history_text = ""
+        for msg in conversation_history[-10:]:  # Last 10 messages for context
+            role = msg.get("role", "user")
+            content = msg.get("content", {})
+            if isinstance(content, dict):
+                content_str = content.get("content", str(content))
+            else:
+                content_str = str(content)
+            history_text += f"{role.capitalize()}: {content_str}\n"
+
+        # Run the ChatTutorCrew with conditional routing
+        # The crew's callback will handle routing to the appropriate specialist
+        # This creates a single Phoenix trace showing: router → specialist chain
+        inputs = {
+            'user_message': message,
+            'target_language': target_language if target_language else "None",
+            'user_context': user_context if user_context else "",
+            'conversation_history': history_text if history_text else "No previous messages",
+            'user_id': user_id  # Pass user_id so tools can access it
+        }
+
+        crew_instance = ChatTutorCrew()
+        result = await crew_instance.crew().kickoff_async(inputs=inputs)
+
+        # Extract the final response from the last executed task
+        if hasattr(result, 'pydantic'):
+            # The result could be either RoutingDecision (if declined) or TutorResponse (if specialist ran)
+            result_model = result.pydantic
+
+            # Handle rejection from router
+            if isinstance(result_model, RoutingDecision):
+                if not result_model.should_respond:
+                    tutor_response = TutorResponse(
+                        response_type="error",
+                        content=result_model.rejection_reason or "I can only help with language-related questions.",
+                        data=None,
+                        tool_calls=None
+                    )
+                else:
+                    # Router returned a decision but no specialist was executed
+                    # This shouldn't happen with callback routing, but handle gracefully
+                    tutor_response = TutorResponse(
+                        response_type="error",
+                        content="I couldn't determine how to help with that request.",
+                        data=None,
+                        tool_calls=None
+                    )
+            elif isinstance(result_model, TutorResponse):
+                tutor_response = result_model
+            else:
+                tutor_response = TutorResponse(
+                    response_type="text",
+                    content=str(result_model),
+                    data=None,
+                    tool_calls=None
+                )
+        else:
+            # Fallback for unexpected result format
+            tutor_response = TutorResponse(
+                response_type="text",
+                content=str(result),
+                data=None,
+                tool_calls=None
+            )
+
+        # Handle tool calls if present
+        if tutor_response.tool_calls:
+            for tool_call in tutor_response.tool_calls:
+                tool_result = await handle_tool_call(
+                    tool_call.name,
+                    tool_call.arguments,
+                    user_id
+                )
+                logger.info(f"Tool {tool_call.name} result: {tool_result}")
+
+        # Save user message to database
+        if not chat_id:
+            # Create new chat if needed
+            chat_response = supabase_admin.table("chats").insert({
+                "user_id": user_id,
+                "title": message[:50] + "..." if len(message) > 50 else message
+            }).execute()
+            chat_id = chat_response.data[0]["id"]
+
+        # Save user message
+        supabase_admin.table("chat_messages").insert({
+            "chat_id": chat_id,
+            "role": "user",
+            "content": {"content": message}
+        }).execute()
+
+        # Save assistant response
+        response_data = supabase_admin.table("chat_messages").insert({
+            "chat_id": chat_id,
+            "role": "assistant",
+            "content": tutor_response.model_dump()
+        }).execute()
+
+        # Update chat's updated_at timestamp
+        supabase_admin.table("chats").update({
+            "updated_at": "now()"
+        }).eq("id", chat_id).execute()
+
+        # Return the assistant message
+        assistant_message = response_data.data[0]
+        return jsonify(assistant_message), 200
+
+    except Exception as e:
+        logger.error(f"Error sending chat message: {e}", exc_info=True)
+        return jsonify({"error": f"An error occurred: {str(e)}"}), 500
+
+
+@app.route("/api/chats", methods=["GET"])
+@require_auth
+async def get_chats():
+    """
+    Get all chats for the current user.
+
+    Headers:
+        Authorization: Bearer <jwt_token>
+
+    Response:
+    [
+        {
+            "id": "uuid",
+            "user_id": "uuid",
+            "title": "chat title",
+            "created_at": "timestamp",
+            "updated_at": "timestamp"
+        }
+    ]
+    """
+    try:
+        user_id = request.user.id
+
+        response = supabase_admin.table("chats").select("*") \
+            .eq("user_id", user_id) \
+            .execute()
+
+        return jsonify(response.data), 200
+
+    except Exception as e:
+        logger.error(f"Error fetching chats: {e}", exc_info=True)
+        return jsonify({"error": f"An error occurred: {str(e)}"}), 500
+
+
+@app.route("/api/chats", methods=["POST"])
+@require_auth
+async def create_chat():
+    """
+    Create a new chat.
+
+    Request body:
+    {
+        "title": "optional title"
+    }
+
+    Headers:
+        Authorization: Bearer <jwt_token>
+
+    Response:
+    {
+        "id": "uuid",
+        "user_id": "uuid",
+        "title": "title or null",
+        "created_at": "timestamp",
+        "updated_at": "timestamp"
+    }
+    """
+    try:
+        data = request.get_json() or {}
+        title = data.get("title")
+        user_id = request.user.id
+
+        response = supabase_admin.table("chats").insert({
+            "user_id": user_id,
+            "title": title
+        }).execute()
+
+        return jsonify(response.data[0]), 201
+
+    except Exception as e:
+        logger.error(f"Error creating chat: {e}", exc_info=True)
+        return jsonify({"error": f"An error occurred: {str(e)}"}), 500
+
+
+@app.route("/api/chats/<chat_id>", methods=["DELETE"])
+@require_auth
+async def delete_chat(chat_id: str):
+    """
+    Delete a chat and all its messages.
+
+    Headers:
+        Authorization: Bearer <jwt_token>
+
+    Response:
+        {"success": true}
+    """
+    try:
+        user_id = request.user.id
+
+        # Verify the chat belongs to the user
+        chat_response = supabase_admin.table("chats").select("*") \
+            .eq("id", chat_id) \
+            .eq("user_id", user_id) \
+            .execute()
+
+        if not chat_response.data:
+            return jsonify({"error": "Chat not found"}), 404
+
+        # Delete the chat (messages will be cascaded)
+        supabase_admin.table("chats").delete().eq("id", chat_id).execute()
+
+        return jsonify({"success": True}), 200
+
+    except Exception as e:
+        logger.error(f"Error deleting chat: {e}", exc_info=True)
+        return jsonify({"error": f"An error occurred: {str(e)}"}), 500
+@app.route("/api/chats/<chat_id>", methods=["PUT"])
+@require_auth
+async def update_chat(chat_id: str):
+    """
+    Update a chat's title.
+
+    Request body:
+    {
+        "title": "new chat title"
+    }
+
+    Headers:
+        Authorization: Bearer <jwt_token>
+
+    Response:
+    {
+        "id": "uuid",
+        "user_id": "uuid",
+        "title": "new chat title",
+        "created_at": "timestamp",
+        "updated_at": "timestamp"
+    }
+    """
+    try:
+        data = request.get_json()
+
+        if not data or "title" not in data:
+            return jsonify({"error": "Request body must include 'title' field"}), 400
+
+        title = data.get("title")
+        user_id = request.user.id
+
+        # Verify the chat belongs to the user
+        chat_response = supabase_admin.table("chats").select("*") \
+            .eq("id", chat_id) \
+            .eq("user_id", user_id) \
+            .execute()
+
+        if not chat_response.data:
+            return jsonify({"error": "Chat not found"}), 404
+
+        # Update the chat title
+        response = supabase_admin.table("chats").update({"title": title}) \
+            .eq("id", chat_id) \
+            .execute()
+
+        return jsonify(response.data[0]), 200
+
+    except Exception as e:
+        logger.error(f"Error updating chat: {e}", exc_info=True)
+        return jsonify({"error": f"An error occurred: {str(e)}"}), 500
+
+
+@app.route("/api/chats/<chat_id>/messages", methods=["GET"])
+@require_auth
+async def get_chat_messages(chat_id: str):
+    """
+    Get all messages for a chat.
+
+    Headers:
+        Authorization: Bearer <jwt_token>
+
+    Response:
+    [
+        {
+            "id": "uuid",
+            "chat_id": "uuid",
+            "role": "user" | "assistant" | "system",
+            "content": {...},
+            "created_at": "timestamp"
+        }
+    ]
+    """
+    try:
+        user_id = request.user.id
+
+        # Verify the chat belongs to the user
+        chat_response = supabase_admin.table("chats").select("*") \
+            .eq("id", chat_id) \
+            .eq("user_id", user_id) \
+            .execute()
+
+        if not chat_response.data:
+            return jsonify({"error": "Chat not found"}), 404
+
+        # Fetch messages
+        messages_response = supabase_admin.table("chat_messages").select("*") \
+            .eq("chat_id", chat_id) \
+            .execute()
+
+        return jsonify(messages_response.data), 200
+
+    except Exception as e:
+        logger.error(f"Error fetching chat messages: {e}", exc_info=True)
+        return jsonify({"error": f"An error occurred: {str(e)}"}), 500
+
+
+@app.route("/api/word-pairs", methods=["GET"])
+@require_auth
+async def get_word_pairs():
+    """
+    Get all word pairs for the current user.
+
+    Headers:
+        Authorization: Bearer <jwt_token>
+
+    Response:
+    [
+        {
+            "id": "uuid",
+            "user_id": "uuid",
+            "source_word": "word",
+            "translated_word": "translation",
+            "context_sentence": "example",
+            "created_at": "timestamp"
+        }
+    ]
+    """
+    try:
+        user_id = request.user.id
+
+        response = supabase_admin.table("word_pairs").select("*") \
+            .eq("user_id", user_id) \
+            .execute()
+
+        return jsonify(response.data), 200
+
+    except Exception as e:
+        logger.error(f"Error fetching word pairs: {e}", exc_info=True)
+        return jsonify({"error": f"An error occurred: {str(e)}"}), 500
+@app.route("/api/word-pairs/<word_pair_id>", methods=["DELETE"])
+@require_auth
+async def delete_word_pair(word_pair_id: str):
+    """
+    Delete a word pair.
+
+    Headers:
+        Authorization: Bearer <jwt_token>
+
+    Response:
+        {"success": true}
+    """
+    try:
+        user_id = request.user.id
+
+        # Verify the word pair belongs to the user
+        word_pair_response = supabase_admin.table("word_pairs").select("*") \
+            .eq("id", word_pair_id) \
+            .eq("user_id", user_id) \
+            .execute()
+
+        if not word_pair_response.data:
+            return jsonify({"error": "Word pair not found"}), 404
+
+        # Delete the word pair
+        supabase_admin.table("word_pairs").delete().eq("id", word_pair_id).execute()
+
+        return jsonify({"success": True}), 200
+
+    except Exception as e:
+        logger.error(f"Error deleting word pair: {e}", exc_info=True)
+        return jsonify({"error": f"An error occurred: {str(e)}"}), 500
+
+
 if __name__ == "__main__":
     # Run the Flask app
     port = int(os.getenv("PORT", 8000))
diff --git a/ai/src/crews/chat_tutor_crew/__init__.py b/ai/src/crews/chat_tutor_crew/__init__.py
new file mode 100644
index 0000000..ff8211d
--- /dev/null
+++ b/ai/src/crews/chat_tutor_crew/__init__.py
@@ -0,0 +1,3 @@
+from .crew import ChatTutorCrew
+
+__all__ = ['ChatTutorCrew']
diff --git a/ai/src/crews/chat_tutor_crew/config/agents.yaml b/ai/src/crews/chat_tutor_crew/config/agents.yaml
new file mode 100644
index 0000000..8446d3f
--- /dev/null
+++ b/ai/src/crews/chat_tutor_crew/config/agents.yaml
@@ -0,0 +1,39 @@
+router_agent:
+  role: >
+    Language Tutor Router
+  goal: >
+    Understand the user's intent and route to the appropriate specialist
+    or respond directly to general language learning questions
+  backstory: >
+    You are an intelligent router for a language learning platform. You excel at
+    understanding user intent and determining the best way to help them. You can
+    recognize when users need translations, vocabulary suggestions, or general
+    language guidance. You only respond to language-related questions and politely
+    redirect off-topic conversations back to language learning. When users ask
+    to save words or would benefit from saving vocabulary they've learned, you
+    include a tool_call to save_word_pair in your response.
+
+translation_agent:
+  role: >
+    Translation Specialist
+  goal: >
+    Provide accurate translations and language explanations
+  backstory: >
+    You are a skilled translator and language educator with expertise in multiple
+    languages. You provide clear, accurate translations with helpful context about
+    grammar, usage, and cultural nuances. You understand the challenges of language
+    learners and explain concepts in an accessible way. After providing translations,
+    you often offer to save the word pair to the user's flashcard deck using the
+    save_word_pair tool so they can practice later.
+
+vocabulary_agent:
+  role: >
+    Vocabulary Curator
+  goal: >
+    Suggest new vocabulary words with context and examples
+  backstory: >
+    You are a vocabulary expert who selects words based on the user's target language
+    and proficiency level. You provide words that are practical, useful, and come
+    with clear translations and example sentences. You help users build their
+    vocabulary systematically by using the save_word_pair tool to add suggested
+    words to their flashcard deck.
diff --git a/ai/src/crews/chat_tutor_crew/config/tasks.yaml b/ai/src/crews/chat_tutor_crew/config/tasks.yaml
new file mode 100644
index 0000000..b25fca3
--- /dev/null
+++ b/ai/src/crews/chat_tutor_crew/config/tasks.yaml
@@ -0,0 +1,97 @@
+route_task:
+  description: >
+    Analyze the user's message and determine which specialized agent should handle it.
+ + User message: {user_message} + Target language: {target_language} + User context: {user_context} + + Conversation history: + {conversation_history} + + Your task: + 1. Understand what the user is asking for + 2. Check if this is a language-learning related request + 3. If it's NOT language-related, set should_respond to false and set rejection_reason + 4. If it IS language-related, determine which agent should handle it: + - Translation requests: Set agent to "translation" + - Vocabulary requests: Set agent to "vocabulary" + - Grammar questions: Set agent to "translation" (can handle explanations) + - General help: Set agent to "translation" + 5. Set up the delegation parameters for the chosen agent + + Important boundaries: + - Only respond to language-learning questions + - Politely decline off-topic requests (e.g., weather, news, general knowledge) + - When declining, suggest a language-related alternative + expected_output: > + A JSON object with the following structure: + { + "should_respond": true/false, + "agent": "translation" | "vocabulary" | null, + "rejection_reason": "reason if declining" | null, + "user_request": "original request summary", + "context_for_agent": "detailed context for the specialized agent" + } + agent: router_agent + +translation_task: + description: > + Handle a translation request from the user. + + User request: {user_request} + Target language: {target_language} + User context: {user_context} + + Your task: + 1. Provide the translation with accurate spelling and characters + 2. Include helpful context about grammar, usage, or cultural nuances + 3. Offer to save the word pair to the user's flashcard deck + 4. If appropriate, use the save_word_pair tool to save the word + + You have access to the save_word_pair tool which can save word pairs + to the user's flashcard deck. Use it when the user might want to remember + the translation. + + IMPORTANT: You must return your response as a JSON object with: + - response_type: MUST be one of "text", "word_suggestion", "save_confirmation", or "error" + For translation responses, use "text" + - content: Your response text + - tool_calls: Optional list of tool calls (e.g., save_word_pair) + expected_output: > + A JSON object with response_type (use "text" for translations), content, and optional tool_calls. + agent: translation_agent + depends_on: route_task + context_mapping: + user_request: "{route_task.user_request}" + target_language: "{target_language}" + user_context: "{user_context}" + +vocabulary_task: + description: > + Suggest a new vocabulary word for the user to learn. + + User request: {user_request} + Target language: {target_language} + User context: {user_context} + + Your task: + 1. Select an appropriate vocabulary word based on the target language + 2. Provide the word, its translation, and an example sentence + 3. Make the word practical and useful for language learners + 4. Include a tool_call for "save_word_pair" to add the word to their deck + + You have access to the save_word_pair tool which can save word pairs + to the user's flashcard deck. Always use it for vocabulary suggestions. + + Return your response as a JSON object with response_type="word_suggestion", + data containing word/translation/example, and tool_calls for saving the word. + expected_output: > + A JSON object with response_type="word_suggestion", data with word details, + and tool_calls for saving the word to the flashcard deck. 
+  agent: vocabulary_agent
+  depends_on: route_task
+  context_mapping:
+    user_request: "{route_task.user_request}"
+    target_language: "{target_language}"
+    user_context: "{user_context}"
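Before crew.py wires these tasks together, it helps to see the contract the router's `expected_output` above maps onto — the `RoutingDecision` Pydantic model (defined in schemas.py below). A quick sketch of that round trip, with illustrative field values:

```python
import json
from src.crews.chat_tutor_crew.schemas import RoutingDecision

raw = json.dumps({
    "should_respond": True,
    "agent": "vocabulary",
    "rejection_reason": None,
    "user_request": "Teach me a new Polish word",
    "context_for_agent": "Beginner learner; target language is Polish",
})

# The router's JSON output parses directly into the model the crew validates against
decision = RoutingDecision.model_validate_json(raw)
assert decision.should_respond and decision.agent == "vocabulary"
```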
diff --git a/ai/src/crews/chat_tutor_crew/crew.py b/ai/src/crews/chat_tutor_crew/crew.py
new file mode 100644
index 0000000..9b85811
--- /dev/null
+++ b/ai/src/crews/chat_tutor_crew/crew.py
@@ -0,0 +1,107 @@
+from crewai import Agent, Crew, Process, Task
+from crewai.project import CrewBase, agent, crew, task
+from crewai.agents.agent_builder.base_agent import BaseAgent
+from typing import List, Union
+from src.crews.base.llm import DEFAULT_LLM
+from src.crews.chat_tutor_crew.schemas import TutorResponse, RoutingDecision
+from src.crews.chat_tutor_crew.tools import save_word_pair_tool
+
+
+@CrewBase
+class ChatTutorCrew():
+    """Crew for handling chat interactions with the language tutor
+
+    This crew uses a conditional routing pattern where the router agent
+    determines which specialist agent should handle the request:
+    1. Router Agent (route_task) - Analyzes user message and determines intent
+    2. Translation Agent (translation_task) - Handles translations and grammar
+    3. Vocabulary Agent (vocabulary_task) - Suggests new vocabulary words
+
+    Phoenix traces will show the complete chain: router → specialist
+    in a single trace with clear delegation.
+    """
+    agents: List[BaseAgent]
+    tasks: List[Task]
+
+    @agent
+    def router_agent(self) -> Agent:
+        return Agent(
+            config=self.agents_config['router_agent'],
+            llm=DEFAULT_LLM
+        )
+
+    @agent
+    def translation_agent(self) -> Agent:
+        return Agent(
+            config=self.agents_config['translation_agent'],
+            llm=DEFAULT_LLM,
+            tools=[save_word_pair_tool]
+        )
+
+    @agent
+    def vocabulary_agent(self) -> Agent:
+        return Agent(
+            config=self.agents_config['vocabulary_agent'],
+            llm=DEFAULT_LLM,
+            tools=[save_word_pair_tool]
+        )
+
+    @task
+    def route_task(self) -> Task:
+        return Task(
+            config=self.tasks_config['route_task'],
+            output_pydantic=RoutingDecision,
+            callback=self._route_callback
+        )
+
+    @task
+    def translation_task(self) -> Task:
+        return Task(
+            config=self.tasks_config['translation_task'],
+            output_pydantic=TutorResponse,
+            agent=self.translation_agent()
+        )
+
+    @task
+    def vocabulary_task(self) -> Task:
+        return Task(
+            config=self.tasks_config['vocabulary_task'],
+            output_pydantic=TutorResponse,
+            agent=self.vocabulary_agent()
+        )
+
+    def _route_callback(self, output: Union[str, RoutingDecision]) -> List[Task]:
+        """
+        Callback function that determines which specialist task to run next
+        based on the router's decision.
+
+        This is the key to the conditional routing pattern - it allows the router
+        to dynamically delegate to the appropriate specialist.
+        """
+        # Normalize the routing decision to a plain dict: the task may hand
+        # back raw JSON text or an already-parsed RoutingDecision model
+        if isinstance(output, str):
+            import json
+            output = json.loads(output)
+        elif isinstance(output, RoutingDecision):
+            output = output.model_dump()
+
+        # If router declined, return no further tasks
+        if not output.get('should_respond', False):
+            return []
+
+        # Route to the appropriate specialist
+        agent = output.get('agent')
+
+        if agent == 'translation':
+            return [self.translation_task()]
+        elif agent == 'vocabulary':
+            return [self.vocabulary_task()]
+        else:
+            # No specific agent specified - default to translation
+            return [self.translation_task()]
+
+    @crew
+    def crew(self) -> Crew:
+        return Crew(
+            agents=self.agents,
+            tasks=self.tasks,
+            process=Process.sequential
+        )
diff --git a/ai/src/crews/chat_tutor_crew/schemas.py b/ai/src/crews/chat_tutor_crew/schemas.py
new file mode 100644
index 0000000..b2749cf
--- /dev/null
+++ b/ai/src/crews/chat_tutor_crew/schemas.py
@@ -0,0 +1,39 @@
+from pydantic import BaseModel, Field
+from typing import Optional, Literal, List, Dict, Any
+
+
+class ToolCall(BaseModel):
+    """Represents a tool call from the AI agent"""
+    name: str = Field(description="Name of the tool to call (e.g., 'save_word_pair')")
+    arguments: Dict[str, Any] = Field(description="Arguments to pass to the tool")
+
+
+class TutorResponse(BaseModel):
+    """Structured response from the chat tutor"""
+    response_type: Literal["text", "word_suggestion", "save_confirmation", "error"] = Field(
+        description="Type of response - determines UI rendering"
+    )
+    content: str = Field(description="The main text content of the response")
+    data: Optional[Dict[str, Any]] = Field(
+        None,
+        description="Additional data for special response types (word, translation, example for word_suggestion)"
+    )
+    tool_calls: Optional[List[ToolCall]] = Field(
+        None,
+        description="List of tool calls the agent wants to make"
+    )
+
+
+class RoutingDecision(BaseModel):
+    """Output from the router agent indicating which specialist should handle the request"""
+    should_respond: bool = Field(description="Whether the request is language-related and should be handled")
+    agent: Optional[Literal["translation", "vocabulary"]] = Field(
+        None,
+        description="Which specialist agent should handle the request"
+    )
+    rejection_reason: Optional[str] = Field(
+        None,
+        description="Reason for declining if should_respond is false"
+    )
+    user_request: str = Field(description="Summary of the user's original request")
+    context_for_agent: str = Field(description="Detailed context for the specialized agent")
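For reference, a well-formed specialist reply under these schemas looks like the following (a sketch; every value is illustrative):

```python
from src.crews.chat_tutor_crew.schemas import ToolCall, TutorResponse

response = TutorResponse(
    response_type="word_suggestion",   # drives UI rendering on the frontend
    content="A useful word: 'dziękuję' means 'thank you'.",
    data={
        "word": "thank you",
        "translation": "dziękuję",
        "example": "Dziękuję bardzo za pomoc!",
    },
    tool_calls=[
        ToolCall(
            name="save_word_pair",
            arguments={"source_word": "thank you", "translated_word": "dziękuję"},
        )
    ],
)

# This is the JSONB payload run.py persists as the assistant message content
payload = response.model_dump()
```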
diff --git a/ai/src/crews/chat_tutor_crew/tools.py b/ai/src/crews/chat_tutor_crew/tools.py
new file mode 100644
index 0000000..4b0ae9e
--- /dev/null
+++ b/ai/src/crews/chat_tutor_crew/tools.py
@@ -0,0 +1,68 @@
+"""
+CrewAI tools for the Chat Tutor Crew.
+
+This module defines tools that agents can use to take actions with side effects,
+such as saving word pairs to the user's flashcard deck.
+
+Note: When CrewAI agents call these tools, user_id may not be automatically injected.
+The tool handling layer (in run.py) is responsible for injecting user_id into tool calls.
+"""
+
+import logging
+from typing import Optional
+from crewai.tools import tool
+from supabase import create_client
+import os
+
+logger = logging.getLogger(__name__)
+
+# Supabase client kept available for future direct tool-side operations;
+# the actual save currently happens in run.py's handle_tool_call, not here
+SUPABASE_URL = os.getenv("SUPABASE_URL", "http://127.0.0.1:54321")
+SUPABASE_SERVICE_ROLE_KEY = os.getenv("SUPABASE_SERVICE_ROLE_KEY", "")
+supabase_admin = create_client(SUPABASE_URL, SUPABASE_SERVICE_ROLE_KEY)
+
+
+@tool("save_word_pair")
+def save_word_pair_tool(
+    source_word: str,
+    translated_word: str,
+    context_sentence: Optional[str] = None
+) -> str:
+    """
+    Save a word and its translation to the user's personal flashcard deck for future practice.
+
+    This tool should be used when:
+    - A user asks to save a word they just learned
+    - After providing a translation, if the user wants to remember it
+    - When suggesting new vocabulary that the user might want to practice
+
+    Args:
+        source_word: The word in the user's native language (e.g., "thank you")
+        translated_word: The translation in the target language (e.g., "dziękuję")
+        context_sentence: An example sentence using the word (optional)
+
+    Returns:
+        A confirmation message with details about the saved word pair.
+
+    Example:
+        save_word_pair_tool(
+            source_word="thank you",
+            translated_word="dziękuję",
+            context_sentence="Dziękuję bardzo za pomoc!"
+        )
+        Returns: "Done! I've added 'thank you → dziękuję' to your flashcard deck."
+
+    Note: user_id is injected automatically by the system and should not be provided by the agent.
+    """
+    # Validate required parameters
+    if not source_word or not translated_word:
+        return "Error: Both source_word and translated_word are required to save a word pair."
+
+    # Return a structured response indicating that the tool should be called
+    # The actual database operation will be handled by the tool handler in run.py
+    # This allows the system to inject user_id and handle the operation securely
+    return f"Tool call received for save_word_pair: {source_word} -> {translated_word}"
+
+
+# Export all tools for CrewAI integration
+__all__ = ["save_word_pair_tool"]
diff --git a/ai/src/crews/pronunciation_tips_crew/config/agents.yaml b/ai/src/crews/pronunciation_tips_crew/config/agents.yaml
index 31ad69e..28bdc43 100644
--- a/ai/src/crews/pronunciation_tips_crew/config/agents.yaml
+++ b/ai/src/crews/pronunciation_tips_crew/config/agents.yaml
@@ -6,4 +6,3 @@ pronunciation_expert:
     language learners master pronunciation by providing IPA transcriptions,
     syllable breakdowns, practical tips, and memory aids. You understand common
     pronunciation challenges that non-native speakers face.
- model: "groq/llama-3.3-70b-versatile" diff --git a/supabase/migrations/20250215000000_create_chat_tables.sql b/supabase/migrations/20250215000000_create_chat_tables.sql new file mode 100644 index 0000000..fe27ed8 --- /dev/null +++ b/supabase/migrations/20250215000000_create_chat_tables.sql @@ -0,0 +1,86 @@ +-- Chat tables for Smart Tutor Chat feature +-- These tables store chat sessions and messages between users and the AI tutor + +-- Create chats table +create table public.chats ( + id uuid default gen_random_uuid() primary key, + user_id uuid references auth.users(id) on delete cascade not null, + title text, + created_at timestamp with time zone default timezone('utc'::text, now()) not null, + updated_at timestamp with time zone default timezone('utc'::text, now()) not null +); + +-- Enable RLS on chats +alter table public.chats enable row level security; + +-- RLS policies for chats +create policy "Users can view their own chats" + on public.chats for select using (auth.uid() = user_id); + +create policy "Users can insert their own chats" + on public.chats for insert with check (auth.uid() = user_id); + +create policy "Users can update their own chats" + on public.chats for update using (auth.uid() = user_id); + +create policy "Users can delete their own chats" + on public.chats for delete using (auth.uid() = user_id); + +-- Create chat_messages table +create table public.chat_messages ( + id uuid default gen_random_uuid() primary key, + chat_id uuid references public.chats(id) on delete cascade not null, + role text not null check (role in ('user', 'assistant', 'system')), + content jsonb not null, + created_at timestamp with time zone default timezone('utc'::text, now()) not null +); + +-- Enable RLS on chat_messages +alter table public.chat_messages enable row level security; + +-- RLS policies for chat_messages +create policy "Users can view messages in their chats" + on public.chat_messages for select + using (exists ( + select 1 from public.chats where chats.id = chat_messages.chat_id and chats.user_id = auth.uid() + )); + +create policy "Users can insert messages in their chats" + on public.chat_messages for insert + with check (exists ( + select 1 from public.chats where chats.id = chat_messages.chat_id and chats.user_id = auth.uid() + )); + +-- Create indexes for faster queries +create index idx_chats_user_id_updated_at on public.chats(user_id, updated_at desc); +create index idx_chat_messages_chat_id_created_at on public.chat_messages(chat_id, created_at); + +-- Create word_pairs table for flashcard deck +create table public.word_pairs ( + id uuid default gen_random_uuid() primary key, + user_id uuid references auth.users(id) on delete cascade not null, + source_word text not null, + translated_word text not null, + context_sentence text, + created_at timestamp with time zone default timezone('utc'::text, now()) not null, + unique(user_id, source_word, translated_word) +); + +-- Enable RLS on word_pairs +alter table public.word_pairs enable row level security; + +-- RLS policies for word_pairs +create policy "Users can view their own word pairs" + on public.word_pairs for select using (auth.uid() = user_id); + +create policy "Users can insert their own word pairs" + on public.word_pairs for insert with check (auth.uid() = user_id); + +create policy "Users can update their own word pairs" + on public.word_pairs for update using (auth.uid() = user_id); + +create policy "Users can delete their own word pairs" + on public.word_pairs for delete using (auth.uid() = user_id); + 
+-- Create index for faster word pair queries +create index idx_word_pairs_user_id on public.word_pairs(user_id); diff --git a/web/.env.example b/web/.env.example index 6c4dea0..aa0d5f8 100644 --- a/web/.env.example +++ b/web/.env.example @@ -1,3 +1,8 @@ +# For local development (Vite dev server running on host) VITE_SUPABASE_URL=http://127.0.0.1:54321 VITE_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0 VITE_AI_SERVICE_URL=http://localhost:8000 + +# For Docker development (uncomment if running web container in Docker) +# VITE_SUPABASE_URL=http://host.docker.internal:54321 +# VITE_AI_SERVICE_URL=http://host.docker.internal:8000 diff --git a/web/Dockerfile b/web/Dockerfile index 6fac758..4214338 100644 --- a/web/Dockerfile +++ b/web/Dockerfile @@ -13,7 +13,8 @@ COPY package.json pnpm-lock.yaml* ./ FROM base AS development # Install all dependencies (including devDependencies) -RUN pnpm install --frozen-lockfile +# Use --no-frozen-lockfile for development to allow adding new dependencies +RUN pnpm install --no-frozen-lockfile # Copy source code COPY . . @@ -27,7 +28,7 @@ CMD ["pnpm", "run", "dev", "--host", "0.0.0.0"] FROM base AS builder # Install all dependencies -RUN pnpm install --frozen-lockfile +RUN pnpm install --no-frozen-lockfile # Copy source code COPY . . diff --git a/web/package.json b/web/package.json index 19675ad..8e749cc 100644 --- a/web/package.json +++ b/web/package.json @@ -19,6 +19,7 @@ "@radix-ui/react-dialog": "^1.1.15", "@radix-ui/react-dropdown-menu": "^2.1.16", "@radix-ui/react-label": "^2.1.7", + "@radix-ui/react-scroll-area": "^1.2.2", "@radix-ui/react-select": "^2.2.6", "@radix-ui/react-separator": "^1.1.7", "@radix-ui/react-slot": "^1.2.3", diff --git a/web/src/App.tsx b/web/src/App.tsx index 08534fd..3b80859 100644 --- a/web/src/App.tsx +++ b/web/src/App.tsx @@ -9,6 +9,8 @@ import SignUpPage from './pages/signup' import WordsPage from './pages/words' import RandomPhrasePage from './pages/random-phrase' import SettingsPage from './pages/settings' +import ChatPage from './pages/chat' +import FlashcardsPage from './pages/flashcards' const queryClient = new QueryClient({ defaultOptions: { @@ -37,6 +39,8 @@ function App() { } /> } /> } /> + } /> + } /> } /> diff --git a/web/src/components/app-sidebar.tsx b/web/src/components/app-sidebar.tsx index 2a4521d..568235c 100644 --- a/web/src/components/app-sidebar.tsx +++ b/web/src/components/app-sidebar.tsx @@ -4,6 +4,8 @@ import { IconDatabase, IconInnerShadowTop, IconSparkles, + IconMessage, + IconCards, } from "@tabler/icons-react" import { NavDocuments } from "@/components/nav-documents" @@ -44,6 +46,16 @@ const data = { url: "/random-phrase", icon: IconSparkles, }, + { + name: "Chat", + url: "/chat", + icon: IconMessage, + }, + { + name: "Flashcards", + url: "/flashcards", + icon: IconCards, + }, ], } diff --git a/web/src/components/chat/chat-input.tsx b/web/src/components/chat/chat-input.tsx new file mode 100644 index 0000000..0b26d1b --- /dev/null +++ b/web/src/components/chat/chat-input.tsx @@ -0,0 +1,60 @@ +import { useState, useRef, useEffect } from 'react' +import { IconSend } from '@tabler/icons-react' +import { Button } from '@/components/ui/button' +import { Textarea } from '@/components/ui/textarea' + +interface ChatInputProps { + onSend: (message: string) => void + disabled?: boolean +} + +export function ChatInput({ onSend, disabled = false }: ChatInputProps) { + const [message, 
setMessage] = useState('')
+  const textareaRef = useRef<HTMLTextAreaElement>(null)
+
+  const handleSend = () => {
+    const trimmed = message.trim()
+    if (trimmed && !disabled) {
+      onSend(trimmed)
+      setMessage('')
+    }
+  }
+
+  const handleKeyDown = (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
+    if (e.key === 'Enter' && !e.shiftKey) {
+      e.preventDefault()
+      handleSend()
+    }
+  }
+
+  // Auto-focus on mount
+  useEffect(() => {
+    textareaRef.current?.focus()
+  }, [])
+
+  return (
+    <div className="flex items-end gap-2 border-t p-4">
+      <Textarea
+        ref={textareaRef}
+        value={message}
+        onChange={(e) => setMessage(e.target.value)}
+        onKeyDown={handleKeyDown}
+        placeholder="Ask your language tutor..."
+        disabled={disabled}
+      />
+      <Button onClick={handleSend} disabled={disabled || !message.trim()} size="icon">
+        <IconSend className="size-4" />
+      </Button>
+    </div>
+  )
+}
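The component above ultimately drives `POST /api/chat/message`. As a closing smoke test, the endpoint can be exercised directly (assumes the backend is running locally and you hold a valid Supabase session JWT; the token value is a placeholder):

```python
import requests

AI_URL = "http://localhost:8000"
JWT = "<jwt_token>"  # placeholder: obtain from an authenticated Supabase session

resp = requests.post(
    f"{AI_URL}/api/chat/message",
    headers={"Authorization": f"Bearer {JWT}"},
    json={"chat_id": None, "message": "How do I say 'good morning' in Polish?"},
    timeout=120,  # crew runs involve several LLM calls
)
resp.raise_for_status()
assistant = resp.json()
print(assistant["content"])  # TutorResponse payload stored as JSONB
```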