diff --git a/.env.example b/.env.example new file mode 100644 index 00000000..2bf6ca23 --- /dev/null +++ b/.env.example @@ -0,0 +1,9 @@ +# Agent Configuration +AGENT_BASE_URL=http://127.0.0.1:3500 +AGENT_ID=agent_9ccc5545e93644bd9d7954e632a55a61 + +# Alternative: You can also set the full URL instead of BASE_URL + ID +# AGENT_FULL_URL=http://127.0.0.1:3500/agent_9ccc5545e93644bd9d7954e632a55a61 + +# Next.js Environment +NEXTJS_ENV=development \ No newline at end of file diff --git a/.github/workflows/sync-docs-full.yml b/.github/workflows/sync-docs-full.yml index 59f719d8..30c0f083 100644 --- a/.github/workflows/sync-docs-full.yml +++ b/.github/workflows/sync-docs-full.yml @@ -12,8 +12,7 @@ jobs: - name: Collect and validate files run: | set -euo pipefail - ./bin/collect-all-files.sh | \ - ./bin/validate-files.sh > all-files.txt + ./bin/collect-all-files.sh > all-files.txt echo "Files to sync:" cat all-files.txt diff --git a/.github/workflows/sync-docs.yml b/.github/workflows/sync-docs.yml index 9e28e839..110fff02 100644 --- a/.github/workflows/sync-docs.yml +++ b/.github/workflows/sync-docs.yml @@ -19,8 +19,7 @@ jobs: run: | set -euo pipefail git fetch origin ${{ github.event.before }} - ./bin/collect-changed-files.sh "${{ github.event.before }}" "${{ github.sha }}" | \ - ./bin/validate-files.sh > changed-files.txt + ./bin/collect-changed-files.sh "${{ github.event.before }}" "${{ github.sha }}" > changed-files.txt echo "Files to sync:" cat changed-files.txt diff --git a/.gitignore b/.gitignore index fa8246e7..d984131e 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,8 @@ yarn-error.log* # others .env*.local +.env.local +.env.production .vercel next-env.d.ts .open-next diff --git a/README.md b/README.md index 48a765c3..038932e0 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,30 @@ This project contains the Agentuity documentation website, created using Fumadocs and running on NextJS 15. -## Running +To make the search feature work, you must set up `.env.local` with the following steps. + +## Quick Start Guide + +1. **Navigate to the Agent Directory:** + ```bash + cd agent-docs + ``` + +2. **Start the Agent:** + ```bash + agentuity dev + ``` + +3. **Copy Environment Configuration:** + For local development, copy the `.env.example` file to `.env.local`: + ```bash + cp .env.example .env.local + ``` + +4. **Update `AGENT_ID`:** + If you are a contributor from outside the Agentuity organization, ensure that you update the `AGENT_ID` in your `.env.local` file with your specific agent ID from the `agentuity dev` run. 
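+
+   For example, a working `.env.local` might look like this (the agent ID below is a placeholder; use the one printed by `agentuity dev`):
+   ```bash
+   AGENT_BASE_URL=http://127.0.0.1:3500
+   AGENT_ID=agent_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+   ```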
+ +## Running Docs Application ```bash npm run dev diff --git a/config.ts b/agent-docs/config.ts similarity index 100% rename from config.ts rename to agent-docs/config.ts diff --git a/agent-docs/src/agents/doc-processing/docs-orchestrator.ts b/agent-docs/src/agents/doc-processing/docs-orchestrator.ts index 3cdefbb7..c78d8682 100644 --- a/agent-docs/src/agents/doc-processing/docs-orchestrator.ts +++ b/agent-docs/src/agents/doc-processing/docs-orchestrator.ts @@ -1,6 +1,6 @@ import type { AgentContext } from '@agentuity/sdk'; import { processDoc } from './docs-processor'; -import { VECTOR_STORE_NAME } from '../../../../config'; +import { VECTOR_STORE_NAME } from '../../../config'; import type { SyncPayload, SyncStats } from './types'; /** diff --git a/agent-docs/src/agents/doc-qa/index.ts b/agent-docs/src/agents/doc-qa/index.ts index 54351f89..d49cef91 100644 --- a/agent-docs/src/agents/doc-qa/index.ts +++ b/agent-docs/src/agents/doc-qa/index.ts @@ -1,122 +1,29 @@ import type { AgentContext, AgentRequest, AgentResponse } from '@agentuity/sdk'; -import { streamText } from 'ai'; -import { openai } from '@ai-sdk/openai'; - -import type { ChunkMetadata } from '../doc-processing/types'; -import { VECTOR_STORE_NAME, vectorSearchNumber } from '../../../../config'; -import type { RelevantDoc } from './types'; +import answerQuestion from './rag'; export default async function Agent( req: AgentRequest, resp: AgentResponse, ctx: AgentContext ) { - const prompt = await req.data.text(); - const relevantDocs = await retrieveRelevantDocs(ctx, prompt); - - const systemPrompt = ` -You are a developer documentation assistant. Your job is to answer user questions about the Agentuity platform as effectively and concisely as possible, adapting your style to the user's request. If the user asks for a direct answer, provide it without extra explanation. If they want an explanation, provide a clear and concise one. Use only the provided relevant documents to answer. - -You must not make up answers if the provided documents don't exist. You can be direct to the user that the documentations -don't seem to include what they are looking for. Lying to the user is prohibited as it only slows them down. Feel free to -suggest follow up questions if what they're asking for don't seem to have an answer in the document. You can provide them -a few related things that the documents contain that may interest them. - -For every answer, return a valid JSON object with: - 1. "answer": your answer to the user's question. - 2. "documents": an array of strings, representing the path of the documents you used to answer. - -If you use information from a document, include it in the "documents" array. If you do not use any documents, return an empty array for "documents". - -User question: -\`\`\` -${prompt} -\`\`\` + let jsonRequest: any = null; + let prompt: string; -Relevant documents: -${JSON.stringify(relevantDocs, null, 2)} - -Respond ONLY with a valid JSON object as described above. In your answer, you should format code blocks properly in Markdown style if the user needs answer in code block. 
-`.trim(); - - const llmResponse = await streamText({ - model: openai('gpt-4o'), - system: systemPrompt, - prompt: prompt, - maxTokens: 2048, - }); - - return resp.stream(llmResponse.textStream); -} - -async function retrieveRelevantDocs(ctx: AgentContext, prompt: string): Promise<RelevantDoc[]> { - const dbQuery = { - query: prompt, - limit: vectorSearchNumber - } try { - - - const vectors = await ctx.vector.search(VECTOR_STORE_NAME, dbQuery); - - const uniquePaths = new Set<string>(); - - vectors.forEach(vec => { - if (!vec.metadata) { - ctx.logger.warn('Vector missing metadata'); - return; - } - const path = typeof vec.metadata.path === 'string' ? vec.metadata.path : undefined; - if (!path) { - ctx.logger.warn('Vector metadata path is not a string'); - return; - } - uniquePaths.add(path); - }); - - const docs = await Promise.all( - Array.from(uniquePaths).map(async path => ({ - path, - content: await retrieveDocumentBasedOnPath(ctx, path) - })) - ); - - return docs; - } catch (err) { - ctx.logger.error('Error retrieving relevant docs: %o', err); - return []; - } -} - -async function retrieveDocumentBasedOnPath(ctx: AgentContext, path: string): Promise<string> { - const dbQuery = { - query: ' ', - limit: 10000, - metadata: { - path: path + jsonRequest = await req.data.json(); + if (typeof jsonRequest === 'object' && jsonRequest !== null && 'message' in jsonRequest) { + prompt = String(jsonRequest.message || ''); + } else { + prompt = JSON.stringify(jsonRequest); } + } catch { + prompt = await req.data.text(); } - try { - const vectors = await ctx.vector.search(VECTOR_STORE_NAME, dbQuery); - - // Sort vectors by chunk index and concatenate text - const sortedVectors = vectors - .map(vec => { - const metadata = vec.metadata as ChunkMetadata; - return { - metadata, - index: metadata.chunkIndex - }; - }) - .sort((a, b) => a.index - b.index); - const fullText = sortedVectors - .map(vec => vec.metadata.text) - .join('\n\n'); - - return fullText; - } catch (err) { - ctx.logger.error('Error retrieving document by path %s: %o', path, err); - return ''; + if (!prompt.trim()) { + return resp.text("How can I help you?"); } + + const answer = await answerQuestion(ctx, prompt); + return resp.json(answer); } \ No newline at end of file diff --git a/agent-docs/src/agents/doc-qa/prompt.ts b/agent-docs/src/agents/doc-qa/prompt.ts new file mode 100644 index 00000000..178fea60 --- /dev/null +++ b/agent-docs/src/agents/doc-qa/prompt.ts @@ -0,0 +1,60 @@ +import type { AgentContext } from '@agentuity/sdk'; +import { generateObject } from 'ai'; +import { openai } from '@ai-sdk/openai'; +import type { PromptType } from './types'; +import { PromptClassificationSchema } from './types'; + +/** + * Determines the prompt type based on the input string using LLM classification. + * Uses specific, measurable criteria to decide between Normal and Agentic RAG. + * @param ctx - Agent Context for logging and LLM access + * @param input - The input string to analyze + * @returns {Promise<PromptType>} - The determined PromptType + */ +export async function getPromptType(ctx: AgentContext, input: string): Promise<PromptType> { + const systemPrompt = ` +You are a query classifier that determines whether a user question requires simple retrieval (Normal) or complex reasoning (Thinking).
+ +Use these SPECIFIC criteria for classification: + +**THINKING (Agentic RAG) indicators:** +- Multi-step reasoning required (e.g., "compare and contrast", "analyze pros/cons") +- Synthesis across multiple concepts (e.g., "how does X relate to Y") +- Scenario analysis (e.g., "what would happen if...", "when should I use...") +- Troubleshooting/debugging questions requiring logical deduction +- Questions with explicit reasoning requests ("explain why", "walk me through") +- Comparative analysis ("which is better for...", "what are the trade-offs") + +**NORMAL (Simple RAG) indicators:** +- Direct factual lookups (e.g., "what is...", "how do I install...") +- Simple how-to questions with clear answers +- API reference queries +- Configuration/syntax questions +- Single-concept definitions + +Respond with a JSON object containing: +- type: "Normal" or "Thinking" +- confidence: 0.0-1.0 (how certain you are) +- reasoning: brief explanation of your classification + +Be conservative - when in doubt, default to "Normal" for better performance.`; + + try { + const result = await generateObject({ + model: openai('gpt-4o-mini'), // Use faster model for classification + system: systemPrompt, + prompt: `Classify this user query: "${input}"`, + schema: PromptClassificationSchema, + maxTokens: 200, + }); + + ctx.logger.info('Prompt classified as %s (confidence: %f): %s', + result.object.type, result.object.confidence, result.object.reasoning); + + return result.object.type as PromptType; + + } catch (error) { + ctx.logger.error('Error classifying prompt, defaulting to Normal: %o', error); + return 'Normal' as PromptType; + } +} diff --git a/agent-docs/src/agents/doc-qa/rag.ts b/agent-docs/src/agents/doc-qa/rag.ts new file mode 100644 index 00000000..42805791 --- /dev/null +++ b/agent-docs/src/agents/doc-qa/rag.ts @@ -0,0 +1,115 @@ +import type { AgentContext } from '@agentuity/sdk'; +import { generateObject } from 'ai'; +import { openai } from '@ai-sdk/openai'; + +import { retrieveRelevantDocs } from './retriever'; +import { AnswerSchema } from './types'; +import type { Answer } from './types'; + +export default async function answerQuestion(ctx: AgentContext, prompt: string) { + const relevantDocs = await retrieveRelevantDocs(ctx, prompt); + + const systemPrompt = ` +You are Agentuity's developer-documentation assistant. + +=== RULES === +1. Use ONLY the content inside tags to craft your reply. If the required information is missing, state that the docs do not cover it. +2. Never fabricate or guess undocumented details. +3. Ambiguity handling: + • When contains more than one distinct workflow or context that could satisfy the question, do **not** choose for the user. + • Briefly (≤ 2 sentences each) summarise each plausible interpretation and ask **one** clarifying question so the user can pick a path. + • Provide a definitive answer only after the ambiguity is resolved. +4. Answer style: + • If the question can be answered unambiguously from a single workflow, give a short, direct answer. + • Add an explanation only when the user explicitly asks for one. + • Format your response in **MDX (Markdown Extended)** format with proper syntax highlighting for code blocks. + • Use appropriate headings (##, ###) to structure longer responses. + • Wrap CLI commands in \`\`\`bash code blocks for proper syntax highlighting. + • Wrap code snippets in appropriate language blocks (e.g., \`\`\`typescript, \`\`\`json, \`\`\`javascript). + • Use **bold** for important terms and *italic* for emphasis when appropriate. 
+ • Use > blockquotes for important notes or warnings. +5. You may suggest concise follow-up questions or related topics that are present in . +6. Keep a neutral, factual tone. + +=== OUTPUT FORMAT === +Return **valid JSON only** matching this TypeScript type: + +type LlmAnswer = { + answer: string; // The reply in MDX format or the clarifying question + documents: string[]; // Paths of documents actually cited +} + +The "answer" field should contain properly formatted MDX content that will render beautifully in a documentation site. +The "documents" field must contain the path to the documents you used to answer the question. On top of the path, you may include a specific heading of the document so that the navigation will take the user to the exact point of the document you reference. To format the heading, use the following convention: append the heading to the path using a hash symbol (#) followed by the heading text, replacing spaces with hyphens (-) and converting all characters to lowercase. If there are multiple identical headings, append an index to the heading in the format -index (e.g., #example-3 for the third occurrence of "Example"). For example, if the document path is "/docs/guide" and the heading is "Getting Started", the formatted path would be "/docs/guide#getting-started". +If you cited no documents, return an empty array. Do NOT wrap the JSON in Markdown or add any extra keys. + +=== MDX FORMATTING EXAMPLES === +For CLI commands: +\`\`\`bash +agentuity agent create my-agent "My agent description" bearer +\`\`\` + +For code examples: +\`\`\`typescript +import type { AgentRequest, AgentResponse, AgentContext } from "@agentuity/sdk"; + +export default async function Agent(req: AgentRequest, resp: AgentResponse, ctx: AgentContext) { + return resp.json({hello: 'world'}); +} +\`\`\` + +For structured responses: +## Creating a New Agent + +To create a new agent, use the CLI command: + +\`\`\`bash +agentuity agent create [name] [description] [auth_type] +\`\`\` + +**Parameters:** +- \`name\`: The agent name +- \`description\`: Agent description +- \`auth_type\`: Either \`bearer\` or \`none\` + +> **Note**: This command will create the agent in the Agentuity Cloud and set up local files. + + +${prompt} + + + +${JSON.stringify(relevantDocs, null, 2)} + +`; + + try { + const result = await generateObject({ + model: openai('gpt-4o'), + system: systemPrompt, + prompt: prompt, + schema: AnswerSchema, + maxTokens: 2048, + }); + return result.object; + } catch (error) { + ctx.logger.error('Error generating answer: %o', error); + + // Fallback response with MDX formatting + const fallbackAnswer: Answer = { + answer: `## Error + +I apologize, but I encountered an error while processing your question. 
+ +**Please try:** +- Rephrasing your question +- Being more specific about what you're looking for +- Checking if your question relates to Agentuity's documented features + +> If the problem persists, please contact support.`, + documents: [] + }; + + return fallbackAnswer; + } +} \ No newline at end of file diff --git a/agent-docs/src/agents/doc-qa/retriever.ts b/agent-docs/src/agents/doc-qa/retriever.ts new file mode 100644 index 00000000..3c2b210d --- /dev/null +++ b/agent-docs/src/agents/doc-qa/retriever.ts @@ -0,0 +1,80 @@ +import type { AgentContext } from '@agentuity/sdk'; + +import type { ChunkMetadata } from '../doc-processing/types'; +import { VECTOR_STORE_NAME, vectorSearchNumber } from '../../../config'; +import type { RelevantDoc } from './types'; + +export async function retrieveRelevantDocs(ctx: AgentContext, prompt: string): Promise<RelevantDoc[]> { + const dbQuery = { + query: prompt, + limit: vectorSearchNumber + } + try { + const vectors = await ctx.vector.search(VECTOR_STORE_NAME, dbQuery); + + const uniquePaths = new Set<string>(); + + vectors.forEach(vec => { + if (!vec.metadata) { + ctx.logger.warn('Vector missing metadata'); + return; + } + const path = typeof vec.metadata.path === 'string' ? vec.metadata.path : undefined; + if (!path) { + ctx.logger.warn('Vector metadata path is not a string'); + return; + } + uniquePaths.add(path); + }); + + const docs = await Promise.all( + Array.from(uniquePaths).map(async path => ({ + path, + content: await retrieveDocumentBasedOnPath(ctx, path) + })) + ); + + return docs; + } catch (err) { + ctx.logger.error('Error retrieving relevant docs: %o', err); + return []; + } + } + + async function retrieveDocumentBasedOnPath(ctx: AgentContext, path: string): Promise<string> { + const dbQuery = { + query: ' ', + limit: 1000, + metadata: { + path: path + } + } + try { + const vectors = await ctx.vector.search(VECTOR_STORE_NAME, dbQuery); + + // Sort vectors by chunk index and concatenate text + const sortedVectors = vectors + .map(vec => { + const metadata = vec.metadata; + if (!metadata || typeof metadata.chunkIndex !== 'number' || typeof metadata.text !== 'string') { + ctx.logger.warn('Invalid chunk metadata structure for path %s', path); + return null; + } + return { + metadata, + index: metadata.chunkIndex as number + }; + }) + .filter(item => item !== null) + .sort((a, b) => a.index - b.index); + + const fullText = sortedVectors + .map(vec => vec.metadata.text) + .join('\n\n'); + + return fullText; + } catch (err) { + ctx.logger.error('Error retrieving document by path %s: %o', path, err); + return ''; + } + } \ No newline at end of file diff --git a/agent-docs/src/agents/doc-qa/types.ts b/agent-docs/src/agents/doc-qa/types.ts index 9fa227ff..e1d989d4 100644 --- a/agent-docs/src/agents/doc-qa/types.ts +++ b/agent-docs/src/agents/doc-qa/types.ts @@ -1,5 +1,25 @@ -export interface RelevantDoc { - path: string; - content: string; - } - \ No newline at end of file +import { z } from 'zod'; + +export const RelevantDocSchema = z.object({ + path: z.string(), + content: z.string() +}); + +export const AnswerSchema = z.object({ + answer: z.string(), + documents: z.array(z.string()) +}); + +export const PromptTypeSchema = z.enum(['Normal', 'Thinking']); + +export const PromptClassificationSchema = z.object({ + type: PromptTypeSchema, + confidence: z.number().min(0).max(1), + reasoning: z.string() +}); + +// Generated TypeScript types +export type RelevantDoc = z.infer<typeof RelevantDocSchema>; +export type Answer = z.infer<typeof AnswerSchema>; +export type PromptType = z.infer<typeof PromptTypeSchema>; +export type PromptClassification =
z.infer<typeof PromptClassificationSchema>; \ No newline at end of file diff --git a/app/api/search/route.ts b/app/api/search/route.ts index df889626..eea69788 100644 --- a/app/api/search/route.ts +++ b/app/api/search/route.ts @@ -1,4 +1,177 @@ import { source } from '@/lib/source'; import { createFromSource } from 'fumadocs-core/search/server'; +import { NextRequest } from 'next/server'; +import { getAgentConfig } from '@/lib/env'; -export const { GET } = createFromSource(source); +// Create the default search handler +const { GET: defaultSearchHandler } = createFromSource(source); + +function documentPathToUrl(docPath: string): string { + // Remove the .md or .mdx extension before any # symbol + return '/' + docPath.replace(/\.mdx?(?=#|$)/, ''); +} + +// Helper function to get document title and description from source +function getDocumentMetadata(docPath: string): { title: string; description?: string } { + try { + const urlPath = documentPathToUrl(docPath).substring(1).split('/'); + const page = source.getPage(urlPath); + + if (page?.data) { + return { + title: page.data.title || formatPathAsTitle(docPath), + description: page.data.description + }; + } + } catch (error) { + console.warn(`Failed to get metadata for ${docPath}:`, error); + } + + return { title: formatPathAsTitle(docPath) }; +} + +function formatPathAsTitle(docPath: string): string { + return docPath + .replace(/\.mdx?$/, '') + .split('/') + .map(segment => segment.charAt(0).toUpperCase() + segment.slice(1)) + .join(' > '); +} + +function getDocumentSnippet(docPath: string, maxLength: number = 150): string { + try { + const urlPath = documentPathToUrl(docPath).substring(1).split('/'); + const page = source.getPage(urlPath); + + if (page?.data.description) { + return page.data.description.length > maxLength + ? page.data.description.substring(0, maxLength) + '...' + : page.data.description; + } + + // Fallback description based on path + const pathParts = docPath.replace(/\.mdx?$/, '').split('/'); + const section = pathParts[0]; + const topic = pathParts[pathParts.length - 1]; + + return `Learn about ${topic} in the ${section} section of our documentation.`; + } catch { + return `Documentation for ${formatPathAsTitle(docPath)}`; + } +} + +export async function GET(request: NextRequest) { + const { searchParams } = new URL(request.url); + const query = searchParams.get('query'); + + // If no query, return empty results + if (!query || query.trim().length === 0) { + return Response.json([]); + } + + try { + const agentConfig = getAgentConfig(); + + // Prepare headers + const headers: Record<string, string> = { + 'Content-Type': 'application/json', + }; + + // Add bearer token if provided + if (agentConfig.bearerToken) { + headers['Authorization'] = `Bearer ${agentConfig.bearerToken}`; + } + + const response = await fetch(agentConfig.url, { + method: 'POST', + headers, + body: JSON.stringify({ message: query }), + }); + + if (!response.ok) { + throw new Error(`Agent API error: ${response.status} ${response.statusText}`); + } + + const data = await response.json(); + const results = []; + + if (data?.answer?.trim()) { + results.push({ + id: `ai-answer-${Date.now()}`, + url: '#ai-answer', + title: 'AI Answer', + content: data.answer.trim(), + type: 'ai-answer' + }); + } + + // 2. 
Add related documents as clickable results + if (data.documents && Array.isArray(data.documents) && data.documents.length > 0) { + const uniqueDocuments = [...new Set(data.documents as string[])]; + + uniqueDocuments.forEach((docPath: string, index: number) => { + try { + const url = documentPathToUrl(docPath); + const metadata = getDocumentMetadata(docPath); + const snippet = getDocumentSnippet(docPath); + + results.push({ + id: `doc-${Date.now()}-${index}`, + url: url, + title: metadata.title, + content: snippet, + type: 'document' + }); + } catch (error) { + console.warn(`Failed to process document ${docPath}:`, error); + } + }); + } + + console.log('Returning results:', results.length, 'items'); + return Response.json(results); + + } catch (error) { + console.error('Error calling AI agent:', error); + + // Fallback to original Fumadocs search behavior if AI fails + console.log('Falling back to default search'); + try { + const fallbackResponse = await defaultSearchHandler(request); + const fallbackData = await fallbackResponse.json(); + + // Add a note that this is fallback search + if (Array.isArray(fallbackData) && fallbackData.length > 0) { + return Response.json([ + { + id: 'fallback-notice', + url: '#fallback', + title: '⚠️ AI Search Unavailable', + content: 'AI search is temporarily unavailable. Showing traditional search results below.', + type: 'ai-answer' + }, + ...fallbackData.map((item: Record, index: number) => ({ + ...item, + id: `fallback-${index}`, + type: 'document' + })) + ]); + } + + return fallbackResponse; + } catch (fallbackError) { + console.error('Fallback search also failed:', fallbackError); + + // Return error message as AI answer + return Response.json([ + { + id: 'error-notice', + url: '#error', + title: '❌ Search Error', + content: 'Search is temporarily unavailable. 
Please try again later or check our documentation directly.', + type: 'ai-answer' + } + ]); + } + } +} diff --git a/app/layout.tsx b/app/layout.tsx index 3fea5030..3dc1264c 100644 --- a/app/layout.tsx +++ b/app/layout.tsx @@ -2,8 +2,15 @@ import { RootProvider } from "fumadocs-ui/provider"; import { GeistSans } from "geist/font/sans"; import type { ReactNode } from "react"; import type { Metadata } from 'next'; +import CustomSearchDialog from "@/components/CustomSearchDialog"; +import { validateEnv } from "@/lib/env"; import "./global.css"; +// Validate environment variables at startup (server-side only) +if (typeof window === 'undefined') { + validateEnv(); +} + export const metadata: Metadata = { metadataBase: new URL('https://www.agentuity.dev'), title: 'Agentuity Docs', @@ -82,7 +89,12 @@ export default function Layout({ children }: { children: ReactNode }) { return ( - + {children} diff --git a/bin/validate-files.sh b/bin/validate-files.sh deleted file mode 100755 index 6752d38d..00000000 --- a/bin/validate-files.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -set -euo pipefail - -# validate-files.sh -# Reads file paths from stdin, validates they exist and are safe -# Outputs only valid file paths - -echo "Validating file paths" >&2 - -valid_count=0 -invalid_count=0 - -# Read all input into an array first -mapfile -t files - -for file in "${files[@]}"; do - # Skip empty lines - if [ -z "$file" ]; then - continue - fi - - # Handle REMOVED: prefix - if [[ "$file" == REMOVED:* ]]; then - echo "$file" - ((valid_count++)) - continue - fi - - # Security check: prevent path traversal - if [[ "$file" == *".."* ]] || [[ "$file" == "/"* ]]; then - echo "Warning: Unsafe path detected, skipping: $file" >&2 - ((invalid_count++)) - continue - fi - - # Check if file exists - if [ -f "content/$file" ]; then - echo "$file" - echo " ✓ $file" >&2 - ((valid_count++)) - else - echo "Warning: File not found, skipping: $file" >&2 - ((invalid_count++)) - fi -done - -echo "Validation complete: $valid_count valid, $invalid_count invalid" >&2 - -# Exit with error if no valid files -if [ "$valid_count" -eq 0 ]; then - echo "Error: No valid files found" >&2 - exit 1 -fi \ No newline at end of file diff --git a/components/CustomSearchDialog.tsx b/components/CustomSearchDialog.tsx new file mode 100644 index 00000000..cb7443e5 --- /dev/null +++ b/components/CustomSearchDialog.tsx @@ -0,0 +1,4 @@ +'use client'; + +import { default as CustomSearchDialogImpl } from './CustomSearchDialog/index'; +export default CustomSearchDialogImpl; \ No newline at end of file diff --git a/components/CustomSearchDialog/MessageList.tsx b/components/CustomSearchDialog/MessageList.tsx new file mode 100644 index 00000000..3f1cd9c1 --- /dev/null +++ b/components/CustomSearchDialog/MessageList.tsx @@ -0,0 +1,128 @@ +'use client'; + +import { useRef, useEffect } from 'react'; +import ReactMarkdown from 'react-markdown'; +import remarkGfm from 'remark-gfm'; +import { User, HelpCircle, Loader2 } from 'lucide-react'; +import { AgentuityLogo } from '../icons/AgentuityLogo'; +import { MessageListProps, Message } from './types'; + +export function MessageList({ messages, loading, handleSourceClick }: MessageListProps) { + const messagesEndRef = useRef(null); + + // Auto-scroll to bottom when new messages arrive + useEffect(() => { + messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' }); + }, [messages]); + + return ( +
+ {messages.length === 0 && } + + {messages.map((message) => ( + + ))} + + {loading && } + +
+
+ ); +} + +function EmptyState() { + return ( +
+
+ +
+

+ Ask a question +

+

+ Search our documentation or ask about Agentuity features +

+
+ ); +} + +function LoadingIndicator() { + return ( +
+
+ +
+
+
+ + Searching... +
+
+
+ ); +} + +interface MessageItemProps { + message: Message; + handleSourceClick: (url: string) => void; +} + +function MessageItem({ message, handleSourceClick }: MessageItemProps) { + return ( +
+ {message.type === 'ai' && ( +
+ +
+ )} + +
+
+ {message.type === 'ai' ? ( +
+ + {message.content} + +
+ ) : ( +

{message.content}

+ )} +
+ + {/* Sources for AI messages */} + {message.type === 'ai' && message.sources && message.sources.length > 0 && ( +
+

Related:

+ {message.sources.map((source) => ( + + ))} +
+ )} + +
+ {message.timestamp.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })} +
+
+ + {message.type === 'user' && ( +
+ +
+ )} +
+ ); +} \ No newline at end of file diff --git a/components/CustomSearchDialog/SearchInput.tsx b/components/CustomSearchDialog/SearchInput.tsx new file mode 100644 index 00000000..72aaa9bc --- /dev/null +++ b/components/CustomSearchDialog/SearchInput.tsx @@ -0,0 +1,64 @@ +'use client'; + +import { useRef, useEffect, KeyboardEvent } from 'react'; +import { Send } from 'lucide-react'; +import { SearchInputProps } from './types'; + +export function SearchInput({ currentInput, setCurrentInput, loading, sendMessage }: SearchInputProps) { + const textareaRef = useRef(null); + + useEffect(() => { + const textarea = textareaRef.current; + if (!textarea) return; + + textarea.style.height = 'auto'; + + const newHeight = Math.min(textarea.scrollHeight, 150); // Max height of 150px + textarea.style.height = `${newHeight}px`; + }, [currentInput]); + + useEffect(() => { + textareaRef.current?.focus(); + }, []); + + const handleKeyDown = (e: KeyboardEvent) => { + if (e.key === 'Enter' && (!e.shiftKey || e.ctrlKey || e.metaKey)) { + e.preventDefault(); + if (currentInput.trim()) { + sendMessage(currentInput); + } + } + }; + + return ( +
+
+
+
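
As a quick sanity check of the setup above, the new `/api/search` route can be exercised directly once the docs site and the agent are both running locally. This is only a sketch; it assumes the Next.js dev server is listening on port 3000, and the query text is illustrative:

```bash
# Ask the AI-backed search endpoint a question (the query must be URL-encoded).
curl "http://localhost:3000/api/search?query=how%20do%20I%20create%20an%20agent"
# Expected: a JSON array whose first item is the AI answer (type "ai-answer"),
# followed by related documentation pages (type "document").
```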