diff --git a/apps/docs/content/docs/chat.mdx b/apps/docs/content/docs/chat.mdx
index a546a57..d244973 100644
--- a/apps/docs/content/docs/chat.mdx
+++ b/apps/docs/content/docs/chat.mdx
@@ -85,6 +85,89 @@ Tool renderers receive the tool result `data` as props. Return any React compone
---
+## Compound Components
+
+For full layout control, use the compound component pattern:
+
+### Basic Structure
+
+```tsx
+import { CopilotChat } from '@yourgpt/copilot-sdk/ui';
+
+<CopilotChat>
+  {/* Custom home screen */}
+  <CopilotChat.HomeView>
+    <h1>Welcome!</h1>
+    <CopilotChat.Suggestions />
+    <CopilotChat.Input />
+  </CopilotChat.HomeView>
+
+  {/* Chat view uses default UI */}
+  <CopilotChat.ChatView />
+</CopilotChat>
+```
+
+### Available Components
+
+| Component | Description |
+|-----------|-------------|
+| `CopilotChat.Root` | Root container (alias for CopilotChat) |
+| `CopilotChat.HomeView` | Shows when no messages (home screen) |
+| `CopilotChat.ChatView` | Shows when there are messages |
+| `CopilotChat.Header` | Header slot (view-specific when nested) |
+| `CopilotChat.Footer` | Footer slot |
+| `CopilotChat.Input` | Auto-connected input |
+| `CopilotChat.Suggestions` | Suggestion buttons |
+| `CopilotChat.BackButton` | Starts new chat (requires persistence) |
+| `CopilotChat.ThreadPicker` | Thread switcher (requires persistence) |
+
+### View-Specific Headers
+
+Place `Header` inside `ChatView` to show it only in chat view:
+
+```tsx
+<CopilotChat>
+  {/* Home - no header */}
+  <CopilotChat.HomeView>
+    <h1>Welcome!</h1>
+    <CopilotChat.Input />
+  </CopilotChat.HomeView>
+
+  <CopilotChat.ChatView>
+    {/* Chat - header with navigation */}
+    <CopilotChat.Header>
+      <CopilotChat.BackButton />
+      <span>Conversation</span>
+      <CopilotChat.ThreadPicker />
+    </CopilotChat.Header>
+    {/* Default messages + input render automatically */}
+  </CopilotChat.ChatView>
+</CopilotChat>
+```
+
+### Custom BackButton
+
+```tsx
+<CopilotChat.BackButton>
+  ← Back to Home
+</CopilotChat.BackButton>
+```
+
+### Context Hook
+
+Access chat context in custom components:
+
+```tsx
+import { useCopilotChatContext } from '@yourgpt/copilot-sdk/ui';
+
+function CustomComponent() {
+ const { view, send, isLoading } = useCopilotChatContext();
+  return <button disabled={isLoading} onClick={() => send('Hello')}>Send</button>;
+}
+```
+
+---
+
## Build Your Own Chat
Use hooks for full control:
diff --git a/apps/docs/content/docs/customizations.mdx b/apps/docs/content/docs/customizations.mdx
index 7d1438a..ec6847e 100644
--- a/apps/docs/content/docs/customizations.mdx
+++ b/apps/docs/content/docs/customizations.mdx
@@ -144,6 +144,13 @@ The SDK exposes semantic CSS classes for advanced theme customization:
| `csdk-button-attach` | Attachment button |
| `csdk-followup` | Follow-up container |
| `csdk-followup-button` | Follow-up buttons |
+| `csdk-chat-header` | Compound Header slot |
+| `csdk-chat-footer` | Compound Footer slot |
+| `csdk-chat-home-view` | Home view container |
+| `csdk-chat-view` | Chat view container |
+| `csdk-back-button` | Back/New chat button |
+| `csdk-compound-input` | Compound Input wrapper |
+| `csdk-compound-suggestions` | Compound Suggestions wrapper |
### Example: Custom Theme with Component Styles
diff --git a/apps/docs/content/docs/server.mdx b/apps/docs/content/docs/server.mdx
index 5d12720..a8fd2dc 100644
--- a/apps/docs/content/docs/server.mdx
+++ b/apps/docs/content/docs/server.mdx
@@ -39,9 +39,9 @@ The Copilot SDK frontend connects to your backend API endpoint. Your server:
### Response
-The SDK supports two response formats:
+The SDK supports three response formats:
-
+
Simple text streaming for basic chat (no tools).
@@ -67,11 +67,29 @@ The SDK supports two response formats:
Use `result.toDataStreamResponse()` to return this format.
+
+ Complete response in a single JSON object. Use for batch processing, logging, or simpler integrations.
+
+ **Content-Type:** `application/json`
+
+ ```json
+ {
+ "text": "Hello! How can I help you today?",
+ "usage": {
+ "promptTokens": 10,
+ "completionTokens": 8,
+ "totalTokens": 18
+ }
+ }
+ ```
+
+ Use `generateText()` or `runtime.chat()` to return this format.
+
---
-## Framework Examples
+## Framework Examples (Streaming)
@@ -161,57 +179,347 @@ The SDK supports two response formats:
---
-## With Tools
+## Framework Examples (Non-Streaming)
-Add tools to let the AI call functions on your server:
+For use cases where you need the complete response before returning (batch processing, logging, simpler integration), use the non-streaming approach.
-```ts title="app/api/chat/route.ts"
-import { streamText, tool } from '@yourgpt/llm-sdk';
-import { openai } from '@yourgpt/llm-sdk/openai';
-import { z } from 'zod';
+### Response Format
-export async function POST(req: Request) {
- const { messages } = await req.json();
-
- const result = await streamText({
- model: openai('gpt-4o'),
- system: 'You are a helpful assistant.',
- messages,
- tools: {
- getWeather: tool({
- description: 'Get current weather for a city',
- parameters: z.object({
- city: z.string().describe('City name'),
- }),
- execute: async ({ city }) => {
- const data = await fetchWeatherAPI(city);
- return { temperature: data.temp, condition: data.condition };
- },
- }),
- searchProducts: tool({
- description: 'Search the product database',
- parameters: z.object({
- query: z.string(),
- limit: z.number().optional().default(10),
- }),
- execute: async ({ query, limit }) => {
- return await db.products.search(query, limit);
- },
- }),
- },
- maxSteps: 5,
- });
+**Content-Type:** `application/json`
- return result.toDataStreamResponse();
+```json
+{
+ "text": "Hello! How can I help you today?",
+ "usage": {
+ "promptTokens": 10,
+ "completionTokens": 8,
+ "totalTokens": 18
+ }
}
```
+### Using generateText
+
+
+
+ ```ts title="app/api/chat/route.ts"
+ import { generateText } from '@yourgpt/llm-sdk';
+ import { openai } from '@yourgpt/llm-sdk/openai';
+
+ export async function POST(req: Request) {
+ const { messages } = await req.json();
+
+ const result = await generateText({
+ model: openai('gpt-4o'),
+ system: 'You are a helpful assistant.',
+ messages,
+ });
+
+ return Response.json({
+ text: result.text,
+ usage: result.usage,
+ });
+ }
+ ```
+
+
+ ```ts title="server.ts"
+ import express from 'express';
+ import cors from 'cors';
+ import { generateText } from '@yourgpt/llm-sdk';
+ import { openai } from '@yourgpt/llm-sdk/openai';
+
+ const app = express();
+ app.use(cors());
+ app.use(express.json());
+
+ app.post('/api/chat', async (req, res) => {
+ const { messages } = req.body;
+
+ const result = await generateText({
+ model: openai('gpt-4o'),
+ system: 'You are a helpful assistant.',
+ messages,
+ });
+
+ res.json({
+ text: result.text,
+ usage: result.usage,
+ });
+ });
+
+ app.listen(3001, () => console.log('Server on http://localhost:3001'));
+ ```
+
+
+ ```ts title="server.ts"
+ import { createServer } from 'http';
+ import { generateText } from '@yourgpt/llm-sdk';
+ import { openai } from '@yourgpt/llm-sdk/openai';
+
+ createServer(async (req, res) => {
+ if (req.method === 'POST' && req.url === '/api/chat') {
+ const body = await getBody(req);
+ const { messages } = JSON.parse(body);
+
+ const result = await generateText({
+ model: openai('gpt-4o'),
+ system: 'You are a helpful assistant.',
+ messages,
+ });
+
+ res.writeHead(200, { 'Content-Type': 'application/json' });
+ res.end(JSON.stringify({
+ text: result.text,
+ usage: result.usage,
+ }));
+ }
+ }).listen(3001);
+
+  function getBody(req: any): Promise<string> {
+ return new Promise((resolve) => {
+ let data = '';
+ req.on('data', (chunk: any) => data += chunk);
+ req.on('end', () => resolve(data));
+ });
+ }
+ ```
+
+
+
+### Using Runtime chat()
+
+The runtime also provides a `chat()` method for non-streaming:
+
+
+
+ ```ts title="app/api/chat/route.ts"
+ import { createRuntime } from '@yourgpt/llm-sdk';
+ import { createOpenAI } from '@yourgpt/llm-sdk/openai';
+
+ const runtime = createRuntime({
+ provider: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+ model: 'gpt-4o',
+ systemPrompt: 'You are a helpful assistant.',
+ });
+
+ export async function POST(req: Request) {
+ const body = await req.json();
+
+ const { text, messages, toolCalls } = await runtime.chat(body);
+
+ return Response.json({
+ text,
+ messages,
+ toolCalls,
+ });
+ }
+ ```
+
+
+ ```ts title="server.ts"
+ import express from 'express';
+ import cors from 'cors';
+ import { createRuntime } from '@yourgpt/llm-sdk';
+ import { createOpenAI } from '@yourgpt/llm-sdk/openai';
+
+ const app = express();
+ app.use(cors());
+ app.use(express.json());
+
+ const runtime = createRuntime({
+ provider: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+ model: 'gpt-4o',
+ systemPrompt: 'You are a helpful assistant.',
+ });
+
+ app.post('/api/chat', async (req, res) => {
+ const { text, messages, toolCalls } = await runtime.chat(req.body);
+
+ res.json({
+ text,
+ messages,
+ toolCalls,
+ });
+ });
+
+ app.listen(3001, () => console.log('Server on http://localhost:3001'));
+ ```
+
+
+ ```ts title="server.ts"
+ import { createServer } from 'http';
+ import { createRuntime } from '@yourgpt/llm-sdk';
+ import { createOpenAI } from '@yourgpt/llm-sdk/openai';
+
+ const runtime = createRuntime({
+ provider: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+ model: 'gpt-4o',
+ systemPrompt: 'You are a helpful assistant.',
+ });
+
+ createServer(async (req, res) => {
+ if (req.method === 'POST' && req.url === '/api/chat') {
+ const body = await getBody(req);
+ const chatRequest = JSON.parse(body);
+
+ const { text, messages, toolCalls } = await runtime.chat(chatRequest);
+
+ res.writeHead(200, { 'Content-Type': 'application/json' });
+ res.end(JSON.stringify({ text, messages, toolCalls }));
+ }
+ }).listen(3001);
+
+  function getBody(req: any): Promise<string> {
+ return new Promise((resolve) => {
+ let data = '';
+ req.on('data', (chunk: any) => data += chunk);
+ req.on('end', () => resolve(data));
+ });
+ }
+ ```
+
+
+
+### Using stream().collect()
+
+You can also collect a stream into a single response:
+
+```ts
+app.post('/api/chat', async (req, res) => {
+ const { text, messages, toolCalls } = await runtime.stream(req.body).collect();
+
+ res.json({ text, messages, toolCalls });
+});
+```
+
-Use `toDataStreamResponse()` when using tools to stream structured events including tool calls and results.
+**When to use non-streaming:**
+- Background processing or batch operations
+- When you need the full response before taking action
+- Simpler integration without SSE handling
+- Logging or analytics that need complete responses
---
+## With Tools
+
+Add tools to let the AI call functions on your server:
+
+
+
+ ```ts title="app/api/chat/route.ts"
+ import { streamText, tool } from '@yourgpt/llm-sdk';
+ import { openai } from '@yourgpt/llm-sdk/openai';
+ import { z } from 'zod';
+
+ export async function POST(req: Request) {
+ const { messages } = await req.json();
+
+ const result = await streamText({
+ model: openai('gpt-4o'),
+ system: 'You are a helpful assistant.',
+ messages,
+ tools: {
+ getWeather: tool({
+ description: 'Get current weather for a city',
+ parameters: z.object({
+ city: z.string().describe('City name'),
+ }),
+ execute: async ({ city }) => {
+ const data = await fetchWeatherAPI(city);
+ return { temperature: data.temp, condition: data.condition };
+ },
+ }),
+ searchProducts: tool({
+ description: 'Search the product database',
+ parameters: z.object({
+ query: z.string(),
+ limit: z.number().optional().default(10),
+ }),
+ execute: async ({ query, limit }) => {
+ return await db.products.search(query, limit);
+ },
+ }),
+ },
+ maxSteps: 5,
+ });
+
+ return result.toDataStreamResponse();
+ }
+ ```
+
+
+ Use `toDataStreamResponse()` when using tools to stream structured events including tool calls and results.
+
+
+
+ ```ts title="app/api/chat/route.ts"
+ import { generateText, tool } from '@yourgpt/llm-sdk';
+ import { openai } from '@yourgpt/llm-sdk/openai';
+ import { z } from 'zod';
+
+ export async function POST(req: Request) {
+ const { messages } = await req.json();
+
+ const result = await generateText({
+ model: openai('gpt-4o'),
+ system: 'You are a helpful assistant.',
+ messages,
+ tools: {
+ getWeather: tool({
+ description: 'Get current weather for a city',
+ parameters: z.object({
+ city: z.string().describe('City name'),
+ }),
+ execute: async ({ city }) => {
+ const data = await fetchWeatherAPI(city);
+ return { temperature: data.temp, condition: data.condition };
+ },
+ }),
+ searchProducts: tool({
+ description: 'Search the product database',
+ parameters: z.object({
+ query: z.string(),
+ limit: z.number().optional().default(10),
+ }),
+ execute: async ({ query, limit }) => {
+ return await db.products.search(query, limit);
+ },
+ }),
+ },
+ maxSteps: 5,
+ });
+
+ return Response.json({
+ text: result.text,
+ toolCalls: result.toolCalls,
+ toolResults: result.toolResults,
+ usage: result.usage,
+ });
+ }
+ ```
+
+ The response includes all tool calls and results:
+
+ ```json
+ {
+ "text": "The weather in Tokyo is 22°C and sunny.",
+ "toolCalls": [
+ { "id": "call_123", "name": "getWeather", "args": { "city": "Tokyo" } }
+ ],
+ "toolResults": [
+ { "toolCallId": "call_123", "result": { "temperature": 22, "condition": "sunny" } }
+ ],
+ "usage": { "promptTokens": 50, "completionTokens": 25, "totalTokens": 75 }
+ }
+ ```
+
+
+
+---
+
## Runtime API (Advanced)
For more control over the server, use `createRuntime()` instead of `streamText()`:
diff --git a/examples/nextjs-demo/app/compound-test/page.tsx b/examples/nextjs-demo/app/compound-test/page.tsx
new file mode 100644
index 0000000..6641594
--- /dev/null
+++ b/examples/nextjs-demo/app/compound-test/page.tsx
@@ -0,0 +1,235 @@
+"use client";
+
+import { CopilotProvider } from "@yourgpt/copilot-sdk/react";
+import { CopilotChat, useCopilotChatContext } from "@yourgpt/copilot-sdk/ui";
+import {
+ Sparkles,
+ BarChart3,
+ PenLine,
+ Lightbulb,
+ Code2,
+ Bot,
+ MessageSquare,
+ ChevronLeft,
+} from "lucide-react";
+
+// Suggestion card component
+function SuggestionCard({
+ icon,
+ title,
+ description,
+ message,
+}: {
+ icon: React.ReactNode;
+ title: string;
+ description: string;
+ message: string;
+}) {
+ const { send } = useCopilotChatContext();
+
+  return (
+    <button type="button" onClick={() => send(message)}>
+      {icon}
+      <div>
+        <div>{title}</div>
+        <div>{description}</div>
+      </div>
+    </button>
+  );
+}
+
+// Wrapper for suggestion cards (needs context)
+function SuggestionCards() {
+  return (
+    <div>
+      <SuggestionCard
+        icon={<BarChart3 />}
+        title="Analyze my data"
+        description="Get insights from your files"
+        message="Help me analyze my data and get insights"
+      />
+      <SuggestionCard
+        icon={<PenLine />}
+        title="Help me write"
+        description="Draft emails, documents, and more"
+        message="Help me write a professional email"
+      />
+      <SuggestionCard
+        icon={<Lightbulb />}
+        title="Brainstorm ideas"
+        description="Creative solutions for any challenge"
+        message="Help me brainstorm ideas for my project"
+      />
+      <SuggestionCard
+        icon={<Code2 />}
+        title="Write some code"
+        description="Build features, fix bugs, explain code"
+        message="Help me write code for a new feature"
+      />
+    </div>
+  );
+}
diff --git a/examples/nextjs-demo/app/page.tsx b/examples/nextjs-demo/app/page.tsx
index ce58135..14645a6 100644
--- a/examples/nextjs-demo/app/page.tsx
+++ b/examples/nextjs-demo/app/page.tsx
@@ -11,6 +11,11 @@ const demos = [
href: "/providers",
description: "OpenAI, Anthropic, Google side-by-side",
},
+ {
+ name: "Compound Components",
+ href: "/compound-test",
+    description: "Custom home screen with CopilotChat.HomeView and CopilotChat.Input",
+ },
{
name: "Ticketing Demo",
href: "/ticketing-demo",
diff --git a/packages/copilot-sdk/package.json b/packages/copilot-sdk/package.json
index d0e1227..5e7f285 100644
--- a/packages/copilot-sdk/package.json
+++ b/packages/copilot-sdk/package.json
@@ -1,6 +1,6 @@
{
"name": "@yourgpt/copilot-sdk",
- "version": "1.3.0",
+ "version": "1.4.1",
"description": "Build AI copilots with app context awareness",
"type": "module",
"exports": {
diff --git a/packages/copilot-sdk/src/react/provider/CopilotProvider.tsx b/packages/copilot-sdk/src/react/provider/CopilotProvider.tsx
index 5725775..2e1b306 100644
--- a/packages/copilot-sdk/src/react/provider/CopilotProvider.tsx
+++ b/packages/copilot-sdk/src/react/provider/CopilotProvider.tsx
@@ -62,6 +62,8 @@ export interface CopilotProviderProps {
onError?: (error: Error) => void;
/** Enable/disable streaming (default: true) */
streaming?: boolean;
+ /** Custom headers to send with each request */
+  headers?: Record<string, string>;
/** Enable debug logging */
debug?: boolean;
/** Max tool execution iterations (default: 20) */
@@ -150,6 +152,7 @@ export function CopilotProvider({
onMessagesChange,
onError,
streaming,
+ headers,
debug = false,
maxIterations,
maxIterationsMessage,
@@ -207,6 +210,7 @@ export function CopilotProvider({
threadId,
initialMessages: uiInitialMessages,
streaming,
+ headers,
debug,
maxIterations,
maxIterationsMessage,
diff --git a/packages/copilot-sdk/src/ui/components/composed/chat/chat.tsx b/packages/copilot-sdk/src/ui/components/composed/chat/chat.tsx
index 1710762..0ce0c1f 100644
--- a/packages/copilot-sdk/src/ui/components/composed/chat/chat.tsx
+++ b/packages/copilot-sdk/src/ui/components/composed/chat/chat.tsx
@@ -1,6 +1,13 @@
"use client";
-import React, { useState, useCallback, useRef, useId } from "react";
+import React, {
+ useState,
+ useCallback,
+ useRef,
+ useId,
+ createContext,
+ useContext,
+} from "react";
import { cn } from "../../../lib/utils";
import {
ChatContainerRoot,
@@ -17,14 +24,420 @@ import {
} from "../../ui/prompt-input";
import { Loader } from "../../ui/loader";
import { Button } from "../../ui/button";
-import { StopIcon, PlusIcon, ArrowUpIcon, XIcon } from "../../icons";
+import {
+ StopIcon,
+ PlusIcon,
+ ArrowUpIcon,
+ XIcon,
+ ChevronLeftIcon,
+} from "../../icons";
import CopilotSDKLogo from "../../icons/copilot-sdk-logo";
import { ChatHeader } from "./chat-header";
import { Suggestions } from "./suggestions";
import { DefaultMessage } from "./default-message";
import { ChatWelcome } from "./chat-welcome";
-import type { ChatProps, PendingAttachment } from "./types";
+import type { ChatProps, PendingAttachment, MessageAttachment } from "./types";
import type { ToolExecutionData } from "../tools";
+import type { Thread } from "../../../../core/types/thread";
+import { ThreadPicker, type ThreadPickerProps } from "../../ui/thread-picker";
+
+// ============================================================================
+// Internal Context for Compound Components
+// ============================================================================
+
+interface CopilotChatInternalContext {
+ view: "home" | "chat";
+ send: (message: string, attachments?: MessageAttachment[]) => void;
+ isLoading: boolean;
+ onStop?: () => void;
+ attachmentsEnabled: boolean;
+ placeholder: string;
+ // Thread management
+ onNewChat?: () => void;
+ threads?: Thread[];
+ currentThreadId?: string | null;
+ onSwitchThread?: (id: string) => void;
+ onDeleteThread?: (id: string) => void;
+ isThreadBusy?: boolean;
+}
+
+const CopilotChatContext = createContext<CopilotChatInternalContext | null>(
+  null,
+);
+
+/**
+ * Hook to access CopilotChat internal context.
+ * Must be used within CopilotChat compound components.
+ */
+export const useCopilotChatContext = () => {
+ const ctx = useContext(CopilotChatContext);
+ if (!ctx) {
+ throw new Error(
+ "useCopilotChatContext must be used within CopilotChat. " +
+      "Make sure you're using CopilotChat.Home, CopilotChat.Input, etc. inside <CopilotChat>.",
+ );
+ }
+ return ctx;
+};
+
+// ============================================================================
+// Compound Components
+// ============================================================================
+
+/**
+ * HomeView slot - renders only when there are no messages (home view).
+ * Use this to create a custom welcome/home screen.
+ */
+export interface HomeViewProps {
+ children: React.ReactNode;
+ className?: string;
+}
+
+function HomeView({ children, className }: HomeViewProps) {
+ const { view } = useCopilotChatContext();
+ if (view !== "home") return null;
+  return (
+    <div className={cn("csdk-chat-home-view", className)}>
+      {children}
+    </div>
+  );
+}
+
+// Alias for backward compatibility
+export type { HomeViewProps as HomeProps };
+const Home = HomeView;
+
+/**
+ * ChatView slot - renders only when there are messages (chat view).
+ * Use this for custom chat UI layouts. If no children, renders default chat UI.
+ *
+ * When Header/Footer are placed inside ChatView (instead of at root level),
+ * they only show in chat view - view-specific by composition!
+ *
+ * @example View-specific header
+ * ```tsx
+ *
+ * Only shows in chat view!
+ *
+ * ```
+ */
+export interface ChatViewProps {
+ children?: React.ReactNode;
+ className?: string;
+}
+
+function ChatView({ children, className }: ChatViewProps) {
+ const { view } = useCopilotChatContext();
+ if (view !== "chat") return null;
+
+ // If children provided, render them in a minimal wrapper (no flex-1, user controls layout)
+ if (children) {
+    return (
+      <div className={cn("csdk-chat-view", className)}>
+        {children}
+      </div>
+    );
+ }
+
+ // Marker for parent to render default chat content
+ return null;
+}
+
+// Internal marker to identify ChatView without children
+ChatView.displayName = "ChatView";
+
+/**
+ * Check if ChatView children consist only of Header/Footer components.
+ * If so, we should still render default chat content alongside them.
+ */
+function chatViewHasOnlyLayoutChildren(
+ chatViewElement: React.ReactElement | undefined,
+): boolean {
+ if (!chatViewElement?.props?.children) return false;
+
+ const childArray = React.Children.toArray(chatViewElement.props.children);
+ if (childArray.length === 0) return false;
+
+ // Check if ALL children are Header or Footer
+ return childArray.every(
+ (child) =>
+ React.isValidElement(child) &&
+ (child.type === Header || child.type === Footer),
+ );
+}
+
+/**
+ * Header slot - renders header content.
+ * Can be placed at root level (shows in both views) or inside HomeView/ChatView (view-specific).
+ */
+export interface HeaderProps {
+ children: React.ReactNode;
+ className?: string;
+}
+
+function Header({ children, className }: HeaderProps) {
+  return (
+    <div className={cn("csdk-chat-header", className)}>{children}</div>
+  );
+}
+
+/**
+ * Footer slot - renders footer content.
+ * Can be placed at root level (shows in both views) or inside HomeView/ChatView (view-specific).
+ */
+export interface FooterProps {
+ children: React.ReactNode;
+ className?: string;
+}
+
+function Footer({ children, className }: FooterProps) {
+ return