diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000..ba68fae --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,46 @@ +name: Deploys & Previews +on: + push: +jobs: + deploy-www: + runs-on: ubuntu-latest + permissions: + pull-requests: write + issues: write + repository-projects: write + contents: write + steps: + - uses: actions/checkout@v4 + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + cache-dependency-path: './www/package-lock.json' + - name: Install dependencies + run: | + cd www + npm ci + # - name: Run Prettier check + # run: | + # cd www + # npm run format:check + # - name: Run ESLint + # run: | + # cd www + # npm run lint + # - name: Run Lighthouse CI + # run: | + # cd www + # npm run lighthouse + - name: Deploy to Vercel + uses: amondnet/vercel-action@v20 + with: + vercel-token: ${{ secrets.VERCEL_TOKEN }} + github-token: ${{ secrets.GITHUB_TOKEN }} + vercel-org-id: ${{ secrets.VERCEL_ORG_ID }} + vercel-project-id: prj_5wJWsrSkl9yTZ3rY1DgEV7UpAtly + scope: ${{ secrets.VERCEL_ORG_ID }} + working-directory: ./www + github-comment: ${{ github.ref != 'refs/heads/production' }} + vercel-args: ${{ github.ref == 'refs/heads/production' && '--prod' || '' }} \ No newline at end of file diff --git a/.gitignore b/.gitignore index d97d19a..38b2263 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,3 @@ venv/ .env -.DS_Store - -models/* -!models/.gitkeep -__pycache__ -benchmark/__pycache__ \ No newline at end of file +.DS_Store \ No newline at end of file diff --git a/www/.env.example b/www/.env.example new file mode 100644 index 0000000..c9ae91c --- /dev/null +++ b/www/.env.example @@ -0,0 +1 @@ +STACKS_API_KEY=[ask_hugh] \ No newline at end of file diff --git a/www/.gitignore b/www/.gitignore new file mode 100644 index 0000000..794c00c --- /dev/null +++ b/www/.gitignore @@ -0,0 +1,43 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+ +# dependencies +/node_modules +/.pnp +.pnp.* +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/versions + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.pnpm-debug.log* + +# env files (can opt-in for committing if needed) +.env* +!.env.example + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts +.lighthouseci \ No newline at end of file diff --git a/www/.prettierignore b/www/.prettierignore new file mode 100644 index 0000000..e3e7020 --- /dev/null +++ b/www/.prettierignore @@ -0,0 +1,10 @@ +node_modules/ +.next/ +out/ +build/ +dist/ +*.log +.env* +.DS_Store +coverage/ +.nyc_output/ \ No newline at end of file diff --git a/www/.prettierrc b/www/.prettierrc new file mode 100644 index 0000000..58cdde3 --- /dev/null +++ b/www/.prettierrc @@ -0,0 +1,11 @@ +{ + "semi": true, + "trailingComma": "es5", + "singleQuote": true, + "printWidth": 80, + "tabWidth": 2, + "useTabs": false, + "bracketSpacing": true, + "arrowParens": "avoid", + "endOfLine": "lf" +} \ No newline at end of file diff --git a/www/.tool-versions b/www/.tool-versions new file mode 100644 index 0000000..21cdbab --- /dev/null +++ b/www/.tool-versions @@ -0,0 +1 @@ +nodejs 20.19.6 diff --git a/www/README.md b/www/README.md new file mode 100644 index 0000000..e215bc4 --- /dev/null +++ b/www/README.md @@ -0,0 +1,36 @@ +This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://nextjs.org/docs/app/api-reference/cli/create-next-app). + +## Getting Started + +First, run the development server: + +```bash +npm run dev +# or +yarn dev +# or +pnpm dev +# or +bun dev +``` + +Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. + +You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file. + +This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel. + +## Learn More + +To learn more about Next.js, take a look at the following resources: + +- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. +- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. + +You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome! + +## Deploy on Vercel + +The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. + +Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details. diff --git a/www/app/api/subscribe/route.ts b/www/app/api/subscribe/route.ts new file mode 100644 index 0000000..7c5e3f0 --- /dev/null +++ b/www/app/api/subscribe/route.ts @@ -0,0 +1,92 @@ +import { NextRequest, NextResponse } from 'next/server'; + +// Example in-memory store for demonstration +// In production, you'd use a real database or email service like Mailchimp, ConvertKit, etc. 
+const subscribedEmails = new Set([ + 'already@subscribed.com', // Example of already subscribed email for testing +]); + +interface SubscribeResponse { + success: boolean; + message: string; + status: 'subscribed' | 'already_subscribed' | 'error'; +} + +export async function POST(request: NextRequest): Promise<NextResponse<SubscribeResponse>> { + try { + const body = await request.json(); + const { email } = body; + + // Validate email + if (!email || typeof email !== 'string') { + return NextResponse.json( + { + success: false, + message: 'Email address is required.', + status: 'error', + }, + { status: 400 } + ); + } + + // Basic email validation + const emailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/; + if (!emailRegex.test(email)) { + return NextResponse.json( + { + success: false, + message: 'Please enter a valid email address.', + status: 'error', + }, + { status: 400 } + ); + } + + const normalizedEmail = email.toLowerCase().trim(); + + const response = await fetch('https://stacks.garden3d.net/api/contacts', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-Api-Key': process.env.STACKS_API_KEY!, + }, + body: JSON.stringify({ + email: normalizedEmail, + sources: ['g3d:family_intelligence'] + }), + }); + + if (response.ok) { + return NextResponse.json( + { + success: true, + message: 'Thank you for subscribing! We\'ll keep you updated.', + status: 'subscribed', + }, + { status: 201 } + ); + } else { + console.error('Subscription error:', response.statusText); + return NextResponse.json( + { + success: false, + message: 'Something went wrong. Please try again later.', + status: 'error', + }, + { status: 500 } + ); + } + + } catch (error) { + console.error('Subscription error:', error); + + return NextResponse.json( + { + success: false, + message: 'Something went wrong. 
Please try again later.', + status: 'error', + }, + { status: 500 } + ); + } +} \ No newline at end of file diff --git a/www/app/favicon.ico b/www/app/favicon.ico new file mode 100644 index 0000000..718d6fe Binary files /dev/null and b/www/app/favicon.ico differ diff --git a/www/app/globals.css b/www/app/globals.css new file mode 100644 index 0000000..0dbd56e --- /dev/null +++ b/www/app/globals.css @@ -0,0 +1,490 @@ +@import "tailwindcss"; + +/* ============================================ + Font Faces + ============================================ */ + +@font-face { + font-family: 'Windsor Pro'; + src: url('/fonts/WindsorPro/WindsorProRg.TTF') format('truetype'); + font-weight: 400; + font-style: normal; + font-display: swap; +} + +@font-face { + font-family: 'Windsor Pro'; + src: url('/fonts/WindsorPro/WindsorProBold.TTF') format('truetype'); + font-weight: 700; + font-style: normal; + font-display: swap; +} + +@font-face { + font-family: 'Windsor Pro Condensed'; + src: url('/fonts/WindsorPro/WindsorProXBoldCn.TTF') format('truetype'); + font-weight: 800; + font-style: normal; + font-display: swap; +} + +@font-face { + font-family: 'Roobert'; + src: url('/fonts/Roobert/RoobertCollectionVF-TRIAL.woff2') format('woff2'), + url('/fonts/Roobert/RoobertCollectionVF-TRIAL.ttf') format('truetype'); + font-weight: 100 900; + font-style: normal; + font-display: swap; +} + +/* ============================================ + CSS Custom Properties + ============================================ */ + +:root { + /* Colors */ + --fi-green-100: #D7DDD4; + --fi-green-200: #CAD4C6; + --fi-green-300: #B8C6B0; + --fi-green-400: #7B8F5E; + --fi-green-500: #5E7B29; + --fi-green-600: #596647; + --fi-black-900: #313131; + --fi-black-1000: #000000; + + /* Theme */ + --background: var(--fi-green-100); + --foreground: var(--fi-black-1000); + + /* Font Families */ + --font-serif: 'Windsor Pro', Georgia, serif; + --font-sans: 'Roobert', -apple-system, BlinkMacSystemFont, sans-serif; + + /* Layout */ + --grid-gap: 1rem; /* 16px */ + --container-max: 1352px; + --container-padding: 24px; +} + +@media (min-width: 768px) { + :root { + --container-padding: 80px; + } +} + +@theme inline { + --color-background: var(--background); + --color-foreground: var(--foreground); + --font-sans: 'Roobert', -apple-system, BlinkMacSystemFont, sans-serif; + --font-serif: 'Windsor Pro', Georgia, serif; + + /* Custom Colors */ + --color-fi-green-100: var(--fi-green-100); + --color-fi-green-200: var(--fi-green-200); + --color-fi-green-300: var(--fi-green-300); + --color-fi-green-400: var(--fi-green-400); + --color-fi-green-500: var(--fi-green-500); + --color-fi-green-600: var(--fi-green-600); + --color-fi-black-900: var(--fi-black-900); + --color-fi-black-1000: var(--fi-black-1000); +} + +/* ============================================ + Base Styles + ============================================ */ + +html { + scroll-behavior: smooth; +} + +/* Scroll margin for section anchors - adds space above when scrolling to anchor */ +section[id] { + scroll-margin-top: 80px; +} + +body { + background: var(--background); + color: var(--foreground); + font-family: var(--font-sans); + font-size: 16px; + line-height: 1.5; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + text-rendering: optimizeLegibility; +} + +/* ============================================ + Typography + ============================================ */ + +/* h1 - Windsor Pro ExtraBold Condensed, 104px */ +h1 { + font-family: 'Windsor Pro Condensed', Georgia, 
serif; + font-size: 104px; + font-weight: 800; + line-height: 0.95; + letter-spacing: -0.02em; + margin: 0; + color: var(--fi-black-900); +} + +/* h2 - Roobert VF, 36px */ +h2 { + font-family: var(--font-sans); + font-size: 36px; + font-weight: 500; + line-height: 1.2; + letter-spacing: -0.01em; + color: var(--fi-black-900); +} + +/* h3 - windsorpro, 24px */ +h3 { + font-family: var(--font-serif); + font-size: 32px; + font-weight: 500; + line-height: 1.3; + letter-spacing: 0; + margin: 0; +} + +/* h3.windsorpro - Roobert, uppercase label style */ +h3.label { + font-family: var(--font-serif); + font-size: 20px; + font-weight: 500; + line-height: 1.4; + letter-spacing: 0.1em; + text-transform: uppercase; + margin: 0; +} + +/* .label - Roobert, general uppercase label style */ +.label { + font-family: var(--font-sans); + font-size: 12px; + font-weight: 500; + line-height: 1.4; + letter-spacing: 0.1em; + text-transform: uppercase; +} + +/* h4 - Roobert VF, 18px */ +h4 { + font-family: var(--font-sans); + font-size: 18px; + font-weight: 600; + line-height: 1.4; + letter-spacing: 0; + margin: 0; +} + +/* .byline - Roobert VF SemiMono Regular */ +.byline { + font-family: var(--font-sans); + font-size: 20px; + font-weight: 400; + line-height: 1.3; + font-variation-settings: 'MONO' 50, 'wght' 400; +} + +/* .nav - Roobert VF, navigation style */ +.nav { + font-family: var(--font-sans); + font-size: 14px; + font-weight: 400; + line-height: 1.5; + letter-spacing: 0.02em; +} + +/* p.large - Roobert VF, larger paragraph */ +p.large { + font-family: var(--font-sans); + font-size: 20px; + font-weight: 400; + line-height: 1.5; + letter-spacing: 0; + margin: 0; +} + +/* p - Roobert VF, regular paragraph */ +p { + font-family: var(--font-sans); + font-size: 16px; + font-weight: 400; + line-height: 1.5; + letter-spacing: 0; + margin: 0; +} + +p + p, +p.large + p.large, +p + h4 { + margin-top: 1em; +} + +/* .caption - Roobert VF, small caption text */ +.caption { + font-family: var(--font-sans); + font-size: 16px; + font-weight: 400; + line-height: 1.5; + margin-top: 1em; + letter-spacing: 0.01em; + color: var(--fi-black-900); + opacity: 0.7; + text-align: center; + text-wrap-style: balance; +} + +/* ============================================ + Responsive Typography + ============================================ */ + +@media (max-width: 768px) { + h1 { + font-size: 54px; + line-height: 1; + } + + h2 { + font-size: 24px; + } + + h3 { + font-size: 28px; + } + + p.large { + font-size: 18px; + } + + .byline { + font-size: 16px; + } +} + +@media (min-width: 1280px) { + h1 { + font-size: 124px; + } + + h2 { + font-size: 47px; + } +} + +/* ============================================ + Layout Utilities + ============================================ */ + +/* Constrained container - max 1352px, responsive padding */ +.container-content { + width: 100%; + max-width: var(--container-max); + margin-inline: auto; + padding-inline: var(--container-padding); +} + +/* Full-width container - no padding, full bleed */ +.container-full { + width: 100%; + padding-inline: 0; +} + +/* 12-column grid with built-in gap */ +.grid-layout { + display: grid; + grid-template-columns: repeat(12, minmax(0, 1fr)); + gap: var(--grid-gap); +} + +/* ============================================ + Content with Sidebar Layout + ============================================ */ + +.content-with-sidebar { + position: relative; + width: 100%; + max-width: var(--container-max); + margin-inline: auto; + padding-inline: var(--container-padding); +} + 
+.sidebar-nav { + display: none; +} + +/* Wrapper for all page content - allows sidebar to span full height */ +.page-content-wrapper { + position: relative; + width: 100%; + max-width: var(--container-max); + margin-inline: auto; + padding-inline: var(--container-padding); +} + +/* Fixed sidebar that spans the entire page content - absolute within wrapper, sticky inside */ +.sidebar-nav-fixed { + display: none; +} + +@media (min-width: 768px) { + .sidebar-nav-fixed { + display: block; + position: absolute; + left: var(--container-padding); + top: 0; + bottom: 0; + width: calc((100% - 2 * var(--container-padding) - 11 * var(--grid-gap)) / 12 * 2); + z-index: 10; + pointer-events: none; + } + + .sidebar-nav-fixed > * { + pointer-events: auto; + } +} + +/* Content section - used for alternating z-index layers */ +.content-section { + position: relative; + width: 100%; +} + +/* Remove padding from container-content inside content-section since wrapper already has it */ +.content-section .container-content { + padding-inline: 0; + max-width: none; +} + +.main-content { + width: 100%; + min-width: 0; +} + +/* Adjust container-content inside main-content to not double-pad */ +.main-content .container-content { + padding-inline: 0; + max-width: none; +} + +/* ============================================ + Media Row Component + ============================================ */ + +.media-row-wrapper { + width: 100vw; + margin-left: calc(-50vw + 50%); + padding-inline: 0; + position: relative; + z-index: 20; + background-color: var(--fi-green-100); +} + +.media-row { + display: flex; + flex-direction: column; + align-items: center; + overflow: hidden; + gap: 16px; + width: 100%; + padding-inline: 0; +} + +@media (max-width: 767px) { + .media-row { + height: auto !important; + } +} + +@media (min-width: 768px) { + .media-row { + flex-direction: row; + justify-content: center; + } +} + +.media-item { + flex: 0 0 auto; + border-radius: 0; + overflow: hidden; + width: 100%; +} + +@media (max-width: 767px) { + .media-item { + height: auto !important; + } +} + +@media (min-width: 768px) { + .media-item { + width: auto; + border-radius: 20px; + } +} + +.media-item img, +.media-item video { + width: 100%; + height: auto; + object-fit: cover; + display: block; + border-radius: 0; +} + +@media (min-width: 768px) { + .media-item img, + .media-item video { + height: 100%; + width: auto; + object-fit: contain; + border-radius: 20px; + } +} + +/* ============================================ + Animations + ============================================ */ + +/* Leaf spring grow animation from bottom-left */ +@keyframes leaf-spring-grow { + 0% { + transform: scale(0); + } + 50% { + transform: scale(1.06); + } + 70% { + transform: scale(0.97); + } + 85% { + transform: scale(1.02); + } + 100% { + transform: scale(1); + } +} + +.leaf-animate { + transform-origin: bottom left; + animation: leaf-spring-grow 0.7s ease-out both; + animation-delay: 0.5s; +} + +/* Tree SVG fade-in animation */ +@keyframes tree-fade-in { + 0% { + opacity: 0; + } + 100% { + opacity: 1; + } +} + +.tree-animate { + opacity: 0; + animation: tree-fade-in 0.6s ease-in-out forwards; + animation-delay: 0.5s; +} diff --git a/www/app/layout.tsx b/www/app/layout.tsx new file mode 100644 index 0000000..d0a3fd2 --- /dev/null +++ b/www/app/layout.tsx @@ -0,0 +1,21 @@ +import type { Metadata } from "next"; +import "./globals.css"; + +export const metadata: Metadata = { + title: "Family Intelligence", + description: "Bringing memories back home. 
Speculative Research in local LLMs.", +}; + +export default function RootLayout({ + children, +}: Readonly<{ + children: React.ReactNode; +}>) { + return ( + + + {children} + + + ); +} diff --git a/www/app/page.tsx b/www/app/page.tsx new file mode 100644 index 0000000..d5ce04c --- /dev/null +++ b/www/app/page.tsx @@ -0,0 +1,1007 @@ +import SectionHeader from "@/components/SectionHeader"; +import PullQuote from "@/components/PullQuote"; +import LeafIcon from "@/components/LeafIcon"; +import MediaRow from "@/components/MediaRow"; +import QuoteBox from "@/components/QuoteBox"; +import AudioPlayer from "@/components/AudioPlayer"; +import CodeSnippet from "@/components/CodeSnippet"; +import SubscribeForm from "@/components/SubscribeForm"; +import Navigation, { MobileNavigation } from "@/components/Navigation"; +import { AnimatedElement } from "@/components/PageAnimations"; + +const RESEARCH_LINKS = [ + { + part: "Part One", + title: "Oh! To be known by my computer: Social archetypes of LLMs", + url: "https://garden3d.substack.com/p/oh-to-be-known-by-my-computer", + }, + { + part: "Part Two", + title: "Off-Brain", + url: "https://garden3d.substack.com/p/off-brain", + }, + { + part: "Part Three", + title: "Where the Flower Grows", + url: "https://garden3d.substack.com/p/where-the-flower-grows", + }, +]; + +const SYSTEM_PROMPT_CODE = `system_prompt = """ +You are a Data Extraction Engine, not a creative writer. +Your job is to extract family history data with forensic accuracy. + +### GROUNDING RULES +1. **Extract ALL People**: You MUST extract EVERY person mentioned in the transcript, even if they're minor characters or only mentioned once. This includes: + - All named individuals (e.g., "John Doe", "Jane Doe", "Leilani") + - People referred to by first name only (e.g., "Jane", "Mia", "Paul") + - People referred to by relationship (e.g., "Mum", "Dad") - create entries for them + - DO NOT create separate entries for speakers - instead, identify which person each speaker likely is (see Speaker Identification below) +2. **Speaker Identification**: Analyze ALL context clues throughout the transcript to identify which person each speaker likely is: + - **Collect Multiple Clues**: A speaker may be referred to by different names/relationships in the same conversation: + * Direct address: "wouldn't it, Mia?" followed by SPEAKER_01 responding suggests SPEAKER_01 is Mia + * Relationship references: "Mum can tell you" suggests SPEAKER_01 is the mother + * First-person references: "Jane and I drove home" suggests SPEAKER_01 might be Jane + - **Name Consolidation**: If multiple clues point to different names for the same speaker, consider they might be the SAME person: + * Example: If SPEAKER_01 is addressed as "Mia" AND referred to as "Mum" AND says "Jane and I", these could all be the same person (Mia/Jane is the mother) + * Create ONE person entry with the most complete name (e.g., "Jane" if full name, or "Mia" if that's what's used most) + * Use the STRONGEST evidence (direct address is stronger than relationship reference) + - **Confidence Levels**: + * CERTAIN: Explicit statement like "I am John" or multiple strong clues all pointing to same person + * PROBABLE: Strong clues like direct address followed by response, OR multiple weaker clues converging on same person + * POSSIBLE: Single weak clue or conflicting clues + * UNKNOWN: No identification clues found + - **Evidence**: Quote ALL relevant text snippets that support the identification, especially if multiple clues point to the same person +2. 
**Quote Your Sources**: For every memory location, you must provide the EXACT substring from the text that proves it. +3. **No Normalization**: If the text says "Brothers Leagues Club", do not change it to "Bar" or "Coffee Shop". Keep the specific name. +4. **Context is Key**: If a location is vague (e.g., "recovery"), use the specific venue mentioned in context (e.g., "Race Club" or "Leagues Club"). +5. **Dates**: If the speakers debate a date (e.g., "84? No 85"), use the final agreed date. + +### DATABASE LAYOUT RULES +1. When working with IDs (either from the transcript as UUIDs, or locally generated temporary IDs), triple check that you reference the ID EXACTLY throughout the JSON and don't drop characters. +2. An Event node HAPPENED_AT one or more Location nodes. +3. An Event node is ATTENDED by a Person node, but a Location node is never ATTENDED by a Person node. +4. An Event node should always have a "year" property, and be __UNKNOWN__ if not stated. +5. An Event, Person and Location node should always have a "name" property, and be __UNKNOWN__ if not stated. +6. Two Person nodes are always connected by a RELATES_TO edge, and the edge always has a "relationship" property, describing the relationship, or __UNKNOWN__ if not stated. +7. A RELATES_TO edge must never connect a Person node to a Location node, in either direction. + +### REQUIRED JSON SCHEMA +{ + "nodes": [ + { + "id": "1", + "label": "Person" | "Event" | "Location", + "properties": { "name": "Bill" } + }, + { + "id": "2", + "label": "Person", + "properties": { "name": "__UNKNOWN__" } + }, + { + "id": "3", + "label": "Event", + "properties": { "year": "1984", "name": "Jan & Ian's Wedding" } + }, + { + "id": "4", + "label": "Location", + "properties": { "name": "Brother's League Club" } + }, + { + "id": "5", + "label": "Person", + "properties": { "name": "Paul Francis" } + } + ], + "edges": [ + { + "label": "RELATES_TO", + "source_id": "1", + "target_id": "2", + "properties": { "relationship": "WIFE" } + }, + { + "label": "HAPPENED_AT", + "source_id": "3", + "target_id": "4" + }, + { + "label": "ATTENDED", + "source_id": "5", + "target_id": "3" + }, + { + "label": "RELATES_TO", + "source_id": "5", + "target_id": "2", + "properties": { "relationship": "FRIEND" } + } + ] +} + +### ONE-SHOT EXAMPLE +Transcript: **123**: In 1984, no, in 1985, we got married at the bowls club, right Mia? **456**: Yeah, that's right.
+ +Example output: +{ + "nodes": [ + { + "id": "123", + "label": "Person", + "properties": { "name": "__UNKNOWN__" } + }, + { + "id": "456", + "label": "Person", + "properties": { "name": "Mia" } + }, + { + "id": "_2", + "label": "Event", + "properties": { "year": "1985", "name": "Wedding" } + }, + { + "id": "_1", + "label": "Location", + "properties": { "name": "bowls club" } + } + ], + "edges": [ + { + "label": "RELATES_TO", + "source_id": "123", + "target_id": "456", + "properties": { "relationship": "SPOUSE" } + }, + { + "label": "HAPPENED_AT", + "source_id": "_2", + "target_id": "_1" + } + ] +} + +### CRITICAL REMINDER +- Extract EVERY person mentioned, no matter how briefly or how minor they seem +- If someone is mentioned by first name only, use that name (e.g., "Jane" not "Jane Unknown") +- If full names are given, use them (e.g., "John Doe", "Jane Doe") +- DO NOT create separate "Speaker 00" or "Speaker 01" entries - identify which real person each speaker is +- **MULTIPLE CLUES ANALYSIS**: When identifying speakers, look for ALL clues throughout the transcript: + * If SPEAKER_01 is addressed as "Mia" AND referred to as "Mum" AND says "Jane and I", these likely refer to the SAME person + * Consolidate: Create ONE person entry (use the most complete name, e.g., "Jane" if that's the full name) + * The fact that multiple different names/relationships point to the same speaker STRENGTHENS the identification +- For speaker_identifications: Use person_id from the people list, or null if unknown +- Scan the ENTIRE transcript systematically - don't miss anyone +- If a person appears multiple times with different names (e.g., "Jane" and "Jane Doe"), create ONE entry with the most complete name +- **Evidence field**: Include ALL relevant quotes when multiple clues converge, e.g., "Addressed as 'Mia' + referred to as 'Mum' + says 'Jane and I' - all point to same person" +"""`; + +export default function Home() { + return ( +
+ + {/* Fixed Work with Us Button */} + + +

Work with Us

+
+
+ + {/* Main Content */} +
+ {/* Hero Section */} +
+
+
+ {/* H1 Title with Leaf */} + +

+ Family Intelligence + +

+
+ + {/* H2 Subtitle */} + +

+ Bringing memories back home. +

+
+ + {/* Byline */} + + Speculative Research in local LLMs, +
+ by{" "} + + USB Club + {" "} + and{" "} + + Garden3D + + . +
+
+
+
+ + {/* Mobile Navigation - fixed position, shows/hides on scroll */} +
+ +
+
+ + {/* ===== PAGE CONTENT WITH SIDEBAR ===== */} +
+ {/* Fixed sidebar navigation (z-10) - positioned absolutely within wrapper, sticky inside */} + +
+ +
+
+ + {/* ===== SECTION: Text content (z-0) ===== */} +
+ {/* Tree SVG - positioned to sit on the sidebar nav divider line, aligned left */} + + + + {/* Section I: The Moment - 4 column width */} +
+
+
+
+ + +
+

+ We hesitate to whisper our secrets to the cloud, guiltily trading privacy for convenience. +

+ +

+ But we do, because there is joy and beauty in being known and understood by the computer. +

+ +

+ With careful architecture, we can feel safe speaking openly around an LLM, safe in the verifiable proof that our data's accessible to us alone. +

+ +

+ When we engineer for intimacy, we can bring families together, easily storing and safeguarding our memories for generations to come. +

+
+ + {/* Pull Quote */} +
+ +

+ AI could help preserve the next +
+ millennium of family heritage, +

+

+ ...but we hesitate to share our +
+ cherished memories with the cloud. +

+
+
+
+
+
+
+
+
+ + {/* ===== SECTION: Wide media (z-20, above sidebar) ===== */} +
+
+
+ +
+
+
+ + {/* ===== SECTION: Text content (z-0) ===== */} +
+
+ {/* Section II: The Idea - 6 column layout */} +
+
+
+
+ + +
+

+ Private at-home intelligence is now within reach. While most of the industry is chasing always-online AI, we've been exploring the alternative. +

+ +
+ +
+ +

+ In Part One of this research, we dove into the case for private AI to explore what will be needed for an air-gapped future. When weighing the use cases, families stood out as both early adopters and multi-generational beneficiaries of local LLMs. Helpful today, crucial tomorrow. +

+ +

+ Archiving your family history is a cumbersome process, currently left to the one individual in the family with enough time and conviction to put a book together. LLMs are great at distilling unstructured data into a maintainable archive, lowering the barrier to entry for anyone in the family to contribute to the family tree. +

+
+
+
+
+
+
+ + {/* ===== SECTION: Wide media (z-20) ===== */} +
+
+
+ +
+
+
+ + {/* ===== SECTION: Text content (z-0) ===== */} +
+
+ {/* Book Philosophy Text */} +
+
+
+
+

+ Family memories belong in the home. With recent AI advancements, this is the first time you're able to build a treasure trove of memories for your family in such a frictionless way. Our heritage and family history are extremely intimate data that many don't want to give to big tech. However, this family information gains utility and longevity when it's archived and browsable digitally. +

+

+ Previous generations stored family memories physically. Our generation is waking up to the fact that we're losing these memories unless we put systems in place to preserve them. +

+

+ These memories also need to be embodied, as objects of heritage. They cannot solely live in a phone or a black box home server. We believe in three tenets that these objects must uphold if they are to be accepted as a new method of archiving. A family intelligence object must be Timeless in its ability to withstand generations, Observable in its ease of control, and Trustworthy from first glance to the 100th entry into the family tree. +

+
+
+
+ + {/* Media Element - 8 Column - Character Traits */} +
+
+
+ Character traits diagram +
+

+ Form follows function. Tenets follow values. The bolded tenets are the ones that felt most in harmony with our values. +

+
+
+ + {/* Media Element - 6 Column - Family Categories */} +
+
+

+ Families come in all shapes and sizes. They're messy, heartwarming, dysfunctional, inspirational, chosen, bestowed upon us. We considered these many forms of a family to understand how we can design an heirloom that resonates with any family member. +

+ +
+ Family categories +
+

+ Families don't take a single form; they are a spectrum of structures. +

+
+

+ Objects inherently hold memories. Families already embed memories into static objects today, and pass them down their lineage to extend their heritage. A couple types of objects stood out in our research for both private and familial heirlooms. +

+
+
+ + {/* Objects as Memory Holders - Image */} +
+
+ Objects as memory holders research +
+
+ + {/* Objects as Memory Holders - Intro Text */} +
+
+

+ Objects emerge as long-term memory holders across personal, domestic, and technological forms. +

+
+

+ Taking cues from how families archive memories today, we concepted and play-tested different forms that speak to the three tenets of trustworthy, observable, and timeless. They're all around a medium size, able to be quickly thrown in a backpack on the way to Grandma's house, and they all aim to resemble something that already sits in the home today. New objects must meet people halfway to overcome the barrier of entry to change behavior. +

+
+
+
+
+ + {/* ===== SECTION: Wide media - Research Sketches + Product Grids (z-20) ===== */} +
+
+ {/* Media Element - Full Width No Padding - Research Sketches */} +
+
+ Research sketches +
+

+ From observation to form, we mapped how everyday objects become memory vessels. +

+
+ + {/* The Leaf - 12 Column Grid Layout */} +
+
+
+

The Leaf

+

+ An homage to the family tree, the Family Leaf mimics an alarm clock as a tabletop item and features a removable mic. Stand up the leaf to begin a session or use it as a remote to browse past recordings. +

+
+
+
+
+
+
+ The Leaf - removable mic being placed +
+
+ The Leaf - tabletop view +
+
+
+
+ The Leaf - full device view +
+
+
+
+ The Leaf - on wooden table +
+
+ The Leaf - detail view +
+
+
+
+
+ + {/* The Radio - 12 Column Grid Layout */} +
+
+
+

The Radio

+

+ The most sentimental of the bunch, the Family Radio is a contextually accurate and historically nostalgic way to browse your family's heritage. Its tactile interfaces keep memories grounded in the home. +

+
+
+
+
+
+
+ The Radio - screen detail +
+
+ The Radio - turning dial +
+
+
+
+ The Radio - full device +
+
+ The Radio - kitchen view +
+
+
+
+ The Radio - timeline view +
+
+ The Radio - kitchen photo +
+
+
+
+
+ + {/* The Family Book - 12 Column Grid Layout */} +
+
+
+

The Family Book

+

+ A modern update to the scrapbook, a format that has stood the test of time for keeping family memories safe, the Family Book provides an intuitive interface for reading and writing your family's history. +

+
+
+
+
+
+ The Family Book - closed on table +
+
+ The Family Book - open view +
+
+ The Family Book - standing display +
+
+ The Family Book - crafting process +
+
+ The Family Book - in use +
+
+
+
+
+
+ + {/* ===== SECTION: Text content (z-0) ===== */} +
+
+ {/* Family Book Philosophy Text */} +
+
+
+

+ Imagine growing up and being able to spend full days diving into your family tree – your lineage, what your aunt studied in school, where that distant cousin is now, what your grandma's favorite recipe is. Building a system for family intelligence provides easy avenues to this information for all ages. We explored early wireframes that would aid family members looking to learn more. +

+
+
+
+
+ + {/* ===== SECTION: Wide media - Wireframes (z-20) ===== */} +
+
+
+ +
+
+
+ + {/* ===== SECTION: Text content - The System (z-0) ===== */} +
+
+ {/* Section III: The System - 6 column layout */} +
+
+
+
+ + +
+

+ Encouraged by the results of our initial local LLM tests, we filled out the system architecture and ran benchmarks on a wider set of home-ready computers. +

+ +

+ The main engineering challenge for building a local LLM system comes down to managing user expectations around performance. While larger cloud-based systems can scale up to enormous amounts of computing power, consumer hardware will need to utilize longer-running AI processes for heavy-duty data processing. These tasks will run on-device in the background and surface results to the user interface when ready. +

+ +

High-level Architecture

+

+ For architecture and benchmarking, we focused on the main user flow: recording and processing a family conversation. Audio is one of the many modalities that this object of heritage will support. Let's take a look at our ETL Pipeline, a common pattern to Extract, Transform, and Load data. +

+
+
+
+
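To make the shape of that pipeline concrete, here is a minimal Python sketch of the three stages wired together as an idempotent polling loop. Every name in it (recorder, store, diarizer, and so on) is an illustrative assumption, not our actual runtime API:

```python
import time

def extract(recorder, store):
    # Persist the newest audio chunk as soon as it exists; nothing
    # downstream runs until the chunk is durably on disk.
    chunk = recorder.next_chunk()
    if chunk is not None:
        store.save_audio(chunk)

def transform(store, diarizer):
    # Turn any unprocessed audio chunks into speaker-attributed transcripts.
    for chunk in store.unprocessed_audio():
        store.save_transcript(diarizer.run(chunk))

def load(store, llm, graph):
    # Compress transcripts and merge extracted nodes/edges into the graph.
    for transcript in store.unprocessed_transcripts():
        graph.merge(llm.extract_graph(transcript))

def run_pipeline(recorder, store, diarizer, llm, graph, poll_seconds=5.0):
    # Each stage only picks up work the previous stage durably wrote,
    # so a crash at any point can resume without losing or duplicating data.
    while True:
        extract(recorder, store)
        transform(store, diarizer)
        load(store, llm, graph)
        time.sleep(poll_seconds)
```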
+ + {/* HLSD Diagram - 8 Column */} +
+
+
+ High-Level System Diagram showing Family Intelligence Runtime architecture +
+
+
+ + {/* HLSD Explanation Paragraphs */} +
+
+
+
+

+ This pipeline will run in real time to record and store memories as audio, understand them, and load them into an ontological (or categorized) representation. That data will underpin a social graph of nodes, such as people, places, and events, and edges, such as relationships and actions. +

+ +

Step One: Extract and Chunk Audio

+

+ For resilience, the device will first record chunks of audio directly to storage (e.g. an on-board microSD card) and encrypt them at rest. Writing small chunks straight to disk lowers the likelihood of faults like memory overflow, data corruption, or inconsistency in later steps. This is our first step towards an idempotent and reliably consistent processing architecture. +

+ +

+ Lastly, to ensure family members feel safe and in control, we'll utilize a physical disconnect switch that allows recording to be paused mid-conversation – perfect for Grandma's dicey side stories. +

+
+
+
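A minimal sketch of that write-then-encrypt behavior, assuming raw PCM buffers arrive from some unspecified capture source, and using Fernet from the `cryptography` package as a stand-in for whatever at-rest encryption scheme the device actually ships with:

```python
from pathlib import Path
from cryptography.fernet import Fernet

CHUNK_DIR = Path("/mnt/microsd/chunks")  # hypothetical mount point

def store_chunk(key: bytes, index: int, pcm_bytes: bytes) -> Path:
    """Encrypt one chunk of raw audio and persist it under a stable name."""
    # `key` would be generated once via Fernet.generate_key() and kept
    # on-device, never in the cloud.
    path = CHUNK_DIR / f"chunk_{index:06d}.enc"
    token = Fernet(key).encrypt(pcm_bytes)
    # Write to a temp file first, then rename: the rename is atomic, so
    # later pipeline stages never observe a half-written chunk.
    tmp = path.with_suffix(".tmp")
    tmp.write_bytes(token)
    tmp.rename(path)
    return path

def read_chunk(key: bytes, path: Path) -> bytes:
    """Decrypt a stored chunk for the next pipeline stage."""
    return Fernet(key).decrypt(path.read_bytes())
```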
+ + {/* Extract Diagram */} +
+
+
+ Detect and process all unprocessed Transcript Chunks +
+
+
+ + {/* Step Two */} +
+
+
+
+

Step Two: Transform Audio to Recognize Speakers

+

+ As chunks are stored safely on disk, the system will pick them up and separate out audio and transcriptions for different speakers. These are often referred to as "Speaker Turns". A couple of Python libraries and their offline models, pyannote-audio and faster-whisper, are helpful here. +

+ +

+ We store the voices as "voiceprints" so when future recording sessions feature the same speakers they can be logically connected in the social graph. Further, raw transcripts will be stored in the database to be interpreted in the next step. +

+
+
+
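A sketch of this step using the two libraries named above. The model size, device settings, and the overlap heuristic are our assumptions for illustration (and `Pipeline.from_pretrained` may additionally require a Hugging Face auth token):

```python
from pyannote.audio import Pipeline
from faster_whisper import WhisperModel

diarizer = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
whisper = WhisperModel("small", device="cpu", compute_type="int8")

def speaker_turns(wav_path: str):
    """Yield (speaker, start, end, text) tuples for one audio chunk."""
    diarization = diarizer(wav_path)
    turns = [(turn.start, turn.end, label)
             for turn, _, label in diarization.itertracks(yield_label=True)]
    if not turns:
        return
    # Transcribe once, then attribute each transcript segment to
    # whichever diarized turn it overlaps the most.
    segments, _info = whisper.transcribe(wav_path)
    for seg in segments:
        best = max(turns,
                   key=lambda t: min(t[1], seg.end) - max(t[0], seg.start))
        yield best[2], seg.start, seg.end, seg.text
```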
+ + {/* Transform Diagram - 8 Column */} +
+
+
+ Transform Speaker Diarisation diagram showing audio chunk to speaker turns to transcript and voiceprint storage +
+
+
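The voiceprint matching could work roughly like this: embed each speaker's audio once, then compare new recordings against the stored prints by cosine similarity. The embedding model name and the 0.75 threshold are assumptions for illustration:

```python
import numpy as np
from pyannote.audio import Inference

embedder = Inference("pyannote/embedding", window="whole")

def voiceprint(wav_path: str) -> np.ndarray:
    """Return a unit-length speaker embedding for one recording."""
    vec = embedder(wav_path)
    return vec / np.linalg.norm(vec)

def match_speaker(new_print: np.ndarray,
                  known: dict[str, np.ndarray],
                  threshold: float = 0.75) -> str | None:
    """Return the best-matching known speaker, or None if nobody clears
    the similarity threshold (i.e., this is a new family member)."""
    if not known:
        return None
    # Dot product of unit vectors == cosine similarity.
    name, score = max(((n, float(new_print @ v)) for n, v in known.items()),
                      key=lambda pair: pair[1])
    return name if score >= threshold else None
```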
+ + {/* Step Three and Code Snippet */} +
+
+
+
+

Step Three: Load into Ontological Vector Database

+

+ Finally, as raw transcripts are stored in the database, they'll be picked up and analyzed by the LLM, then compressed and stored for easy RAG retrieval and traversal through a graph database. +

+ +

+ For compression, Chain of Density (CoD) is a common prompting technique we can employ to ensure our speaker turns are vectorized at a high degree of detail and predictable length. +

+ +
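A sketch of what such a CoD pass might look like; the wording and word budget are illustrative, and `llm.complete` stands in for a hypothetical local-model call:

```python
# Illustrative Chain of Density prompt for compressing a speaker turn
# before embedding. Not our production prompt.
COD_PROMPT = """
You will write increasingly dense summaries of the transcript below.
Repeat 3 times:
1. Identify 1-3 specific entities (people, places, dates) from the
   transcript that are missing from your previous summary.
2. Rewrite the summary to include them WITHOUT increasing its length
   (exactly 60 words), using fusion and compression, never filler.
Return only the final 60-word summary.

Transcript:
{transcript}
"""

def compress_turn(llm, transcript: str) -> str:
    # A fixed-length, entity-dense summary keeps the resulting vectors
    # detailed and predictably sized, as described above.
    return llm.complete(COD_PROMPT.format(transcript=transcript))
```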

+ For extracting a social graph, we can employ Few-Shot Prompting and strict JSON output to extract social relationships ready for entry into a traditional nodes + edges graph database. +

+
+
+
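A sketch of that final load, merging the LLM's JSON output (in the shape of the system prompt shown further below) into a Neo4j graph; the connection details are placeholders:

```python
import json
from neo4j import GraphDatabase

driver = GraphDatabase.driver("bolt://localhost:7687",
                              auth=("neo4j", "password"))

def load_graph(extraction_json: str) -> None:
    """Upsert extracted nodes and edges into the graph database."""
    data = json.loads(extraction_json)
    with driver.session() as session:
        for node in data["nodes"]:
            # MERGE is idempotent: re-processing a chunk won't duplicate
            # nodes. Labels can't be parameterized in Cypher, hence the
            # f-string interpolation.
            session.run(
                f"MERGE (n:{node['label']} {{id: $id}}) SET n += $props",
                id=node["id"], props=node.get("properties", {}),
            )
        for edge in data["edges"]:
            session.run(
                f"MATCH (a {{id: $src}}), (b {{id: $dst}}) "
                f"MERGE (a)-[r:{edge['label']}]->(b) SET r += $props",
                src=edge["source_id"], dst=edge["target_id"],
                props=edge.get("properties", {}),
            )
```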
+ + {/* Load Diagram */} +
+
+
+ Detect and process all unprocessed Transcript Chunks +
+
+
+ + {/* Code Snippet */} +
+
+
+
+

+ Here is our example system prompt for the curious: +

+ +
+ +
+
+
+
+ + {/* Neo4j Diagram - 8 Column */} +
+
+
+ Neo4j graph database visualization +
+
+
+ + {/* Benchmarking our Chipsets */} +
+
+
+
+

Benchmarking our Chipsets

+

+ A key part of our thesis points to the drastic strides in processing speed that both open-source models and consumer-level chipsets have been making every six months. It's safe to bet that what might seem resource-constrained and inefficient today is likely to be a breeze on hardware and architectures just a year from now. +

+

+ We wrote a lightweight test harness (view codebase) to assess the feasibility of running this workload on consumer-grade hardware. We used this testing framework to run the same benchmark against three best-in-class chipsets. From least to most performant, they are: +

+ +
+
+
+ + {/* Benchmarking set */} +
+
+
+ Benchmarking our Chipsets +
+
+
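The harness boils down to timing the same workload repeatedly on each chipset and reporting wall-clock statistics; a simplified version might look like this (function and file names are illustrative):

```python
import statistics
import time

def benchmark(run_workload, wav_path: str, repeats: int = 3) -> dict:
    """Run one workload several times and summarize wall-clock timings."""
    times = []
    for _ in range(repeats):
        start = time.perf_counter()
        run_workload(wav_path)  # e.g. the diarization pipeline above
        times.append(time.perf_counter() - start)
    return {
        "min_s": min(times),
        "mean_s": statistics.mean(times),
        "max_s": max(times),
    }

# Hypothetical usage:
# results = benchmark(diarizer, "parents_story_1985.wav")
```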
+ + {/* Benchmarking audio */} +
+
+
+

+ All benchmarks were run against a cute story, 3 minutes and 42 seconds long, of Hugh's parents explaining how they met in 1985. +

+ +
+ +
+
+
+
+ + {/* Benchmarking audio */} +
+
+
+

+ To benchmark the relative performance of these chips, we ran a speaker diarisation process with a local pyannote/speaker-diarization-3.1 model (Hugging Face) via the pyannote-audio Python library. +

+

+ As expected, the Thor & Orin drastically outperformed the Raspberry Pi 5 16GB, indicating that for the best possible UX we'll need to run the Application Runtime on a GPU-enabled system for these processing loads. +

+

+ For future tests, we'd be interested to run the benchmarks against chips closer in size to the Pi but with an onboard GPU, such as the Orange Pi 5, Khadas VIM4, or ASUS Tinker Board, or even a Raspberry Pi 5 running an AI-capable HAT like the HAILO SC1785. +

+ Benchmarking Voiceprints Chart +

+ While these initial results are encouraging, we look forward to seeing the advancements in these test cases in the coming months. +

+
+
+
+
+
+ + {/* ===== SECTION: Wide media - Family Together (z-20) ===== */} +
+
+
+
+ Family together +
+

+ Intended for use, then for staying +

+
+
+
+ + {/* ===== SECTION: Text content - Work with Us (z-0) ===== */} +
+
+ {/* Section IV: Work with Us - 6 column layout */} +
+
+
+
+ + +
+

+ Garden3D helps forward-thinking teams explore the edges of local AI, speculative hardware, and product storytelling. We collaborate with brands, labs, and founders to design the next generation of tangible AI experiences. This research is in partnership with USB Club, a memory network for preserving what matters. +

+ +

+ For partnerships and collaborations, email us at{" "} + + + partner@intelligence.family + + + . +

+ +

Subscribe for Updates

+ +
+ + +
+ +

+ This is part four in our ongoing research on local AI. Check out the previous research on our Substack: +

+ +
+
+

Local Intelligence Research

+
+ +
+ +
+
+

+ Published January 2026 by +
+ USB Club (Norm, Yatú) and Garden3D (Hugh) +

+ + + +
+

Thank you.

+
+
+
+
+
+
+
+
+
{/* end page-content-wrapper */} +
+
+ ); +} diff --git a/www/components/AudioPlayer.tsx b/www/components/AudioPlayer.tsx new file mode 100644 index 0000000..746e01e --- /dev/null +++ b/www/components/AudioPlayer.tsx @@ -0,0 +1,168 @@ +"use client"; + +import { useState, useRef, useEffect } from "react"; + +interface AudioPlayerProps { + src: string; + quote: string; + filename?: string; +} + +function formatTime(seconds: number): string { + if (!isFinite(seconds) || isNaN(seconds)) return "0:00"; + const mins = Math.floor(seconds / 60); + const secs = Math.floor(seconds % 60); + return `${mins}:${secs.toString().padStart(2, "0")}`; +} + +export default function AudioPlayer({ src, quote, filename }: AudioPlayerProps) { + const audioRef = useRef<HTMLAudioElement>(null); + const progressRef = useRef<HTMLDivElement>(null); + const [isPlaying, setIsPlaying] = useState(false); + const [currentTime, setCurrentTime] = useState(0); + const [duration, setDuration] = useState(0); + const [isPressed, setIsPressed] = useState(false); + + useEffect(() => { + const audio = audioRef.current; + if (!audio) return; + + const updateDuration = () => { + if (audio.duration && isFinite(audio.duration)) { + setDuration(audio.duration); + } + }; + + const handleTimeUpdate = () => { + setCurrentTime(audio.currentTime); + // Also try to update duration on time update in case metadata wasn't loaded + updateDuration(); + }; + + const handleEnded = () => { + setIsPlaying(false); + setCurrentTime(0); + }; + + const handleCanPlay = () => { + updateDuration(); + }; + + audio.addEventListener("loadedmetadata", updateDuration); + audio.addEventListener("durationchange", updateDuration); + audio.addEventListener("canplay", handleCanPlay); + audio.addEventListener("timeupdate", handleTimeUpdate); + audio.addEventListener("ended", handleEnded); + + // Try to get duration immediately if already loaded + updateDuration(); + + return () => { + audio.removeEventListener("loadedmetadata", updateDuration); + audio.removeEventListener("durationchange", updateDuration); + audio.removeEventListener("canplay", handleCanPlay); + audio.removeEventListener("timeupdate", handleTimeUpdate); + audio.removeEventListener("ended", handleEnded); + }; + }, []); + + const togglePlayPause = () => { + const audio = audioRef.current; + if (!audio) return; + + if (isPlaying) { + audio.pause(); + } else { + audio.play(); + } + setIsPlaying(!isPlaying); + }; + + const handleProgressClick = (e: React.MouseEvent<HTMLDivElement>) => { + const audio = audioRef.current; + const progressBar = progressRef.current; + if (!audio || !progressBar) return; + + const rect = progressBar.getBoundingClientRect(); + const clickX = e.clientX - rect.left; + const width = rect.width; + const percentage = clickX / width; + const newTime = percentage * duration; + + audio.currentTime = newTime; + setCurrentTime(newTime); + }; + + const progress = duration > 0 ? (currentTime / duration) * 100 : 0; + const displayFilename = filename || src.split("/").pop() || "audio.wav"; + + return ( +
+ {/* Hidden Audio Element */} +