diff --git a/.github/workflows/main-branch.yml b/.github/workflows/main-branch.yml
new file mode 100644
index 0000000..ff5d50a
--- /dev/null
+++ b/.github/workflows/main-branch.yml
@@ -0,0 +1,160 @@
+name: Main Branch Delivery
+
+on:
+  push:
+    branches: [main]
+  workflow_dispatch:
+
+permissions:
+  contents: read
+  pages: write
+  id-token: write
+
+jobs:
+  build:
+    name: Build and Upload Artifacts
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Build static site bundle
+        run: |
+          python - <<'PY'
+          import json
+          import shutil
+          import time
+          from pathlib import Path
+
+          start = time.perf_counter()
+          root = Path('.')
+          dist = Path('dist')
+          if dist.exists():
+              shutil.rmtree(dist)
+          dist.mkdir(parents=True, exist_ok=True)
+
+          for asset in ['index.html', 'style.css', 'app.js', 'ai-instruct.txt']:
+              shutil.copy2(root / asset, dist / asset)
+
+          duration = time.perf_counter() - start
+          report_dir = Path('ci_reports')
+          report_dir.mkdir(parents=True, exist_ok=True)
+          (report_dir / 'build_status.json').write_text(
+              json.dumps(
+                  {
+                      'status': 'succeeded',
+                      'artifact': 'github-pages',
+                      'duration': duration,
+                  },
+                  indent=2,
+              )
+          )
+          PY
+
+      - name: Upload static artifact
+        uses: actions/upload-pages-artifact@v3
+        with:
+          path: dist
+
+      - name: Upload build report
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: build-status-report
+          path: ci_reports/build_status.json
+          if-no-files-found: warn
+
+  report-build:
+    name: Report Build Status
+    runs-on: ubuntu-latest
+    needs: build
+    if: ${{ always() && needs.build.result != 'cancelled' }}
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Download build report
+        uses: actions/download-artifact@v4
+        continue-on-error: true
+        with:
+          name: build-status-report
+          path: ci_reports
+
+      - name: Render build summary
+        run: python scripts/report_build.py
+
+  run-tests:
+    name: Run Tests
+    runs-on: ubuntu-latest
+    needs: build
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Run Tests
+        run: python tests/run_tests.py
+
+      - name: Upload test report
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: main-test-report
+          path: ci_reports/test_results.json
+          if-no-files-found: warn
+
+  report-tests:
+    name: Report Test Statuses
+    runs-on: ubuntu-latest
+    needs: run-tests
+    if: ${{ always() && needs.run-tests.result != 'cancelled' }}
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Download test report
+        uses: actions/download-artifact@v4
+        continue-on-error: true
+        with:
+          name: main-test-report
+          path: ci_reports
+
+      - name: Render summary
+        run: python scripts/report_tests.py
+
+  deploy:
+    name: Deploy to Pages
+    runs-on: ubuntu-latest
+    needs: build
+    if: ${{ needs.build.result == 'success' }}
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+
+    steps:
+      - name: Deploy
+        id: deployment
+        uses: actions/deploy-pages@v4
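The build step above and `scripts/report_build.py` (later in this patch) coordinate only through `ci_reports/build_status.json`. A minimal local check of that contract, with the key set inferred from this patch rather than from any formal schema:

```python
# Sketch: validate ci_reports/build_status.json against the keys that
# scripts/report_build.py reads. The key set is inferred from this patch,
# not from a formal schema.
import json
from pathlib import Path

REPORT = Path("ci_reports") / "build_status.json"


def check_build_report() -> None:
    data = json.loads(REPORT.read_text())
    for key, expected in (("status", str), ("artifact", str), ("duration", (int, float))):
        if not isinstance(data.get(key), expected):
            raise TypeError(f"{key!r} missing or wrong type in {REPORT}")
    print(f"{REPORT} OK: {data['status']} in {data['duration']:.2f}s")


if __name__ == "__main__":
    check_build_report()
```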
diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml
new file mode 100644
index 0000000..5b893b8
--- /dev/null
+++ b/.github/workflows/pull-request.yml
@@ -0,0 +1,55 @@
+name: Pull Request Checks
+
+on:
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+
+jobs:
+  run-tests:
+    name: Run Tests
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Run Tests
+        run: python tests/run_tests.py
+
+      - name: Upload test report
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: pr-test-report
+          path: ci_reports/test_results.json
+          if-no-files-found: warn
+
+  report-tests:
+    name: Report Test Statuses
+    runs-on: ubuntu-latest
+    needs: run-tests
+    if: ${{ always() && needs.run-tests.result != 'cancelled' }}
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Download test report
+        uses: actions/download-artifact@v4
+        continue-on-error: true
+        with:
+          name: pr-test-report
+          path: ci_reports
+
+      - name: Render summary
+        run: python scripts/report_tests.py
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6df94c3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+__pycache__/
+*.py[cod]
+ci_reports/*.json
+dist/
+.env
diff --git a/README.md b/README.md
index e52ef8e..33970bd 100644
--- a/README.md
+++ b/README.md
@@ -1,31 +1,55 @@
-# Voice Chat App
-
-[![Pull Request Workflow Status](../../actions/workflows/pull-request.yml/badge.svg)](../../actions/workflows/pull-request.yml)
-[![Main Branch Workflow Status](../../actions/workflows/main-branch.yml/badge.svg?branch=main)](../../actions/workflows/main-branch.yml)
-[![Main Branch Tests](../../actions/workflows/main-branch.yml/badge.svg?branch=main&job=Run%20Tests)](../../actions/workflows/main-branch.yml)
-
-A lightweight voice chat application prototype with automated continuous
-integration. The workflows are designed so that deployments and pull requests
-are never blocked by automated checks while still surfacing rich status
-information.
-
-## Continuous Integration
-
-- **Pull Request Workflow** runs on every pull request event and executes all
-  tests located in the `tests/` directory. It publishes a markdown summary with
-  the individual results for quick review.
-- **Main Branch Workflow** runs when changes land on `main`. It builds the static
-  site bundle, uploads artifacts, deploys to GitHub Pages, and runs the same test
-  suite. Build and test summaries are exposed through job outputs and workflow
-  badges for quick visibility.
-
-## Local Testing
-
-To execute the same checks locally, run:
-
-```bash
-python tests/run_tests.py
-```
-
-The command writes a structured report to `ci_reports/test_results.json` that the
-workflows reuse when generating their summaries.
+# Unity Voice Chat Preview
+
+[![Pull Request Workflow Status](../../actions/workflows/pull-request.yml/badge.svg)](../../actions/workflows/pull-request.yml)
+[![Main Branch Workflow Status](../../actions/workflows/main-branch.yml/badge.svg?branch=main)](../../actions/workflows/main-branch.yml)
+[![Main Branch Tests](../../actions/workflows/main-branch.yml/badge.svg?branch=main&job=Run%20Tests)](../../actions/workflows/main-branch.yml)
+
+A responsive, speech-driven art experience powered by the Pollinations Unity
+model. The interface now features dedicated activity monitors for Unity (left)
+and the user microphone (right) so it is clear who is speaking at all times.
+The microphone toggle stays anchored to the bottom of the screen for easy access
+on both mobile and desktop.
+
+## Features
+
+- **Dual voice monitors** – modern circular visualizers spaced using the rule of
+  thirds, highlighting Unity (left) and the user (right) with independent
+  activity states.
+- **Bottom-aligned mute control** – a persistent, centered control that guides
+  users through granting microphone permissions and starting conversations.
+- **Graceful voice handling** – contextual ARIA labels, explicit error feedback,
+  and automatic re-listening when the browser allows continuous recognition.
+- **Dynamic imagery** – every prompt swaps the blurred cinematic background using
+  the selected Pollinations image model (`flux`, `turbo`, or `kontext`).
+
+## Continuous Integration
+
+Two separate GitHub Actions workflows keep deployments fast and informative:
+
+- **Pull Request Checks** (`.github/workflows/pull-request.yml`)
+  - Runs on every pull request update.
+  - Executes each script under `tests/` via `tests/run_tests.py`.
+  - Publishes a markdown summary of individual test results.
+
+- **Main Branch Delivery** (`.github/workflows/main-branch.yml`)
+  - Triggers on pushes to `main` and manual dispatches.
+  - Builds the static bundle, uploads the GitHub Pages artifact, and records a
+    machine-readable build summary.
+  - Executes the same test suite and reports results without blocking deploys.
+  - Deploys successful builds to GitHub Pages.
+
+Badges at the top of this document surface the latest pull-request and
+main-branch workflow status directly from GitHub Actions.
+
+## Local Development
+
+Install the lightweight test dependency and run the suite:
+
+```bash
+python -m pip install --upgrade pip
+pip install -r requirements.txt
+python tests/run_tests.py
+```
+
+The runner writes a structured report to `ci_reports/test_results.json` that the
+workflows reuse when generating their summaries.
diff --git a/app.js b/app.js
index b6ce2b6..99f9df2 100644
--- a/app.js
+++ b/app.js
@@ -1,6 +1,8 @@
-const visualization = document.getElementById('visualization');
 const background = document.getElementById('background');
 const muteIndicator = document.getElementById('mute-indicator');
+const indicatorText = muteIndicator?.querySelector('.indicator-text') ?? null;
+const aiCircle = document.querySelector('[data-role="ai"]');
+const userCircle = document.querySelector('[data-role="user"]');
 
 let currentImageModel = 'flux';
 let chatHistory = [];
@@ -10,6 +12,7 @@
 let isMuted = true;
 let hasMicPermission = false;
 const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+const synth = window.speechSynthesis;
 
 window.addEventListener('load', async () => {
   await loadSystemPrompt();
@@ -18,6 +21,20 @@ window.addEventListener('load', async () => {
   await initializeVoiceControl();
 });
 
+function setCircleState(circle, { speaking = false, listening = false, error = false, label = '' } = {}) {
+  if (!circle) {
+    return;
+  }
+
+  circle.classList.toggle('is-speaking', speaking);
+  circle.classList.toggle('is-listening', listening);
+  circle.classList.toggle('is-error', error);
+
+  if (label) {
+    circle.setAttribute('aria-label', label);
+  }
+}
+
 async function loadSystemPrompt() {
   try {
     const response = await fetch('ai-instruct.txt');
@@ -32,6 +49,10 @@ function setupSpeechRecognition() {
   if (!SpeechRecognition) {
     console.error('Speech recognition is not supported in this browser.');
    alert('Speech recognition is not supported in this browser.');
+    setCircleState(userCircle, {
+      label: 'Speech recognition is not supported in this browser',
+      error: true
+    });
     return;
   }
 
@@ -43,22 +64,52 @@ function setupSpeechRecognition() {
   recognition.onstart = () => {
     console.log('Voice recognition started.');
-    if (visualization) {
-      visualization.style.borderColor = '#ff0000';
-    }
+    setCircleState(userCircle, {
+      listening: true,
+      label: 'Listening for your voice'
+    });
+  };
+
+  recognition.onaudiostart = () => {
+    setCircleState(userCircle, {
+      listening: true,
+      label: 'Listening for your voice'
+    });
+  };
+
+  recognition.onspeechstart = () => {
+    setCircleState(userCircle, {
+      speaking: true,
+      listening: true,
+      label: 'Hearing you speak'
+    });
+  };
+
+  recognition.onspeechend = () => {
+    setCircleState(userCircle, {
+      listening: true,
+      speaking: false,
+      label: 'Processing what you said'
+    });
   };
 
   recognition.onend = () => {
     console.log('Voice recognition stopped.');
-    if (visualization) {
-      visualization.style.borderColor = '#ffffff';
-    }
+    setCircleState(userCircle, {
+      listening: false,
+      speaking: false,
+      label: isMuted ? 'Microphone is muted' : 'Listening for your voice'
+    });
 
     if (!isMuted) {
       try {
         recognition.start();
       } catch (error) {
         console.error('Failed to restart recognition:', error);
+        setCircleState(userCircle, {
+          error: true,
+          label: 'Unable to restart microphone recognition'
+        });
       }
     }
   };
 
@@ -66,6 +117,13 @@ function setupSpeechRecognition() {
   recognition.onresult = (event) => {
     const transcript = event.results[event.results.length - 1][0].transcript.trim();
     console.log('User said:', transcript);
+
+    setCircleState(userCircle, {
+      listening: true,
+      speaking: false,
+      label: 'Processing what you said'
+    });
+
     const isLocalCommand = handleVoiceCommand(transcript);
     if (!isLocalCommand) {
       getAIResponse(transcript);
@@ -74,6 +132,12 @@ function setupSpeechRecognition() {
 
   recognition.onerror = (event) => {
     console.error('Speech recognition error:', event.error);
+    setCircleState(userCircle, {
+      error: true,
+      listening: false,
+      speaking: false,
+      label: `Microphone error: ${event.error}`
+    });
   };
 }
 
@@ -85,6 +149,7 @@ async function initializeVoiceControl() {
   hasMicPermission = await requestMicPermission();
   if (!hasMicPermission) {
     alert('Microphone access is required for voice control.');
+    updateMuteIndicator();
     return;
   }
 
@@ -100,15 +165,26 @@
 async function requestMicPermission() {
   if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
     alert('Microphone access is not supported in this browser.');
+    setCircleState(userCircle, {
+      error: true,
+      label: 'Microphone access is not supported in this browser'
+    });
     return false;
   }
 
   try {
     const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
     stream.getTracks().forEach((track) => track.stop());
+    setCircleState(userCircle, {
+      label: 'Microphone is muted'
+    });
     return true;
   } catch (error) {
     console.error('Microphone permission denied:', error);
+    setCircleState(userCircle, {
+      error: true,
+      label: 'Microphone permission denied'
+    });
     return false;
   }
 }
@@ -118,10 +194,20 @@ function updateMuteIndicator() {
     return;
   }
 
+  muteIndicator.classList.add('is-visible');
+  muteIndicator.setAttribute('aria-hidden', 'false');
+
   if (isMuted) {
-    muteIndicator.classList.remove('hidden');
+    const message = hasMicPermission
+      ? 'Tap or click anywhere to unmute'
+      : 'Allow microphone access to start';
+    indicatorText && (indicatorText.textContent = message);
+    muteIndicator.dataset.state = 'muted';
+    muteIndicator.setAttribute('aria-label', 'Microphone muted. Tap to enable listening.');
   } else {
-    muteIndicator.classList.add('hidden');
+    indicatorText && (indicatorText.textContent = 'Listening… tap to mute');
+    muteIndicator.dataset.state = 'listening';
+    muteIndicator.setAttribute('aria-label', 'Microphone active. Tap to mute.');
   }
 }
 
@@ -143,26 +229,61 @@ async function attemptUnmute() {
   }
 
   isMuted = false;
+  setCircleState(userCircle, {
+    listening: true,
+    label: 'Listening for your voice'
+  });
   updateMuteIndicator();
+
   try {
     recognition.start();
   } catch (error) {
     console.error('Failed to start recognition:', error);
+    setCircleState(userCircle, {
+      error: true,
+      listening: false,
+      label: 'Unable to start microphone recognition'
+    });
+    isMuted = true;
+    updateMuteIndicator();
   }
 }
 
-if (muteIndicator) {
-  muteIndicator.addEventListener('click', async (event) => {
-    event.stopPropagation();
-    await attemptUnmute();
+function handleMuteToggle(event) {
+  event?.stopPropagation();
+
+  if (isMuted) {
+    attemptUnmute();
+    return;
+  }
+
+  isMuted = true;
+  setCircleState(userCircle, {
+    listening: false,
+    speaking: false,
+    label: 'Microphone is muted'
   });
+  updateMuteIndicator();
+
+  if (recognition) {
+    recognition.stop();
+  }
 }
 
-document.addEventListener('click', async () => {
-  await attemptUnmute();
+muteIndicator?.addEventListener('click', handleMuteToggle);
+
+document.addEventListener('click', () => {
+  if (isMuted) {
+    attemptUnmute();
+  }
 });
 
-const synth = window.speechSynthesis;
+document.addEventListener('keydown', (event) => {
+  if ((event.key === 'Enter' || event.key === ' ') && isMuted) {
+    event.preventDefault();
+    attemptUnmute();
+  }
+});
@@ -170,35 +291,39 @@ function speak(text) {
   if (synth.speaking) {
     return;
   }
 
-  if (text !== '') {
-    const utterance = new SpeechSynthesisUtterance(text);
-    const voices = synth.getVoices();
-    const ukFemaleVoice = voices.find((voice) =>
-      voice.name.includes('Google UK English Female') || (voice.lang === 'en-GB' && voice.gender === 'female')
-    );
+  if (text === '') {
+    return;
+  }
 
-    if (ukFemaleVoice) {
-      utterance.voice = ukFemaleVoice;
-    } else {
-      console.warn('UK English female voice not found, using default.');
-    }
+  const utterance = new SpeechSynthesisUtterance(text);
+  const voices = synth.getVoices();
+  const ukFemaleVoice = voices.find((voice) =>
+    voice.name.includes('Google UK English Female') || (voice.lang === 'en-GB' && voice.gender === 'female')
+  );
 
-    utterance.onstart = () => {
-      console.log('AI is speaking...');
-      if (visualization) {
-        visualization.style.animation = 'pulse 1s infinite';
-      }
-    };
+  if (ukFemaleVoice) {
+    utterance.voice = ukFemaleVoice;
+  } else {
+    console.warn('UK English female voice not found, using default.');
+  }
 
-    utterance.onend = () => {
-      console.log('AI finished speaking.');
-      if (visualization) {
-        visualization.style.animation = '';
-      }
-    };
+  utterance.onstart = () => {
+    console.log('AI is speaking...');
+    setCircleState(aiCircle, {
+      speaking: true,
+      label: 'Unity is speaking'
+    });
+  };
 
-    synth.speak(utterance);
-  }
+  utterance.onend = () => {
+    console.log('AI finished speaking.');
+    setCircleState(aiCircle, {
+      speaking: false,
+      label: 'Unity is idle'
+    });
+  };
+
+  synth.speak(utterance);
 }
@@ -207,6 +332,11 @@ function handleVoiceCommand(command) {
   if (lowerCaseCommand.includes('mute my mic') || lowerCaseCommand.includes('mute microphone')) {
     isMuted = true;
     updateMuteIndicator();
+    setCircleState(userCircle, {
+      listening: false,
+      speaking: false,
+      label: 'Microphone is muted'
+    });
     if (recognition) {
       recognition.stop();
     }
@@ -217,6 +347,10 @@ function handleVoiceCommand(command) {
   if (lowerCaseCommand.includes('unmute my mic') || lowerCaseCommand.includes('unmute microphone')) {
     isMuted = false;
     updateMuteIndicator();
+    setCircleState(userCircle, {
+      listening: true,
+      label: 'Listening for your voice'
+    });
     if (recognition) {
       try {
         recognition.start();
@@ -230,6 +364,10 @@ function handleVoiceCommand(command) {
 
   if (lowerCaseCommand.includes('shut up') || lowerCaseCommand.includes('be quiet')) {
     synth.cancel();
+    setCircleState(aiCircle, {
+      speaking: false,
+      label: 'Unity is idle'
+    });
     return true;
   }
 
@@ -304,15 +442,32 @@ async function getAIResponse(userInput) {
       })
     });
 
+    if (!textResponse.ok) {
+      throw new Error(`Pollinations text API returned ${textResponse.status}`);
+    }
+
     const data = await textResponse.json();
-    aiText = data.choices[0].message.content;
+    aiText = data.choices?.[0]?.message?.content ?? '';
 
-    chatHistory.push({ role: 'assistant', content: aiText });
+    if (!aiText) {
+      throw new Error('Received empty response from Pollinations AI');
+    }
 
+    chatHistory.push({ role: 'assistant', content: aiText });
     speak(aiText);
   } catch (error) {
     console.error('Error getting text from Pollinations AI:', error);
+    setCircleState(aiCircle, {
+      error: true,
+      label: 'Unity could not respond'
+    });
     speak("Sorry, I couldn't get a text response.");
+    setTimeout(() => {
+      setCircleState(aiCircle, {
+        error: false,
+        label: 'Unity is idle'
+      });
+    }, 2400);
   }
 
   try {
diff --git a/ci_reports/.gitkeep b/ci_reports/.gitkeep
new file mode 100644
index 0000000..e69de29
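Outside the browser, the same Pollinations request that `getAIResponse()` issues can be sketched with `requests`; the endpoint and payload shape mirror `tests/test_text_generation.py` later in this patch, while the prompts here are placeholders rather than the contents of `ai-instruct.txt`:

```python
# Sketch: call the Pollinations text endpoint the way app.js does.
# Prompts are placeholders; app.js sends the ai-instruct.txt system prompt
# plus the running chat history.
import requests

API_URL = "https://text.pollinations.ai/openai"

payload = {
    "model": "unity",
    "messages": [
        {"role": "system", "content": "You are Unity, a concise greeter."},
        {"role": "user", "content": "Say hello in one sentence."},
    ],
}

response = requests.post(API_URL, json=payload, timeout=20)
response.raise_for_status()
reply = response.json().get("choices", [{}])[0].get("message", {}).get("content", "")
print(reply)
```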
diff --git a/index.html b/index.html
index 21b08b6..45fb567 100644
--- a/index.html
+++ b/index.html
@@ -1,4 +1,4 @@
 <!DOCTYPE html>
-<html>
+<html lang="en">
 <head>
   <meta charset="UTF-8">
@@ -7,9 +7,22 @@
 <body>
   <div id="background"></div>
-  <div id="visualization"></div>
-
-  <div id="mute-indicator" class="hidden"></div>
+  <main class="layout">
+    <div class="voice-stage">
+      <div class="voice-circle ai" data-role="ai" aria-label="Unity is idle">
+        <span class="pulse-ring"></span>
+        <span class="voice-label">Unity</span>
+      </div>
+      <div class="voice-circle user" data-role="user" aria-label="Microphone is muted">
+        <span class="pulse-ring"></span>
+        <span class="voice-label">You</span>
+      </div>
+    </div>
+  </main>
+
+  <button id="mute-indicator" class="mute-indicator" type="button" data-state="muted">
+    <span class="indicator-text"></span>
+  </button>
 
   <script src="app.js"></script>
 </body>
 </html>
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..0eb8cae
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+requests>=2.31.0
diff --git a/scripts/report_build.py b/scripts/report_build.py
new file mode 100644
index 0000000..4059047
--- /dev/null
+++ b/scripts/report_build.py
@@ -0,0 +1,50 @@
+"""Produce a GitHub Actions summary for build status artifacts."""
+
+from __future__ import annotations
+
+import json
+import os
+from pathlib import Path
+from typing import Any, Dict
+
+ROOT = Path(__file__).resolve().parent.parent
+REPORT_PATH = ROOT / "ci_reports" / "build_status.json"
+
+
+def load_build_report() -> Dict[str, Any]:
+    if not REPORT_PATH.exists():
+        return {}
+    return json.loads(REPORT_PATH.read_text())
+
+
+def main() -> None:
+    report = load_build_report()
+    if not report:
+        print("No build status information available.")
+        return
+
+    status = report.get("status", "unknown")
+    artifact = report.get("artifact", "(none)")
+    duration = report.get("duration", 0.0)
+    emoji = "✅" if status == "succeeded" else "❌"
+
+    summary_lines = [
+        "| Status | Artifact | Duration (s) |",
+        "| --- | --- | --- |",
+        f"| {emoji} {status.title()} | {artifact} | {duration:.2f} |",
+    ]
+
+    table = "\n".join(summary_lines)
+    print("Build Summary:\n")
+    print(table)
+
+    summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
+    if summary_path:
+        with open(summary_path, "a", encoding="utf-8") as handle:
+            handle.write("## Build Status\n\n")
+            handle.write(table)
+            handle.write("\n")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/report_tests.py b/scripts/report_tests.py
new file mode 100644
index 0000000..d66d54d
--- /dev/null
+++ b/scripts/report_tests.py
@@ -0,0 +1,50 @@
+"""Render a human-readable summary of test results for GitHub Actions."""
+
+from __future__ import annotations
+
+import json
+import os
+from pathlib import Path
+from typing import Any, Dict
+
+ROOT = Path(__file__).resolve().parent.parent
+REPORT_PATH = ROOT / "ci_reports" / "test_results.json"
+
+
+def load_results() -> Dict[str, Any]:
+    if not REPORT_PATH.exists():
+        return {"results": []}
+    return json.loads(REPORT_PATH.read_text())
+
+
+def render_summary(results: Dict[str, Any]) -> str:
+    rows = ["| Test | Status | Details |", "| --- | --- | --- |"]
+    for entry in results.get("results", []):
+        name = entry.get("name", "Unnamed test")
+        status = entry.get("status", "unknown")
+        details = entry.get("details", "")
+        emoji = "✅" if status == "passed" else "❌"
+        rows.append(f"| {name} | {emoji} {status.title()} | {details} |")
+    return "\n".join(rows)
+
+
+def main() -> None:
+    results = load_results()
+    if not results.get("results"):
+        print("No test results were generated.")
+        return
+
+    summary_table = render_summary(results)
+    print("Test Summary:\n")
+    print(summary_table)
+
+    summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
+    if summary_path:
+        with open(summary_path, "a", encoding="utf-8") as handle:
+            handle.write("## Test Results\n\n")
+            handle.write(summary_table)
+            handle.write("\n")
+
+
+if __name__ == "__main__":
+    main()
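Since `report_tests.py` prints to stdout whenever `GITHUB_STEP_SUMMARY` is unset, the summary table can be previewed locally by feeding the script a hand-written report. A small sketch, with made-up sample entries:

```python
# Sketch: write a fake ci_reports/test_results.json and render it exactly
# as the CI job would. Run from the repository root. Sample data is made up.
import json
import subprocess
import sys
from pathlib import Path

sample = {
    "results": [
        {"name": "Pollinations Unity text response", "status": "passed", "details": "Hello from Unity!"},
        {"name": "Hypothetical failing check", "status": "failed", "details": "TimeoutError: request timed out"},
    ]
}

Path("ci_reports").mkdir(exist_ok=True)
(Path("ci_reports") / "test_results.json").write_text(json.dumps(sample, indent=2))
subprocess.run([sys.executable, "scripts/report_tests.py"], check=True)
```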
diff --git a/style.css b/style.css
index 552d6ae..01d3e69 100644
--- a/style.css
+++ b/style.css
@@ -1,69 +1,225 @@
-body {
+:root {
+  color-scheme: dark;
+  --bg-color: #0b0b0f;
+  --accent-ai: #7c5cff;
+  --accent-user: #43d9bd;
+  --border-color: rgba(255, 255, 255, 0.18);
+  --text-color: #f5f5f5;
+  --muted-text: rgba(245, 245, 245, 0.82);
+}
+
+* {
+  box-sizing: border-box;
+}
+
+body {
   margin: 0;
-  padding: 0;
-  background-color: #121212;
-  color: #ffffff;
-  font-family: sans-serif;
-  overflow: hidden;
+  min-height: 100vh;
+  font-family: "Inter", "Segoe UI", system-ui, -apple-system, sans-serif;
+  color: var(--text-color);
+  background-color: var(--bg-color);
+  display: flex;
+  flex-direction: column;
 }
 
 #background {
   position: fixed;
-  top: 0;
-  left: 0;
-  width: 100%;
-  height: 100%;
+  inset: 0;
   background-size: cover;
   background-position: center;
+  filter: blur(24px) saturate(120%);
+  transform: scale(1.02);
+  z-index: -2;
+}
+
+#background::after {
+  content: "";
+  position: absolute;
+  inset: 0;
+  background: linear-gradient(180deg, rgba(5, 6, 12, 0.85) 0%, rgba(5, 6, 12, 0.95) 80%, rgba(5, 6, 12, 1) 100%);
   z-index: -1;
 }
 
-#visualization {
+.layout {
+  flex: 1;
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  padding: clamp(40px, 8vh, 120px) clamp(20px, 8vw, 120px);
+}
+
+.voice-stage {
+  position: relative;
+  width: min(960px, 100%);
+  display: grid;
+  grid-template-columns: repeat(6, 1fr);
+  align-items: center;
+  gap: clamp(16px, 6vw, 96px);
+}
+
+.voice-circle {
+  position: relative;
+  grid-column: span 2;
+  width: clamp(140px, 28vw, 240px);
+  aspect-ratio: 1/1;
+  border-radius: 50%;
+  border: 1.5px solid var(--border-color);
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  background: rgba(12, 14, 24, 0.68);
+  backdrop-filter: blur(12px);
+  overflow: hidden;
+  transition: border-color 0.4s ease, box-shadow 0.4s ease, transform 0.4s ease;
+}
+
+.voice-circle.ai {
+  grid-column: 2 / span 2;
+}
+
+.voice-circle.user {
+  grid-column: 4 / span 2;
+}
+
+.voice-circle .voice-label {
+  position: relative;
+  font-size: clamp(1rem, 2.2vw, 1.35rem);
+  letter-spacing: 0.08em;
+  text-transform: uppercase;
+  z-index: 2;
+}
+
+.pulse-ring {
   position: absolute;
-  top: 50%;
-  left: 50%;
-  transform: translate(-50%, -50%);
-  width: 200px;
-  height: 200px;
+  inset: 12%;
   border-radius: 50%;
-  border: 2px solid #ffffff;
+  border: 2px solid transparent;
+  opacity: 0;
+  z-index: 1;
 }
 
-#mute-indicator {
-  position: fixed;
-  bottom: 24px;
-  left: 50%;
-  transform: translateX(-50%);
-  background: rgba(0, 0, 0, 0.6);
-  padding: 12px 20px;
-  border-radius: 20px;
-  font-size: 0.95rem;
-  text-align: center;
-  letter-spacing: 0.5px;
+.voice-circle.ai .pulse-ring {
+  border-color: rgba(124, 92, 255, 0.6);
+}
+
+.voice-circle.user .pulse-ring {
+  border-color: rgba(67, 217, 189, 0.6);
+}
+
+.voice-circle.is-speaking {
+  box-shadow: 0 0 42px -18px rgba(255, 255, 255, 0.6);
+  transform: translateY(-6px) scale(1.03);
+}
+
+.voice-circle.is-speaking .pulse-ring {
+  animation: pulse 1.4s ease-in-out infinite;
+  opacity: 1;
+}
+
+.voice-circle.is-listening {
+  border-color: rgba(67, 217, 189, 0.8);
+  box-shadow: 0 0 42px -12px rgba(67, 217, 189, 0.6);
+}
+
+.voice-circle.is-error {
+  border-color: rgba(255, 99, 132, 0.8);
+  box-shadow: 0 0 38px -10px rgba(255, 99, 132, 0.5);
+}
+
+.mute-indicator {
+  position: sticky;
+  bottom: 0;
+  margin: clamp(12px, 4vh, 32px) auto;
+  align-self: center;
+  padding: 14px 24px;
+  border-radius: 999px;
+  background: rgba(12, 14, 24, 0.7);
+  border: 1px solid rgba(255, 255, 255, 0.18);
+  color: var(--muted-text);
+  font-size: clamp(0.9rem, 2.2vw, 1rem);
+  letter-spacing: 0.04em;
   cursor: pointer;
-  transition: opacity 0.3s ease;
-  z-index: 2;
+  transition: transform 0.25s ease, background 0.25s ease, opacity 0.25s ease;
+  display: inline-flex;
+  align-items: center;
+  justify-content: center;
+  max-width: min(460px, calc(100% - 32px));
+  backdrop-filter: blur(10px);
+  z-index: 4;
 }
 
-#mute-indicator:hover {
-  opacity: 0.85;
+.mute-indicator:focus-visible {
+  outline: 3px solid rgba(124, 92, 255, 0.8);
+  outline-offset: 4px;
 }
 
-.hidden {
-  display: none;
+.mute-indicator:hover,
+.mute-indicator:active {
+  transform: translateY(-2px) scale(1.01);
+  background: rgba(12, 14, 24, 0.82);
+}
+
+.mute-indicator[data-state="listening"] {
+  background: rgba(67, 217, 189, 0.16);
+  border-color: rgba(67, 217, 189, 0.5);
+  color: var(--text-color);
 }
 
 @keyframes pulse {
   0% {
-    transform: translate(-50%, -50%) scale(1);
-    opacity: 1;
+    transform: scale(1);
   }
   50% {
-    transform: translate(-50%, -50%) scale(1.2);
-    opacity: 0.7;
+    transform: scale(1.12);
   }
   100% {
-    transform: translate(-50%, -50%) scale(1);
-    opacity: 1;
+    transform: scale(1);
+  }
+}
+
+@media (max-width: 720px) {
+  .voice-stage {
+    grid-template-columns: repeat(4, 1fr);
+    gap: clamp(18px, 10vw, 64px);
   }
-}
+
+  .voice-circle.ai {
+    grid-column: 1 / span 2;
+  }
+
+  .voice-circle.user {
+    grid-column: 3 / span 2;
+  }
+}
+
+@media (max-width: 520px) {
+  .layout {
+    padding-top: clamp(48px, 12vh, 80px);
+    padding-bottom: clamp(48px, 18vh, 120px);
+  }
+
+  .voice-stage {
+    grid-template-columns: repeat(2, 1fr);
+  }
+
+  .voice-circle {
+    grid-column: span 2;
+    justify-self: center;
+  }
+
+  .voice-circle.ai {
+    margin-bottom: clamp(24px, 8vh, 40px);
+  }
+}
+
+@media (prefers-reduced-motion: reduce) {
+  *,
+  *::before,
+  *::after {
+    animation-duration: 0.01ms !important;
+    animation-iteration-count: 1 !important;
+    transition-duration: 0.01ms !important;
+    scroll-behavior: auto !important;
+  }
+}
{status}") + if details: + print(f" {details}") + + REPORT_PATH.write_text(json.dumps({"results": results}, indent=2)) + print(f"\n📄 Wrote detailed report to {REPORT_PATH.relative_to(ROOT)}") + + if failures: + print(f"❌ {failures} test(s) failed.") + else: + print("✅ All tests passed.") + return 1 if failures else 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/tests/test_text_generation.py b/tests/test_text_generation.py new file mode 100644 index 0000000..2f445a6 --- /dev/null +++ b/tests/test_text_generation.py @@ -0,0 +1,59 @@ +"""Integration test for the Pollinations text API using the Unity model.""" + +from __future__ import annotations + +import json +import time +from typing import Any, Dict + +import requests + +TEST_NAME = "Pollinations Unity text response" +API_URL = "https://text.pollinations.ai/openai" + + +def run() -> Dict[str, Any]: + """Execute the test and return a structured result dictionary.""" + start = time.perf_counter() + payload = { + "model": "unity", + "messages": [ + {"role": "system", "content": "You are Unity, a concise greeter."}, + {"role": "user", "content": "Say hello and include the word Unity exactly once."}, + ], + } + + try: + response = requests.post(API_URL, json=payload, timeout=20) + duration = time.perf_counter() - start + response.raise_for_status() + data = response.json() + content = data.get("choices", [{}])[0].get("message", {}).get("content", "").strip() + + if not content: + raise ValueError("No content returned from Pollinations API") + + if "unity" not in content.lower(): + raise AssertionError("Response did not mention Unity") + + return { + "name": TEST_NAME, + "status": "passed", + "details": content, + "duration": duration, + } + except Exception as exc: # noqa: BLE001 - broad catch to report failure details + duration = time.perf_counter() - start + return { + "name": TEST_NAME, + "status": "failed", + "details": f"{type(exc).__name__}: {exc}", + "duration": duration, + } + + +if __name__ == "__main__": + result = run() + print(json.dumps(result, indent=2, sort_keys=True)) + if result["status"] != "passed": + raise SystemExit(1)