diff --git a/.github/workflows/main-branch.yml b/.github/workflows/main-branch.yml
new file mode 100644
index 0000000..ff5d50a
--- /dev/null
+++ b/.github/workflows/main-branch.yml
@@ -0,0 +1,160 @@
+name: Main Branch Delivery
+
+on:
+  push:
+    branches: [main]
+  workflow_dispatch:
+
+permissions:
+  contents: read
+  pages: write
+  id-token: write
+
+jobs:
+  build:
+    name: Build and Upload Artifacts
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Build static site bundle
+        run: |
+          python - <<'PY'
+          import json
+          import shutil
+          import time
+          from pathlib import Path
+
+          start = time.perf_counter()
+          root = Path('.')
+          dist = Path('dist')
+          if dist.exists():
+              shutil.rmtree(dist)
+          dist.mkdir(parents=True, exist_ok=True)
+
+          for asset in ['index.html', 'style.css', 'app.js', 'ai-instruct.txt']:
+              shutil.copy2(root / asset, dist / asset)
+
+          duration = time.perf_counter() - start
+          report_dir = Path('ci_reports')
+          report_dir.mkdir(parents=True, exist_ok=True)
+          (report_dir / 'build_status.json').write_text(
+              json.dumps(
+                  {
+                      'status': 'succeeded',
+                      'artifact': 'github-pages',
+                      'duration': duration,
+                  },
+                  indent=2,
+              )
+          )
+          PY
+
+      - name: Upload static artifact
+        uses: actions/upload-pages-artifact@v3
+        with:
+          path: dist
+
+      - name: Upload build report
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: build-status-report
+          path: ci_reports/build_status.json
+          if-no-files-found: warn
+
+  report-build:
+    name: Report Build Status
+    runs-on: ubuntu-latest
+    needs: build
+    if: ${{ always() && needs.build.result != 'cancelled' }}
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Download build report
+        uses: actions/download-artifact@v4
+        continue-on-error: true
+        with:
+          name: build-status-report
+          path: ci_reports
+
+      - name: Render build summary
+        run: python scripts/report_build.py
+
+  run-tests:
+    name: Run Tests
+    runs-on: ubuntu-latest
+    needs: build
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Run Tests
+        run: python tests/run_tests.py
+
+      - name: Upload test report
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: main-test-report
+          path: ci_reports/test_results.json
+          if-no-files-found: warn
+
+  report-tests:
+    name: Report Test Statuses
+    runs-on: ubuntu-latest
+    needs: run-tests
+    if: ${{ always() && needs.run-tests.result != 'cancelled' }}
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Download test report
+        uses: actions/download-artifact@v4
+        continue-on-error: true
+        with:
+          name: main-test-report
+          path: ci_reports
+
+      - name: Render summary
+        run: python scripts/report_tests.py
+
+  deploy:
+    name: Deploy to Pages
+    runs-on: ubuntu-latest
+    needs: build
+    if: ${{ needs.build.result == 'success' }}
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+
+    steps:
+      - name: Deploy
+        id: deployment
+        uses: actions/deploy-pages@v4
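
Note for reviewers: the report jobs above shell out to helper scripts that are not part of this diff. A minimal sketch of what `scripts/report_build.py` might look like, assuming the `build_status.json` shape written by the build step (`status`, `artifact`, `duration`) and the standard `GITHUB_STEP_SUMMARY` file that Actions provides; the real script may differ:

```python
"""Render ci_reports/build_status.json into the Actions step summary.

Hypothetical sketch: scripts/report_build.py is referenced by the
workflow above but is not included in this diff.
"""
import json
import os
from pathlib import Path

REPORT = Path("ci_reports/build_status.json")


def main() -> None:
    lines = ["## Build Status"]
    if REPORT.exists():
        data = json.loads(REPORT.read_text())
        lines += [
            f"- **Status:** {data.get('status', 'unknown')}",
            f"- **Artifact:** {data.get('artifact', 'n/a')}",
            f"- **Duration:** {float(data.get('duration', 0)):.2f}s",
        ]
    else:
        # The download step runs with continue-on-error, so the report
        # may legitimately be missing when the build job failed early.
        lines.append("- No build report was produced for this run.")

    # GITHUB_STEP_SUMMARY points at the run's markdown summary file;
    # fall back to stdout so the script also works locally.
    summary = os.environ.get("GITHUB_STEP_SUMMARY")
    text = "\n".join(lines) + "\n"
    if summary:
        with open(summary, "a", encoding="utf-8") as handle:
            handle.write(text)
    else:
        print(text)


if __name__ == "__main__":
    main()
```
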
diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml
new file mode 100644
index 0000000..5b893b8
--- /dev/null
+++ b/.github/workflows/pull-request.yml
@@ -0,0 +1,53 @@
+name: Pull Request Checks
+
+on:
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+
+jobs:
+  run-tests:
+    name: Run Tests
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Run Tests
+        run: python tests/run_tests.py
+
+      - name: Upload test report
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: pr-test-report
+          path: ci_reports/test_results.json
+          if-no-files-found: warn
+
+  report-tests:
+    name: Report Test Statuses
+    runs-on: ubuntu-latest
+    needs: run-tests
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Download test report
+        uses: actions/download-artifact@v4
+        with:
+          name: pr-test-report
+          path: ci_reports
+
+      - name: Render summary
+        run: python scripts/report_tests.py
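
`scripts/report_tests.py`, called by both workflows, is likewise outside this diff. A sketch under the assumption that `ci_reports/test_results.json` holds a top-level `results` list of `{name, status}` entries; the actual schema is whatever `tests/run_tests.py` emits:

```python
"""Render ci_reports/test_results.json into the Actions step summary.

Hypothetical sketch: scripts/report_tests.py and the exact report
schema are not shown in this diff; a results list of name/status
entries is assumed.
"""
import json
import os
from pathlib import Path

REPORT = Path("ci_reports/test_results.json")


def main() -> None:
    lines = ["## Test Results"]
    if REPORT.exists():
        for entry in json.loads(REPORT.read_text()).get("results", []):
            icon = "✅" if entry.get("status") == "passed" else "❌"
            lines.append(f"- {icon} `{entry.get('name', 'unknown')}`")
    else:
        lines.append("- No test report was produced for this run.")

    summary = os.environ.get("GITHUB_STEP_SUMMARY")
    text = "\n".join(lines) + "\n"
    if summary:
        with open(summary, "a", encoding="utf-8") as handle:
            handle.write(text)
    else:
        print(text)


if __name__ == "__main__":
    main()
```
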
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6df94c3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+__pycache__/
+*.py[cod]
+ci_reports/*.json
+dist/
+.env
diff --git a/README.md b/README.md
index e52ef8e..33970bd 100644
--- a/README.md
+++ b/README.md
@@ -1,31 +1,55 @@
-# Voice Chat App
-
-[](../../actions/workflows/pull-request.yml)
-[](../../actions/workflows/main-branch.yml)
-[](../../actions/workflows/main-branch.yml)
-
-A lightweight voice chat application prototype with automated continuous
-integration. The workflows are designed so that deployments and pull requests
-are never blocked by automated checks while still surfacing rich status
-information.
-
-## Continuous Integration
-
-- **Pull Request Workflow** runs on every pull request event and executes all
-  tests located in the `tests/` directory. It publishes a markdown summary with
-  the individual results for quick review.
-- **Main Branch Workflow** runs when changes land on `main`. It builds the
-  static site bundle, uploads artifacts, deploys to GitHub Pages, and runs the
-  same test suite. Build and test summaries are exposed through job outputs
-  and workflow badges for quick visibility.
-
-## Local Testing
-
-To execute the same checks locally, run:
-
-```bash
-python tests/run_tests.py
-```
-
-The command writes a structured report to `ci_reports/test_results.json` that
-the workflows reuse when generating their summaries.
+# Unity Voice Chat Preview
+
+[](../../actions/workflows/pull-request.yml)
+[](../../actions/workflows/main-branch.yml)
+[](../../actions/workflows/main-branch.yml)
+
+A responsive, speech-driven art experience powered by the Pollinations Unity
+model. The interface now features dedicated activity monitors for Unity (left)
+and the user microphone (right) so it is clear who is speaking at all times.
+The microphone toggle stays anchored to the bottom of the screen for easy
+access on both mobile and desktop.
+
+## Features
+
+- **Dual voice monitors** – modern circular visualizers spaced using the rule
+  of thirds, highlighting Unity (left) and the user (right) with independent
+  activity states.
+- **Bottom-aligned mute control** – a persistent, centered control that guides
+  users through granting microphone permissions and starting conversations.
+- **Graceful voice handling** – contextual ARIA labels, explicit error
+  feedback, and automatic re-listening when the browser allows continuous
+  recognition.
+- **Dynamic imagery** – every prompt swaps the blurred cinematic background
+  using the selected Pollinations image model (`flux`, `turbo`, or `kontext`).
+
+## Continuous Integration
+
+Two separate GitHub Actions workflows keep deployments fast and informative:
+
+- **Pull Request Checks** (`.github/workflows/pull-request.yml`)
+  - Runs on every pull request update.
+  - Executes each script under `tests/` via `tests/run_tests.py`.
+  - Publishes a markdown summary of individual test results.
+
+- **Main Branch Delivery** (`.github/workflows/main-branch.yml`)
+  - Triggers on pushes to `main` and manual dispatches.
+  - Builds the static bundle, uploads the GitHub Pages artifact, and records
+    a machine-readable build summary.
+  - Executes the same test suite and reports results without blocking deploys.
+  - Deploys successful builds to GitHub Pages.
+
+Badges at the top of this document surface the latest workflow and main-branch
+test status directly from GitHub Actions.
+
+## Local Development
+
+Install the lightweight test dependency and run the suite:
+
+```bash
+python -m pip install --upgrade pip
+pip install -r requirements.txt
+python tests/run_tests.py
+```
+
+The runner writes a structured report to `ci_reports/test_results.json` that
+the workflows reuse when generating their summaries.
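
The test runner the README points at, `tests/run_tests.py`, is also not part of this diff. A plausible sketch, assuming the tests are standalone `test_*.py` scripts and that the runner always exits zero so checks stay non-blocking, matching the philosophy described above; the real runner may differ:

```python
"""Run each script under tests/ and write ci_reports/test_results.json.

Hypothetical sketch: the actual tests/run_tests.py is not shown in
this diff, and the test_*.py naming convention is an assumption.
"""
import json
import subprocess
import sys
from pathlib import Path

TESTS_DIR = Path("tests")
REPORT = Path("ci_reports/test_results.json")


def main() -> None:
    results = []
    for script in sorted(TESTS_DIR.glob("test_*.py")):
        proc = subprocess.run([sys.executable, str(script)])
        results.append({
            "name": script.name,
            "status": "passed" if proc.returncode == 0 else "failed",
        })
        print(f"{script.name}: {results[-1]['status']}")

    REPORT.parent.mkdir(parents=True, exist_ok=True)
    REPORT.write_text(json.dumps({"results": results}, indent=2))
    # Always exit 0: failures are surfaced through the report and the
    # summary jobs rather than by failing the workflow.


if __name__ == "__main__":
    main()
```
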
diff --git a/app.js b/app.js
index b6ce2b6..99f9df2 100644
--- a/app.js
+++ b/app.js
@@ -1,6 +1,8 @@
-const visualization = document.getElementById('visualization');
 const background = document.getElementById('background');
 const muteIndicator = document.getElementById('mute-indicator');
+const indicatorText = muteIndicator?.querySelector('.indicator-text') ?? null;
+const aiCircle = document.querySelector('[data-role="ai"]');
+const userCircle = document.querySelector('[data-role="user"]');
 
 let currentImageModel = 'flux';
 let chatHistory = [];
@@ -10,6 +12,7 @@
 let isMuted = true;
 let hasMicPermission = false;
 
 const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+const synth = window.speechSynthesis;
 
 window.addEventListener('load', async () => {
   await loadSystemPrompt();
@@ -18,6 +21,20 @@
   await initializeVoiceControl();
 });
 
+function setCircleState(circle, { speaking = false, listening = false, error = false, label = '' } = {}) {
+  if (!circle) {
+    return;
+  }
+
+  circle.classList.toggle('is-speaking', speaking);
+  circle.classList.toggle('is-listening', listening);
+  circle.classList.toggle('is-error', error);
+
+  if (label) {
+    circle.setAttribute('aria-label', label);
+  }
+}
+
 async function loadSystemPrompt() {
   try {
     const response = await fetch('ai-instruct.txt');
@@ -32,6 +49,10 @@
 function setupSpeechRecognition() {
   if (!SpeechRecognition) {
     console.error('Speech recognition is not supported in this browser.');
     alert('Speech recognition is not supported in this browser.');
+    setCircleState(userCircle, {
+      label: 'Speech recognition is not supported in this browser',
+      error: true
+    });
     return;
   }
@@ -43,22 +64,52 @@
 
   recognition.onstart = () => {
     console.log('Voice recognition started.');
-    if (visualization) {
-      visualization.style.borderColor = '#ff0000';
-    }
+    setCircleState(userCircle, {
+      listening: true,
+      label: 'Listening for your voice'
+    });
+  };
+
+  recognition.onaudiostart = () => {
+    setCircleState(userCircle, {
+      listening: true,
+      label: 'Listening for your voice'
+    });
+  };
+
+  recognition.onspeechstart = () => {
+    setCircleState(userCircle, {
+      speaking: true,
+      listening: true,
+      label: 'Hearing you speak'
+    });
+  };
+
+  recognition.onspeechend = () => {
+    setCircleState(userCircle, {
+      listening: true,
+      speaking: false,
+      label: 'Processing what you said'
+    });
   };
 
   recognition.onend = () => {
     console.log('Voice recognition stopped.');
-    if (visualization) {
-      visualization.style.borderColor = '#ffffff';
-    }
+    setCircleState(userCircle, {
+      listening: false,
+      speaking: false,
+      label: isMuted ? 'Microphone is muted' : 'Listening for your voice'
+    });
 
     if (!isMuted) {
       try {
         recognition.start();
       } catch (error) {
         console.error('Failed to restart recognition:', error);
+        setCircleState(userCircle, {
+          error: true,
+          label: 'Unable to restart microphone recognition'
+        });
       }
     }
   };
@@ -66,6 +117,13 @@
   recognition.onresult = (event) => {
     const transcript = event.results[event.results.length - 1][0].transcript.trim();
     console.log('User said:', transcript);
+
+    setCircleState(userCircle, {
+      listening: true,
+      speaking: false,
+      label: 'Processing what you said'
+    });
+
     const isLocalCommand = handleVoiceCommand(transcript);
     if (!isLocalCommand) {
       getAIResponse(transcript);
@@ -74,6 +132,12 @@
 
   recognition.onerror = (event) => {
     console.error('Speech recognition error:', event.error);
+    setCircleState(userCircle, {
+      error: true,
+      listening: false,
+      speaking: false,
+      label: `Microphone error: ${event.error}`
+    });
   };
 }
@@ -85,6 +149,7 @@
   hasMicPermission = await requestMicPermission();
   if (!hasMicPermission) {
     alert('Microphone access is required for voice control.');
+    updateMuteIndicator();
     return;
   }
@@ -100,15 +165,26 @@
 async function requestMicPermission() {
   if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
     alert('Microphone access is not supported in this browser.');
+    setCircleState(userCircle, {
+      error: true,
+      label: 'Microphone access is not supported in this browser'
+    });
     return false;
   }
 
   try {
     const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
     stream.getTracks().forEach((track) => track.stop());
+    setCircleState(userCircle, {
+      label: 'Microphone is muted'
+    });
     return true;
   } catch (error) {
     console.error('Microphone permission denied:', error);
+    setCircleState(userCircle, {
+      error: true,
+      label: 'Microphone permission denied'
+    });
     return false;
   }
 }
@@ -118,10 +194,20 @@
 function updateMuteIndicator() {
   if (!muteIndicator) {
     return;
   }
 
+  muteIndicator.classList.add('is-visible');
+  muteIndicator.setAttribute('aria-hidden', 'false');
+
   if (isMuted) {
-    muteIndicator.classList.remove('hidden');
+    const message = hasMicPermission
+      ? 'Tap or click anywhere to unmute'
+      : 'Allow microphone access to start';
+    indicatorText && (indicatorText.textContent = message);
+    muteIndicator.dataset.state = 'muted';
+    muteIndicator.setAttribute('aria-label', 'Microphone muted. Tap to enable listening.');
   } else {
-    muteIndicator.classList.add('hidden');
+    indicatorText && (indicatorText.textContent = 'Listening… tap to mute');
+    muteIndicator.dataset.state = 'listening';
+    muteIndicator.setAttribute('aria-label', 'Microphone active. Tap to mute.');
   }
 }
@@ -143,26 +229,61 @@ async function attemptUnmute() {
   }
 
   isMuted = false;
+  setCircleState(userCircle, {
+    listening: true,
+    label: 'Listening for your voice'
+  });
   updateMuteIndicator();
+
   try {
     recognition.start();
   } catch (error) {
     console.error('Failed to start recognition:', error);
+    setCircleState(userCircle, {
+      error: true,
+      listening: false,
+      label: 'Unable to start microphone recognition'
+    });
+    isMuted = true;
+    updateMuteIndicator();
   }
 }
 
-if (muteIndicator) {
-  muteIndicator.addEventListener('click', async (event) => {
-    event.stopPropagation();
-    await attemptUnmute();
+function handleMuteToggle(event) {
+  event?.stopPropagation();
+
+  if (isMuted) {
+    attemptUnmute();
+    return;
+  }
+
+  isMuted = true;
+  setCircleState(userCircle, {
+    listening: false,
+    speaking: false,
+    label: 'Microphone is muted'
   });
+  updateMuteIndicator();
+
+  if (recognition) {
+    recognition.stop();
+  }
 }
 
-document.addEventListener('click', async () => {
-  await attemptUnmute();
+muteIndicator?.addEventListener('click', handleMuteToggle);
+
+document.addEventListener('click', () => {
+  if (isMuted) {
+    attemptUnmute();
+  }
 });
 
-const synth = window.speechSynthesis;
+document.addEventListener('keydown', (event) => {
+  if ((event.key === 'Enter' || event.key === ' ') && isMuted) {
+    event.preventDefault();
+    attemptUnmute();
+  }
+});
 
 function speak(text) {
   if (synth.speaking) {
@@ -170,35 +291,39 @@
     return;
   }
 
-  if (text !== '') {
-    const utterance = new SpeechSynthesisUtterance(text);
-    const voices = synth.getVoices();
-    const ukFemaleVoice = voices.find((voice) =>
-      voice.name.includes('Google UK English Female') || (voice.lang === 'en-GB' && voice.gender === 'female')
-    );
+  if (text === '') {
+    return;
+  }
 
-    if (ukFemaleVoice) {
-      utterance.voice = ukFemaleVoice;
-    } else {
-      console.warn('UK English female voice not found, using default.');
-    }
+  const utterance = new SpeechSynthesisUtterance(text);
+  const voices = synth.getVoices();
+  const ukFemaleVoice = voices.find((voice) =>
+    voice.name.includes('Google UK English Female') || (voice.lang === 'en-GB' && voice.gender === 'female')
+  );
 
-    utterance.onstart = () => {
-      console.log('AI is speaking...');
-      if (visualization) {
-        visualization.style.animation = 'pulse 1s infinite';
-      }
-    };
+  if (ukFemaleVoice) {
+    utterance.voice = ukFemaleVoice;
+  } else {
+    console.warn('UK English female voice not found, using default.');
+  }
 
-    utterance.onend = () => {
-      console.log('AI finished speaking.');
-      if (visualization) {
-        visualization.style.animation = '';
-      }
-    };
+  utterance.onstart = () => {
+    console.log('AI is speaking...');
+    setCircleState(aiCircle, {
+      speaking: true,
+      label: 'Unity is speaking'
+    });
+  };
 
-    synth.speak(utterance);
-  }
+  utterance.onend = () => {
+    console.log('AI finished speaking.');
+    setCircleState(aiCircle, {
+      speaking: false,
+      label: 'Unity is idle'
+    });
+  };
+
+  synth.speak(utterance);
 }
 
 function handleVoiceCommand(command) {
@@ -207,6 +332,11 @@
 
   if (lowerCaseCommand.includes('mute my mic') || lowerCaseCommand.includes('mute microphone')) {
     isMuted = true;
     updateMuteIndicator();
+    setCircleState(userCircle, {
+      listening: false,
+      speaking: false,
+      label: 'Microphone is muted'
+    });
     if (recognition) {
       recognition.stop();
     }
@@ -217,6 +347,10 @@
   if (lowerCaseCommand.includes('unmute my mic') || lowerCaseCommand.includes('unmute microphone')) {
     isMuted = false;
     updateMuteIndicator();
+    setCircleState(userCircle, {
+      listening: true,
+      label: 'Listening for your voice'
+    });
     if (recognition) {
       try {
         recognition.start();
@@ -230,6 +364,10 @@
 
   if (lowerCaseCommand.includes('shut up') || lowerCaseCommand.includes('be quiet')) {
     synth.cancel();
+    setCircleState(aiCircle, {
+      speaking: false,
+      label: 'Unity is idle'
+    });
     return true;
   }
@@ -304,15 +442,32 @@
       })
     });
 
+    if (!textResponse.ok) {
+      throw new Error(`Pollinations text API returned ${textResponse.status}`);
+    }
+
     const data = await textResponse.json();
-    aiText = data.choices[0].message.content;
+    aiText = data.choices?.[0]?.message?.content ?? '';
 
-    chatHistory.push({ role: 'assistant', content: aiText });
+    if (!aiText) {
+      throw new Error('Received empty response from Pollinations AI');
+    }
 
+    chatHistory.push({ role: 'assistant', content: aiText });
     speak(aiText);
   } catch (error) {
     console.error('Error getting text from Pollinations AI:', error);
+    setCircleState(aiCircle, {
+      error: true,
+      label: 'Unity could not respond'
+    });
     speak("Sorry, I couldn't get a text response.");
+    setTimeout(() => {
+      setCircleState(aiCircle, {
+        error: false,
+        label: 'Unity is idle'
+      });
+    }, 2400);
   }
 
   try {
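
For reviewers who want to exercise the hardened response handling in `getAIResponse` outside the browser, here is the same request and validation flow sketched in Python. The endpoint URL and request payload are assumptions, since the actual `fetch` target sits outside this diff's context lines; only the OpenAI-style response handling is visible above:

```python
"""Mirror app.js's hardened Pollinations text call for local testing.

Hypothetical sketch: TEXT_API_URL and the request payload are
assumptions. Requires `pip install requests`.
"""
import requests

TEXT_API_URL = "https://text.pollinations.ai/openai"  # assumed endpoint


def get_ai_response(chat_history: list[dict]) -> str:
    response = requests.post(TEXT_API_URL, json={"messages": chat_history}, timeout=30)

    # Mirrors `if (!textResponse.ok) throw ...` in app.js.
    if not response.ok:
        raise RuntimeError(f"Pollinations text API returned {response.status_code}")

    # Mirrors `data.choices?.[0]?.message?.content ?? ''`.
    data = response.json()
    choices = data.get("choices") or [{}]
    ai_text = (choices[0].get("message") or {}).get("content") or ""
    if not ai_text:
        raise RuntimeError("Received empty response from Pollinations AI")
    return ai_text


if __name__ == "__main__":
    history = [{"role": "user", "content": "Describe a calm ocean scene."}]
    print(get_ai_response(history))
```
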
diff --git a/ci_reports/.gitkeep b/ci_reports/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/index.html b/index.html
index 21b08b6..45fb567 100644
--- a/index.html
+++ b/index.html
@@ -1,4 +1,4 @@
[markup lost in extraction]
@@ -7,9 +7,22 @@
[markup lost in extraction]