diff --git a/.dev.vars.example b/.dev.vars.example index 937f9d307..757ba58b8 100644 --- a/.dev.vars.example +++ b/.dev.vars.example @@ -4,10 +4,14 @@ ANTHROPIC_API_KEY=sk-ant-... # Local development mode - skips Cloudflare Access auth and bypasses device pairing -DEV_MODE=true +# DEV_MODE=true + +# E2E test mode - skips Cloudflare Access auth but keeps device pairing enabled +# Use this for automated tests that need to test the real pairing flow +# E2E_TEST_MODE=true # Enable debug routes at /debug/* (optional) -DEBUG_ROUTES=true +# DEBUG_ROUTES=true # Optional - set a fixed token instead of auto-generated MOLTBOT_GATEWAY_TOKEN=dev-token-change-in-prod diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9012e6c11..6230f0b8e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -5,9 +5,10 @@ on: branches: [main] pull_request: branches: [main] + workflow_dispatch: jobs: - test: + unit: runs-on: ubuntu-latest steps: @@ -27,3 +28,184 @@ jobs: - name: Run tests run: npm test + + e2e: + runs-on: ubuntu-latest + timeout-minutes: 20 + permissions: + contents: write + pull-requests: write + + strategy: + fail-fast: false + matrix: + config: + - name: base + env: {} + - name: telegram + env: + TELEGRAM_BOT_TOKEN: "fake-telegram-bot-token-for-e2e" + TELEGRAM_DM_POLICY: "pairing" + - name: discord + env: + DISCORD_BOT_TOKEN: "fake-discord-bot-token-for-e2e" + DISCORD_DM_POLICY: "pairing" + + name: e2e (${{ matrix.config.name }}) + + steps: + - uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22 + cache: npm + + - name: Install dependencies + run: npm ci + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_wrapper: false + + - name: Install Playwright + run: npx playwright install --with-deps chromium + + - name: Install playwright-cli + run: npm install -g @playwright/cli + + - name: Install cctr + uses: taiki-e/install-action@v2 + with: + tool: cctr + + - name: Run E2E tests (${{ matrix.config.name }}) + id: e2e + continue-on-error: true + env: + # Cloud infrastructure credentials (from repo secrets with E2E_ prefix) + CLOUDFLARE_API_TOKEN: ${{ secrets.E2E_CLOUDFLARE_API_TOKEN }} + CF_ACCOUNT_ID: ${{ secrets.E2E_CF_ACCOUNT_ID }} + WORKERS_SUBDOMAIN: ${{ secrets.E2E_WORKERS_SUBDOMAIN }} + CF_ACCESS_TEAM_DOMAIN: ${{ secrets.E2E_CF_ACCESS_TEAM_DOMAIN }} + R2_ACCESS_KEY_ID: ${{ secrets.E2E_R2_ACCESS_KEY_ID }} + R2_SECRET_ACCESS_KEY: ${{ secrets.E2E_R2_SECRET_ACCESS_KEY }} + # AI provider (optional, for chat tests) + AI_GATEWAY_API_KEY: ${{ secrets.AI_GATEWAY_API_KEY }} + AI_GATEWAY_BASE_URL: ${{ secrets.AI_GATEWAY_BASE_URL }} + # Unique test run ID for parallel isolation + E2E_TEST_RUN_ID: ${{ github.run_id }}-${{ matrix.config.name }} + # Matrix-specific config + TELEGRAM_BOT_TOKEN: ${{ matrix.config.env.TELEGRAM_BOT_TOKEN }} + TELEGRAM_DM_POLICY: ${{ matrix.config.env.TELEGRAM_DM_POLICY }} + DISCORD_BOT_TOKEN: ${{ matrix.config.env.DISCORD_BOT_TOKEN }} + DISCORD_DM_POLICY: ${{ matrix.config.env.DISCORD_DM_POLICY }} + run: cctr -vv test/e2e + + - name: Convert video and generate thumbnail + id: convert + if: always() + run: | + sudo apt-get update -qq && sudo apt-get install -y -qq ffmpeg imagemagick bc + if ls /tmp/moltworker-e2e-videos/*.webm 1>/dev/null 2>&1; then + for webm in /tmp/moltworker-e2e-videos/*.webm; do + mp4="${webm%.webm}.mp4" + thumb="${webm%.webm}.png" + + # Convert to mp4 + ffmpeg -y -i "$webm" -c:v libx264 -preset fast -crf 22 -c:a aac "$mp4" + + # 
Extract middle frame as thumbnail
+              duration=$(ffprobe -v error -show_entries format=duration -of csv=p=0 "$mp4")
+              midpoint=$(echo "$duration / 2" | bc -l)
+              ffmpeg -y -ss "$midpoint" -i "$mp4" -vframes 1 -update 1 -q:v 2 "$thumb"
+
+              # Add play button overlay using ImageMagick
+              width=$(identify -format '%w' "$thumb")
+              height=$(identify -format '%h' "$thumb")
+              cx=$((width / 2))
+              cy=$((height / 2))
+              convert "$thumb" \
+                -fill 'rgba(0,0,0,0.6)' -draw "circle ${cx},${cy} $((cx+50)),${cy}" \
+                -fill 'white' -draw "polygon $((cx-15)),$((cy-25)) $((cx-15)),$((cy+25)) $((cx+30)),${cy}" \
+                "$thumb"
+
+              echo "video_path=$mp4" >> $GITHUB_OUTPUT
+              echo "video_name=$(basename $mp4)" >> $GITHUB_OUTPUT
+              echo "thumb_path=$thumb" >> $GITHUB_OUTPUT
+              echo "thumb_name=$(basename $thumb)" >> $GITHUB_OUTPUT
+            done
+            echo "has_video=true" >> $GITHUB_OUTPUT
+          else
+            echo "has_video=false" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Prepare video for upload
+        id: prepare
+        if: always() && steps.convert.outputs.has_video == 'true'
+        run: |
+          mkdir -p /tmp/e2e-video-upload/videos/${{ github.run_id }}-${{ matrix.config.name }}
+          cp "${{ steps.convert.outputs.video_path }}" /tmp/e2e-video-upload/videos/${{ github.run_id }}-${{ matrix.config.name }}/
+          cp "${{ steps.convert.outputs.thumb_path }}" /tmp/e2e-video-upload/videos/${{ github.run_id }}-${{ matrix.config.name }}/
+          echo "video_url=https://github.com/${{ github.repository }}/raw/e2e-artifacts-${{ matrix.config.name }}/videos/${{ github.run_id }}-${{ matrix.config.name }}/${{ steps.convert.outputs.video_name }}" >> $GITHUB_OUTPUT
+          echo "thumb_url=https://github.com/${{ github.repository }}/raw/e2e-artifacts-${{ matrix.config.name }}/videos/${{ github.run_id }}-${{ matrix.config.name }}/${{ steps.convert.outputs.thumb_name }}" >> $GITHUB_OUTPUT
+
+      - name: Upload video to e2e-artifacts branch
+        if: always() && steps.convert.outputs.has_video == 'true'
+        uses: peaceiris/actions-gh-pages@v4
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          publish_dir: /tmp/e2e-video-upload
+          publish_branch: e2e-artifacts-${{ matrix.config.name }}
+          keep_files: true
+
+      - name: Delete old video comments
+        if: always() && github.event_name == 'pull_request'
+        uses: actions/github-script@v7
+        with:
+          script: |
+            // Hidden marker used to identify video comments posted by this workflow
+            const marker = '<!-- moltworker-e2e-video: ${{ matrix.config.name }} -->';
+            const { data: comments } = await github.rest.issues.listComments({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              issue_number: context.issue.number,
+            });
+            for (const comment of comments) {
+              if (comment.body.includes(marker)) {
+                await github.rest.issues.deleteComment({
+                  owner: context.repo.owner,
+                  repo: context.repo.repo,
+                  comment_id: comment.id,
+                });
+              }
+            }
+
+      - name: Comment on PR with video
+        if: always() && github.event_name == 'pull_request' && steps.prepare.outputs.video_url
+        uses: peter-evans/create-or-update-comment@v4
+        with:
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            <!-- moltworker-e2e-video: ${{ matrix.config.name }} -->
+            ## E2E Test Recording (${{ matrix.config.name }})
+
+            ${{ steps.e2e.outcome == 'success' && '✅ Tests passed' || '❌ Tests failed' }}
+
+            [![E2E Test Video](${{ steps.prepare.outputs.thumb_url }})](${{ steps.prepare.outputs.video_url }})
+
+      - name: Add video link to summary
+        if: always()
+        run: |
+          echo "## E2E Test Recording" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          if [ "${{ steps.convert.outputs.has_video }}" == "true" ]; then
+            echo "📹 [Download video](${{ steps.prepare.outputs.video_url }})" >> $GITHUB_STEP_SUMMARY
+          else
+            echo "⚠️ No video recording found" >> $GITHUB_STEP_SUMMARY
+          fi
+
+      - name: 
Fail if E2E tests failed
+        if: steps.e2e.outcome == 'failure'
+        run: exit 1
diff --git a/.gitignore b/.gitignore
index d3bb70515..fad199338 100644
--- a/.gitignore
+++ b/.gitignore
@@ -36,3 +36,25 @@ Thumbs.db
 
 # Docker build artifacts
 *.tar
+
+# Veta agent memory
+.veta/
+
+# greger.el conversation
+*.greger
+
+# playwright-cli
+.playwright-cli/
+
+# Terraform
+*.tfstate
+*.tfstate.*
+.terraform/
+.terraform.lock.hcl
+terraform.tfvars
+
+# E2E test credentials
+test/e2e/.dev.vars
+
+# Temporary e2e wrangler configs
+.wrangler-e2e-*.jsonc
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000..16ccfceb7
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,37 @@
+## Contributing
+
+We welcome contributions, but with a few short rules:
+
+- **Create issues first** for anything non-trivial (typos, doc fixes, glaring errors). Explain why, the impact, and how you intend to solve it _first_, before putting time and energy (or tokens) into a PR.
+
+- **You cannot be offended if we close a PR** or otherwise decide not to merge your work. We're maintaining Moltworker for (many, many) others, and we're ultimately the ones who have to maintain the code. This is especially true if we believe your PR to be AI-driven without any human-in-the-loop review or explanation. Not all ideas or work makes it. If it's critical to your workflow, you can fork it!
+
+- **Demonstrate that you've tested your work** - whether via manual testing, automated tests, or a mix of both. You may be quizzed here.
+
+## AI Contributions
+
+> Heavily inspired and influenced by [Ghostty's AI policy](https://github.com/ghostty-org/ghostty/blob/main/AI_POLICY.md)
+
+**First**: AI tooling is incredibly powerful, and enabled much of Moltworker itself to exist! But it's a tool, and the wielder of the tool is ultimately responsible for their output.
+
+We have a few rules regarding AI usage:
+
+- **All AI usage in any form must be disclosed.** You must state the tool you used (e.g. Claude Code, Cursor, Amp) along with the extent that the work was AI-assisted.
+
+- **Pull requests created in any way by AI can only be for accepted issues.** Drive-by pull requests that do not reference an accepted issue will be closed. If AI isn't disclosed but a maintainer suspects its use, the PR will be closed. If you want to share code for a non-accepted issue, open a discussion or attach it to an existing discussion.
+
+- **Pull requests created by AI must have been fully verified with human use.** AI must not create hypothetically correct code that hasn't been tested. Importantly, you must not allow AI to write code for platforms or environments you don't have access to manually test on.
+
+- **Issues and discussions can use AI assistance but must have a full human-in-the-loop.** This means that any content generated with AI must have been reviewed _and edited_ by a human before submission. AI is very good at being overly verbose and including noise that distracts from the main point. Humans must do their research and trim this down.
+
+- **No AI-generated media is allowed (art, images, videos, audio, etc.).** Text and code are the only acceptable AI-generated content, per the other rules in this policy.
+
+These rules apply only to outside contributions to Moltworker. Maintainers are exempt from these rules and may use AI tools at their discretion; they've proven themselves trustworthy to apply good judgment. 
+ +## There are Humans Here + +Please remember that this software is ultimately maintained by humans. + +Every discussion, issue, and pull request is read and reviewed by humans (and sometimes machines, too). It is a boundary point at which people interact with each other and the work done. It is rude and disrespectful to approach this boundary with low-effort, unqualified work, since it puts the burden of validation on the maintainer. + +In a perfect world, AI would produce high-quality, accurate work every time. But today, that reality depends on the driver of the AI. And today, most drivers of AI are just not good enough. So, until either the people get better, the AI gets better, or both, we have to have strict rules to protect maintainers. diff --git a/Dockerfile b/Dockerfile index 3fb55a30d..d7fd5d313 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,8 +4,14 @@ FROM docker.io/cloudflare/sandbox:0.7.0 # The base image has Node 20, we need to replace it with Node 22 # Using direct binary download for reliability ENV NODE_VERSION=22.13.1 -RUN apt-get update && apt-get install -y xz-utils ca-certificates rsync \ - && curl -fsSLk https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.xz -o /tmp/node.tar.xz \ +RUN ARCH="$(dpkg --print-architecture)" \ + && case "${ARCH}" in \ + amd64) NODE_ARCH="x64" ;; \ + arm64) NODE_ARCH="arm64" ;; \ + *) echo "Unsupported architecture: ${ARCH}" >&2; exit 1 ;; \ + esac \ + && apt-get update && apt-get install -y xz-utils ca-certificates rsync \ + && curl -fsSLk https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-${NODE_ARCH}.tar.xz -o /tmp/node.tar.xz \ && tar -xJf /tmp/node.tar.xz -C /usr/local --strip-components=1 \ && rm /tmp/node.tar.xz \ && node --version \ diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md index 6f49dd7f5..cbdaf5350 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,17 @@ -# Moltbot on Cloudflare Workers +# OpenClaw on Cloudflare Workers -Run [Moltbot](https://molt.bot/) personal AI assistant in a [Cloudflare Sandbox](https://developers.cloudflare.com/sandbox/). +Run [OpenClaw](https://github.com/openclaw/openclaw) (formerly Moltbot, formerly Clawdbot) personal AI assistant in a [Cloudflare Sandbox](https://developers.cloudflare.com/sandbox/). -> **Experimental:** This is a proof of concept demonstrating that Moltbot can run in Cloudflare Sandbox. It is not officially supported and may break without notice. Use at your own risk. 
+![moltworker architecture](./assets/logo.png) -[![Deploy to Cloudflare](https://deploy.workers.cloudflare.com/button)](https://deploy.workers.cloudflare.com/?url=https://github.com/cloudflare/moltworker) +> **Experimental:** This is a proof of concept demonstrating that OpenClaw can run in Cloudflare Sandbox. It is not officially supported and may break without notice. Use at your own risk. -![moltworker architecture](./assets/logo.png) +[![Deploy to Cloudflare](https://deploy.workers.cloudflare.com/button)](https://deploy.workers.cloudflare.com/?url=https://github.com/cloudflare/moltworker) ## Requirements - [Workers Paid plan](https://www.cloudflare.com/plans/developer-platform/) ($5 USD/month) β€” required for Cloudflare Sandbox containers -- [Anthropic API key](https://console.anthropic.com/) β€” for Claude access, or you can AI Gateway's [Unified Billing](https://developers.cloudflare.com/ai-gateway/features/unified-billing/) +- [Anthropic API key](https://console.anthropic.com/) β€” for Claude access, or you can use AI Gateway's [Unified Billing](https://developers.cloudflare.com/ai-gateway/features/unified-billing/) The following Cloudflare features used by this project have free tiers: - Cloudflare Access (authentication) @@ -19,9 +19,9 @@ The following Cloudflare features used by this project have free tiers: - AI Gateway (optional, for API routing/analytics) - R2 Storage (optional, for persistence) -## What is Moltbot? +## What is OpenClaw? -[Moltbot](https://molt.bot/) is a personal AI assistant with a gateway architecture that connects to multiple chat platforms. Key features: +[OpenClaw](https://github.com/openclaw/openclaw) (formerly Moltbot, formerly Clawdbot) is a personal AI assistant with a gateway architecture that connects to multiple chat platforms. Key features: - **Control UI** - Web-based chat interface at the gateway - **Multi-channel support** - Telegram, Discord, Slack @@ -29,7 +29,7 @@ The following Cloudflare features used by this project have free tiers: - **Persistent conversations** - Chat history and context across sessions - **Agent runtime** - Extensible AI capabilities with workspace and skills -This project packages Moltbot to run in a [Cloudflare Sandbox](https://developers.cloudflare.com/sandbox/) container, providing a fully managed, always-on deployment without needing to self-host. Optional R2 storage enables persistence across container restarts. +This project packages OpenClaw to run in a [Cloudflare Sandbox](https://developers.cloudflare.com/sandbox/) container, providing a fully managed, always-on deployment without needing to self-host. Optional R2 storage enables persistence across container restarts. 
## Architecture @@ -52,7 +52,7 @@ npx wrangler secret put ANTHROPIC_API_KEY # Generate and set a gateway token (required for remote access) # Save this token - you'll need it to access the Control UI -export MOLTBOT_GATEWAY_TOKEN=$(openssl rand -base64 32 | tr -d '=+/' | head -c 32) +export MOLTBOT_GATEWAY_TOKEN=$(openssl rand -hex 32) echo "Your gateway token: $MOLTBOT_GATEWAY_TOKEN" echo "$MOLTBOT_GATEWAY_TOKEN" | npx wrangler secret put MOLTBOT_GATEWAY_TOKEN @@ -197,7 +197,7 @@ R2 storage uses a backup/restore approach for simplicity: **On container startup:** - If R2 is mounted and contains backup data, it's restored to the moltbot config directory -- Moltbot uses its default paths (no special configuration needed) +- OpenClaw uses its default paths (no special configuration needed) **During operation:** - A cron job runs every 5 minutes to sync the moltbot config to R2 @@ -267,7 +267,7 @@ npm run deploy ## Optional: Browser Automation (CDP) -This worker includes a Chrome DevTools Protocol (CDP) shim that enables browser automation capabilities. This allows Moltbot to control a headless browser for tasks like web scraping, screenshots, and automated testing. +This worker includes a Chrome DevTools Protocol (CDP) shim that enables browser automation capabilities. This allows OpenClaw to control a headless browser for tasks like web scraping, screenshots, and automated testing. ### Setup @@ -300,7 +300,8 @@ npm run deploy | `GET /cdp/json/new` | Create a new browser target | | `WS /cdp/devtools/browser/{id}` | WebSocket connection for CDP commands | -All endpoints require the `CDP_SECRET` header for authentication. +All endpoints require the `CDP_SECRET` as "secret" parameter for authentication. +example: https://your-worker.workers.dev/cdp?secret=`CDP_SECRET` ## Built-in Skills @@ -386,7 +387,7 @@ The `AI_GATEWAY_*` variables take precedence over `ANTHROPIC_*` if both are set. ### Authentication Layers -Moltbot in Cloudflare Sandbox uses multiple authentication layers: +OpenClaw in Cloudflare Sandbox uses multiple authentication layers: 1. **Cloudflare Access** - Protects admin routes (`/_admin/`, `/api/*`, `/debug/*`). Only authenticated users can manage devices. @@ -414,7 +415,7 @@ Moltbot in Cloudflare Sandbox uses multiple authentication layers: ## Links -- [Moltbot](https://molt.bot/) -- [Moltbot Docs](https://docs.molt.bot) +- [OpenClaw](https://github.com/openclaw/openclaw) +- [OpenClaw Docs](https://docs.openclaw.ai/) - [Cloudflare Sandbox Docs](https://developers.cloudflare.com/sandbox/) - [Cloudflare Access Docs](https://developers.cloudflare.com/cloudflare-one/policies/access/) diff --git a/package.json b/package.json index c90c60fe7..3d19c95dd 100644 --- a/package.json +++ b/package.json @@ -37,5 +37,15 @@ "wrangler": "^4.50.0" }, "author": "", - "license": "MIT" + "license": "Apache-2.0", + "cloudflare": { + "bindings": { + "ANTHROPIC_API_KEY": { + "description": "Your [Anthropic API key](https://console.anthropic.com/)." + }, + "MOLTBOT_GATEWAY_TOKEN": { + "description": "Token for gateway access. 
Generate with: openssl rand -hex 32" + } + } + } } diff --git a/src/auth/middleware.test.ts b/src/auth/middleware.test.ts index 1c49ce605..caeb71061 100644 --- a/src/auth/middleware.test.ts +++ b/src/auth/middleware.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, vi, beforeEach } from 'vitest'; -import { isDevMode, extractJWT } from './middleware'; +import { isDevMode, isE2ETestMode, extractJWT } from './middleware'; import type { MoltbotEnv } from '../types'; import type { Context } from 'hono'; import type { AppEnv } from '../types'; @@ -32,6 +32,28 @@ describe('isDevMode', () => { }); }); +describe('isE2ETestMode', () => { + it('returns true when E2E_TEST_MODE is "true"', () => { + const env = createMockEnv({ E2E_TEST_MODE: 'true' }); + expect(isE2ETestMode(env)).toBe(true); + }); + + it('returns false when E2E_TEST_MODE is undefined', () => { + const env = createMockEnv(); + expect(isE2ETestMode(env)).toBe(false); + }); + + it('returns false when E2E_TEST_MODE is "false"', () => { + const env = createMockEnv({ E2E_TEST_MODE: 'false' }); + expect(isE2ETestMode(env)).toBe(false); + }); + + it('returns false when E2E_TEST_MODE is any other value', () => { + const env = createMockEnv({ E2E_TEST_MODE: 'yes' }); + expect(isE2ETestMode(env)).toBe(false); + }); +}); + describe('extractJWT', () => { // Helper to create a mock context function createMockContext(options: { @@ -158,6 +180,17 @@ describe('createAccessMiddleware', () => { expect(setMock).toHaveBeenCalledWith('accessUser', { email: 'dev@localhost', name: 'Dev User' }); }); + it('skips auth and sets dev user when E2E_TEST_MODE is true', async () => { + const { c, setMock } = createFullMockContext({ env: { E2E_TEST_MODE: 'true' } }); + const middleware = createAccessMiddleware({ type: 'json' }); + const next = vi.fn(); + + await middleware(c, next); + + expect(next).toHaveBeenCalled(); + expect(setMock).toHaveBeenCalledWith('accessUser', { email: 'dev@localhost', name: 'Dev User' }); + }); + it('returns 500 JSON error when CF Access not configured', async () => { const { c, jsonMock } = createFullMockContext({ env: {} }); const middleware = createAccessMiddleware({ type: 'json' }); diff --git a/src/auth/middleware.ts b/src/auth/middleware.ts index a1b7d2296..0b170a995 100644 --- a/src/auth/middleware.ts +++ b/src/auth/middleware.ts @@ -13,12 +13,19 @@ export interface AccessMiddlewareOptions { } /** - * Check if running in development mode (skips CF Access auth) + * Check if running in development mode (skips CF Access auth + device pairing) */ export function isDevMode(env: MoltbotEnv): boolean { return env.DEV_MODE === 'true'; } +/** + * Check if running in E2E test mode (skips CF Access auth but keeps device pairing) + */ +export function isE2ETestMode(env: MoltbotEnv): boolean { + return env.E2E_TEST_MODE === 'true'; +} + /** * Extract JWT from request headers or cookies */ @@ -42,8 +49,8 @@ export function createAccessMiddleware(options: AccessMiddlewareOptions) { const { type, redirectOnMissing = false } = options; return async (c: Context, next: Next) => { - // Skip auth in dev mode - if (isDevMode(c.env)) { + // Skip auth in dev mode or E2E test mode + if (isDevMode(c.env) || isE2ETestMode(c.env)) { c.set('accessUser', { email: 'dev@localhost', name: 'Dev User' }); return next(); } diff --git a/src/config.ts b/src/config.ts index 77e68fa70..2576af9b5 100644 --- a/src/config.ts +++ b/src/config.ts @@ -11,5 +11,10 @@ export const STARTUP_TIMEOUT_MS = 180_000; /** Mount path for R2 persistent storage inside the container */ 
export const R2_MOUNT_PATH = '/data/moltbot'; -/** R2 bucket name for persistent storage */ -export const R2_BUCKET_NAME = 'moltbot-data'; +/** + * R2 bucket name for persistent storage. + * Can be overridden via R2_BUCKET_NAME env var for test isolation. + */ +export function getR2BucketName(env?: { R2_BUCKET_NAME?: string }): string { + return env?.R2_BUCKET_NAME || 'moltbot-data'; +} diff --git a/src/gateway/env.test.ts b/src/gateway/env.test.ts index 3594e18da..29f033dbd 100644 --- a/src/gateway/env.test.ts +++ b/src/gateway/env.test.ts @@ -135,4 +135,39 @@ describe('buildEnvVars', () => { TELEGRAM_BOT_TOKEN: 'tg', }); }); + + it('handles trailing slash in AI_GATEWAY_BASE_URL for OpenAI', () => { + const env = createMockEnv({ + AI_GATEWAY_API_KEY: 'sk-gateway-key', + AI_GATEWAY_BASE_URL: 'https://gateway.ai.cloudflare.com/v1/123/my-gw/openai/', + }); + const result = buildEnvVars(env); + expect(result.OPENAI_API_KEY).toBe('sk-gateway-key'); + expect(result.OPENAI_BASE_URL).toBe('https://gateway.ai.cloudflare.com/v1/123/my-gw/openai'); + expect(result.AI_GATEWAY_BASE_URL).toBe('https://gateway.ai.cloudflare.com/v1/123/my-gw/openai'); + expect(result.ANTHROPIC_API_KEY).toBeUndefined(); + }); + + it('handles trailing slash in AI_GATEWAY_BASE_URL for Anthropic', () => { + const env = createMockEnv({ + AI_GATEWAY_API_KEY: 'sk-gateway-key', + AI_GATEWAY_BASE_URL: 'https://gateway.ai.cloudflare.com/v1/123/my-gw/anthropic/', + }); + const result = buildEnvVars(env); + expect(result.ANTHROPIC_API_KEY).toBe('sk-gateway-key'); + expect(result.ANTHROPIC_BASE_URL).toBe('https://gateway.ai.cloudflare.com/v1/123/my-gw/anthropic'); + expect(result.AI_GATEWAY_BASE_URL).toBe('https://gateway.ai.cloudflare.com/v1/123/my-gw/anthropic'); + expect(result.OPENAI_API_KEY).toBeUndefined(); + }); + + it('handles multiple trailing slashes in AI_GATEWAY_BASE_URL', () => { + const env = createMockEnv({ + AI_GATEWAY_API_KEY: 'sk-gateway-key', + AI_GATEWAY_BASE_URL: 'https://gateway.ai.cloudflare.com/v1/123/my-gw/openai///', + }); + const result = buildEnvVars(env); + expect(result.OPENAI_API_KEY).toBe('sk-gateway-key'); + expect(result.OPENAI_BASE_URL).toBe('https://gateway.ai.cloudflare.com/v1/123/my-gw/openai'); + expect(result.AI_GATEWAY_BASE_URL).toBe('https://gateway.ai.cloudflare.com/v1/123/my-gw/openai'); + }); }); diff --git a/src/gateway/env.ts b/src/gateway/env.ts index 26f1887e8..a57e781bd 100644 --- a/src/gateway/env.ts +++ b/src/gateway/env.ts @@ -9,7 +9,9 @@ import type { MoltbotEnv } from '../types'; export function buildEnvVars(env: MoltbotEnv): Record { const envVars: Record = {}; - const isOpenAIGateway = env.AI_GATEWAY_BASE_URL?.endsWith('/openai'); + // Normalize the base URL by removing trailing slashes + const normalizedBaseUrl = env.AI_GATEWAY_BASE_URL?.replace(/\/+$/, ''); + const isOpenAIGateway = normalizedBaseUrl?.endsWith('/openai'); // AI Gateway vars take precedence // Map to the appropriate provider env var based on the gateway endpoint @@ -30,13 +32,13 @@ export function buildEnvVars(env: MoltbotEnv): Record { } // Pass base URL (used by start-moltbot.sh to determine provider) - if (env.AI_GATEWAY_BASE_URL) { - envVars.AI_GATEWAY_BASE_URL = env.AI_GATEWAY_BASE_URL; + if (normalizedBaseUrl) { + envVars.AI_GATEWAY_BASE_URL = normalizedBaseUrl; // Also set the provider-specific base URL env var if (isOpenAIGateway) { - envVars.OPENAI_BASE_URL = env.AI_GATEWAY_BASE_URL; + envVars.OPENAI_BASE_URL = normalizedBaseUrl; } else { - envVars.ANTHROPIC_BASE_URL = env.AI_GATEWAY_BASE_URL; + 
envVars.ANTHROPIC_BASE_URL = normalizedBaseUrl; } } else if (env.ANTHROPIC_BASE_URL) { envVars.ANTHROPIC_BASE_URL = env.ANTHROPIC_BASE_URL; diff --git a/src/gateway/r2.test.ts b/src/gateway/r2.test.ts index e4228dfab..ea2a2f2ba 100644 --- a/src/gateway/r2.test.ts +++ b/src/gateway/r2.test.ts @@ -88,6 +88,25 @@ describe('mountR2Storage', () => { ); }); + it('uses custom bucket name from R2_BUCKET_NAME env var', async () => { + const { sandbox, mountBucketMock } = createMockSandbox({ mounted: false }); + const env = createMockEnvWithR2({ + R2_ACCESS_KEY_ID: 'key123', + R2_SECRET_ACCESS_KEY: 'secret', + CF_ACCOUNT_ID: 'account123', + R2_BUCKET_NAME: 'moltbot-e2e-test123', + }); + + const result = await mountR2Storage(sandbox, env); + + expect(result).toBe(true); + expect(mountBucketMock).toHaveBeenCalledWith( + 'moltbot-e2e-test123', + '/data/moltbot', + expect.any(Object) + ); + }); + it('returns true immediately when bucket is already mounted', async () => { const { sandbox, mountBucketMock } = createMockSandbox({ mounted: true }); const env = createMockEnvWithR2(); diff --git a/src/gateway/r2.ts b/src/gateway/r2.ts index 0887d59e7..302c61d7d 100644 --- a/src/gateway/r2.ts +++ b/src/gateway/r2.ts @@ -1,6 +1,6 @@ import type { Sandbox } from '@cloudflare/sandbox'; import type { MoltbotEnv } from '../types'; -import { R2_MOUNT_PATH, R2_BUCKET_NAME } from '../config'; +import { R2_MOUNT_PATH, getR2BucketName } from '../config'; /** * Check if R2 is already mounted by looking at the mount table @@ -45,9 +45,10 @@ export async function mountR2Storage(sandbox: Sandbox, env: MoltbotEnv): Promise return true; } + const bucketName = getR2BucketName(env); try { - console.log('Mounting R2 bucket at', R2_MOUNT_PATH); - await sandbox.mountBucket(R2_BUCKET_NAME, R2_MOUNT_PATH, { + console.log('Mounting R2 bucket', bucketName, 'at', R2_MOUNT_PATH); + await sandbox.mountBucket(bucketName, R2_MOUNT_PATH, { endpoint: `https://${env.CF_ACCOUNT_ID}.r2.cloudflarestorage.com`, // Pass credentials explicitly since we use R2_* naming instead of AWS_* credentials: { diff --git a/src/index.ts b/src/index.ts index 3ee1f5c20..ed08910cf 100644 --- a/src/index.ts +++ b/src/index.ts @@ -28,6 +28,7 @@ import { MOLTBOT_PORT } from './config'; import { createAccessMiddleware } from './auth'; import { ensureMoltbotGateway, findExistingMoltbotProcess, syncToR2 } from './gateway'; import { publicRoutes, api, adminUi, debug, cdp } from './routes'; +import { redactSensitiveParams } from './utils/logging'; import loadingPageHtml from './assets/loading.html'; import configErrorHtml from './assets/config-error.html'; @@ -38,11 +39,11 @@ function transformErrorMessage(message: string, host: string): string { if (message.includes('gateway token missing') || message.includes('gateway token mismatch')) { return `Invalid or missing token. Visit https://${host}?token={REPLACE_WITH_YOUR_TOKEN}`; } - + if (message.includes('pairing required')) { return `Pairing required. 
Visit https://${host}/_admin/`; } - + return message; } @@ -54,17 +55,21 @@ export { Sandbox }; */ function validateRequiredEnv(env: MoltbotEnv): string[] { const missing: string[] = []; + const isTestMode = env.DEV_MODE === 'true' || env.E2E_TEST_MODE === 'true'; if (!env.MOLTBOT_GATEWAY_TOKEN) { missing.push('MOLTBOT_GATEWAY_TOKEN'); } - if (!env.CF_ACCESS_TEAM_DOMAIN) { - missing.push('CF_ACCESS_TEAM_DOMAIN'); - } + // CF Access vars not required in dev/test mode since auth is skipped + if (!isTestMode) { + if (!env.CF_ACCESS_TEAM_DOMAIN) { + missing.push('CF_ACCESS_TEAM_DOMAIN'); + } - if (!env.CF_ACCESS_AUD) { - missing.push('CF_ACCESS_AUD'); + if (!env.CF_ACCESS_AUD) { + missing.push('CF_ACCESS_AUD'); + } } // Check for AI Gateway or direct Anthropic configuration @@ -94,12 +99,12 @@ function validateRequiredEnv(env: MoltbotEnv): string[] { */ function buildSandboxOptions(env: MoltbotEnv): SandboxOptions { const sleepAfter = env.SANDBOX_SLEEP_AFTER?.toLowerCase() || 'never'; - + // 'never' means keep the container alive indefinitely if (sleepAfter === 'never') { return { keepAlive: true }; } - + // Otherwise, use the specified duration return { sleepAfter }; } @@ -114,7 +119,8 @@ const app = new Hono(); // Middleware: Log every request app.use('*', async (c, next) => { const url = new URL(c.req.url); - console.log(`[REQ] ${c.req.method} ${url.pathname}${url.search}`); + const redactedSearch = redactSensitiveParams(url); + console.log(`[REQ] ${c.req.method} ${url.pathname}${redactedSearch}`); console.log(`[REQ] Has ANTHROPIC_API_KEY: ${!!c.env.ANTHROPIC_API_KEY}`); console.log(`[REQ] DEV_MODE: ${c.env.DEV_MODE}`); console.log(`[REQ] DEBUG_ROUTES: ${c.env.DEBUG_ROUTES}`); @@ -147,28 +153,28 @@ app.route('/cdp', cdp); // Middleware: Validate required environment variables (skip in dev mode and for debug routes) app.use('*', async (c, next) => { const url = new URL(c.req.url); - + // Skip validation for debug routes (they have their own enable check) if (url.pathname.startsWith('/debug')) { return next(); } - + // Skip validation in dev mode if (c.env.DEV_MODE === 'true') { return next(); } - + const missingVars = validateRequiredEnv(c.env); if (missingVars.length > 0) { console.error('[CONFIG] Missing required environment variables:', missingVars.join(', ')); - + const acceptsHtml = c.req.header('Accept')?.includes('text/html'); if (acceptsHtml) { // Return a user-friendly HTML error page const html = configErrorHtml.replace('{{MISSING_VARS}}', missingVars.join(', ')); return c.html(html, 503); } - + // Return JSON error for API requests return c.json({ error: 'Configuration error', @@ -177,7 +183,7 @@ app.use('*', async (c, next) => { hint: 'Set these using: wrangler secret put ', }, 503); } - + return next(); }); @@ -185,11 +191,11 @@ app.use('*', async (c, next) => { app.use('*', async (c, next) => { // Determine response type based on Accept header const acceptsHtml = c.req.header('Accept')?.includes('text/html'); - const middleware = createAccessMiddleware({ + const middleware = createAccessMiddleware({ type: acceptsHtml ? 
'html' : 'json', - redirectOnMissing: acceptsHtml + redirectOnMissing: acceptsHtml }); - + return middleware(c, next); }); @@ -222,21 +228,21 @@ app.all('*', async (c) => { // Check if gateway is already running const existingProcess = await findExistingMoltbotProcess(sandbox); const isGatewayReady = existingProcess !== null && existingProcess.status === 'running'; - + // For browser requests (non-WebSocket, non-API), show loading page if gateway isn't ready const isWebSocketRequest = request.headers.get('Upgrade')?.toLowerCase() === 'websocket'; const acceptsHtml = request.headers.get('Accept')?.includes('text/html'); - + if (!isGatewayReady && !isWebSocketRequest && acceptsHtml) { console.log('[PROXY] Gateway not ready, serving loading page'); - + // Start the gateway in the background (don't await) c.executionCtx.waitUntil( ensureMoltbotGateway(sandbox, c.env).catch((err: Error) => { console.error('[PROXY] Background gateway start failed:', err); }) ); - + // Return the loading page immediately return c.html(loadingPageHtml); } @@ -264,101 +270,129 @@ app.all('*', async (c) => { // Proxy to Moltbot with WebSocket message interception if (isWebSocketRequest) { + const debugLogs = c.env.DEBUG_ROUTES === 'true'; + const redactedSearch = redactSensitiveParams(url); + console.log('[WS] Proxying WebSocket connection to Moltbot'); - console.log('[WS] URL:', request.url); - console.log('[WS] Search params:', url.search); - + if (debugLogs) { + console.log('[WS] URL:', url.pathname + redactedSearch); + } + // Get WebSocket connection to the container const containerResponse = await sandbox.wsConnect(request, MOLTBOT_PORT); console.log('[WS] wsConnect response status:', containerResponse.status); - + // Get the container-side WebSocket const containerWs = containerResponse.webSocket; if (!containerWs) { console.error('[WS] No WebSocket in container response - falling back to direct proxy'); return containerResponse; } - - console.log('[WS] Got container WebSocket, setting up interception'); - + + if (debugLogs) { + console.log('[WS] Got container WebSocket, setting up interception'); + } + // Create a WebSocket pair for the client const [clientWs, serverWs] = Object.values(new WebSocketPair()); - + // Accept both WebSockets serverWs.accept(); containerWs.accept(); - - console.log('[WS] Both WebSockets accepted'); - console.log('[WS] containerWs.readyState:', containerWs.readyState); - console.log('[WS] serverWs.readyState:', serverWs.readyState); - + + if (debugLogs) { + console.log('[WS] Both WebSockets accepted'); + console.log('[WS] containerWs.readyState:', containerWs.readyState); + console.log('[WS] serverWs.readyState:', serverWs.readyState); + } + // Relay messages from client to container serverWs.addEventListener('message', (event) => { - console.log('[WS] Client -> Container:', typeof event.data, typeof event.data === 'string' ? event.data.slice(0, 200) : '(binary)'); + if (debugLogs) { + console.log('[WS] Client -> Container:', typeof event.data, typeof event.data === 'string' ? event.data.slice(0, 200) : '(binary)'); + } if (containerWs.readyState === WebSocket.OPEN) { containerWs.send(event.data); - } else { + } else if (debugLogs) { console.log('[WS] Container not open, readyState:', containerWs.readyState); } }); - + // Relay messages from container to client, with error transformation containerWs.addEventListener('message', (event) => { - console.log('[WS] Container -> Client (raw):', typeof event.data, typeof event.data === 'string' ? 
event.data.slice(0, 500) : '(binary)'); + if (debugLogs) { + console.log('[WS] Container -> Client (raw):', typeof event.data, typeof event.data === 'string' ? event.data.slice(0, 500) : '(binary)'); + } let data = event.data; - + // Try to intercept and transform error messages if (typeof data === 'string') { try { const parsed = JSON.parse(data); - console.log('[WS] Parsed JSON, has error.message:', !!parsed.error?.message); + if (debugLogs) { + console.log('[WS] Parsed JSON, has error.message:', !!parsed.error?.message); + } if (parsed.error?.message) { - console.log('[WS] Original error.message:', parsed.error.message); + if (debugLogs) { + console.log('[WS] Original error.message:', parsed.error.message); + } parsed.error.message = transformErrorMessage(parsed.error.message, url.host); - console.log('[WS] Transformed error.message:', parsed.error.message); + if (debugLogs) { + console.log('[WS] Transformed error.message:', parsed.error.message); + } data = JSON.stringify(parsed); } } catch (e) { - console.log('[WS] Not JSON or parse error:', e); + if (debugLogs) { + console.log('[WS] Not JSON or parse error:', e); + } } } - + if (serverWs.readyState === WebSocket.OPEN) { serverWs.send(data); - } else { + } else if (debugLogs) { console.log('[WS] Server not open, readyState:', serverWs.readyState); } }); - + // Handle close events serverWs.addEventListener('close', (event) => { - console.log('[WS] Client closed:', event.code, event.reason); + if (debugLogs) { + console.log('[WS] Client closed:', event.code, event.reason); + } containerWs.close(event.code, event.reason); }); - + containerWs.addEventListener('close', (event) => { - console.log('[WS] Container closed:', event.code, event.reason); + if (debugLogs) { + console.log('[WS] Container closed:', event.code, event.reason); + } // Transform the close reason (truncate to 123 bytes max for WebSocket spec) let reason = transformErrorMessage(event.reason, url.host); if (reason.length > 123) { reason = reason.slice(0, 120) + '...'; } - console.log('[WS] Transformed close reason:', reason); + if (debugLogs) { + console.log('[WS] Transformed close reason:', reason); + } serverWs.close(event.code, reason); }); - + // Handle errors serverWs.addEventListener('error', (event) => { console.error('[WS] Client error:', event); containerWs.close(1011, 'Client error'); }); - + containerWs.addEventListener('error', (event) => { console.error('[WS] Container error:', event); serverWs.close(1011, 'Container error'); }); - - console.log('[WS] Returning intercepted WebSocket response'); + + if (debugLogs) { + console.log('[WS] Returning intercepted WebSocket response'); + } return new Response(null, { status: 101, webSocket: clientWs, @@ -368,12 +402,12 @@ app.all('*', async (c) => { console.log('[HTTP] Proxying:', url.pathname + url.search); const httpResponse = await sandbox.containerFetch(request, MOLTBOT_PORT); console.log('[HTTP] Response status:', httpResponse.status); - + // Add debug header to verify worker handled the request const newHeaders = new Headers(httpResponse.headers); newHeaders.set('X-Worker-Debug', 'proxy-to-moltbot'); newHeaders.set('X-Debug-Path', url.pathname); - + return new Response(httpResponse.body, { status: httpResponse.status, statusText: httpResponse.statusText, @@ -395,7 +429,7 @@ async function scheduled( console.log('[cron] Starting backup sync to R2...'); const result = await syncToR2(sandbox, env); - + if (result.success) { console.log('[cron] Backup sync completed successfully at', result.lastSync); } else { diff 
--git a/src/logging.test.ts b/src/logging.test.ts new file mode 100644 index 000000000..41e97f58b --- /dev/null +++ b/src/logging.test.ts @@ -0,0 +1,73 @@ +import { describe, it, expect } from 'vitest'; +import { redactSensitiveParams } from './utils/logging'; + +describe('redactSensitiveParams', () => { + it('returns empty string for URL with no query params', () => { + const url = new URL('https://example.com/path'); + expect(redactSensitiveParams(url)).toBe(''); + }); + + it('preserves non-sensitive query params', () => { + const url = new URL('https://example.com/path?page=1&sort=name'); + expect(redactSensitiveParams(url)).toBe('?page=1&sort=name'); + }); + + it('redacts param with "secret" in key (case insensitive)', () => { + const url = new URL('https://example.com/cdp?secret=abc123'); + expect(redactSensitiveParams(url)).toBe('?secret=%5BREDACTED%5D'); + }); + + it('redacts param with "SECRET" in key (uppercase)', () => { + const url = new URL('https://example.com/cdp?CDP_SECRET=abc123'); + expect(redactSensitiveParams(url)).toBe('?CDP_SECRET=%5BREDACTED%5D'); + }); + + it('redacts param with "token" in key', () => { + const url = new URL('https://example.com/path?token=xyz789'); + expect(redactSensitiveParams(url)).toBe('?token=%5BREDACTED%5D'); + }); + + it('redacts param with "key" in key', () => { + const url = new URL('https://example.com/path?api_key=sk-12345'); + expect(redactSensitiveParams(url)).toBe('?api_key=%5BREDACTED%5D'); + }); + + it('redacts param with "password" in key', () => { + const url = new URL('https://example.com/path?password=hunter2'); + expect(redactSensitiveParams(url)).toBe('?password=%5BREDACTED%5D'); + }); + + it('redacts param with "auth" in key', () => { + const url = new URL('https://example.com/path?auth_code=abc'); + expect(redactSensitiveParams(url)).toBe('?auth_code=%5BREDACTED%5D'); + }); + + it('redacts param with "credential" in key', () => { + const url = new URL('https://example.com/path?credential=xyz'); + expect(redactSensitiveParams(url)).toBe('?credential=%5BREDACTED%5D'); + }); + + it('redacts param when sensitive pattern is in value', () => { + const url = new URL('https://example.com/path?data=contains-secret-inside'); + expect(redactSensitiveParams(url)).toBe('?data=%5BREDACTED%5D'); + }); + + it('redacts multiple sensitive params while preserving others', () => { + const url = new URL('https://example.com/path?page=1&token=abc&secret=xyz&sort=name'); + const result = redactSensitiveParams(url); + expect(result).toContain('page=1'); + expect(result).toContain('sort=name'); + expect(result).toContain('token=%5BREDACTED%5D'); + expect(result).toContain('secret=%5BREDACTED%5D'); + }); + + it('redacts gateway_token (real world example)', () => { + const url = new URL('https://moltbot.workers.dev/?token=abc123def456'); + expect(redactSensitiveParams(url)).toBe('?token=%5BREDACTED%5D'); + }); + + it('redacts CDP secret query param (issue #85 scenario)', () => { + const url = new URL('https://moltbot.workers.dev/cdp/json/version?secret=my-cdp-secret'); + expect(redactSensitiveParams(url)).toBe('?secret=%5BREDACTED%5D'); + }); +}); diff --git a/src/types.ts b/src/types.ts index bb82c8ca4..d0fe5450a 100644 --- a/src/types.ts +++ b/src/types.ts @@ -18,6 +18,7 @@ export interface MoltbotEnv { CLAWDBOT_BIND_MODE?: string; DEV_MODE?: string; // Set to 'true' for local dev (skips CF Access auth + moltbot device pairing) + E2E_TEST_MODE?: string; // Set to 'true' for E2E tests (skips CF Access auth but keeps device pairing) DEBUG_ROUTES?: 
string; // Set to 'true' to enable /debug/* routes SANDBOX_SLEEP_AFTER?: string; // How long before sandbox sleeps: 'never' (default), or duration like '10m', '1h' TELEGRAM_BOT_TOKEN?: string; @@ -32,6 +33,7 @@ export interface MoltbotEnv { // R2 credentials for bucket mounting (set via wrangler secret) R2_ACCESS_KEY_ID?: string; R2_SECRET_ACCESS_KEY?: string; + R2_BUCKET_NAME?: string; // Override bucket name (default: 'moltbot-data') CF_ACCOUNT_ID?: string; // Cloudflare account ID for R2 endpoint // Browser Rendering binding for CDP shim BROWSER?: Fetcher; diff --git a/src/utils/logging.ts b/src/utils/logging.ts new file mode 100644 index 000000000..f9747d04c --- /dev/null +++ b/src/utils/logging.ts @@ -0,0 +1,20 @@ +/** + * Redact sensitive query parameters from URL for safe logging. + * Redacts any param containing: secret, token, key, password, auth, credential + */ +export function redactSensitiveParams(url: URL): string { + const sensitivePatterns = /secret|token|key|password|auth|credential/i; + const params = new URLSearchParams(url.search); + const redactedParams = new URLSearchParams(); + + for (const [key, value] of params) { + if (sensitivePatterns.test(key) || sensitivePatterns.test(value)) { + redactedParams.set(key, '[REDACTED]'); + } else { + redactedParams.set(key, value); + } + } + + const search = redactedParams.toString(); + return search ? `?${search}` : ''; +} diff --git a/start-moltbot.sh b/start-moltbot.sh index c58330a63..286a4d67f 100644 --- a/start-moltbot.sh +++ b/start-moltbot.sh @@ -187,17 +187,31 @@ if (process.env.TELEGRAM_BOT_TOKEN) { config.channels.telegram = config.channels.telegram || {}; config.channels.telegram.botToken = process.env.TELEGRAM_BOT_TOKEN; config.channels.telegram.enabled = true; - config.channels.telegram.dm = config.channels.telegram.dm || {}; - config.channels.telegram.dm.policy = process.env.TELEGRAM_DM_POLICY || 'pairing'; + const telegramDmPolicy = process.env.TELEGRAM_DM_POLICY || 'pairing'; + config.channels.telegram.dmPolicy = telegramDmPolicy; + if (process.env.TELEGRAM_DM_ALLOW_FROM) { + // Explicit allowlist: "123,456,789" β†’ ['123', '456', '789'] + config.channels.telegram.allowFrom = process.env.TELEGRAM_DM_ALLOW_FROM.split(','); + } else if (telegramDmPolicy === 'open') { + // "open" policy requires allowFrom: ["*"] + config.channels.telegram.allowFrom = ['*']; + } } // Discord configuration +// Note: Discord uses nested dm.policy, not flat dmPolicy like Telegram +// See: https://github.com/moltbot/moltbot/blob/v2026.1.24-1/src/config/zod-schema.providers-core.ts#L147-L155 if (process.env.DISCORD_BOT_TOKEN) { config.channels.discord = config.channels.discord || {}; config.channels.discord.token = process.env.DISCORD_BOT_TOKEN; config.channels.discord.enabled = true; + const discordDmPolicy = process.env.DISCORD_DM_POLICY || 'pairing'; config.channels.discord.dm = config.channels.discord.dm || {}; - config.channels.discord.dm.policy = process.env.DISCORD_DM_POLICY || 'pairing'; + config.channels.discord.dm.policy = discordDmPolicy; + // "open" policy requires allowFrom: ["*"] + if (discordDmPolicy === 'open') { + config.channels.discord.dm.allowFrom = ['*']; + } } // Slack configuration @@ -212,7 +226,7 @@ if (process.env.SLACK_BOT_TOKEN && process.env.SLACK_APP_TOKEN) { // Usage: Set AI_GATEWAY_BASE_URL or ANTHROPIC_BASE_URL to your endpoint like: // https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/anthropic // https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/openai -const baseUrl = 
process.env.AI_GATEWAY_BASE_URL || process.env.ANTHROPIC_BASE_URL || ''; +const baseUrl = (process.env.AI_GATEWAY_BASE_URL || process.env.ANTHROPIC_BASE_URL || '').replace(/\/+$/, ''); const isOpenAI = baseUrl.endsWith('/openai'); if (isOpenAI) { diff --git a/test/e2e/.dev.vars.example b/test/e2e/.dev.vars.example new file mode 100644 index 000000000..0233663cb --- /dev/null +++ b/test/e2e/.dev.vars.example @@ -0,0 +1,120 @@ +# Cloud E2E Test Credentials +# Copy this file to .dev.vars and fill in your values +# DO NOT commit .dev.vars to git! + +# ============================================================================= +# CLOUDFLARE_API_TOKEN +# ============================================================================= +# Required: Cloudflare API token with specific permissions for e2e tests. +# +# How to create: +# 1. Go to https://dash.cloudflare.com/profile/api-tokens +# 2. Click "Create Token" +# 3. Click "Create Custom Token" (at the bottom) +# 4. Configure the token: +# +# Token name: moltworker-e2e-tests (or whatever you prefer) +# +# Permissions (add all of these): +# β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +# β”‚ Account β”‚ Workers Scripts β”‚ Edit β”‚ +# β”‚ Account β”‚ Workers R2 Storage β”‚ Edit β”‚ +# β”‚ Account β”‚ Cloudflare Containers β”‚ Edit β”‚ +# β”‚ Account β”‚ Access: Apps and Policies β”‚ Edit β”‚ +# β”‚ Account β”‚ Access: Service Tokens β”‚ Edit β”‚ +# β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +# +# Account Resources: +# - Include: Your account (or "All accounts" if you have multiple) +# +# Client IP Address Filtering: (optional, leave blank for no restrictions) +# +# TTL: (optional, set an expiry if desired) +# +# 5. Click "Continue to summary" +# 6. Click "Create Token" +# 7. Copy the token value (you won't see it again!) +# +CLOUDFLARE_API_TOKEN= + +# ============================================================================= +# CF_ACCOUNT_ID +# ============================================================================= +# Required: Your Cloudflare account ID +# +# How to find: +# 1. Go to https://dash.cloudflare.com/ +# 2. Click the "..." menu next to your account name in the sidebar +# 3. Click "Copy Account ID" +# +# Or: Dashboard β†’ any zone β†’ Overview β†’ scroll down to "API" section +# +CF_ACCOUNT_ID= + +# ============================================================================= +# WORKERS_SUBDOMAIN +# ============================================================================= +# Required: Your workers.dev subdomain +# +# This is the subdomain part of your workers.dev URL. +# For example, if your workers deploy to "my-worker.myaccount.workers.dev", +# then your WORKERS_SUBDOMAIN is "myaccount". +# +# How to find: +# 1. Go to https://dash.cloudflare.com/ β†’ Workers & Pages +# 2. Look at any deployed worker's URL, or +# 3. 
Go to Workers & Pages β†’ Overview β†’ your subdomain is shown at the top +# +WORKERS_SUBDOMAIN= + +# ============================================================================= +# CF_ACCESS_TEAM_DOMAIN +# ============================================================================= +# Required: Your Cloudflare Access team domain +# +# This is your Zero Trust organization's domain, typically in the format: +# "yourteam.cloudflareaccess.com" +# +# How to find: +# 1. Go to https://one.dash.cloudflare.com/ (Zero Trust dashboard) +# 2. Go to Settings β†’ Custom Pages +# 3. Your team domain is shown at the top (e.g., "yourteam.cloudflareaccess.com") +# +# Or: Look at any Access login page URL - it will be https://yourteam.cloudflareaccess.com/... +# +CF_ACCESS_TEAM_DOMAIN= + +# ============================================================================= +# R2_ACCESS_KEY_ID and R2_SECRET_ACCESS_KEY +# ============================================================================= +# Required: R2 API credentials for bucket mounting inside the container +# +# How to create: +# 1. Go to https://dash.cloudflare.com/ β†’ R2 β†’ Overview +# 2. Click "Manage R2 API Tokens" (top right) +# 3. Click "Create API Token" +# 4. Configure: +# - Token name: moltworker-e2e (or whatever you prefer) +# - Permissions: Object Read & Write +# - Specify bucket(s): You can leave as "Apply to all buckets" or +# limit to buckets starting with "moltbot-" for safety +# - TTL: (optional) +# 5. Click "Create API Token" +# 6. Copy both the "Access Key ID" and "Secret Access Key" +# (Secret is only shown once!) +# +R2_ACCESS_KEY_ID= +R2_SECRET_ACCESS_KEY= + +# ============================================================================= +# OPTIONAL SETTINGS +# ============================================================================= + +# Unique test run ID for isolation (default: "local") +# In CI, set this to the PR number or a unique identifier to allow parallel runs +# E2E_TEST_RUN_ID=local + +# AI provider credentials (at least one recommended for chat/conversation tests) +# AI_GATEWAY_API_KEY= +# AI_GATEWAY_BASE_URL= +# ANTHROPIC_API_KEY= diff --git a/test/e2e/README.md b/test/e2e/README.md new file mode 100644 index 000000000..6c1c05aa3 --- /dev/null +++ b/test/e2e/README.md @@ -0,0 +1,185 @@ +# E2E tests for Moltworker + +End-to-end tests that deploy real Moltworker instances to Cloudflare infrastructure. + +## Why cloud-based e2e tests? + +These tests run against actual Cloudflare infrastructureβ€”the same environment users get when they deploy Moltworker themselves. 
This catches issues that local testing can't: + +- **R2 bucket mounting** only works in production (not with `wrangler dev`) +- **Container cold starts** and sandbox behavior +- **Cloudflare Access** authentication flows +- **Real network latency** and timeout handling + +## Architecture + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ Test runner β”‚ +β”‚ β”‚ +β”‚ cctr test/e2e/ β”‚ +β”‚ β”œβ”€β”€ _setup.txt (start server, browser, video) β”‚ +β”‚ β”œβ”€β”€ pairing_and_conversation.txt β”‚ +β”‚ └── _teardown.txt (stop everything, clean up cloud resources) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ Cloud infrastructure β”‚ +β”‚ β”‚ +β”‚ Terraform (main.tf) Wrangler deploy Access API β”‚ +β”‚ β”œβ”€β”€ Service token β†’ β”œβ”€β”€ Worker β†’ β”œβ”€β”€ App β”‚ +β”‚ └── R2 bucket β”œβ”€β”€ Container └── Policies β”‚ +β”‚ └── Secrets β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ Deployed worker β”‚ +β”‚ β”‚ +β”‚ https://moltbot-sandbox-e2e-{id}.{subdomain}.workers.dev β”‚ +β”‚ β”‚ +β”‚ Protected by Cloudflare Access: β”‚ +β”‚ - Service token (for automated tests) β”‚ +β”‚ - @cloudflare.com emails (for manual debugging) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +### Test flow + +1. **Terraform** creates isolated resources: service token + R2 bucket +2. **Wrangler** deploys worker with unique name (timestamp + random suffix) +3. **Access API** creates Access application (must be after worker existsβ€”workers.dev domains require the worker to exist first) +4. **Playwright** opens browser with Access headers, navigates to worker +5. **Tests run** with video recording capturing the full UI flow +6. **Teardown** deletes everything: Access app β†’ worker β†’ R2 bucket β†’ service token + +### Key design decisions + +- **Unique IDs per test run**: `$(date +%s)-$(openssl rand -hex 4)` ensures parallel test runs don't conflict +- **Access created post-deploy**: Terraform can't create Access apps for non-existent domains +- **Container names**: Derived from worker name as `{worker-name}-sandbox` + +## Test framework: cctr + playwright-cli + +Tests use two complementary tools: + +### [cctr](https://github.com/andreasjansson/cctr) - CLI Corpus Test Runner + +cctr runs test where each test case is a command line script, e.g. 
+ +``` +=== +navigate to admin page to approve device +%require +=== +TOKEN=$(cat "$CCTR_FIXTURE_DIR/gateway-token.txt") +WORKER_URL=$(cat "$CCTR_FIXTURE_DIR/worker-url.txt") +./pw --session=moltworker-e2e open "$WORKER_URL/_admin/?token=$TOKEN" +--- +``` + +Key features: +- **Plain text format**: Easy to read and write +- **`%require` directive**: If this test fails, skip all subsequent tests +- **Variables**: Capture dynamic output with `{{ name }}` +- **Fixtures**: `fixture/` directory copied to temp dir for each suite +- **Setup/teardown**: `_setup.txt` and `_teardown.txt` run before/after tests + +### [playwright-cli](https://github.com/microsoft/playwright-cli) - Browser automation CLI + +playwright-cli provides shell-friendly browser automation. Instead of writing JavaScript test files, you control the browser with CLI commands: + +```bash +# Open a page +playwright-cli --session=test open "https://example.com" + +# Run arbitrary Playwright code +playwright-cli --session=test run-code "async page => { + await page.waitForSelector('text=Hello'); +}" + +# Take screenshots, record video +playwright-cli --session=test video-start +playwright-cli --session=test screenshot +``` + +The `./pw` wrapper in our fixture works around a playwright-cli bug where errors don't set a non-zero exit code. It detects `### Error` in the output and exits with code 1, making errors fail the test properly. + +## Example test + +Here's a complete test that approves a device and sends a chat message: + +``` +=== +wait for Approve All button and click it +%require +=== +./pw --session=moltworker-e2e run-code "async page => { + const btn = await page.waitForSelector('button:has-text(\"Approve All\")', { timeout: 120000 }); + await btn.click(); +}" +--- + +=== +wait for approval to complete +%require +=== +./pw --session=moltworker-e2e run-code "async page => { + await page.waitForSelector('text=No pending pairing requests', { timeout: 120000 }); +}" +--- + +=== +type math question into chat +%require +=== +./pw --session=moltworker-e2e run-code "async page => { + const textarea = await page.waitForSelector('textarea'); + await textarea.fill('What is 847293 + 651824? Reply with just the number.'); +}" +--- + +=== +wait for response containing the correct answer +=== +./pw --session=moltworker-e2e run-code "async page => { + await page.waitForSelector('text=1499117', { timeout: 120000 }); +}" +--- +``` + +## Running the e2e test suite locally + +### Prerequisites + +1. Copy `.dev.vars.example` to `.dev.vars` and fill in credentials (see file for detailed instructions) +2. Install dependencies: `npm install` +3. Install cctr: `brew install andreasjansson/tap/cctr` or `cargo install cctr` +4. Install playwright-cli: `npm install -g playwright-cli` + +### Run tests + +```bash +# Run all e2e tests +cctr test/e2e/ + +# Run with verbose output +cctr test/e2e/ -v + +# Run specific test file +cctr test/e2e/ -p pairing + +# Watch test output in real-time (for debugging) +cctr test/e2e/ -vv +``` + +### Run headed (see the browser) + +```bash +PLAYWRIGHT_HEADED=1 cctr test/e2e/ +``` + +### View test videos + +Videos are saved to `/tmp/moltworker-e2e-videos/` after each run. 
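+
+## How `E2E_TEST_MODE` differs from `DEV_MODE`
+
+The deploy script turns on `E2E_TEST_MODE=true` (alongside `DEBUG_ROUTES=true`) for the e2e
+worker. Per the comments in `src/types.ts`, `E2E_TEST_MODE` skips Cloudflare Access validation
+but keeps device pairing enabled, so these tests exercise the real pairing flow, whereas
+`DEV_MODE` bypasses both. The sketch below only illustrates that contract; it is not the
+worker's actual auth middleware, and the helper names (`skipsAccessCheck`,
+`skipsDevicePairing`) are hypothetical.
+
+```ts
+// Illustrative sketch of the flag contract, assuming MoltbotEnv from src/types.ts.
+import type { MoltbotEnv } from '../../src/types';
+
+/** Both DEV_MODE and E2E_TEST_MODE skip the Cloudflare Access JWT check. */
+export function skipsAccessCheck(env: MoltbotEnv): boolean {
+  return env.DEV_MODE === 'true' || env.E2E_TEST_MODE === 'true';
+}
+
+/** Only DEV_MODE bypasses device pairing; E2E_TEST_MODE keeps the real pairing flow. */
+export function skipsDevicePairing(env: MoltbotEnv): boolean {
+  return env.DEV_MODE === 'true';
+}
+```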
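+
+## Gateway token and log redaction
+
+Both `_setup.txt` and the pairing test open the worker with the gateway token in the query
+string (`/?token=...`). This PR also adds `redactSensitiveParams` in `src/utils/logging.ts`,
+which replaces any query parameter whose key or value matches
+secret/token/key/password/auth/credential with `[REDACTED]`, so such URLs can be logged
+safely. The worker's actual logging call sites are not part of this excerpt; the helper below
+is only an illustrative sketch of how the function might be used.
+
+```ts
+// Illustrative only: format a request line for logs with sensitive query params redacted.
+import { redactSensitiveParams } from '../../src/utils/logging';
+
+export function formatRequestForLog(request: Request): string {
+  const url = new URL(request.url);
+  // e.g. "GET /cdp/json/version?secret=%5BREDACTED%5D" (the issue #85 scenario in the tests)
+  return `${request.method} ${url.pathname}${redactSensitiveParams(url)}`;
+}
+```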
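+
+## Per-run R2 bucket
+
+Terraform creates an isolated bucket named `moltbot-e2e-${test_run_id}` for every run, and the
+deploy script passes that name to the worker as the `R2_BUCKET_NAME` secret; `src/types.ts`
+documents the default bucket name as `moltbot-data`. The real mounting code is not shown in
+this diff, so the resolver below is only a sketch of the override, with an illustrative name.
+
+```ts
+// Illustrative only: e2e deployments override the bucket per test run via R2_BUCKET_NAME.
+import type { MoltbotEnv } from '../../src/types';
+
+export function resolveR2BucketName(env: MoltbotEnv): string {
+  return env.R2_BUCKET_NAME ?? 'moltbot-data';
+}
+```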
diff --git a/test/e2e/_setup.txt b/test/e2e/_setup.txt new file mode 100644 index 000000000..63d907aa2 --- /dev/null +++ b/test/e2e/_setup.txt @@ -0,0 +1,44 @@ +=== +start moltworker server +=== +./start-server -v +--- +{{ s }} +--- +where +* strip(s) endswith "ready" + +=== +start playwright browser +=== +./start-browser +--- +ready + +=== +start video recording +=== +./pw --session=moltworker-e2e video-start +--- +{{ output }} +--- +where +* output contains "Video recording started" + +=== +navigate to main page and wait for worker to be ready +%require +=== +TOKEN=$(cat "$CCTR_FIXTURE_DIR/gateway-token.txt") +WORKER_URL=$(cat "$CCTR_FIXTURE_DIR/worker-url.txt") +./pw --session=moltworker-e2e open "$WORKER_URL/?token=$TOKEN" +# Wait for pairing required message (worker shows loading screen first, then UI loads) +./pw --session=moltworker-e2e run-code "async page => { + await page.waitForSelector('text=Pairing required', { timeout: 300000 }); +}" +echo "Worker is ready" +--- +{{ output }} +--- +where +* output contains "Worker is ready" diff --git a/test/e2e/_teardown.txt b/test/e2e/_teardown.txt new file mode 100644 index 000000000..1b7888e6e --- /dev/null +++ b/test/e2e/_teardown.txt @@ -0,0 +1,50 @@ +=== +stop video recording +=== +./pw --session=moltworker-e2e video-stop || true +--- +{{ output }} +--- +where +* output contains "Video" or output contains "Error" or output contains "No" + +=== +save video recording +=== +mkdir -p /tmp/moltworker-e2e-videos +datetime=$(date +%Y%m%d-%H%M%S) +for f in ./.playwright-cli/*.webm; do + if [ -f "$f" ]; then + cp "$f" "/tmp/moltworker-e2e-videos/${datetime}.webm" + echo "video saved to /tmp/moltworker-e2e-videos/${datetime}.webm" + fi +done +# Always succeed even if no video +echo "video cleanup complete" +--- +{{ output }} +--- +where +* output contains "video" + +=== +stop playwright browser +=== +./stop-browser || true +echo "browser stopped" +--- +{{ output }} +--- +where +* output contains "stopped" + +=== +stop moltworker server and destroy cloud resources +=== +# This deletes the worker AND destroys terraform resources (Access app, service token, R2 bucket) +./stop-server +--- +{{ s }} +--- +where +* strip(s) endswith "stopped" diff --git a/test/e2e/fixture/curl-auth b/test/e2e/fixture/curl-auth new file mode 100755 index 000000000..71767bab3 --- /dev/null +++ b/test/e2e/fixture/curl-auth @@ -0,0 +1,26 @@ +#!/bin/bash +# Wrapper for curl that adds Cloudflare Access service token headers +# +# Usage: ./curl-auth [curl-args...] +# +# Automatically adds CF-Access-Client-Id and CF-Access-Client-Secret headers +# using values from $CCTR_FIXTURE_DIR + +set -e + +if [ -z "$CCTR_FIXTURE_DIR" ]; then + CCTR_FIXTURE_DIR="/tmp/e2e-cloud-manual" +fi + +CF_ACCESS_CLIENT_ID=$(cat "$CCTR_FIXTURE_DIR/cf-access-client-id.txt" 2>/dev/null || echo "") +CF_ACCESS_CLIENT_SECRET=$(cat "$CCTR_FIXTURE_DIR/cf-access-client-secret.txt" 2>/dev/null || echo "") + +if [ -z "$CF_ACCESS_CLIENT_ID" ] || [ -z "$CF_ACCESS_CLIENT_SECRET" ]; then + echo "Error: Access credentials not found in $CCTR_FIXTURE_DIR" >&2 + exit 1 +fi + +exec curl \ + -H "CF-Access-Client-Id: $CF_ACCESS_CLIENT_ID" \ + -H "CF-Access-Client-Secret: $CF_ACCESS_CLIENT_SECRET" \ + "$@" diff --git a/test/e2e/fixture/pw b/test/e2e/fixture/pw new file mode 100755 index 000000000..f4472c9f8 --- /dev/null +++ b/test/e2e/fixture/pw @@ -0,0 +1,28 @@ +#!/bin/bash +# Wrapper for playwright-cli that returns non-zero exit code on errors. 
+# +# playwright-cli has a bug where it ignores the isError flag returned from +# the daemon. In program.js line ~279, it only does: +# +# console.log(result.text); +# session.close(); +# +# But it should also do: +# +# if (result.isError) process.exit(1); +# +# Until this is fixed upstream, we detect errors by checking for "### Error" +# in the output (which is the format used by browserServerBackend.js). +# +# See: https://github.com/microsoft/playwright/blob/main/packages/playwright/src/mcp/terminal/program.ts + +output=$(playwright-cli "$@" 2>&1) +exit_code=$? + +echo "$output" + +if echo "$output" | grep -q "^### Error"; then + exit 1 +fi + +exit $exit_code diff --git a/test/e2e/fixture/server/create-access-app b/test/e2e/fixture/server/create-access-app new file mode 100755 index 000000000..34fed4c5d --- /dev/null +++ b/test/e2e/fixture/server/create-access-app @@ -0,0 +1,107 @@ +#!/bin/bash +# Create Access application to protect the deployed worker +# Must be called AFTER the worker is deployed +set -e + +WORKER_NAME="$1" +SERVICE_TOKEN_ID="$2" + +if [ -z "$WORKER_NAME" ] || [ -z "$SERVICE_TOKEN_ID" ]; then + echo "Usage: $0 " >&2 + exit 1 +fi + +: "${CLOUDFLARE_API_TOKEN:?CLOUDFLARE_API_TOKEN is required}" +: "${CLOUDFLARE_ACCOUNT_ID:?CLOUDFLARE_ACCOUNT_ID is required}" +: "${WORKERS_SUBDOMAIN:?WORKERS_SUBDOMAIN is required}" + +WORKER_DOMAIN="${WORKER_NAME}.${WORKERS_SUBDOMAIN}.workers.dev" +APP_NAME="e2e-${WORKER_NAME}" + +echo "Creating Access application for $WORKER_DOMAIN..." >&2 + +# Create the Access application +APP_RESPONSE=$(curl -s -X POST \ + "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/access/apps" \ + -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \ + -H "Content-Type: application/json" \ + --data '{ + "name": "'"$APP_NAME"'", + "domain": "'"$WORKER_DOMAIN"'", + "type": "self_hosted", + "session_duration": "24h", + "auto_redirect_to_identity": false, + "app_launcher_visible": false + }') + +APP_ID=$(echo "$APP_RESPONSE" | jq -r '.result.id // empty') +APP_AUD=$(echo "$APP_RESPONSE" | jq -r '.result.aud // empty') + +if [ -z "$APP_ID" ]; then + echo "Failed to create Access application:" >&2 + echo "$APP_RESPONSE" | jq >&2 + exit 1 +fi + +echo "Created Access application: $APP_ID" >&2 + +# Create Service Auth policy to allow our service token +echo "Creating Service Auth policy..." >&2 +POLICY_RESPONSE=$(curl -s -X POST \ + "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/access/apps/$APP_ID/policies" \ + -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \ + -H "Content-Type: application/json" \ + --data '{ + "name": "E2E Service Token", + "decision": "non_identity", + "precedence": 1, + "include": [ + { + "service_token": { + "token_id": "'"$SERVICE_TOKEN_ID"'" + } + } + ] + }') + +POLICY_SUCCESS=$(echo "$POLICY_RESPONSE" | jq -r '.success') +if [ "$POLICY_SUCCESS" != "true" ]; then + echo "Failed to create service token policy:" >&2 + echo "$POLICY_RESPONSE" | jq >&2 + # Clean up the app we just created + curl -s -X DELETE \ + "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/access/apps/$APP_ID" \ + -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" >/dev/null + exit 1 +fi +echo "Created Service Auth policy" >&2 + +# Create Allow policy for Cloudflare employees +echo "Creating Cloudflare employees policy..." 
>&2 +POLICY_RESPONSE=$(curl -s -X POST \ + "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/access/apps/$APP_ID/policies" \ + -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \ + -H "Content-Type: application/json" \ + --data '{ + "name": "Cloudflare Employees", + "decision": "allow", + "precedence": 2, + "include": [ + { + "email_domain": { + "domain": "cloudflare.com" + } + } + ] + }') + +POLICY_SUCCESS=$(echo "$POLICY_RESPONSE" | jq -r '.success') +if [ "$POLICY_SUCCESS" != "true" ]; then + echo "Warning: Failed to create Cloudflare employees policy (non-fatal):" >&2 + echo "$POLICY_RESPONSE" | jq >&2 +fi +echo "Created Cloudflare employees policy" >&2 + +# Output the app ID and AUD for use by other scripts +echo "$APP_ID" +echo "$APP_AUD" diff --git a/test/e2e/fixture/server/delete-worker b/test/e2e/fixture/server/delete-worker new file mode 100755 index 000000000..9b08123a4 --- /dev/null +++ b/test/e2e/fixture/server/delete-worker @@ -0,0 +1,19 @@ +#!/bin/bash +# Delete the deployed e2e worker +set -e + +WORKER_NAME="$1" +if [ -z "$WORKER_NAME" ]; then + echo "Usage: $0 " >&2 + exit 1 +fi + +: "${CLOUDFLARE_API_TOKEN:?CLOUDFLARE_API_TOKEN is required}" + +echo "Deleting worker: $WORKER_NAME" >&2 + +# Delete the worker using wrangler +# Use --force to skip confirmation prompt +npx wrangler delete --name "$WORKER_NAME" --force 2>&1 || true + +echo "Worker deleted: $WORKER_NAME" >&2 diff --git a/test/e2e/fixture/server/deploy b/test/e2e/fixture/server/deploy new file mode 100755 index 000000000..f139dddde --- /dev/null +++ b/test/e2e/fixture/server/deploy @@ -0,0 +1,83 @@ +#!/bin/bash +# Deploy the worker to Cloudflare with e2e configuration +set -e + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +# Find project directory - use CCTR_TEST_PATH if available (cctr copies fixture to temp dir) +if [ -n "$CCTR_TEST_PATH" ]; then + PROJECT_DIR="$(cd "$CCTR_TEST_PATH/../.." && pwd)" +else + FIXTURE_DIR="$(dirname "$SCRIPT_DIR")" + PROJECT_DIR="$(cd "$FIXTURE_DIR/../.." && pwd)" +fi + +# Required environment variables +: "${CLOUDFLARE_API_TOKEN:?CLOUDFLARE_API_TOKEN is required}" +: "${CF_ACCOUNT_ID:?CF_ACCOUNT_ID is required}" +: "${R2_ACCESS_KEY_ID:?R2_ACCESS_KEY_ID is required}" +: "${R2_SECRET_ACCESS_KEY:?R2_SECRET_ACCESS_KEY is required}" +: "${MOLTBOT_GATEWAY_TOKEN:?MOLTBOT_GATEWAY_TOKEN is required}" + +# Read terraform outputs +TERRAFORM_OUTPUT="$1" +if [ -z "$TERRAFORM_OUTPUT" ]; then + echo "Usage: $0 " >&2 + exit 1 +fi + +WORKER_NAME=$(echo "$TERRAFORM_OUTPUT" | jq -r '.worker_name.value') +R2_BUCKET=$(echo "$TERRAFORM_OUTPUT" | jq -r '.r2_bucket_name.value') + +# Get CF_ACCESS_TEAM_DOMAIN from environment +: "${CF_ACCESS_TEAM_DOMAIN:?CF_ACCESS_TEAM_DOMAIN is required}" + +cd "$PROJECT_DIR" + +# Build first +echo "Building project..." 
>&2 +npm run build >&2 + +# Export account ID for all wrangler commands (and unset deprecated name) +export CLOUDFLARE_ACCOUNT_ID="$CF_ACCOUNT_ID" + +# Generate a temporary wrangler config with unique worker name +# This ensures the container name is also unique (container name = worker-name + class-name) +E2E_CONFIG="$PROJECT_DIR/.wrangler-e2e-$WORKER_NAME.jsonc" +echo "Generating e2e config: $E2E_CONFIG" >&2 + +# Copy config and replace the name field (sed handles JSONC comments fine) +sed 's/"name": "moltbot-sandbox"/"name": "'"$WORKER_NAME"'"/' "$PROJECT_DIR/wrangler.jsonc" > "$E2E_CONFIG" + +# Deploy using the e2e-specific config +echo "Deploying worker: $WORKER_NAME to account $CLOUDFLARE_ACCOUNT_ID" >&2 +npx wrangler deploy \ + --config "$E2E_CONFIG" \ + --var "DEBUG_ROUTES:true" \ + --var "E2E_TEST_MODE:true" \ + >&2 + +# Clean up temp config +rm -f "$E2E_CONFIG" + +# Set secrets for the deployed worker +echo "Setting worker secrets..." >&2 +echo "$MOLTBOT_GATEWAY_TOKEN" | npx wrangler secret put MOLTBOT_GATEWAY_TOKEN --name "$WORKER_NAME" >&2 +echo "$CF_ACCESS_TEAM_DOMAIN" | npx wrangler secret put CF_ACCESS_TEAM_DOMAIN --name "$WORKER_NAME" >&2 +echo "$R2_ACCESS_KEY_ID" | npx wrangler secret put R2_ACCESS_KEY_ID --name "$WORKER_NAME" >&2 +echo "$R2_SECRET_ACCESS_KEY" | npx wrangler secret put R2_SECRET_ACCESS_KEY --name "$WORKER_NAME" >&2 +echo "$R2_BUCKET" | npx wrangler secret put R2_BUCKET_NAME --name "$WORKER_NAME" >&2 +echo "$CLOUDFLARE_ACCOUNT_ID" | npx wrangler secret put CF_ACCOUNT_ID --name "$WORKER_NAME" >&2 + +# Set AI provider keys if available +if [ -n "$AI_GATEWAY_API_KEY" ]; then + echo "$AI_GATEWAY_API_KEY" | npx wrangler secret put AI_GATEWAY_API_KEY --name "$WORKER_NAME" >&2 +fi +if [ -n "$AI_GATEWAY_BASE_URL" ]; then + echo "$AI_GATEWAY_BASE_URL" | npx wrangler secret put AI_GATEWAY_BASE_URL --name "$WORKER_NAME" >&2 +fi +if [ -n "$ANTHROPIC_API_KEY" ]; then + echo "$ANTHROPIC_API_KEY" | npx wrangler secret put ANTHROPIC_API_KEY --name "$WORKER_NAME" >&2 +fi + +echo "Worker deployed: $WORKER_NAME" >&2 diff --git a/test/e2e/fixture/server/main.tf b/test/e2e/fixture/server/main.tf new file mode 100755 index 000000000..b3a2aeb54 --- /dev/null +++ b/test/e2e/fixture/server/main.tf @@ -0,0 +1,32 @@ +terraform { + required_providers { + cloudflare = { + source = "cloudflare/cloudflare" + version = "~> 5.0" + } + } +} + +provider "cloudflare" { + api_token = var.cloudflare_api_token +} + +# Service Token for automated testing (available for future use) +resource "cloudflare_zero_trust_access_service_token" "e2e" { + account_id = var.cloudflare_account_id + name = "moltbot-e2e-${var.test_run_id}" + duration = "8760h" +} + +# R2 bucket for E2E tests (isolated from production) +resource "cloudflare_r2_bucket" "e2e" { + account_id = var.cloudflare_account_id + name = "moltbot-e2e-${var.test_run_id}" + location = "WNAM" +} + +# NOTE: Access application is NOT created here because workers.dev domains +# require the worker to exist first. 
Instead:
+# - E2E_TEST_MODE=true in the worker skips Access validation
+# - Authentication is done via MOLTBOT_GATEWAY_TOKEN
+# - Service token is created above for potential future use
diff --git a/test/e2e/fixture/server/outputs.tf b/test/e2e/fixture/server/outputs.tf
new file mode 100755
index 000000000..d834cb1b4
--- /dev/null
+++ b/test/e2e/fixture/server/outputs.tf
@@ -0,0 +1,30 @@
+output "worker_url" {
+  description = "URL of the deployed e2e worker"
+  value       = "https://moltbot-sandbox-e2e-${var.test_run_id}.${var.workers_subdomain}.workers.dev"
+}
+
+output "worker_name" {
+  description = "Name of the deployed worker"
+  value       = "moltbot-sandbox-e2e-${var.test_run_id}"
+}
+
+output "service_token_id" {
+  description = "Service token ID (for creating Access policies)"
+  value       = cloudflare_zero_trust_access_service_token.e2e.id
+}
+
+output "service_token_client_id" {
+  description = "Service token Client ID for authentication"
+  value       = cloudflare_zero_trust_access_service_token.e2e.client_id
+}
+
+output "service_token_client_secret" {
+  description = "Service token Client Secret for authentication"
+  value       = cloudflare_zero_trust_access_service_token.e2e.client_secret
+  sensitive   = true
+}
+
+output "r2_bucket_name" {
+  description = "Name of the R2 bucket for this e2e test run"
+  value       = cloudflare_r2_bucket.e2e.name
+}
diff --git a/test/e2e/fixture/server/start b/test/e2e/fixture/server/start
new file mode 100755
index 000000000..1b0e59628
--- /dev/null
+++ b/test/e2e/fixture/server/start
@@ -0,0 +1,122 @@
+#!/bin/bash
+# Start cloud e2e infrastructure and deploy worker
+#
+# This script:
+# 1. Creates a service token and an R2 bucket via terraform
+# 2. Deploys the worker with appropriate secrets
+# 3. Creates the Access application (after the worker exists)
+# 4.
Outputs connection info for tests +set -e + +VERBOSE=false +if [ "$1" = "-v" ] || [ "$1" = "--verbose" ]; then + VERBOSE=true +fi + +log() { + if [ "$VERBOSE" = true ]; then + echo "[cloud-e2e] $*" >&2 + fi +} + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +FIXTURE_DIR="$(dirname "$SCRIPT_DIR")" + +# Support running directly (not via cctr) for manual debugging +if [ -z "$CCTR_TEST_PATH" ]; then + # Running directly - E2E_DIR is parent of fixture dir + E2E_DIR="$(dirname "$FIXTURE_DIR")" + log "CCTR_TEST_PATH not set, using E2E_DIR: $E2E_DIR" +else + # Running via cctr - CCTR_TEST_PATH points to original test dir + E2E_DIR="$CCTR_TEST_PATH" +fi + +if [ -z "$CCTR_FIXTURE_DIR" ]; then + CCTR_FIXTURE_DIR="/tmp/e2e-cloud-manual" + mkdir -p "$CCTR_FIXTURE_DIR" + log "CCTR_FIXTURE_DIR not set, using: $CCTR_FIXTURE_DIR" +fi + +# Source .dev.vars if it exists (for local development) +if [ -f "$E2E_DIR/.dev.vars" ]; then + log "Loading credentials from $E2E_DIR/.dev.vars" + set -a + source "$E2E_DIR/.dev.vars" + set +a +fi + +# Check required environment variables +: "${CLOUDFLARE_API_TOKEN:?CLOUDFLARE_API_TOKEN is required}" +: "${CF_ACCOUNT_ID:?CF_ACCOUNT_ID is required}" +: "${WORKERS_SUBDOMAIN:?WORKERS_SUBDOMAIN is required}" +: "${CF_ACCESS_TEAM_DOMAIN:?CF_ACCESS_TEAM_DOMAIN is required}" +: "${R2_ACCESS_KEY_ID:?R2_ACCESS_KEY_ID is required}" +: "${R2_SECRET_ACCESS_KEY:?R2_SECRET_ACCESS_KEY is required}" + +# Use timestamp + random suffix for truly unique IDs (avoids conflicts from stale resources) +export E2E_TEST_RUN_ID="${E2E_TEST_RUN_ID:-$(date +%s)-$(openssl rand -hex 4)}" + +# Generate a gateway token for this test run +GATEWAY_TOKEN="${MOLTBOT_GATEWAY_TOKEN:-e2e-cloud-$(openssl rand -hex 16)}" +export MOLTBOT_GATEWAY_TOKEN="$GATEWAY_TOKEN" + +log "Starting cloud e2e infrastructure..." +log "Test run ID: $E2E_TEST_RUN_ID" + +# Clean up any stale terraform state from previous runs +rm -rf "$SCRIPT_DIR/.terraform" "$SCRIPT_DIR/terraform.tfstate"* "$SCRIPT_DIR/.terraform.lock.hcl" + +# Step 1: Apply terraform to create Access app, service token, R2 bucket +log "Step 1: Creating cloud infrastructure with terraform..." 
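+# Note: terraform only provisions the service token and the R2 bucket here; the Access
+# application itself is created in Step 3, after the worker is deployed (workers.dev
+# domains require the worker to exist first).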
+cd "$SCRIPT_DIR" +TERRAFORM_OUTPUT=$("$SCRIPT_DIR/terraform-apply") +log "Terraform output: $TERRAFORM_OUTPUT" + +# Parse terraform outputs +WORKER_URL=$(echo "$TERRAFORM_OUTPUT" | jq -r '.worker_url.value') +WORKER_NAME=$(echo "$TERRAFORM_OUTPUT" | jq -r '.worker_name.value') +ACCESS_AUD=$(echo "$TERRAFORM_OUTPUT" | jq -r '.access_application_aud.value') +SERVICE_TOKEN_CLIENT_ID=$(echo "$TERRAFORM_OUTPUT" | jq -r '.service_token_client_id.value') +SERVICE_TOKEN_CLIENT_SECRET=$(echo "$TERRAFORM_OUTPUT" | jq -r '.service_token_client_secret.value') +R2_BUCKET=$(echo "$TERRAFORM_OUTPUT" | jq -r '.r2_bucket_name.value') + +log "Worker URL: $WORKER_URL" +log "Worker name: $WORKER_NAME" +log "Access AUD: $ACCESS_AUD" +log "Service token client ID: $SERVICE_TOKEN_CLIENT_ID" +log "R2 bucket: $R2_BUCKET" + +# Save outputs for other scripts +echo "$TERRAFORM_OUTPUT" > "$CCTR_FIXTURE_DIR/terraform-output.json" +echo "$WORKER_URL" > "$CCTR_FIXTURE_DIR/worker-url.txt" +echo "$WORKER_NAME" > "$CCTR_FIXTURE_DIR/worker-name.txt" +echo "$GATEWAY_TOKEN" > "$CCTR_FIXTURE_DIR/gateway-token.txt" +echo "$SERVICE_TOKEN_CLIENT_ID" > "$CCTR_FIXTURE_DIR/cf-access-client-id.txt" +echo "$SERVICE_TOKEN_CLIENT_SECRET" > "$CCTR_FIXTURE_DIR/cf-access-client-secret.txt" +echo "$E2E_TEST_RUN_ID" > "$CCTR_FIXTURE_DIR/test-run-id.txt" +echo "$R2_BUCKET" > "$CCTR_FIXTURE_DIR/r2-bucket-name.txt" +echo "${WORKER_NAME}-sandbox" > "$CCTR_FIXTURE_DIR/container-name.txt" + +# Step 2: Deploy the worker +log "Step 2: Deploying worker..." +"$SCRIPT_DIR/deploy" "$TERRAFORM_OUTPUT" + +# Step 3: Create Access application (must be after worker exists) +log "Step 3: Creating Access application..." +SERVICE_TOKEN_ID=$(echo "$TERRAFORM_OUTPUT" | jq -r '.service_token_id.value') +export CLOUDFLARE_ACCOUNT_ID="$CF_ACCOUNT_ID" +ACCESS_OUTPUT=$("$SCRIPT_DIR/create-access-app" "$WORKER_NAME" "$SERVICE_TOKEN_ID") +ACCESS_APP_ID=$(echo "$ACCESS_OUTPUT" | head -1) +ACCESS_AUD=$(echo "$ACCESS_OUTPUT" | tail -1) +echo "$ACCESS_APP_ID" > "$CCTR_FIXTURE_DIR/access-app-id.txt" +echo "$ACCESS_AUD" > "$CCTR_FIXTURE_DIR/access-aud.txt" +log "Access app ID: $ACCESS_APP_ID" +log "Access AUD: $ACCESS_AUD" + +log "Cloud e2e infrastructure deployed!" +log "Worker URL: $WORKER_URL" +log "Gateway token: $GATEWAY_TOKEN" +log "Note: Worker may still be starting - browser will wait for it" +sleep 1 # Let stderr flush before stdout +echo "ready" diff --git a/test/e2e/fixture/server/stop b/test/e2e/fixture/server/stop new file mode 100755 index 000000000..73cc2fe49 --- /dev/null +++ b/test/e2e/fixture/server/stop @@ -0,0 +1,125 @@ +#!/bin/bash +# Stop and clean up ALL cloud e2e infrastructure +# +# This script: +# 1. Deletes the deployed worker +# 2. Deletes the R2 bucket (may fail if not empty - requires manual cleanup) +# 3. Deletes the service token +# 4. Cleans up local state files +set -e + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +# Find E2E_DIR for .dev.vars +if [ -n "$CCTR_TEST_PATH" ]; then + E2E_DIR="$CCTR_TEST_PATH" +else + FIXTURE_DIR="$(dirname "$SCRIPT_DIR")" + E2E_DIR="$(dirname "$FIXTURE_DIR")" +fi + +# Source .dev.vars if it exists +if [ -f "$E2E_DIR/.dev.vars" ]; then + set -a + source "$E2E_DIR/.dev.vars" + set +a +fi + +# Export CLOUDFLARE_ACCOUNT_ID (wrangler prefers this over CF_ACCOUNT_ID) +export CLOUDFLARE_ACCOUNT_ID="${CF_ACCOUNT_ID:-}" + +# Support running directly (not via cctr) +if [ -z "$CCTR_FIXTURE_DIR" ]; then + CCTR_FIXTURE_DIR="/tmp/e2e-cloud-manual" +fi + +echo "Stopping cloud e2e infrastructure..." 
>&2 + +# Read saved values from fixture dir +WORKER_NAME=$(cat "$CCTR_FIXTURE_DIR/worker-name.txt" 2>/dev/null || echo "") +R2_BUCKET=$(cat "$CCTR_FIXTURE_DIR/r2-bucket-name.txt" 2>/dev/null || echo "") +TEST_RUN_ID=$(cat "$CCTR_FIXTURE_DIR/test-run-id.txt" 2>/dev/null || echo "") +ACCESS_APP_ID=$(cat "$CCTR_FIXTURE_DIR/access-app-id.txt" 2>/dev/null || echo "") + +# Step 0: Delete the Access application first (so it stops protecting the worker) +if [ -n "$ACCESS_APP_ID" ] && [ -n "$CLOUDFLARE_API_TOKEN" ] && [ -n "$CLOUDFLARE_ACCOUNT_ID" ]; then + echo "Deleting Access application: $ACCESS_APP_ID" >&2 + curl -s -X DELETE \ + "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/access/apps/$ACCESS_APP_ID" \ + -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" >/dev/null 2>&1 || true + echo "Access application deleted" >&2 +fi + +# Step 1: Delete the deployed worker +if [ -n "$WORKER_NAME" ]; then + echo "Deleting worker: $WORKER_NAME" >&2 + "$SCRIPT_DIR/delete-worker" "$WORKER_NAME" 2>&1 || true +fi + +# Step 1b: Delete the container application +CONTAINER_NAME=$(cat "$CCTR_FIXTURE_DIR/container-name.txt" 2>/dev/null || echo "${WORKER_NAME}-sandbox") +if [ -n "$WORKER_NAME" ] && [ -n "$CLOUDFLARE_API_TOKEN" ] && [ -n "$CLOUDFLARE_ACCOUNT_ID" ]; then + echo "Deleting container: $CONTAINER_NAME" >&2 + # Find the container ID + CONTAINER_ID=$(curl -s -X GET \ + "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/containers/applications" \ + -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \ + -H "Content-Type: application/json" | \ + jq -r ".result[] | select(.name == \"$CONTAINER_NAME\") | .id" 2>/dev/null || echo "") + + if [ -n "$CONTAINER_ID" ]; then + curl -s -X DELETE \ + "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/containers/applications/$CONTAINER_ID" \ + -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \ + -H "Content-Type: application/json" >/dev/null 2>&1 || true + echo "Container deleted" >&2 + fi +fi + +# Step 2: Delete R2 bucket +# Note: wrangler doesn't have a command to list/delete objects, so if the bucket +# has objects it will fail. Use the dashboard or aws cli for manual cleanup. +if [ -n "$R2_BUCKET" ]; then + echo "Deleting R2 bucket: $R2_BUCKET" >&2 + if ! npx wrangler r2 bucket delete "$R2_BUCKET" 2>&1; then + echo "Warning: Failed to delete R2 bucket (may not be empty). Manual cleanup required." >&2 + fi +fi + +# Step 3: Delete service token via API +if [ -n "$TEST_RUN_ID" ] && [ -n "$CLOUDFLARE_API_TOKEN" ] && [ -n "$CLOUDFLARE_ACCOUNT_ID" ]; then + echo "Deleting service token: moltbot-e2e-$TEST_RUN_ID" >&2 + # Find and delete the service token + TOKEN_ID=$(curl -s -X GET \ + "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/access/service_tokens" \ + -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \ + -H "Content-Type: application/json" | \ + jq -r ".result[] | select(.name == \"moltbot-e2e-$TEST_RUN_ID\") | .id" 2>/dev/null || echo "") + + if [ -n "$TOKEN_ID" ]; then + curl -s -X DELETE \ + "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/access/service_tokens/$TOKEN_ID" \ + -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \ + -H "Content-Type: application/json" >/dev/null 2>&1 || true + echo "Service token deleted" >&2 + fi +fi + +# Step 4: Clean up local files +echo "Cleaning up local files..." 
>&2 +rm -f "$CCTR_FIXTURE_DIR/terraform-output.json" +rm -f "$CCTR_FIXTURE_DIR/worker-url.txt" +rm -f "$CCTR_FIXTURE_DIR/worker-name.txt" +rm -f "$CCTR_FIXTURE_DIR/gateway-token.txt" +rm -f "$CCTR_FIXTURE_DIR/cf-access-client-id.txt" +rm -f "$CCTR_FIXTURE_DIR/cf-access-client-secret.txt" +rm -f "$CCTR_FIXTURE_DIR/test-run-id.txt" +rm -f "$CCTR_FIXTURE_DIR/r2-bucket-name.txt" +rm -f "$CCTR_FIXTURE_DIR/container-name.txt" +rm -f "$CCTR_FIXTURE_DIR/access-app-id.txt" +rm -f "$CCTR_FIXTURE_DIR/access-aud.txt" +rm -rf "$SCRIPT_DIR/.terraform" "$SCRIPT_DIR/terraform.tfstate"* "$SCRIPT_DIR/.terraform.lock.hcl" + +echo "Cloud e2e infrastructure stopped and cleaned up" >&2 +sleep 1 # Let stderr flush before stdout +echo "stopped" diff --git a/test/e2e/fixture/server/terraform-apply b/test/e2e/fixture/server/terraform-apply new file mode 100755 index 000000000..a77db2fb2 --- /dev/null +++ b/test/e2e/fixture/server/terraform-apply @@ -0,0 +1,43 @@ +#!/bin/bash +# Initialize and apply terraform configuration for cloud e2e infrastructure +set -e + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR" + +# Required environment variables +: "${CLOUDFLARE_API_TOKEN:?CLOUDFLARE_API_TOKEN is required}" +: "${CF_ACCOUNT_ID:?CF_ACCOUNT_ID is required}" +: "${WORKERS_SUBDOMAIN:?WORKERS_SUBDOMAIN is required}" + +# Validate we're targeting the correct account +echo "Validating Cloudflare account..." >&2 +ACCOUNT_NAME=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts/$CF_ACCOUNT_ID" \ + -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \ + -H "Content-Type: application/json" | jq -r '.result.name // empty') + +if [ -z "$ACCOUNT_NAME" ]; then + echo "ERROR: Could not fetch account info for CF_ACCOUNT_ID=$CF_ACCOUNT_ID" >&2 + echo "Check your CLOUDFLARE_API_TOKEN and CF_ACCOUNT_ID" >&2 + exit 1 +fi + +echo "Deploying to account: $ACCOUNT_NAME (subdomain: $WORKERS_SUBDOMAIN)" >&2 + +# Optional: unique test run ID (defaults to "local") +TEST_RUN_ID="${E2E_TEST_RUN_ID:-local}" + +echo "Initializing terraform..." >&2 +terraform init -input=false -upgrade >&2 + +echo "Applying terraform configuration..." >&2 +terraform apply -auto-approve -input=false \ + -var="cloudflare_api_token=$CLOUDFLARE_API_TOKEN" \ + -var="cloudflare_account_id=$CF_ACCOUNT_ID" \ + -var="workers_subdomain=$WORKERS_SUBDOMAIN" \ + -var="test_run_id=$TEST_RUN_ID" \ + >&2 + +# Output the values for use by other scripts +echo "Terraform outputs:" >&2 +terraform output -json diff --git a/test/e2e/fixture/server/terraform-destroy b/test/e2e/fixture/server/terraform-destroy new file mode 100755 index 000000000..cbfa70a3d --- /dev/null +++ b/test/e2e/fixture/server/terraform-destroy @@ -0,0 +1,51 @@ +#!/bin/bash +# Destroy all terraform-managed e2e infrastructure +set -e + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR" + +# Required environment variables +: "${CLOUDFLARE_API_TOKEN:?CLOUDFLARE_API_TOKEN is required}" +: "${CF_ACCOUNT_ID:?CF_ACCOUNT_ID is required}" +: "${WORKERS_SUBDOMAIN:?WORKERS_SUBDOMAIN is required}" + +# Optional: unique test run ID (defaults to "local") +TEST_RUN_ID="${E2E_TEST_RUN_ID:-local}" + +# Check if terraform state exists +if [ ! 
-f "terraform.tfstate" ]; then + echo "No terraform state found, nothing to destroy" >&2 + exit 0 +fi + +# Get the R2 bucket name from terraform state before destroying +R2_BUCKET=$(terraform output -raw r2_bucket_name 2>/dev/null || echo "") + +# Empty the R2 bucket first (required before deletion) +if [ -n "$R2_BUCKET" ]; then + echo "Emptying R2 bucket: $R2_BUCKET" >&2 + # List and delete all objects in the bucket using wrangler + # Note: wrangler r2 object delete requires object keys, so we list first + npx wrangler r2 object list "$R2_BUCKET" --json 2>/dev/null | \ + jq -r '.objects[].key' 2>/dev/null | \ + while read -r key; do + if [ -n "$key" ]; then + npx wrangler r2 object delete "$R2_BUCKET/$key" 2>/dev/null || true + fi + done + echo "R2 bucket emptied" >&2 +fi + +echo "Destroying terraform-managed infrastructure..." >&2 +terraform destroy -auto-approve -input=false \ + -var="cloudflare_api_token=$CLOUDFLARE_API_TOKEN" \ + -var="cloudflare_account_id=$CF_ACCOUNT_ID" \ + -var="workers_subdomain=$WORKERS_SUBDOMAIN" \ + -var="test_run_id=$TEST_RUN_ID" + +# Clean up local state files +rm -f terraform.tfstate terraform.tfstate.backup +rm -rf .terraform .terraform.lock.hcl + +echo "Terraform infrastructure destroyed" >&2 diff --git a/test/e2e/fixture/server/variables.tf b/test/e2e/fixture/server/variables.tf new file mode 100755 index 000000000..e1c0659fe --- /dev/null +++ b/test/e2e/fixture/server/variables.tf @@ -0,0 +1,21 @@ +variable "cloudflare_api_token" { + description = "Cloudflare API token with Access and R2 permissions" + type = string + sensitive = true +} + +variable "cloudflare_account_id" { + description = "Cloudflare account ID" + type = string +} + +variable "workers_subdomain" { + description = "Your workers.dev subdomain (e.g., 'myaccount' for myaccount.workers.dev)" + type = string +} + +variable "test_run_id" { + description = "Unique identifier for this test run (e.g., PR number or timestamp)" + type = string + default = "local" +} diff --git a/test/e2e/fixture/server/wait-ready b/test/e2e/fixture/server/wait-ready new file mode 100755 index 000000000..49f46e0b1 --- /dev/null +++ b/test/e2e/fixture/server/wait-ready @@ -0,0 +1,43 @@ +#!/bin/bash +# Wait for the deployed worker to be ready (container cold start can take 1-2 min) +set -e + +WORKER_URL="$1" +GATEWAY_TOKEN="$2" +CF_ACCESS_CLIENT_ID="$3" +CF_ACCESS_CLIENT_SECRET="$4" + +if [ -z "$WORKER_URL" ] || [ -z "$GATEWAY_TOKEN" ] || [ -z "$CF_ACCESS_CLIENT_ID" ] || [ -z "$CF_ACCESS_CLIENT_SECRET" ]; then + echo "Usage: $0 " >&2 + exit 1 +fi + +TIMEOUT_SECONDS=300 # 5 minutes for cloud cold start +START_TIME=$(date +%s) + +echo "Waiting for worker to be ready at $WORKER_URL..." >&2 + +while true; do + ELAPSED=$(($(date +%s) - START_TIME)) + if [ "$ELAPSED" -ge "$TIMEOUT_SECONDS" ]; then + echo "Timeout waiting for worker after ${ELAPSED}s" >&2 + exit 1 + fi + + # Make request with Access service token headers + status=$(curl -s -o /dev/null -w "%{http_code}" \ + -H "CF-Access-Client-Id: $CF_ACCESS_CLIENT_ID" \ + -H "CF-Access-Client-Secret: $CF_ACCESS_CLIENT_SECRET" \ + "$WORKER_URL/?token=$GATEWAY_TOKEN" 2>/dev/null || echo "000") + + if [ "$status" = "200" ]; then + echo "Worker is ready! (HTTP $status after ${ELAPSED}s)" >&2 + echo "ready" + exit 0 + fi + + if [ $((ELAPSED % 15)) -eq 0 ]; then + echo "Still waiting... 
(${ELAPSED}s elapsed, last status: $status)" >&2 + fi + sleep 2 +done diff --git a/test/e2e/fixture/start-browser b/test/e2e/fixture/start-browser new file mode 100755 index 000000000..6338db6cb --- /dev/null +++ b/test/e2e/fixture/start-browser @@ -0,0 +1,43 @@ +#!/bin/bash +# Start playwright-cli browser session for E2E testing with Access headers + +set -e + +SESSION_NAME="moltworker-e2e" +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +# Support running directly (not via cctr) +if [ -z "$CCTR_FIXTURE_DIR" ]; then + CCTR_FIXTURE_DIR="/tmp/e2e-cloud-manual" +fi + +# Stop and delete any existing session +playwright-cli session-stop "$SESSION_NAME" >/dev/null 2>&1 || true +playwright-cli session-delete "$SESSION_NAME" >/dev/null 2>&1 || true + +# Build the args +GLOBAL_ARGS=("--session=$SESSION_NAME") + +if [ "${PLAYWRIGHT_HEADED:-}" = "1" ] || [ "${PLAYWRIGHT_HEADED:-}" = "true" ]; then + GLOBAL_ARGS+=("--headed") +fi + +# Open the browser to a blank page first +playwright-cli "${GLOBAL_ARGS[@]}" open "about:blank" >/dev/null 2>&1 & +sleep 2 + +# Read Access credentials +CF_ACCESS_CLIENT_ID=$(cat "$CCTR_FIXTURE_DIR/cf-access-client-id.txt" 2>/dev/null || echo "") +CF_ACCESS_CLIENT_SECRET=$(cat "$CCTR_FIXTURE_DIR/cf-access-client-secret.txt" 2>/dev/null || echo "") + +if [ -n "$CF_ACCESS_CLIENT_ID" ] && [ -n "$CF_ACCESS_CLIENT_SECRET" ]; then + # Set extra HTTP headers for Access authentication + playwright-cli "${GLOBAL_ARGS[@]}" run-code "async page => { + await page.context().setExtraHTTPHeaders({ + 'CF-Access-Client-Id': '$CF_ACCESS_CLIENT_ID', + 'CF-Access-Client-Secret': '$CF_ACCESS_CLIENT_SECRET' + }); + }" >/dev/null 2>&1 +fi + +echo "ready" diff --git a/test/e2e/fixture/start-server b/test/e2e/fixture/start-server new file mode 100755 index 000000000..b0a9f1a3c --- /dev/null +++ b/test/e2e/fixture/start-server @@ -0,0 +1,17 @@ +#!/bin/bash +# Start the moltworker for E2E testing (cloud deployment) +# +# Required environment variables: +# - CLOUDFLARE_API_TOKEN: API token with Workers, Access, R2 permissions +# - CF_ACCOUNT_ID: Cloudflare account ID +# - WORKERS_SUBDOMAIN: Your workers.dev subdomain +# - CF_ACCESS_TEAM_DOMAIN: Cloudflare Access team domain +# - R2_ACCESS_KEY_ID: R2 access key for bucket mounting +# - R2_SECRET_ACCESS_KEY: R2 secret key for bucket mounting +# +# Optional: +# - E2E_TEST_RUN_ID: Unique ID for this test run (default: "local") +# - AI_GATEWAY_API_KEY, AI_GATEWAY_BASE_URL: For AI provider +# - ANTHROPIC_API_KEY: Direct Anthropic access + +exec "$(dirname "$0")/server/start" "$@" diff --git a/test/e2e/fixture/stop-browser b/test/e2e/fixture/stop-browser new file mode 100755 index 000000000..e1e4a5ae0 --- /dev/null +++ b/test/e2e/fixture/stop-browser @@ -0,0 +1,8 @@ +#!/bin/bash +# Stop playwright-cli browser session + +SESSION_NAME="moltworker-e2e" + +playwright-cli session-stop "$SESSION_NAME" 2>/dev/null || true + +echo "stopped" diff --git a/test/e2e/fixture/stop-server b/test/e2e/fixture/stop-server new file mode 100755 index 000000000..23a9caff2 --- /dev/null +++ b/test/e2e/fixture/stop-server @@ -0,0 +1,9 @@ +#!/bin/bash +# Stop the moltworker and clean up ALL cloud resources +# +# This will: +# 1. Delete the deployed worker +# 2. Destroy terraform resources (Access app, service token, R2 bucket) +# 3. 
Clean up local state files + +exec "$(dirname "$0")/server/stop" "$@" diff --git a/test/e2e/pairing_and_conversation.txt b/test/e2e/pairing_and_conversation.txt new file mode 100644 index 000000000..7ae70dcb4 --- /dev/null +++ b/test/e2e/pairing_and_conversation.txt @@ -0,0 +1,82 @@ +=== +navigate to admin page to approve device +%require +=== +TOKEN=$(cat "$CCTR_FIXTURE_DIR/gateway-token.txt") +WORKER_URL=$(cat "$CCTR_FIXTURE_DIR/worker-url.txt") +./pw --session=moltworker-e2e open "$WORKER_URL/_admin/?token=$TOKEN" +--- + +=== +wait for pending devices section to load +%require +=== +./pw --session=moltworker-e2e run-code "async page => { + await page.waitForSelector('text=Pending Pairing Requests', { timeout: 120000 }); +}" +--- + +=== +wait for Approve All button and click it +%require +=== +./pw --session=moltworker-e2e run-code "async page => { + const btn = await page.waitForSelector('button:has-text(\"Approve All\")', { timeout: 120000 }); + await btn.click(); +}" +--- + +=== +wait for approval to complete +%require +=== +./pw --session=moltworker-e2e run-code "async page => { + await page.waitForSelector('text=No pending pairing requests', { timeout: 120000 }); +}" +--- + +=== +navigate back to main chat page +%require +=== +TOKEN=$(cat "$CCTR_FIXTURE_DIR/gateway-token.txt") +WORKER_URL=$(cat "$CCTR_FIXTURE_DIR/worker-url.txt") +./pw --session=moltworker-e2e open "$WORKER_URL/?token=$TOKEN" +--- + +=== +wait for chat interface to load +%require +=== +./pw --session=moltworker-e2e run-code "async page => { + await page.waitForSelector('textarea', { timeout: 120000 }); +}" +--- + +=== +type math question into chat +%require +=== +./pw --session=moltworker-e2e run-code "async page => { + const textarea = await page.waitForSelector('textarea'); + await textarea.fill('What is 847293 + 651824? Reply with just the number.'); +}" +--- + +=== +click send button +%require +=== +./pw --session=moltworker-e2e run-code "async page => { + const btn = await page.waitForSelector('button:has-text(\"Send\")'); + await btn.click(); +}" +--- + +=== +wait for response containing the correct answer +=== +./pw --session=moltworker-e2e run-code "async page => { + await page.waitForSelector('text=1499117', { timeout: 120000 }); +}" +---