diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml index 7a4d1f111..6c77d5aed 100644 --- a/.github/actions/build/action.yml +++ b/.github/actions/build/action.yml @@ -16,10 +16,11 @@ runs: - name: Build kernel shell: bash run: | + # Running as root to avoid permission issues. + # Ownership will be fixed in the calling workflow. docker run --rm \ -v "${{ inputs.workspace_path }}:${{ inputs.workspace_path }}" \ -w "${{ inputs.workspace_path }}/kernel" \ - --user $(id -u):$(id -g) \ ${{ inputs.docker_image }} \ bash -c " make O=../kobj ARCH=arm64 defconfig && @@ -33,11 +34,9 @@ runs: docker run --rm \ -v "${{ inputs.workspace_path }}:${{ inputs.workspace_path }}" \ -w "${{ inputs.workspace_path }}/video-driver" \ - --user $(id -u):$(id -g) \ ${{ inputs.docker_image }} \ bash -c " make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC=aarch64-linux-gnu-gcc-13 \ -C ${{ inputs.workspace_path }}/kobj \ M=\$(pwd) VIDEO_KERNEL_ROOT=\$(pwd) modules - " - + " \ No newline at end of file diff --git a/.github/actions/lava_job_render/action.yml b/.github/actions/lava_job_render/action.yml index fca9d86ca..e31a7e9ef 100644 --- a/.github/actions/lava_job_render/action.yml +++ b/.github/actions/lava_job_render/action.yml @@ -34,7 +34,6 @@ runs: const modulesTarUrl = findUrlByFilename('modules.tar.xz'); const imageUrl = findUrlByFilename('Image'); const mergedRamdiskUrl = findUrlByFilename('video-merged.cpio.gz'); - const vmlinuxUrl = findUrlByFilename('vmlinux'); // DTB is expected to be ".dtb" const dtbFilename = `${process.env.MACHINE}.dtb`; @@ -42,13 +41,11 @@ runs: core.setOutput('modules_url', modulesTarUrl || ''); core.setOutput('image_url', imageUrl || ''); - core.setOutput('vmlinux_url', vmlinuxUrl || ''); core.setOutput('dtb_url', dtbUrl || ''); core.setOutput('merged_ramdisk_url', mergedRamdiskUrl || ''); console.log(`Modules URL: ${modulesTarUrl}`); console.log(`Image URL: ${imageUrl}`); - console.log(`Vmlinux URL: ${vmlinuxUrl}`); 
console.log(`Dtb URL: ${dtbUrl}`); console.log(`Merged Ramdisk URL: ${mergedRamdiskUrl}`); @@ -68,19 +65,18 @@ runs: - name: Upload metadata.json id: upload_metadata - uses: qualcomm-linux/video-driver/.github/actions/aws_s3_helper@video.qclinux.0.0 + uses: qualcomm-linux-stg/video-driver/.github/actions/aws_s3_helper@workflow_2 with: local_file: ../job_render/data/metadata.json - s3_bucket: qli-prd-video-gh-artifacts + s3_bucket: qli-stg-video-gh-artifacts mode: single-upload - name: Create template json (cloudData.json) shell: bash run: | - echo "Populating cloudData.json with kernel, vmlinux, modules, metadata, ramdisk" + echo "Populating cloudData.json with kernel, modules, metadata, ramdisk" metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}" image_url="${{ steps.process_urls.outputs.image_url }}" - vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}" modules_url="${{ steps.process_urls.outputs.modules_url }}" merged_ramdisk_url="${{ steps.process_urls.outputs.merged_ramdisk_url }}" @@ -104,15 +100,6 @@ runs: ${{ inputs.docker_image }} \ jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json - # vmlinux (set only if present) - docker run -i --rm \ - --user "$(id -u):$(id -g)" \ - --workdir="$PWD" \ - -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ - -e vmlinux_url="$vmlinux_url" \ - ${{ inputs.docker_image }} \ - sh -c 'if [ -n "$vmlinux_url" ]; then jq ".artifacts.vmlinux = env.vmlinux_url" data/cloudData.json > temp.json && mv temp.json data/cloudData.json; fi' - # modules docker run -i --rm \ --user "$(id -u):$(id -g)" \ @@ -138,7 +125,7 @@ runs: # Fallback to stable kerneltest ramdisk only if merged ramdisk is not available if [ -z "${{ steps.process_urls.outputs.merged_ramdisk_url }}" ]; then echo "Merged ramdisk not found. Using stable kerneltest ramdisk fallback." 
- ramdisk_url="$(aws s3 presign s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires 7600)" + ramdisk_url="$(aws s3 presign s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires 7600)" docker run -i --rm \ --user "$(id -u):$(id -g)" \ --workdir="$PWD" \ @@ -152,7 +139,7 @@ # Optional board-specific firmware initramfs if [ -n "${{ env.FIRMWARE }}" ]; then - firmware_url="$(aws s3 presign s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/initramfs-firmware-${{ env.FIRMWARE }}-image-qcom-armv8a.cpio.gz --expires 7600)" + firmware_url="$(aws s3 presign s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/initramfs/initramfs-firmware-${{ env.FIRMWARE }}-image-qcom-armv8a.cpio.gz --expires 7600)" docker run -i --rm \ --user "$(id -u):$(id -g)" \ --workdir="$PWD" \ diff --git a/.github/actions/loading/action.yml b/.github/actions/loading/action.yml index 2b90396dd..cd564d509 100644 --- a/.github/actions/loading/action.yml +++ b/.github/actions/loading/action.yml @@ -1,4 +1,3 @@ ---- name: Load Parameters description: Load parameters for the build job @@ -6,9 +5,9 @@ outputs: build_matrix: description: Build matrix value: ${{ steps.set-matrix.outputs.build_matrix }} full_matrix: - description: full matrix containing lava devails + description: Full matrix containing lava details value: ${{ steps.set-matrix.outputs.full_matrix }} runs: @@ -21,24 +20,49 @@ runs: script: | const fs = require('fs'); const path = require('path'); - const targetsPath = path.join(process.env.GITHUB_WORKSPACE, 'video-driver', 'ci', 'MACHINES.json'); + + // 1. 
Define possible paths for MACHINES.json + // Path A: Workspace/video-driver/ci/MACHINES.json (Nested) + const pathNested = path.join(process.env.GITHUB_WORKSPACE, 'video-driver', 'ci', 'MACHINES.json'); + // Path B: Workspace/ci/MACHINES.json (Root) + const pathRoot = path.join(process.env.GITHUB_WORKSPACE, 'ci', 'MACHINES.json'); + + let targetsPath = ''; + + // 2. Check which path exists + if (fs.existsSync(pathNested)) { + console.log(`Found config at nested path: ${pathNested}`); + targetsPath = pathNested; + } else if (fs.existsSync(pathRoot)) { + console.log(`Found config at root path: ${pathRoot}`); + targetsPath = pathRoot; + } else { + // 3. Debugging: If neither exists, list files to help us see what is happening + console.log('!!! Error: MACHINES.json not found in expected locations.'); + console.log(`Checked: ${pathNested}`); + console.log(`Checked: ${pathRoot}`); + + console.log('--- Workspace Root Contents ---'); + try { + console.log(fs.readdirSync(process.env.GITHUB_WORKSPACE)); + } catch (e) { console.log(e.message); } + + core.setFailed(`MACHINES.json not found.`); + return; + } + + // 4. Parse the file let targets; try { - if (!fs.existsSync(targetsPath)) { - core.setFailed(`MACHINES.json not found at ${targetsPath}`); - return; - } targets = JSON.parse(fs.readFileSync(targetsPath, 'utf-8')); } catch (err) { - core.setFailed(`Failed to load or parse MACHINES.json: ${err.message}`); + core.setFailed(`Failed to parse MACHINES.json: ${err.message}`); return; } - // Build matrix: machine, firmware + + // 5. 
Generate Outputs const build_matrix = Object.values(targets).map(({ machine, firmware }) => ({ machine, firmware })); core.setOutput('build_matrix', JSON.stringify(build_matrix)); - console.log("Build Matrix:", build_matrix); - // Full matrix: machine, firmware, lavaname const full_matrix = Object.values(targets).map(({ machine, firmware, lavaname }) => ({ machine, firmware, lavaname })); - core.setOutput('full_matrix', JSON.stringify(full_matrix)); - console.log("Full Matrix:", full_matrix); + core.setOutput('full_matrix', JSON.stringify(full_matrix)); \ No newline at end of file diff --git a/.github/workflows/loading.yml b/.github/workflows/loading.yml index 9859304ab..68cf88cd6 100644 --- a/.github/workflows/loading.yml +++ b/.github/workflows/loading.yml @@ -1,14 +1,18 @@ ---- name: _loading description: Load required parameters for the subsequent jobs on: workflow_call: + inputs: + target_branch: + description: "Branch to checkout (optional)" + required: false + type: string + default: "" outputs: build_matrix: description: Build matrix value: ${{ jobs.loading.outputs.build_matrix }} - full_matrix: description: Full Matrix containing lava description value: ${{ jobs.loading.outputs.full_matrix }} @@ -20,14 +24,28 @@ jobs: build_matrix: ${{ steps.loading.outputs.build_matrix }} full_matrix: ${{ steps.loading.outputs.full_matrix }} steps: - - name: Sync codebase - uses: qualcomm-linux/video-driver/.github/actions/sync@video.qclinux.0.0 + # SCENARIO 1: Pull Request (Pre-Merge) + # Uses your custom sync action to merge PR code with base + - name: Sync codebase (PR) + if: github.event_name == 'pull_request' + uses: qualcomm-linux-stg/video-driver/.github/actions/sync@video.qclinux.0.0 with: event_name: ${{ github.event_name }} pr_ref: ${{ github.event.pull_request.head.ref }} pr_repo: ${{ github.event.pull_request.head.repo.full_name }} base_ref: ${{ github.ref_name }} + # SCENARIO 2: Schedule or Manual (Post-Merge) + # Uses standard checkout because there is no PR 
to sync + - name: Checkout Code (Schedule) + if: github.event_name != 'pull_request' + uses: actions/checkout@v4 + with: + # Use the input branch if provided, otherwise default to current ref + ref: ${{ inputs.target_branch || github.ref_name }} + # Check out into 'video-driver' folder so the script finds the nested path + path: video-driver + - name: Load Parameters id: loading - uses: qualcomm-linux/video-driver/.github/actions/loading@video.qclinux.0.0 + uses: qualcomm-linux-stg/video-driver/.github/actions/loading@video.qclinux.0.0 \ No newline at end of file diff --git a/.github/workflows/post_merge.yml b/.github/workflows/post_merge.yml index 38a17cfe6..ec852acfd 100644 --- a/.github/workflows/post_merge.yml +++ b/.github/workflows/post_merge.yml @@ -1,22 +1,19 @@ -name: post_merge_weekly -description: | - Runs post-merge CI for the video-driver repository on a weekly schedule. - Reuses loading, build and test workflows. +name: Post Merge Weekly on: schedule: - # Every Monday and Thursday at 02:30 UTC (8:00 AM IST) - - cron: "30 2 * * 1,4" + - cron: '*/5 * * * *' workflow_dispatch: jobs: loading: - uses: qualcomm-linux/video-driver/.github/workflows/loading.yml@video.qclinux.0.0 + # We call the reusable workflow + uses: qualcomm-linux-stg/video-driver/.github/workflows/loading.yml@video.qclinux.0.0 secrets: inherit build: needs: loading - uses: qualcomm-linux/video-driver/.github/workflows/sync-and-build.yml@video.qclinux.0.0 + uses: qualcomm-linux-stg/video-driver/.github/workflows/sync-and-build.yml@video.qclinux.0.0 secrets: inherit with: docker_image: kmake-image:ver.1.0 @@ -24,7 +21,7 @@ jobs: lava-test: needs: [loading, build] - uses: qualcomm-linux/video-driver/.github/workflows/test.yml@video.qclinux.0.0 + uses: qualcomm-linux-stg/video-driver/.github/workflows/test.yml@video.qclinux.0.0 secrets: inherit with: docker_image: kmake-image:ver.1.0 diff --git a/.github/workflows/pre_merge.yml b/.github/workflows/pre_merge.yml index f4c06559c..3bffd54c9 
100644 --- a/.github/workflows/pre_merge.yml +++ b/.github/workflows/pre_merge.yml @@ -15,12 +15,12 @@ on: jobs: loading: - uses: qualcomm-linux/video-driver/.github/workflows/loading.yml@video.qclinux.0.0 + uses: qualcomm-linux-stg/video-driver/.github/workflows/loading.yml@video.qclinux.0.0 secrets: inherit build: needs: loading - uses: qualcomm-linux/video-driver/.github/workflows/sync-and-build.yml@video.qclinux.0.0 + uses: qualcomm-linux-stg/video-driver/.github/workflows/sync-and-build.yml@video.qclinux.0.0 secrets: inherit with: docker_image: kmake-image:ver.1.0 @@ -28,9 +28,10 @@ jobs: lava-test: needs: [loading, build] - uses: qualcomm-linux/video-driver/.github/workflows/test.yml@video.qclinux.0.0 + uses: qualcomm-linux-stg/video-driver/.github/workflows/test.yml@video.qclinux.0.0 secrets: inherit with: docker_image: kmake-image:ver.1.0 build_matrix: ${{ needs.loading.outputs.build_matrix }} + full_matrix: ${{ needs.loading.outputs.full_matrix }} diff --git a/.github/workflows/sync-and-build.yml b/.github/workflows/sync-and-build.yml index a50c954d3..076688da3 100644 --- a/.github/workflows/sync-and-build.yml +++ b/.github/workflows/sync-and-build.yml @@ -1,290 +1,296 @@ -name: Sync and Build - -on: - workflow_dispatch: - workflow_call: - inputs: - docker_image: - description: Docker image to use for the build - required: false - type: string - default: kmake-image:ver.1.0 - build_matrix: - description: Build matrix for multi target builds - type: string - required: true - -permissions: - packages: read - -jobs: - sync-and-build: - runs-on: - group: GHA-video-Prd-SelfHosted-RG - labels: [self-hosted, video-prd-u2204-x64-large-od-ephem] - - steps: - - name: Pull Docker image - uses: qualcomm-linux/kernel-config/.github/actions/pull_docker_image@main - with: - image: ${{ inputs.docker_image }} - - - name: Sync codebase - uses: qualcomm-linux/video-driver/.github/actions/sync@video.qclinux.0.0 - with: - event_name: ${{ github.event_name }} - pr_ref: ${{ 
github.event.pull_request.head.ref }} - pr_repo: ${{ github.event.pull_request.head.repo.full_name }} - base_ref: ${{ github.ref_name }} - caller_workflow: build - - - name: Build workspace - uses: qualcomm-linux/video-driver/.github/actions/build@video.qclinux.0.0 - with: - docker_image: kmake-image:ver.1.0 - workspace_path: ${{ github.workspace }} - - - name: Download iris_test_app from the s3 - shell: bash - run: | - set -euo pipefail - mkdir -p "${{github.workspace }}/v4l-video-test-app/build/" - echo " syncing files from s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/iris_test_app/" - aws s3 sync "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/iris_test_app/" "${{ github.workspace }}/v4l-video-test-app/build/" - echo " ✅ Download complete" - ls ${{ github.workspace }}/v4l-video-test-app/build/ - - - name: Download firmware file from S3 - shell: bash - run: | - set -euo pipefail - mkdir -p "${{ github.workspace }}/downloads" - echo "📥 Syncing files from S3 path: s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/" - aws s3 sync "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/" "${{ github.workspace }}/downloads" - echo "✅ Download complete" - [ -f "${{ github.workspace }}/downloads/vpu20_1v.mbn" ] || { echo "❌ Missing vpu20_1v.mbn"; exit 1; } - - - name: Download the video-contents for testing - shell: bash - run: | - set -euo pipefail - mkdir -p "${{ github.workspace }}/downloads" - echo "Downloading the video-content files" - wget -q https://github.com/qualcomm-linux/qcom-linux-testkit/releases/download/IRIS-Video-Files-v1.0/video_clips_iris.tar.gz \ - -O "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" - [ -f "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" ] || { echo "❌ Failed to download video_clips_iris.tar.gz"; exit 1; } - - - name: Prepare /data/vendor/iris_test_app and list contents - shell: bash - run: | - set -euo pipefail - data_dir="${{ github.workspace 
}}/kobj/tar-install/data/vendor/iris_test_app" - mkdir -p "$data_dir" - data_dir2="${{ github.workspace }}/kobj/tar-install/data/vendor/iris_test_app/firmware" - mkdir -p "$data_dir2" - firmware_version=$(ls kobj/tar-install/lib/modules/) - mkdir -p "kobj/tar-install/lib/modules/$firmware_version/updates" - cp video-driver/iris_vpu.ko kobj/tar-install/lib/modules/$firmware_version/updates/ - # Copy test app, firmware blob, and video clips tar into data/vendor/iris_test_app - cp "v4l-video-test-app/build/iris_v4l2_test" "$data_dir/" - cp "${{ github.workspace }}/downloads/vpu20_1v.mbn" "$data_dir2/" - #cp video-driver/iris_vpu.ko "$data_dir/" - cp "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" "$data_dir/" - - echo "📂 Contents of $data_dir:" - ls -lh "$data_dir" - - - name: Create compressed kernel ramdisk archives - shell: bash - run: | - set -euo pipefail - cd "${{ github.workspace }}/kobj/tar-install" - find lib/modules data | cpio -o -H newc --owner=0:0 | gzip -9 > "${{ github.workspace }}/local-kernel-ramdisk.cpio.gz" - cd - > /dev/null - ls -lh "${{ github.workspace }}/local-kernel-ramdisk.cpio.gz" - - - name: Download meta-qcom stable initramfs artifacts from S3 - shell: bash - run: | - set -euo pipefail - mkdir -p "${{ github.workspace }}/downloads" - echo "🔍 Fetching initramfs files from S3 bucket: s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/" - aws s3 cp s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz "${{ github.workspace }}/downloads/" - echo "Initramfs files downloaded to: ${{ github.workspace }}/downloads" - - - name: Decompress ramdisk files and rename .cpio.gz files - shell: bash - run: | - set -euo pipefail - cd "${{ github.workspace }}/downloads" - echo " Decompressing and renaming .cpio.gz files..." 
- gunzip -c initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz > kerneltest.cpio - - - name: Merge and repackage initramfs - shell: bash - run: | - set -euo pipefail - echo "🔧 Starting repackaging process" - - workspace="${{ github.workspace }}" - mkdir -p "$workspace/combineramdisk" - cp "$workspace/local-kernel-ramdisk.cpio.gz" "$workspace/combineramdisk/" - cd "$workspace/combineramdisk" - - # Decompress local-kernel-ramdisk - mv local-kernel-ramdisk.cpio.gz local-kernel-ramdisk.cpio.gz.bak - gunzip -c local-kernel-ramdisk.cpio.gz.bak > local-kernel-ramdisk.cpio - - # Copy kerneltest from downloads - cp "$workspace/downloads/kerneltest.cpio" . - - # Merge kerneltest and local-kernel-ramdisk - cat kerneltest.cpio local-kernel-ramdisk.cpio > video-merged.cpio - gzip -9 video-merged.cpio - - # Create temp workspace to clean up archive - mkdir -p temp_merge - cd temp_merge - cpio -id --no-absolute-filenames < ../kerneltest.cpio - cpio -id --no-absolute-filenames < ../local-kernel-ramdisk.cpio - cd .. - - # Remove old merged archive - rm -f video-merged.cpio.gz - - # Repackage clean archive - cd temp_merge - find . | cpio -o -H newc --owner=0:0 > ../video-merged.cpio - cd .. - gzip -9 video-merged.cpio - - # Cleanup - rm -rf temp_merge kerneltest.cpio local-kernel-ramdisk.cpio - echo "Final archive: $workspace/combineramdisk/video-merged.cpio.gz" - ls -lh "$workspace/combineramdisk/video-merged.cpio.gz" - - - name: Validate build_matrix and jq - shell: bash - run: | - set -euo pipefail - machines_json='${{ inputs.build_matrix }}' - if ! command -v jq >/dev/null 2>&1; then - echo "❌ jq is not installed on this runner. Please install jq." - exit 1 - fi - echo "$machines_json" | jq -e . 
>/dev/null - [ "$(echo "$machines_json" | jq length)" -gt 0 ] || { echo "❌ build_matrix is empty"; exit 1; } - echo "✅ build_matrix is valid JSON" - - - name: Append artifacts to S3 upload list - shell: bash - run: | - set -euo pipefail - workspace="${{ github.workspace }}" - file_list="$workspace/artifacts/file_list.txt" - mkdir -p "$workspace/artifacts" - - # Fresh file_list - : > "$file_list" - - # Package lib/modules (xz-compressed) — exclude risky symlinks - mod_root="$workspace/kobj/tar-install/lib/modules" - [ -d "$mod_root" ] || { echo "❌ Missing directory: $mod_root"; exit 1; } - tar -C "$workspace/kobj/tar-install" \ - --exclude='lib/modules/*/build' \ - --exclude='lib/modules/*/source' \ - --numeric-owner --owner=0 --group=0 \ - -cJf "$workspace/modules.tar.xz" lib/modules - - # Safety checks on the tar - if tar -Jtvf "$workspace/modules.tar.xz" | grep -q ' -> '; then - echo "❌ Symlinks found in modules archive (should be none)"; exit 1 - fi - if tar -Jtf "$workspace/modules.tar.xz" | grep -Eq '^/|(^|/)\.\.(/|$)'; then - echo "❌ Unsafe paths found in modules archive"; exit 1 - fi - - echo "$workspace/modules.tar.xz" >> "$file_list" - echo "✅ Queued for upload: $workspace/modules.tar.xz" - - # Kernel Image + merged video ramdisk (no local ramdisk) - IMAGE_PATH="$workspace/kobj/arch/arm64/boot/Image" - VMLINUX_PATH="$workspace/kobj/vmlinux" - MERGED_PATH="$workspace/combineramdisk/video-merged.cpio.gz" - - [ -f "$IMAGE_PATH" ] || { echo "❌ Missing expected file: $IMAGE_PATH"; exit 1; } - [ -f "$VMLINUX_PATH" ] || { echo "❌ Missing expected file: $VMLINUX_PATH"; exit 1; } - [ -f "$MERGED_PATH" ] || { echo "❌ Missing merged cpio: $MERGED_PATH"; exit 1; } - - echo "$IMAGE_PATH" >> "$file_list" - echo "✅ Queued for upload: $IMAGE_PATH" - echo "$VMLINUX_PATH" >> "$file_list" - echo "✅ Queued for upload: $VMLINUX_PATH" - echo "$MERGED_PATH" >> "$file_list" - echo "✅ Queued for upload: $MERGED_PATH" - - # Loop through all machines from the build_matrix input and 
add DTBs - machines='${{ inputs.build_matrix }}' - for machine in $(echo "$machines" | jq -r '.[].machine'); do - dtb="$workspace/kobj/arch/arm64/boot/dts/qcom/${machine}.dtb" - if [ -f "$dtb" ]; then - echo "$dtb" >> "$file_list" - echo "✅ Queued for upload: $dtb" - else - echo "❌ Missing DTB: $dtb" - exit 1 - fi - done - - echo "----- Files queued for S3 upload -----" - cat "$file_list" - - - name: Upload all artifacts to S3 - uses: qualcomm-linux/video-driver/.github/actions/aws_s3_helper@video.qclinux.0.0 - with: - s3_bucket: qli-prd-video-gh-artifacts - local_file: ${{ github.workspace }}/artifacts/file_list.txt - mode: multi-upload - upload_location: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.run_id }}-${{ github.run_attempt }} - - - - name: Clean up - if: always() - shell: bash - run: | - set -euo pipefail - ws="${{ github.workspace }}" - rm -rf "$ws/artqifacts" || true - rm -rf "$ws/combineramdisk" || true - rm -rf "$ws/downloads" || true - rm -rf "$ws/kobj" || true - rm -f "$ws/modules.tar.xz" || true - rm -f "$ws/local-kernel-ramdisk.cpio.gz" || true - - - - name: Update summary - if: success() || failure() - shell: bash - run: | - status="${{ steps.build_workspace.outcome }}" - if [ "$status" = "success" ]; then - summary=":heavy_check_mark: Build Success" - else - summary=":x: Build Failed" - fi - - ws="${{ github.workspace }}" - file_list="$ws/artifacts/file_list.txt" - - { - echo "
Build Summary" - echo "$summary" - if [ -f "$file_list" ]; then - echo "" - echo "Artifacts queued for upload:" - while IFS= read -r line; do - echo "- $line" - done < "$file_list" - fi - echo "
" - } >> "$GITHUB_STEP_SUMMARY" \ No newline at end of file +name: Sync and Build + +on: + workflow_dispatch: + workflow_call: + inputs: + docker_image: + description: Docker image to use for the build + required: false + type: string + default: kmake-image:ver.1.0 + build_matrix: + description: Build matrix for multi target builds + type: string + required: true + +permissions: + packages: read + +jobs: + sync-and-build: + runs-on: + group: GHA-video-Stg-SelfHosted-RG + labels: [self-hosted, video-stg-u2204-x64-large-od-ephem] + + steps: + - name: Pull Docker image + uses: qualcomm-linux/kernel-config/.github/actions/pull_docker_image@main + with: + image: ${{ inputs.docker_image }} + + # ------------------------------------------------------------------------ + # ✅ CRITICAL FIX: Explicitly checkout the driver code. + # This ensures the source code exists for Post-Merge/Scheduled runs. + # ------------------------------------------------------------------------ + - name: Checkout Video Driver + uses: actions/checkout@v4 + + - name: Sync codebase + uses: qualcomm-linux-stg/video-driver/.github/actions/sync@video.qclinux.0.0 + with: + event_name: ${{ github.event_name }} + pr_ref: ${{ github.event.pull_request.head.ref }} + pr_repo: ${{ github.event.pull_request.head.repo.full_name }} + base_ref: ${{ github.ref_name }} + caller_workflow: build + - name: Build workspace + uses: qualcomm-linux-stg/video-driver/.github/actions/build@video.qclinux.0.0 + with: + docker_image: kmake-image:ver.1.0 + workspace_path: ${{ github.workspace }} + + - name: Download iris_test_app from the s3 + shell: bash + run: | + set -euo pipefail + mkdir -p "${{github.workspace }}/v4l-video-test-app/build/" + echo " syncing files from s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/iris_test_app/" + aws s3 sync "s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/iris_test_app/" "${{ github.workspace }}/v4l-video-test-app/build/" + echo " ✅ Download complete" + ls 
${{ github.workspace }}/v4l-video-test-app/build/ + + - name: Download firmware file from S3 + shell: bash + run: | + set -euo pipefail + mkdir -p "${{ github.workspace }}/downloads" + echo "📥 Syncing files from S3 path: s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/" + aws s3 sync "s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/" "${{ github.workspace }}/downloads" + echo "✅ Download complete" + [ -f "${{ github.workspace }}/downloads/vpu20_1v.mbn" ] || { echo "❌ Missing vpu20_1v.mbn"; exit 1; } + + - name: Download the video-contents for testing + shell: bash + run: | + set -euo pipefail + mkdir -p "${{ github.workspace }}/downloads" + echo "Downloading the video-content files" + wget -q https://github.com/qualcomm-linux/qcom-linux-testkit/releases/download/IRIS-Video-Files-v1.0/video_clips_iris.tar.gz \ + -O "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" + [ -f "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" ] || { echo "❌ Failed to download video_clips_iris.tar.gz"; exit 1; } + + - name: Prepare /data/vendor/iris_test_app and list contents + shell: bash + run: | + set -euo pipefail + data_dir="${{ github.workspace }}/kobj/tar-install/data/vendor/iris_test_app" + mkdir -p "$data_dir" + data_dir2="${{ github.workspace }}/kobj/tar-install/data/vendor/iris_test_app/firmware" + mkdir -p "$data_dir2" + firmware_version=$(ls kobj/tar-install/lib/modules/) + mkdir -p "kobj/tar-install/lib/modules/$firmware_version/updates" + cp video-driver/iris_vpu.ko kobj/tar-install/lib/modules/$firmware_version/updates/ + # Copy test app, firmware blob, and video clips tar into data/vendor/iris_test_app + cp "v4l-video-test-app/build/iris_v4l2_test" "$data_dir/" + cp "${{ github.workspace }}/downloads/vpu20_1v.mbn" "$data_dir2/" + #cp video-driver/iris_vpu.ko "$data_dir/" + cp "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" "$data_dir/" + + echo "📂 Contents of $data_dir:" + ls -lh 
"$data_dir" + + - name: Create compressed kernel ramdisk archives + shell: bash + run: | + set -euo pipefail + cd "${{ github.workspace }}/kobj/tar-install" + find lib/modules data | cpio -o -H newc --owner=0:0 | gzip -9 > "${{ github.workspace }}/local-kernel-ramdisk.cpio.gz" + cd - > /dev/null + ls -lh "${{ github.workspace }}/local-kernel-ramdisk.cpio.gz" + + - name: Download meta-qcom stable initramfs artifacts from S3 + shell: bash + run: | + set -euo pipefail + mkdir -p "${{ github.workspace }}/downloads" + echo "🔍 Fetching initramfs files from S3 bucket: s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/initramfs/" + aws s3 cp s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz "${{ github.workspace }}/downloads/" + echo "Initramfs files downloaded to: ${{ github.workspace }}/downloads" + + - name: Decompress ramdisk files and rename .cpio.gz files + shell: bash + run: | + set -euo pipefail + cd "${{ github.workspace }}/downloads" + echo " Decompressing and renaming .cpio.gz files..." + gunzip -c initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz > kerneltest.cpio + + - name: Merge and repackage initramfs + shell: bash + run: | + set -euo pipefail + echo "🔧 Starting repackaging process" + + workspace="${{ github.workspace }}" + mkdir -p "$workspace/combineramdisk" + cp "$workspace/local-kernel-ramdisk.cpio.gz" "$workspace/combineramdisk/" + cd "$workspace/combineramdisk" + + # Decompress local-kernel-ramdisk + mv local-kernel-ramdisk.cpio.gz local-kernel-ramdisk.cpio.gz.bak + gunzip -c local-kernel-ramdisk.cpio.gz.bak > local-kernel-ramdisk.cpio + + # Copy kerneltest from downloads + cp "$workspace/downloads/kerneltest.cpio" . 
+ + # Merge kerneltest and local-kernel-ramdisk + cat kerneltest.cpio local-kernel-ramdisk.cpio > video-merged.cpio + gzip -9 video-merged.cpio + + # Create temp workspace to clean up archive + mkdir -p temp_merge + cd temp_merge + cpio -id --no-absolute-filenames < ../kerneltest.cpio + cpio -id --no-absolute-filenames < ../local-kernel-ramdisk.cpio + cd .. + + # Remove old merged archive + rm -f video-merged.cpio.gz + + # Repackage clean archive + cd temp_merge + find . | cpio -o -H newc --owner=0:0 > ../video-merged.cpio + cd .. + gzip -9 video-merged.cpio + + # Cleanup + rm -rf temp_merge kerneltest.cpio local-kernel-ramdisk.cpio + echo "Final archive: $workspace/combineramdisk/video-merged.cpio.gz" + ls -lh "$workspace/combineramdisk/video-merged.cpio.gz" + + - name: Validate build_matrix and jq + shell: bash + run: | + set -euo pipefail + machines_json='${{ inputs.build_matrix }}' + if ! command -v jq >/dev/null 2>&1; then + echo "❌ jq is not installed on this runner. Please install jq." + exit 1 + fi + echo "$machines_json" | jq -e . 
>/dev/null + [ "$(echo "$machines_json" | jq length)" -gt 0 ] || { echo "❌ build_matrix is empty"; exit 1; } + echo "✅ build_matrix is valid JSON" + + - name: Append artifacts to S3 upload list + shell: bash + run: | + set -euo pipefail + workspace="${{ github.workspace }}" + file_list="$workspace/artifacts/file_list.txt" + mkdir -p "$workspace/artifacts" + + # Fresh file_list + : > "$file_list" + + # Package lib/modules (xz-compressed) — exclude risky symlinks + mod_root="$workspace/kobj/tar-install/lib/modules" + [ -d "$mod_root" ] || { echo "❌ Missing directory: $mod_root"; exit 1; } + tar -C "$workspace/kobj/tar-install" \ + --exclude='lib/modules/*/build' \ + --exclude='lib/modules/*/source' \ + --numeric-owner --owner=0 --group=0 \ + -cJf "$workspace/modules.tar.xz" lib/modules + + # Safety checks on the tar + if tar -Jtvf "$workspace/modules.tar.xz" | grep -q ' -> '; then + echo "❌ Symlinks found in modules archive (should be none)"; exit 1 + fi + if tar -Jtf "$workspace/modules.tar.xz" | grep -Eq '^/|(^|/)\.\.(/|$)'; then + echo "❌ Unsafe paths found in modules archive"; exit 1 + fi + + echo "$workspace/modules.tar.xz" >> "$file_list" + echo "✅ Queued for upload: $workspace/modules.tar.xz" + + # Kernel Image + merged video ramdisk (no local ramdisk) + IMAGE_PATH="$workspace/kobj/arch/arm64/boot/Image" + VMLINUX_PATH="$workspace/kobj/vmlinux" + MERGED_PATH="$workspace/combineramdisk/video-merged.cpio.gz" + + [ -f "$IMAGE_PATH" ] || { echo "❌ Missing expected file: $IMAGE_PATH"; exit 1; } + [ -f "$VMLINUX_PATH" ] || { echo "❌ Missing expected file: $VMLINUX_PATH"; exit 1; } + [ -f "$MERGED_PATH" ] || { echo "❌ Missing merged cpio: $MERGED_PATH"; exit 1; } + + echo "$IMAGE_PATH" >> "$file_list" + echo "✅ Queued for upload: $IMAGE_PATH" + echo "$VMLINUX_PATH" >> "$file_list" + echo "✅ Queued for upload: $VMLINUX_PATH" + echo "$MERGED_PATH" >> "$file_list" + echo "✅ Queued for upload: $MERGED_PATH" + + # Loop through all machines from the build_matrix input and 
add DTBs + machines='${{ inputs.build_matrix }}' + for machine in $(echo "$machines" | jq -r '.[].machine'); do + dtb="$workspace/kobj/arch/arm64/boot/dts/qcom/${machine}.dtb" + if [ -f "$dtb" ]; then + echo "$dtb" >> "$file_list" + echo "✅ Queued for upload: $dtb" + else + echo "❌ Missing DTB: $dtb" + exit 1 + fi + done + + echo "----- Files queued for S3 upload -----" + cat "$file_list" + + - name: Upload all artifacts to S3 + uses: qualcomm-linux-stg/video-driver/.github/actions/aws_s3_helper@workflow_2 + with: + s3_bucket: qli-stg-video-gh-artifacts + local_file: ${{ github.workspace }}/artifacts/file_list.txt + mode: multi-upload + upload_location: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.run_id }}-${{ github.run_attempt }} + + + - name: Clean up + if: always() + shell: bash + run: | + set -euo pipefail + ws="${{ github.workspace }}" + rm -rf "$ws/artifacts" || true + rm -rf "$ws/combineramdisk" || true + rm -rf "$ws/downloads" || true + rm -rf "$ws/kobj" || true + rm -f "$ws/modules.tar.xz" || true + rm -f "$ws/local-kernel-ramdisk.cpio.gz" || true + + + - name: Update summary + if: success() || failure() + shell: bash + run: | + status="${{ steps.build_workspace.outcome }}" + if [ "$status" = "success" ]; then + summary=":heavy_check_mark: Build Success" + else + summary=":x: Build Failed" + fi + + ws="${{ github.workspace }}" + file_list="$ws/artifacts/file_list.txt" + + { + echo "
Build Summary" + echo "$summary" + if [ -f "$file_list" ]; then + echo "" + echo "Artifacts queued for upload:" + while IFS= read -r line; do + echo "- $line" + done < "$file_list" + fi + echo "
" + } >> "$GITHUB_STEP_SUMMARY" \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 505eb418b..e210b773e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -23,8 +23,8 @@ on: jobs: test: runs-on: - group: GHA-video-Prd-SelfHosted-RG - labels: [ self-hosted, video-prd-u2204-x64-large-od-ephem ] + group: GHA-video-Stg-SelfHosted-RG + labels: [ self-hosted, video-stg-u2204-x64-large-od-ephem ] strategy: fail-fast: false matrix: @@ -72,7 +72,7 @@ jobs: - name: Create lava job definition id: create_job_definition - uses: qualcomm-linux/video-driver/.github/actions/lava_job_render@video.qclinux.0.0 + uses: qualcomm-linux-stg/video-driver/.github/actions/lava_job_render@video.qclinux.0.0 with: docker_image: ${{ inputs.docker_image }} env: