name: "Yocto Build-Test-Deploy"
on:
workflow_call:
secrets:
# BALENA_API_DEPLOY_KEY is a secret that should be specific to the runtime environment
# It requires permissions to deploy hostApp releases, and fetch supervisor release images (via yocto recipes)
BALENA_API_DEPLOY_KEY:
description: balena API key for the deploy environment, used for deploying hostApps and fetching supervisor releases
required: false
# BALENA_API_TEST_KEY is a secret that should be specific to the runtime environment
# It requires permissions to manage autokit workers, and create test fleets
BALENA_API_TEST_KEY:
description: balena API key for the test environment, used for finding autokit workers and creating test fleets
required: false
# Dockerhub secrets are used only for pulling the helper image for "Prepare files for S3" step - if we simplify this to not use the
# helper image, these secrets can be removed
DOCKERHUB_USER:
description: Dockerhub user for pulling private helper images
required: false
DOCKERHUB_TOKEN:
description: Dockerhub token for pulling private helper images
required: false
SIGN_KMOD_KEY_APPEND:
description: Base64-encoded public key of a kernel module signing keypair
required: false
# SIGN_API_KEY is a secret that should be specific to the runtime environment
# It requires permissions to access the image signing server
SIGN_API_KEY:
description: balena API key that provides access to the signing server
required: false
GH_APP_PRIVATE_KEY:
description: "GPG Private Key for GitHub App to generate ephemeral tokens (used with vars.FLOWZONE_APP_ID)"
required: false
PBDKF2_PASSPHRASE:
description: "Passphrase used to encrypt/decrypt balenaOS assets at rest in GitHub."
required: false
inputs:
build-runs-on:
description: The runner labels to use for the build job(s)
required: false
type: string
default: >
[
"self-hosted",
"X64",
"yocto"
]
device-repo:
description: balenaOS device repository (owner/repo)
required: false
type: string
default: ${{ github.repository }}
device-repo-ref:
description: balenaOS device repository tag, branch, or commit to build
required: false
type: string
default: ${{ github.ref }}
meta-balena-ref:
description: meta-balena ref if not the currently pinned version
required: false
type: string
yocto-scripts-ref:
description: balena-yocto-scripts ref if not the currently pinned version
required: false
type: string
machine:
description: yocto board name
required: true
type: string
deploy-environment:
description: The GitHub environment to deploy to - includes the balena Cloud environment and related vars
required: false
type: string
default: balena-cloud.com
# This input exists because we want the option to not auto-finalise for some device types, even if they have tests and those tests pass - for example, for some custom device types the customer doesn't want new releases published until they green-light it
finalize-on-push-if-tests-passed:
description: Whether to finalize a hostApp container image to a balena environment, if tests pass.
required: false
type: boolean
default: true # Default behaviour is auto-finalise if tests pass, unless opted out by customer
# For use when we need to force deploy a release, for example after manual testing (overrides finalize-on-push-if-tests-passed)
force-finalize:
description: Force deploy a finalized release
required: false
type: boolean
default: false
deploy-ami:
description: Whether to deploy an AMI to AWS
required: false
type: boolean
default: false # For now always false, as it doesn't work.
sign-image:
description: Whether to sign image for secure boot
required: false
type: boolean
default: false # Always false by default, override on specific device types which this is relevant in the device repo
# Supported fields for the test matrix:
# - test_suite: (required) The test suite to run. The valid test suites are `os`, `hup`, and `cloud`
# - environment: (required) The balenaCloud environment to use for testing, e.g. `bm.balena-dev.com` or `balena-cloud.com`
# - worker_type: The worker type to use for testing. The valid worker types are `qemu` and `testbot`. The default worker type is `testbot`
# - worker_fleets: The testbot fleets for finding available Leviathan workers. Not used for QEMU workers. Accepts a comma-separated list of fleets, with no spaces in between
# - test_org: The organization to use for testing cloud functionality. The default org is `testbot`
# - runs_on: A JSON array of runner labels to use for the test job(s). For qemu workers use the labels `["self-hosted", "X64", "kvm"]`.
# - secure_boot: (truthy) Enable secure boot testing flags QEMU_SECUREBOOT=1 and FLASHER_SECUREBOOT=1. Default is false.
# To use specific settings for each test job, create an include array like this...
# {"include": [
# {
# "test_suite": "os",
# "environment": "bm.balena-dev.com"
# },
# {
# "test_suite": "cloud",
# "environment": "balena-cloud.com",
# "test_org": "testbot"
# },
# {
# "test_suite": "hup",
# "environment": "balena-cloud.com",
# "worker_type": "qemu",
# "runs_on": ["self-hosted", "X64", "kvm"]
# }
# ]}
# Alternatively, you can provide arrays of values and the matrix will expand combinatorially, executing every permutation of the provided values ...
# {
# "test_suite": ["os","cloud","hup"],
# "environment": ["bm.balena-dev.com"],
# "worker_type": ["qemu","testbot"],
# "runs_on": [["self-hosted", "X64", "kvm"]]
# }
test_matrix:
description: "JSON Leviathan test matrix to use for testing. No tests will be run if not provided."
required: false
type: string
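# As a reference, a minimal caller sketch for this reusable workflow - the repository
# path, ref, machine, and matrix below are hypothetical placeholders, not prescriptions:
#
# jobs:
#   yocto:
#     uses: balena-os/github-workflows/.github/workflows/yocto-build-deploy.yml@master
#     secrets: inherit
#     with:
#       machine: raspberrypi4-64
#       test_matrix: >
#         {"include": [{"test_suite": "os", "environment": "balena-cloud.com"}]}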
# https://docs.github.com/en/actions/using-jobs/using-concurrency
# https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/control-the-concurrency-of-workflows-and-jobs
# The following concurrency group cancels in-progress jobs or runs on pull_request events only;
# if github.head_ref is undefined, the concurrency group will fall back to the run ID,
# which is guaranteed to be both unique and defined for the run.
# From: https://github.com/orgs/community/discussions/69704#discussioncomment-7803351
# The available contexts for cancel-in-progress expressions are:
# - github: This context provides access to various GitHub-specific variables,
# such as github.event_name, github.ref, and github.workflow.
# - inputs: This context allows you to access input parameters defined in the workflow.
# This is particularly useful for conditional cancellation based on user-specified settings.
# - vars: This context provides access to workflow-defined variables,
# which can be used to store intermediate values or constants.
# When evaluating expressions for cancel-in-progress, certain parameters may not be available at the time of evaluation.
# For instance, the github.job context is not accessible, as it's specific to the running job and not the concurrency group.
# Note that we do not use github.ref here, as PRs from forks will have a
# ref of 'refs/heads/master' and collide with each other. Instead, we use github.head_ref
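# As a worked example (hypothetical values): a PR branch "my-feature" building machine
# "raspberrypi4-64" against "balena-cloud.com" evaluates to the group
# "Yocto Build-Test-Deploy-my-feature-raspberrypi4-64-balena-cloud.com", so a new push
# to the same PR cancels the previous in-progress run.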
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}-${{ inputs.machine }}-${{ inputs.deploy-environment }}
# Cancel jobs in-progress for open PRs, but not merged or closed PRs, by checking for the merge ref.
# Note that for pull_request_target events (PRs from forks), the github.ref value is
# usually 'refs/heads/master' so we can't rely on that to determine if it is a merge event or not.
# As a result pull_request_target events will never cancel in-progress jobs and will be queued instead.
cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
env:
WORKSPACE: ${{ github.workspace }}
MACHINE: ${{ inputs.machine }}
VERBOSE: verbose
WORKFLOW_NAME: ${{ github.workflow }}
# https://docs.github.com/en/actions/security-guides/automatic-token-authentication
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
# https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings
permissions:
id-token: write # This is required for requesting the JWT #https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services#requesting-the-access-token
actions: read # We are fetching workflow run results of a merge commit when workflow is triggered by new tag, to see if tests pass
pull-requests: write # Read is required to fetch the PR that merged, in order to get the test results. Write is required to create PR comments for workflow approvals.
jobs:
build:
name: Build
runs-on: ${{ fromJSON(inputs.build-runs-on) }}
environment: ${{ inputs.deploy-environment }}
env:
automation_dir: "${{ github.workspace }}/balena-yocto-scripts/automation"
BALENARC_BALENA_URL: ${{ vars.BALENA_HOST || inputs.deploy-environment || 'balena-cloud.com' }}
API_ENV: ${{ vars.BALENA_HOST || inputs.deploy-environment || 'balena-cloud.com' }}
BARYS_ARGUMENTS_VAR: ""
# https://docs.yoctoproject.org/3.1.21/overview-manual/overview-manual-concepts.html#user-configuration
# Create an autobuilder configuration file that is loaded before local.conf
AUTO_CONF_FILE: "${{ github.workspace }}/build/conf/auto.conf"
outputs:
os_version: ${{ steps.balena-lib.outputs.os_version }}
device_slug: ${{ steps.balena-lib.outputs.device_slug }}
deploy_artifact: ${{ steps.balena-lib.outputs.deploy_artifact }}
is_private: ${{ steps.balena-lib.outputs.is_private }}
dt_arch: ${{ steps.balena-lib.outputs.dt_arch }}
meta_balena_version: ${{ steps.balena-lib.outputs.meta_balena_version }}
yocto_scripts_ref: ${{ steps.balena-lib.outputs.yocto_scripts_ref }}
yocto_scripts_version: ${{ steps.balena-lib.outputs.yocto_scripts_version }}
defaults:
run:
working-directory: .
shell: bash --noprofile --norc -eo pipefail -x {0}
steps:
# Combining pull_request_target workflow trigger with an explicit checkout of an
# untrusted PR is a dangerous practice that may lead to repository compromise.
# https://securitylab.github.com/resources/github-actions-preventing-pwn-requests/
# This action requires approvals via reactions for each workflow run.
# https://github.com/product-os/review-commit-action
- name: Wait for approval on pull_request_target events
if: github.event_name == 'pull_request_target' && github.event.pull_request.merged != true
timeout-minutes: 90
uses: product-os/review-commit-action@cddebf4cec8e40ea8f698b6dcce8cd70e38b7320 # v0.1.7
with:
poll-interval: "10"
allow-authors: false
# this must be done before putting files in the workspace
# https://github.com/easimon/maximize-build-space
- name: Maximize build space
if: contains(fromJSON(inputs.build-runs-on), 'ubuntu-latest') == true
uses: easimon/maximize-build-space@fc881a613ad2a34aca9c9624518214ebc21dfc0c
with:
root-reserve-mb: "4096"
temp-reserve-mb: "1024"
swap-size-mb: "4096"
remove-dotnet: "true"
remove-android: "true"
remove-haskell: "true"
remove-codeql: "true"
remove-docker-images: "true"
# Generate an app installation token that has access to
# all repos where the app is installed (usually the whole org)
# Owner input to make token valid for all repositories in the org
# This behaviour is required for private submodules
# https://github.com/actions/create-github-app-token
- name: Create GitHub App installation token
uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0
id: app-token
if: vars.FLOWZONE_APP_ID != ''
with:
app-id: ${{ vars.FLOWZONE_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
# Generate another app token for the balena-io organization
# so we can checkout private contracts
# https://github.com/actions/create-github-app-token
- name: Create GitHub App installation token (balena-io)
uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0
id: app-token-balena-io
if: vars.FLOWZONE_APP_ID != ''
with:
app-id: ${{ vars.FLOWZONE_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
owner: balena-io
# https://github.com/actions/checkout
- name: Clone device repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: ${{ inputs.device-repo }}
token: ${{ steps.app-token.outputs.token || secrets.GITHUB_TOKEN }}
ref: ${{ inputs.device-repo-ref }} # In the case of a new tagged version, this will be the new tag, taken from the push event ref
submodules: true
fetch-depth: 0 # Full history is needed so git describe can find the latest version tag below
fetch-tags: true
# Do not persist the app installation token credentials,
# and prefer that each step provide credentials where required
persist-credentials: false
# In the old workflow we had to fetch the merge commit, get the check runs from the PR, and check if a device type passed or failed
# reference: https://github.com/balena-os/github-workflows/blob/master/.github/workflows/build_and_deploy.yml#L89
# NOTE: This would not be necessary if we had a way to deploy artifacts and mark them as final like with fleet releases
# We're also checking out the tag in this step, so the subsequent build is done from the tagged version of the device repo
- name: "Fetch merge commit"
id: set-merge-commit
if: ${{ github.event_name == 'push' }} # Only perform on push event - i.e a new version tag
run: |
merge_commit=$(git rev-parse :/"^Merge pull request")
echo "Found merge commit ${merge_commit}"
echo "merge_commit=${merge_commit}" >>"${GITHUB_OUTPUT}"
# This will control the deployment of the hostapp only - it will determine if it is marked as final or not
# The hostapp being finalised is what determines if the API will present this OS version to user
# If the test_matrix is empty - it means there are no tests for the DT - so don't check tests, and don't finalise, unless manually done with "force-finalize" input
- name: Check test results
# https://docs.github.com/en/actions/learn-github-actions/expressions#functions
# this expression checks that the test_matrix input is truthy - if no test_matrix input is provided in the device-repo workflow file, test results won't be checked, and
# the release can't be finalized
if: github.event_name == 'push' && inputs.test_matrix && inputs.finalize-on-push-if-tests-passed
id: merge-test-result
env:
REPO: ${{ inputs.device-repo }}
COMMIT: ${{ steps.set-merge-commit.outputs.merge_commit }}
# environment variables used by gh CLI
# https://cli.github.com/manual/gh_help_environment
GH_DEBUG: "true"
GH_PAGER: "cat"
GH_PROMPT_DISABLED: "true"
GH_REPO: "${{ github.repository }}"
GH_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
run: |
# Gets the PR number of the merge commit
prid=$(gh api -H "Accept: application/vnd.github+json" "/repos/${REPO}/commits/$COMMIT" --jq '.commit.message' | head -n1 | cut -d "#" -f2 | awk '{ print $1}')
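# e.g. a merge commit message of "Merge pull request #123 from owner/branch" yields prid=123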
# Gets the head commit of the PR - needed to fetch workflows ran on that commit
head=$(gh api -H "Accept: application/vnd.github+json" "/repos/${REPO}/pulls/${prid}" --jq '.head.sha')
# Fetching workflow runs and filtering by the commit of the head of the PR returns the latest attempts of the workflow for that commit
# Selecting for workflows with the same name as the workflow name ("github.workflow")
# There will be "pull_request" and "pull_request_target" triggered workflow runs in the response - one will be skipped, one will be success/fail
# So selecting for .conclusion==success will give us a response and evaluate to true in the following "if" statement if either was successful
passed="false"
conclusion="$(gh run list -w "${WORKFLOW_NAME}" -c "${head}" --json conclusion --jq '.[] | select(.conclusion == "success").conclusion')"
if [[ "${conclusion}" = "success" ]]; then
passed="true"
fi
echo "finalize=${passed}" >>"${GITHUB_OUTPUT}"
# Check if the repository is a yocto device repository
- name: Device repository check
run: |
if [ "$(yq '.type' repo.yml)" != "yocto-based OS image" ]; then
echo "::error::Repository does not appear to be of type 'yocto-based OS image'"
exit 1
fi
# Checkout the right ref for meta-balena submodule
- name: Update meta-balena submodule to ${{ inputs.meta-balena-ref }}
if: inputs.meta-balena-ref != ''
working-directory: ./layers/meta-balena
run: |
git config --add remote.origin.fetch '+refs/pull/*:refs/remotes/origin/pr/*'
git fetch --all
git checkout --force "${{ inputs.meta-balena-ref }}"
git submodule update --init --recursive
# Checkout the right ref for balena-yocto-scripts submodule
- name: Update balena-yocto-scripts submodule to ${{ inputs.yocto-scripts-ref }}
if: inputs.yocto-scripts-ref != ''
working-directory: ./balena-yocto-scripts
run: |
git config --add remote.origin.fetch '+refs/pull/*:refs/remotes/origin/pr/*'
git fetch --all
git checkout --force "${{ inputs.yocto-scripts-ref }}"
git submodule update --init --recursive
# A lot of outputs inferred from here are used everywhere else in the workflow
- name: Set build outputs
id: balena-lib
env:
CURL: "curl --silent --retry 10 --location --compressed"
TRANSLATION: "v6"
BALENAOS_TOKEN: ${{ secrets.BALENA_API_DEPLOY_KEY }}
run: |
source "${automation_dir}/include/balena-api.inc"
source "${automation_dir}/include/balena-lib.inc"
./balena-yocto-scripts/build/build-device-type-json.sh
device_slug="$(balena_lib_get_slug "${MACHINE}")"
echo "device_slug=${device_slug}" >>"${GITHUB_OUTPUT}"
# As we use this to determine the os version from the device repository - when checking out the repo we need enough fetch depth to get tags
os_version=$(git describe --abbrev=0)
echo "os_version=${os_version#v*}" >>"${GITHUB_OUTPUT}"
meta_balena_version="$(balena_lib_get_meta_balena_base_version)"
echo "meta_balena_version=${meta_balena_version}" >>"${GITHUB_OUTPUT}"
yocto_scripts_ref="$(git submodule status balena-yocto-scripts | awk '{print $1}')"
echo "yocto_scripts_ref=${yocto_scripts_ref}" >>"${GITHUB_OUTPUT}"
yocto_scripts_version="$(cd balena-yocto-scripts && head -n1 VERSION)"
echo "yocto_scripts_version=${yocto_scripts_version}" >>"${GITHUB_OUTPUT}"
deploy_artifact="$(balena_lib_get_deploy_artifact "${MACHINE}")"
echo "deploy_artifact=${deploy_artifact}" >>"${GITHUB_OUTPUT}"
dt_arch="$(balena_lib_get_dt_arch "${MACHINE}")"
echo "dt_arch=${dt_arch}" >>"${GITHUB_OUTPUT}"
# Unrolled balena_api_is_dt_private function - https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L424
# Had to be unrolled because this function https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-lib.inc#L191 relies on a Jenkins env var to select the balena env - so it failed here
is_private=$(${CURL} -XGET -H "Content-type: application/json" -H "Authorization: bearer ${BALENAOS_TOKEN}" --silent --retry 5 "https://api.${API_ENV}/${TRANSLATION}/device_type?\$filter=slug%20eq%20%27${device_slug}%27&\$select=slug,is_private" | jq -r '.d[0].is_private')
echo "is_private=${is_private}" >>"${GITHUB_OUTPUT}"
- name: Checkout private Contracts
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: steps.balena-lib.outputs.is_private == 'true'
with:
repository: balena-io/private-contracts
token: ${{ steps.app-token-balena-io.outputs.token }}
path: ${{ github.workspace }}/private-contracts
# Do not persist the token credentials,
# and prefer that each step provide credentials where required
persist-credentials: false
# Unrolled balena_api_is_dt_private function - https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L424
# Had to be unrolled because this function https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-lib.inc#L191 relies on a Jenkins env var to select the balena env - so it failed here
- name: Build OS contract
env:
CONTRACTS_BUILD_DIR: "${{ github.workspace }}/balena-yocto-scripts/build/contracts"
NODE: node
DEVICE_TYPE_SLUG: ${{ steps.balena-lib.outputs.device_slug }}
CONTRACTS_OUTPUT_DIR: "${{ github.workspace }}/build/contracts"
run: |
npm --prefix="${CONTRACTS_BUILD_DIR}" ci > /dev/null || (>&2 echo "[balena_lib_build_contracts]: npm failed installing dependencies" && exit 1)
NODE_PATH="${CONTRACTS_BUILD_DIR}/node_modules" ${NODE} "${CONTRACTS_BUILD_DIR}/generate-oscontracts.js" > /dev/null
if [ -f "${CONTRACTS_OUTPUT_DIR}/${DEVICE_TYPE_SLUG}/balena-os/balena.yml" ]; then
echo "${CONTRACTS_OUTPUT_DIR}/${DEVICE_TYPE_SLUG}/balena-os/balena.yml"
else
>&2 echo "[balena_lib_build_contracts]: Failed to build OS contract for ${DEVICE_TYPE_SLUG}"
exit 1
fi
# Move newly generated OS contract to location expected later on in the workflow
cp "${CONTRACTS_OUTPUT_DIR}/${DEVICE_TYPE_SLUG}/balena-os/balena.yml" "${WORKSPACE}/balena.yml"
# Causes tarballs of the source control repositories (e.g. Git repositories), including metadata, to be placed in the DL_DIR directory.
# https://docs.yoctoproject.org/4.0.5/ref-manual/variables.html?highlight=compress#term-BB_GENERATE_MIRROR_TARBALLS
- name: Enable mirror tarballs
run: |
BARYS_ARGUMENTS_VAR="${BARYS_ARGUMENTS_VAR} -a BB_GENERATE_MIRROR_TARBALLS=1"
echo "BARYS_ARGUMENTS_VAR=${BARYS_ARGUMENTS_VAR}" >> "${GITHUB_ENV}"
- name: Enable signed images
if: inputs.sign-image == true
env:
SIGN_API: "${{ vars.SIGN_API_URL || 'https://sign.balena-cloud.com' }}"
SIGN_API_KEY: "${{ secrets.SIGN_API_KEY }}"
SIGN_GRUB_KEY_ID: 2EB29B4CE0132F6337897F5FB8A88D1C62FCC729
SIGN_KMOD_KEY_APPEND: "${{ secrets.SIGN_KMOD_KEY_APPEND }}"
run: |
BARYS_ARGUMENTS_VAR="${BARYS_ARGUMENTS_VAR} -a SIGN_API=${SIGN_API}"
BARYS_ARGUMENTS_VAR="${BARYS_ARGUMENTS_VAR} -a SIGN_API_KEY=${SIGN_API_KEY}"
BARYS_ARGUMENTS_VAR="${BARYS_ARGUMENTS_VAR} -a SIGN_GRUB_KEY_ID=${SIGN_GRUB_KEY_ID}"
BARYS_ARGUMENTS_VAR="${BARYS_ARGUMENTS_VAR} -a SIGN_KMOD_KEY_APPEND=${SIGN_KMOD_KEY_APPEND}"
BARYS_ARGUMENTS_VAR="${BARYS_ARGUMENTS_VAR} --bitbake-args --no-setscene"
echo "BARYS_ARGUMENTS_VAR=${BARYS_ARGUMENTS_VAR}" >>"${GITHUB_ENV}"
- name: Mount shared NFS cache
if: ${{ !vars.YOCTO_CACHE_HOST && contains(fromJSON(inputs.build-runs-on), 'self-hosted') }}
continue-on-error: true
id: jenkins-nfs
env:
YOCTO_CACHE_HOST: ${{ vars.YOCTO_CACHE_HOST }}
MOUNTPOINT: ${{ github.workspace }}/nfs/yocto
run: |
sudo mkdir -p "${MOUNTPOINT}"
sudo chown -R "$(id -u):$(id -g)" "${MOUNTPOINT}"
sudo mount -t nfs "${YOCTO_CACHE_HOST}:/" "${MOUNTPOINT}" -o fsc,nolock
# https://wiki.yoctoproject.org/wiki/Enable_sstate_cache
# https://docs.yoctoproject.org/4.0.10/ref-manual/variables.html#term-MIRRORS
# https://docs.yoctoproject.org/4.0.10/ref-manual/variables.html#term-PREMIRRORS
# https://docs.yoctoproject.org/4.0.10/ref-manual/variables.html#term-SSTATE_MIRRORS
# https://docs.yoctoproject.org/4.0.10/overview-manual/concepts.html#source-mirror-s
# https://docs.yoctoproject.org/4.0.10/ref-manual/classes.html?highlight=source_mirror#own-mirrors-bbclass
# https://github.com/openembedded/openembedded/blob/master/classes/own-mirrors.bbclass
# https://github.com/openembedded/openembedded/blob/master/classes/mirrors.bbclass
- name: Add NFS shared-downloads to PREMIRRORS
if: steps.jenkins-nfs.outcome == 'success'
env:
# Relative to the build container working dir, not the workspace
SOURCE_MIRROR_URL: file:///work/nfs/yocto/runner/shared-downloads/
SSTATE_MIRROR_URL: file:///work/nfs/yocto/runner/${{ inputs.machine }}/sstate/PATH
run: |
mkdir -p "$(dirname "${AUTO_CONF_FILE}")"
cat <<EOF >> "${AUTO_CONF_FILE}"
PREMIRRORS:prepend = "\\
cvs://.*/.* ${SOURCE_MIRROR_URL} \\
svn://.*/.* ${SOURCE_MIRROR_URL} \\
git://.*/.* ${SOURCE_MIRROR_URL} \\
hg://.*/.* ${SOURCE_MIRROR_URL} \\
bzr://.*/.* ${SOURCE_MIRROR_URL} \\
https?$://.*/.* ${SOURCE_MIRROR_URL} \\
ftp://.*/.* ${SOURCE_MIRROR_URL} \\
"
EOF
cat "${AUTO_CONF_FILE}"
# https://docs.yoctoproject.org/4.0.10/ref-manual/classes.html?highlight=source_mirror#own-mirrors-bbclass
# https://github.com/openembedded/openembedded/blob/master/classes/own-mirrors.bbclass
# The own-mirrors class makes it easier to set up your own PREMIRRORS from which to first fetch source before
# attempting to fetch it from the upstream specified in SRC_URI within each recipe.
- name: Add S3 shared-downloads to PREMIRRORS
env:
SOURCE_MIRROR_URL: https://${{ vars.AWS_S3_BUCKET || vars.S3_BUCKET }}.s3.${{ vars.AWS_REGION || 'us-east-1' }}.amazonaws.com/shared-downloads/
run: |
mkdir -p "$(dirname "${AUTO_CONF_FILE}")"
cat <<EOF >> "${AUTO_CONF_FILE}"
INHERIT += "own-mirrors"
SOURCE_MIRROR_URL = "${SOURCE_MIRROR_URL}"
EOF
cat "${AUTO_CONF_FILE}"
# https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows
# https://github.com/actions/cache/blob/main/README.md#creating-a-cache-key
# https://github.com/actions/cache
# https://github.com/actions/cache/blob/main/restore/README.md
# Caches are scoped to the current branch context, with fallback to the default branch context.
# GitHub will remove any cache entries that have not been accessed in over 7 days.
# There is no limit on the number of caches you can store, but the total size of all caches in a repository is limited to 10 GB.
# Once a repository has reached its maximum cache storage, the cache eviction policy will create space by deleting the oldest caches in the repository.
- name: Restore sstate cache
id: cache-restore
uses: actions/cache/restore@v4
with:
path: ${{ github.workspace }}/shared/${{ inputs.machine }}/sstate
key: ${{ inputs.machine }}-sstate-${{ github.sha }}
restore-keys: |
${{ inputs.machine }}-sstate-
# All preparation is complete before this step
# Start building balenaOS
# We use the BALENA_API_DEPLOY_KEY secret to preload the supervisor image
# (why do we need a key for this? can we use a different key?)
- name: Build
id: build
env:
HELPER_IMAGE_REPO: ghcr.io/balena-os/balena-yocto-scripts
SHARED_BUILD_DIR: ${{ github.workspace }}/shared
run: |
# When building for non-x86 device types, meson may try to run newly built binaries via qemu, perhaps as a sanity check or test
# Therefore qemu must work - and our runner mmap_min_addr is set to 4096 (default, set here: https://github.com/product-os/github-runner-kernel/blob/ef5a66951599dc64bf2920d896c36c6d9eda8df6/config/5.10/microvm-kernel-x86_64-5.10.config#L858)
# Using a value of 4096 leads to issues https://gitlab.com/qemu-project/qemu/-/issues/447 so we must set it to 65536
# We do this in the workflow instead of the runner kernel as it makes this portable across runners
sysctl vm.mmap_min_addr
sudo sysctl -w vm.mmap_min_addr=65536
sysctl vm.mmap_min_addr
mkdir -p "${SHARED_BUILD_DIR}"
cat "${AUTO_CONF_FILE}"
./balena-yocto-scripts/build/balena-build.sh \
-d "${MACHINE}" \
-t "${{ secrets.BALENA_API_DEPLOY_KEY }}" \
-s "${SHARED_BUILD_DIR}" \
-g "${BARYS_ARGUMENTS_VAR}" | tee balena-build.log
if grep -R "ERROR: " build/tmp/log/*; then
exit 1
fi
if ! grep -q "Build for ${{ inputs.machine }} suceeded" balena-build.log; then
exit 1
fi
# https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows
# https://github.com/actions/cache/blob/main/README.md#creating-a-cache-key
# https://github.com/actions/cache
# https://github.com/actions/cache/blob/main/save/README.md
# Caches are scoped to the current branch context, with fallback to the default branch context.
# GitHub will remove any cache entries that have not been accessed in over 7 days.
# There is no limit on the number of caches you can store, but the total size of all caches in a repository is limited to 10 GB.
# Once a repository has reached its maximum cache storage, the cache eviction policy will create space by deleting the oldest caches in the repository.
- name: Save sstate cache
uses: actions/cache/save@v4
# Do not save cache for pull_request_target events
# as they run in the context of the main branch and would be vulnerable to cache poisoning
# https://0xn3va.gitbook.io/cheat-sheets/ci-cd/github/actions#cache-poisoning
# https://adnanthekhan.com/2024/05/06/the-monsters-in-your-build-cache-github-actions-cache-poisoning/
if: github.event_name != 'pull_request_target'
with:
path: ${{ github.workspace }}/shared/${{ inputs.machine }}/sstate
key: ${{ steps.cache-restore.outputs.cache-primary-key }}
# https://github.com/unfor19/install-aws-cli-action
- name: Setup awscli
uses: unfor19/install-aws-cli-action@e8b481e524a99f37fbd39fdc1dcb3341ab091367 # v1
# https://github.com/aws-actions/configure-aws-credentials
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
with:
role-to-assume: ${{ vars.AWS_IAM_ROLE }}
role-session-name: github-${{ github.job }}-${{ github.run_id }}-${{ github.run_attempt }}
aws-region: ${{ vars.AWS_REGION || 'us-east-1' }}
# https://github.com/orgs/community/discussions/26636#discussioncomment-3252664
mask-aws-account-id: false
# Sync shared downloads to S3 to use as a sources mirror in case original sources are not available.
# Exclude all directories and temp files as we only want the content and the .done files.
# https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/sync.html
- name: Sync shared downloads to S3
# Do not publish shared downloads for pull_request_target events to prevent cache poisoning
# Do not publish shared downloads for private device-types as the mirror is public-read
if: github.event_name != 'pull_request_target' && steps.balena-lib.outputs.is_private == 'false'
# Ignore errors for now, as we may have upload conflicts with other jobs
continue-on-error: true
env:
SHARED_DOWNLOADS_DIR: ${{ github.workspace }}/shared/shared-downloads
S3_ACL: public-read
S3_SSE: AES256
# FIXME: This should be a public bucket that does not differ between production and staging deploys
S3_URL: "s3://${{ vars.AWS_S3_BUCKET || vars.S3_BUCKET }}/shared-downloads"
S3_REGION: ${{ vars.AWS_REGION || 'us-east-1' }}
# Create a symlink from the relative container path to the workspace in order to resolve symlinks
# created in the build container runtime.
run: |
sudo ln -sf "${{ github.workspace }}" /work
ls -al "${SHARED_DOWNLOADS_DIR}/"
aws s3 sync --sse="${S3_SSE}" --acl="${S3_ACL}" "${SHARED_DOWNLOADS_DIR}/" "${S3_URL}/" \
--exclude "*/*" --exclude "*.tmp" --size-only --follow-symlinks --no-progress
# TODO: pre-install on self-hosted-runners
# Needed by the yocto job to zip artifacts - Don't remove
- name: Install zip package
run: |
sudo apt-get update
sudo apt-get install -y zip
# DEPLOY_PATH is the path that all build artifacts get exported to by "balena_deploy_artifacts"
- name: Export prepare artifacts deploy path to env
env:
DEVICE_TYPE_SLUG: ${{ steps.balena-lib.outputs.device_slug }}
VERSION: ${{ steps.balena-lib.outputs.os_version }}
run: |
echo "DEPLOY_PATH=${{ runner.temp }}/deploy/${DEVICE_TYPE_SLUG}/${VERSION}" >>"${GITHUB_ENV}"
# TODO: prepare artifacts manually to replace balena_deploy_artifacts
- name: Prepare artifacts
run: |
# DEBUG: check deploy path
echo "DEPLOY_PATH = ${DEPLOY_PATH}"
source "${automation_dir}/include/balena-deploy.inc"
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-deploy.inc#L23
balena_deploy_artifacts "${{ inputs.machine }}" "${DEPLOY_PATH}" false
- name: Encrypt artifacts
id: encrypt
if: |
github.event.repository.private != true &&
(inputs.sign-image == true || steps.balena-lib.outputs.is_private == 'true')
run: |
for artifact in ${{ env.DEPLOY_PATH }}/image/balena.img ${{ env.DEPLOY_PATH }}/balena-image.docker; do
openssl enc -v -e -aes-256-cbc -k '${{ secrets.PBDKF2_PASSPHRASE }}' -pbkdf2 -iter 310000 -md sha256 -salt -in "${artifact}" -out "${artifact}.enc"
done
echo "ENCRYPTED_EXTENSION=.enc" >>"${GITHUB_ENV}"
# https://github.com/actions/upload-artifact
# We upload only `balena.img` for use with the leviathan tests - this is the artifact that is presented to users
# We upload `balena-image.docker` for use in the HUP test suite - if we could fetch the hostapp from the draft release instead, we can remove that to save the artifact storage space
# Only upload if tests will be running - i.e on PR's only, and only on devices that tests are defined for
# Separate "flasher" and "raw" variants are not used in the testing flow
- name: Upload artifacts
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
if: github.event_name != 'push' && inputs.test_matrix
with:
name: build-artifacts
if-no-files-found: error
retention-days: 3
compression-level: 7
# ENCRYPTED_EXTENSION may be empty if the artifact is not encrypted
path: |
${{ env.DEPLOY_PATH }}/image/balena.img${{ env.ENCRYPTED_EXTENSION }}
${{ env.DEPLOY_PATH }}/balena-image.docker${{ env.ENCRYPTED_EXTENSION }}
# Separate this evaluation into its own step + output, as we use this logic in several places and it's easier to manage this way
- name: Evaluate whether to finalize release
if: steps.merge-test-result.outputs.finalize == 'true' || inputs.force-finalize
id: should-finalize
run: |
echo "finalize=true" >>"${GITHUB_OUTPUT}"
# Separate this evaluation into its own step + output, as we use this logic in several places and it's easier to manage this way
# We want to push a hostapp on push events (PR merge) or workflow dispatch
# If we deploy the hostapp, also deploy the s3 artifacts
# Force-finalize will finalize no matter what - so we want to make sure there is something to finalize - so it always triggers this step if true
- name: Evaluate whether to deploy hostapp
if: github.event_name == 'push' || github.event_name == 'workflow_dispatch' || inputs.force-finalize
id: should-deploy
run: |
echo "deploy=true" >>"${GITHUB_OUTPUT}"
# TODO: check that github.ref_name actually gives the name of the branch in workflow dispatch: https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/contexts#github-context
# This will work up until we have a rolling v20.x.x release of balenaOS
- name: Evaluate ESR
if: |
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v20')) ||
(github.event_name == 'workflow_dispatch' && startsWith(github.ref_name, '20'))
id: esr-check
run: |
echo "is-esr=true" >>"${GITHUB_OUTPUT}"
##############################
# S3 Deploy
##############################
# login required to pull private balena/balena-img image
# https://github.com/docker/login-action
- name: Login to Docker Hub
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
if: steps.should-deploy.outputs.deploy
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Prepare files for S3
if: steps.should-deploy.outputs.deploy && steps.balena-lib.outputs.deploy_artifact != 'docker-image'
env:
HELPER_IMAGE: balena/balena-img:6.20.26
# This path is different from DEPLOY_PATH due to the structure the prepare.ts expects: "/host/image/${device_slug}/${version}/..."
PREPARE_DEPLOY_PATH: ${{ runner.temp }}/deploy
run: |
docker run --rm \
-e BASE_DIR=/host/images \
-v "${PREPARE_DEPLOY_PATH}:/host/images" \
"${HELPER_IMAGE}" /usr/src/app/node_modules/.bin/ts-node /usr/src/app/scripts/prepare.ts
find "${PREPARE_DEPLOY_PATH}" -exec ls -lh {} \;
- name: Set S3 ACL to private
id: s3-acl-private
if: steps.should-deploy.outputs.deploy && steps.balena-lib.outputs.is_private != 'false'
run: echo "string=private" >>"${GITHUB_OUTPUT}"
- name: Set S3 ESR destination directory
id: s3-esr-images-dir
if: steps.should-deploy.outputs.deploy && steps.esr-check.outputs.is-esr
run: echo "string=esr-images" >>"${GITHUB_OUTPUT}"
# "If no keys are provided, but an IAM role is associated with the EC2 instance, it will be used transparently".
# https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/rm.html
# https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/cp.html
# https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/sync.html
- name: Deploy to S3
if: steps.should-deploy.outputs.deploy && steps.balena-lib.outputs.deploy_artifact != 'docker-image'
env:
S3_ACL: ${{ steps.s3-acl-private.outputs.string || 'public-read' }}
S3_SSE: AES256
S3_URL: "s3://${{ vars.AWS_S3_BUCKET || vars.S3_BUCKET }}/${{ steps.s3-esr-images-dir.outputs.string || 'images' }}"
S3_REGION: ${{ vars.AWS_REGION || 'us-east-1' }}
SLUG: ${{ steps.balena-lib.outputs.device_slug }}
VERSION: ${{ steps.balena-lib.outputs.os_version }}
SOURCE_DIR: ${{ runner.temp }}/deploy
run: |
if [ -n "$(aws s3 ls "${S3_URL}/${SLUG}/${VERSION}/")" ] && [ -z "$($S3_CMD ls "${S3_URL}/${SLUG}/${VERSION}/IGNORE")" ]; then
echo "::warning::Deployment already exists at ${S3_URL}/${VERSION}"
exit 0
fi
echo "${VERSION}" > "${SOURCE_DIR}/${SLUG}/latest"
touch "${SOURCE_DIR}/${SLUG}/${VERSION}/IGNORE"
aws s3 rm --recursive "${S3_URL}/${SLUG}/${VERSION}"
aws s3 cp --no-progress --sse="${S3_SSE}" --acl="${S3_ACL}" "${SOURCE_DIR}/${SLUG}/${VERSION}/IGNORE" "${S3_URL}/${SLUG}/${VERSION}/"
aws s3 sync --no-progress --sse="${S3_SSE}" --acl="${S3_ACL}" "${SOURCE_DIR}/${SLUG}/${VERSION}/" "${S3_URL}/${SLUG}/${VERSION}/"
aws s3 cp --no-progress --sse="${S3_SSE}" --acl=public-read "${SOURCE_DIR}/${SLUG}/latest" "${S3_URL}/${SLUG}/"
aws s3 rm "${S3_URL}/${SLUG}/${VERSION}/IGNORE"
##############################
# hostapp Deploy
##############################
- name: Check Balena CLI installation
if: steps.should-deploy.outputs.deploy
run: |
balena --version
# TODO: replace this with balena-io/deploy-to-balena-action when it supports deploy-only
# https://github.com/balena-io/deploy-to-balena-action/issues/286
- name: Deploy to balena
if: steps.should-deploy.outputs.deploy
id: deploy-hostapp
env:
# BALENA_API_DEPLOY_KEY is a secret that should be specific to the runtime environment
# It requires permissions to deploy hostApp releases, and fetch supervisor release images (via yocto recipes)
# This step should never run untrusted user code, as we have a secret in the environment
BALENAOS_TOKEN: ${{ secrets.BALENA_API_DEPLOY_KEY }}
BALENAOS_ACCOUNT: ${{ vars.HOSTAPP_ORG || 'balena_os' }}
SLUG: "${{ steps.balena-lib.outputs.device_slug }}"
APPNAME: "${{ steps.balena-lib.outputs.device_slug }}"
META_BALENA_VERSION: "${{ steps.balena-lib.outputs.meta_balena_version }}"
RELEASE_VERSION: "${{ steps.balena-lib.outputs.os_version }}"
BOOTABLE: 1
TRANSLATION: "v6"
FINAL: ${{ steps.should-finalize.outputs.finalize }}
ESR: "${{ steps.esr-check.outputs.is-esr }}"
balenaCloudEmail: # TODO: currently trying to use named API key only, its possible email/pw auth no longer has the additional privileges that it used to
balenaCloudPassword: # TODO: currently trying to use named API key only, its possible email/pw auth no longer has the additional privileges that it used to
CURL: "curl --silent --retry 10 --location --compressed"
VERSION: ${{ steps.balena-lib.outputs.os_version }}
# Used when creating a new hostapp APP - to give the relevant access to the relevant team
HOSTAPP_ACCESS_TEAM: OS%20Devs
HOSTAPP_ACCESS_ROLE: developer
run: |
set -e
## Adapted from https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/entry_scripts/balena-deploy-block.sh
## That script was executed from inside a helper image - here we're doing it inline
# load hostapp bundle and get local image reference, needed for `balena deploy`
_local_image=$(docker load -i "${DEPLOY_PATH}/balena-image.docker" | cut -d: -f1 --complement | tr -d " " )
echo "[INFO] Logging into ${API_ENV} as ${BALENAOS_ACCOUNT}"
export BALENARC_BALENA_URL="${API_ENV}"
balena login --token "${BALENAOS_TOKEN}"
if [ "$ESR" = "true" ]; then
echo "Deploying ESR release"
APPNAME="${APPNAME}-esr"
fi
if [ -f "${WORKSPACE}/balena.yml" ]; then
echo -e "\nversion: ${VERSION}" >> "${WORKSPACE}/balena.yml"
if [ "${{ inputs.sign-image }}" = "true" ]; then
sed -i '/provides:/a \ - type: sw.feature\n slug: secureboot' "${WORKSPACE}/balena.yml"
fi
fi
#DEBUG: print workspace and balena.yml
ls "${WORKSPACE}"
cat "${WORKSPACE}/balena.yml"
echo "[INFO] Deploying to ${BALENAOS_ACCOUNT}/${APPNAME}"
## Adapted from https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L373
# Get the App Id from the name
_json=$(${CURL} -XGET "https://api.${API_ENV}/${TRANSLATION}/application?\$filter=(slug%20eq%20'${BALENAOS_ACCOUNT}/${APPNAME}')" -H "Content-Type: application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}")
_appID=$(echo "${_json}" | jq --raw-output '.d[0].id')
echo "${_appID}"
# Check if app already exists if it doesn't then create a new one
if [ -z "${_appID}" ] || [ "${_appID}" = "null" ]; then
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L128
echo "Creating App"
_json=$(${CURL} -XPOST "https://api.${API_ENV}/${TRANSLATION}/application" -H "Content-Type: application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}" --data "{\"app_name\": \"${BALENAOS_ACCOUNT}/${APPNAME}\", \"device_type\": \"${APPNAME}\"}")
_appID=$(echo "${_json}" | jq --raw-output '.id' || true)
echo "${_appID}"
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L882
# This gives the relevant users access to these host apps
echo "Creating role access"
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L835
# Get the ID of the team
_json=$(${CURL} -XGET "https://api.${API_ENV}/${TRANSLATION}/team?\$select=id&\$filter=(name%20eq%20'${HOSTAPP_ACCESS_TEAM}')%20and%20(belongs_to__organization/any(o:o/handle%20eq%20'${BALENAOS_ACCOUNT}'))" -H "Content-Type:application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}")
_team_id=$(echo "${_json}" | jq -r '.d[0].id')
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L858
# Get the ID of the role
_json=$(${CURL} -XGET "https://api.${API_ENV}/${TRANSLATION}/application_membership_role?\$select=id&\$filter=name%20eq%20'${HOSTAPP_ACCESS_ROLE}'" -H "Content-Type:application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}")
_role_id=$(echo "${_json}" | jq -r '.d[0].id')
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L914
# Give the team developer access to the app
_json=$(${CURL} -XPOST "https://api.${API_ENV}/${TRANSLATION}/team_application_access" -H "Content-Type: application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}" --data "{\"team\": \"${_team_id}\", \"grants_access_to__application\": \"${_appID}\", \"application_membership_role\": \"${_role_id}\"\"}")
_id=$(echo "${_json}" | jq -r '.id')
if [ "${_id}" = "null" ]; then
>&2 echo "Failed to add ${HOSTAPP_ACCESS_ROLE} access tole to ${APPNAME}"
fi
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L207
# Set it to public
_json=$(${CURL} -XPATCH "https://api.${API_ENV}/${TRANSLATION}/application(${_appID})" -H "Content-Type: application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}" --data '{"is_public": true, "is_stored_at__repository_url": "${{ inputs.device-repo }}"}')
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L166
# Mark it as class "app"
_json=$(${CURL} -XPATCH "https://api.${API_ENV}/${TRANSLATION}/application(${_appID})" -H "Content-Type: application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}" --data '{"is_of__class": "app"}')
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L248
# Mark as host
# NOTE: this -might- be why we used the email auth in the original yocto scripts - does the API key we use have the privileges to do this?
if [ "${BOOTABLE}" = 1 ]; then
_json=$(${CURL} -XPATCH "https://api.${API_ENV}/${TRANSLATION}/application(${_appID})" -H "Content-Type: application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}" --data '{"is_host": true}')
fi
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L86
# Set esr policy
if [ "${ESR}" = true ]; then
_json=$(${CURL} -XPOST "https://api.${API_ENV}/${TRANSLATION}/application_tag" -H "Content-Type: application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}" --data "{\"application\": \"${_appID}\", \"tag_key\": \"release-policy\", \"value\": \"esr\"}")
fi
else
>&2 echo "[${APPNAME}] Application ${_appID} already exists."
fi
echo "${_appID}"
# This is a sanity check to ensure the versions in the yocto build and the contract match
if [ -f "${WORKSPACE}/balena.yml" ]; then
_contract_version=$(awk '/version:/ {print $2}' "${WORKSPACE}/balena.yml")
if [ "${_contract_version}" != "${VERSION}" ]; then
>&2 echo "balena_lib_release: Version mismatch, contract ${_contract_version} os ${VERSION}"
fi
else
>&2 echo "balena_lib_release: balena.yml contract file not present"
fi
if [ "${FINAL}" != true ]; then
status="--draft"
fi
#[ "${VERBOSE}" = "verbose" ] && _debug="--debug"
if [ -n "${_local_image}" ]; then
releaseCommit="$(BALENARC_BALENA_URL="${API_ENV}" balena deploy "${BALENAOS_ACCOUNT}/${APPNAME}" "${_local_image}" --source "${WORKSPACE}" ${status} ${_debug} | sed -n 's/.*Release: //p')"
else
releaseCommit="$(BALENARC_BALENA_URL="${API_ENV}" balena deploy "${BALENAOS_ACCOUNT}/${APPNAME}" --build --source "${WORKSPACE}" ${status} ${_debug} | sed -n 's/.*Release: //p')"
fi
[ -n "${releaseCommit}" ] && >&2 echo "Deployed ${_local_image} to ${BALENAOS_ACCOUNT}/${APPNAME} as ${status##--} at ${releaseCommit}"
echo "${releaseCommit}"
if [ -z "${releaseCommit}" ]; then
echo "[INFO] Failed to deploy to ${BALENAOS_ACCOUNT}/${APPNAME}"
exit 1
fi
# Potentially this should be split into a separate step
### Attaching assets to release ###
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/entry_scripts/balena-deploy-block.sh#L43
# find assets
_assets="$(find "${DEPLOY_PATH}" -name licenses.tar.gz) ${DEPLOY_PATH}/CHANGELOG.md"
# Get hostapp release ID - at the moment we only have the commit hash releaseCommit
_json=$(${CURL} -XGET -H "Content-type: application/json" "https://api.${API_ENV}/${TRANSLATION}/release?\$filter=commit%20eq%20%27${releaseCommit}%27" -H "Authorization: Bearer ${BALENAOS_TOKEN}")
_release_id=$(echo "${_json}" | jq -r '.d[0].id')
echo "${_release_id}"
# For use in esr tagging step
echo "release_id=${_release_id}" >>"${GITHUB_OUTPUT}"
# https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L1163
# attach each asset to release with _release_id
for _asset in ${_assets}; do
if [ -f "${_asset}" ]; then
_asset_key=$(basename "${_asset}")
# note: this uses the "resin" endpoint rather than v6
_json=$(${CURL} -XPOST "https://api.${API_ENV}/resin/release_asset" -H "Authorization: Bearer ${BALENAOS_TOKEN}" --form "release=${_release_id}" --form "asset_key=${_asset_key}" --form "asset=@${_asset}")
_aid=$(echo "${_json}" | jq -r '.id')
echo "${_aid}"
if [ -n "${_aid}" ]; then
echo "[INFO] Added ${_asset} with ID ${_aid} to release ${releaseCommit}"
else
echo "[ERROR] Failed to add ${_asset} to release ${releaseCommit}"
exit 1
fi
fi
done
- name: Tag ESR release
if: steps.should-deploy.outputs.deploy && steps.esr-check.outputs.is-esr && steps.should-finalize.outputs.finalize
env:
# Needed for the curl calls below - same deploy key used in the deploy-hostapp step
BALENAOS_TOKEN: ${{ secrets.BALENA_API_DEPLOY_KEY }}
BALENAOS_ACCOUNT: ${{ vars.HOSTAPP_ORG || 'balena_os' }}
SLUG: "${{ steps.balena-lib.outputs.device_slug }}"
APPNAME: "${{ steps.balena-lib.outputs.device_slug }}-esr"
META_BALENA_VERSION: "${{ steps.balena-lib.outputs.meta_balena_version }}"
TRANSLATION: "v6"
CURL: "curl --silent --retry 10 --location --compressed"
VERSION: ${{ steps.balena-lib.outputs.os_version }}
HOSTAPP_RELEASE_ID: ${{ steps.deploy-hostapp.outputs.release_id }}
Q1ESR: "1|01"
Q2ESR: "4|04"
Q3ESR: "7|07"
Q4ESR: "10"
run: |
set -e
## Adapted from https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-lib.inc
_regex="^[1-3][0-9]{3}\.(${Q1ESR}|${Q2ESR}|${Q3ESR}|${Q4ESR})\.[0-9]*$"
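# e.g. this accepts ESR versions like 2024.1.2 or 2024.10.0 and rejects 2024.3.0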
if ! echo "${VERSION}" | grep -Eq "${_regex}"; then
>&2 echo "Invalid ESR release ${VERSION}"
exit 1
fi
BALENARC_BALENA_URL=${API_ENV} balena tag set version "${VERSION}" --release "${HOSTAPP_RELEASE_ID}"
BALENARC_BALENA_URL=${API_ENV} balena tag set meta-balena-base "${META_BALENA_VERSION}" --release "${HOSTAPP_RELEASE_ID}"
_x_version="${VERSION%.*}.x"
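# e.g. VERSION=2024.7.1 gives _x_version=2024.7.x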
_json=$(${CURL} -XGET "https://api.${API_ENV}/${TRANSLATION}/application_tag?\$select=tag_key,value&\$filter=(application/app_name%20eq%20%27${APPNAME}%27)%20and%20(tag_key%20eq%20%27esr-current%27)" -H "Content-Type: application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}")
last_current=$(echo "${_json}" | jq -r -e '.d[0].value') || true
_json=$(${CURL} -XGET "https://api.${API_ENV}/${TRANSLATION}/application_tag?\$select=tag_key,value&\$filter=(application/app_name%20eq%20%27${APPNAME}%27)%20and%20(tag_key%20eq%20%27esr-sunset%27)" -H "Content-Type: application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}")
last_sunset=$(echo "${_json}" | jq -r -e '.d[0].value') || true
_json=$(${CURL} -XGET "https://api.${API_ENV}/${TRANSLATION}/application_tag?\$select=tag_key,value&\$filter=(application/app_name%20eq%20%27${APPNAME}%27)%20and%20(tag_key%20eq%20%27esr-next%27)" -H "Content-Type: application/json" -H "Authorization: Bearer ${BALENAOS_TOKEN}")
last_next=$(echo "${_json}" | jq -r -e '.d[0].value') || true
if [ "${last_current}" = "null" ]; then
echo "[INFO][${BALENAOS_ACCOUNT}/${APPNAME}] Tagging fleet with esr-current: ${_x_version}"
BALENARC_BALENA_URL=${API_ENV} balena tag set esr-current "${_x_version}" --fleet "${BALENAOS_ACCOUNT}/${APPNAME}"
elif [ "${last_sunset}" = "null" ]; then
if [ "${last_next}" = "null" ]; then
echo "[INFO][${BALENAOS_ACCOUNT}/${APPNAME}] Tagging fleet with esr-next: ${_x_version}"
BALENARC_BALENA_URL=${API_ENV} balena tag set esr-next "${_x_version}" --fleet "${BALENAOS_ACCOUNT}/${APPNAME}"
else
# Only re-tag if deploying a new x version
if [ "${_x_version}" != "${last_next}" ]; then
echo "[INFO][${BALENAOS_ACCOUNT}/${APPNAME}] Tagging fleet with esr-next: ${_x_version} esr-current: ${last_next} esr-sunset: ${last_current}"
BALENARC_BALENA_URL=${API_ENV} balena tag set esr-next "${_x_version}" --fleet "${BALENAOS_ACCOUNT}/${APPNAME}"
BALENARC_BALENA_URL=${API_ENV} balena tag set esr-current "${last_next}" --fleet "${BALENAOS_ACCOUNT}/${APPNAME}"
BALENARC_BALENA_URL=${API_ENV} balena tag set esr-sunset "${last_current}" --fleet "${BALENAOS_ACCOUNT}/${APPNAME}"
fi
fi
else
if [ "${last_next}" = "null" ]; then
>&2 echo "Invalid fleet tags: current: ${last_current} next: ${last_next} sunset: ${last_sunset}"
exit 1
else
# Only re-tag if deploying a new x version
if [ "${_x_version}" != "${last_next}" ]; then
echo "[INFO][${BALENAOS_ACCOUNT}/${APPNAME}] Tagging fleet with esr-next: ${_x_version} esr-current: ${last_next} esr-sunset: ${last_current}"
BALENARC_BALENA_URL=${API_ENV} balena tag set esr-next "${_x_version}" --fleet "${BALENAOS_ACCOUNT}/${APPNAME}"
BALENARC_BALENA_URL=${API_ENV} balena tag set esr-current "${last_next}" --fleet "${BALENAOS_ACCOUNT}/${APPNAME}"
BALENARC_BALENA_URL=${API_ENV} balena tag set esr-sunset "${last_current}" --fleet "${BALENAOS_ACCOUNT}/${APPNAME}"
fi
fi
fi
# TODO: AMI releases are currently completely broken - pending investigation
##############################
# AMI Deploy
##############################
# - name: Set AMI arch
# id: ami-arch
# if: inputs.deploy-ami == true
# run: |
# if [ "${dt_arch}" = "amd64" ]; then
# echo "string=x86_64" >>"${GITHUB_OUTPUT}"
# elif [ "${dt_arch}" = "aarch64" ]; then
# echo "string=arm64" >>"${GITHUB_OUTPUT}"
# fi
# # AMI name format: balenaOS(-installer?)(-secureboot?)-VERSION-DEVICE_TYPE
# - name: Set AMI name
# id: ami-name
# if: inputs.deploy-ami == true
# run: |
# if [ "${{ inputs.sign-image }}" = "true" ]; then
# echo "string=balenaOS-secureboot-${VERSION}-${MACHINE}" >>"${GITHUB_OUTPUT}"
# else
# echo "string=balenaOS-${VERSION}-${MACHINE}" >>"${GITHUB_OUTPUT}"
# fi
# - name: Pull helper image
# id: ami-helper-image
# if: inputs.deploy-ami == true
# env:
# HELPER_IMAGE_REPO: ghcr.io/balena-os/balena-yocto-scripts
# YOCTO_SCRIPTS_VERSION: ${{ steps.balena-lib.outputs.yocto_scripts_version }}
# YOCTO_SCRIPTS_REF: ${{ steps.balena-lib.outputs.yocto_scripts_ref }}
# HELPER_IMAGE_VARIANT: yocto-build-env
# run: |
# image_tag="${HELPER_IMAGE_REPO}:${YOCTO_SCRIPTS_VERSION}-${HELPER_IMAGE_VARIANT}"
# if ! docker pull "${image_tag}"; then
# image_tag="${HELPER_IMAGE_REPO}:${YOCTO_SCRIPTS_REF}-${HELPER_IMAGE_VARIANT}"
# docker pull "${image_tag}"
# fi
# image_id="$(docker images --format "{{.ID}}" "${image_tag}")"
# echo "id=${image_id}" >>"${GITHUB_OUTPUT}"
# - name: Deploy AMI
# if: inputs.deploy-ami == true
# env:
# AWS_DEFAULT_REGION: "${{ vars.AWS_REGION || 'us-east-1' }}"
# S3_BUCKET: "${{ vars.AWS_S3_BUCKET || vars.S3_BUCKET }}"
# AWS_SESSION_TOKEN: "" # only required if MFA is enabled
# AWS_SUBNET_ID: ${{ vars.AWS_SUBNET || 'subnet-02d18a08ea4058574' }}
# AWS_SECURITY_GROUP_ID: ${{ vars.AWS_SECURITY_GROUP || 'sg-057937f4d89d9d51c' }}
# BALENACLI_TOKEN: ${{ secrets.BALENA_API_DEPLOY_KEY }}
# HOSTOS_VERSION: "${{ steps.balena-lib.outputs.os_version }}"
# AMI_NAME: "${{ steps.ami-name.outputs.string }}"
# AMI_ARCHITECTURE: "${{ steps.ami-arch.outputs.string }}"
# AMI_SECUREBOOT: "${{ inputs.sign-image }}"
# BALENA_PRELOAD_APP: "balena_os/cloud-config-${{ steps.ami-arch.outputs.string }}"
# BALENA_PRELOAD_COMMIT: current
# IMAGE: ${{ runner.temp }}/deploy/image/balena.img
# run: |
# docker run --rm -t \
# --privileged \
# --network host \
# -v "${WORKSPACE}:${WORKSPACE}" \
# -v /var/run/docker.sock:/var/run/docker.sock \
# -e VERBOSE \
# -e AWS_ACCESS_KEY_ID \
# -e AWS_SECRET_ACCESS_KEY \
# -e AWS_DEFAULT_REGION \
# -e AWS_SESSION_TOKEN \
# -e AMI_NAME \
# -e AMI_ARCHITECTURE \
# -e AMI_SECUREBOOT \
# -e S3_BUCKET \
# -e BALENA_PRELOAD_APP \
# -e BALENARC_BALENA_URL \
# -e BALENACLI_TOKEN \
# -e BALENA_PRELOAD_COMMIT \
# -e IMAGE \
# -e MACHINE \
# -e HOSTOS_VERSION \
# -e AWS_SUBNET_ID \
# -e AWS_SECURITY_GROUP_ID \
# -w "${WORKSPACE}" \
# "${{ steps.ami-helper-image.outputs.id }}" /balena-generate-ami.sh
##############################
# Leviathan Test
##############################
test:
name: Test
needs: build
# Specify the runner type in the test_matrix input.
# QEMU workers need ["self-hosted", "X64", "kvm"] or ["self-hosted", "ARM64", "kvm"] runners.
# Testbot workers can use any GitHub hosted (ubuntu-latest) or self-hosted runner.
# Default to self-hosted X64 with KVM for now to align with Jenkins but in the future
# we should consider using GitHub hosted runners for the testbot workers.
runs-on: ${{ matrix.runs_on || fromJSON('["self-hosted", "X64", "kvm"]') }}
environment: ${{ matrix.environment }}
# https://docs.github.com/en/actions/learn-github-actions/expressions#functions
# this expression checks to make sure at least one test suite was provided via either matrix syntax
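# As an example, with the include-style matrix shown in the inputs above,
# fromJSON(inputs.test_matrix).include.*.test_suite yields ["os", "cloud", "hup"],
# so the join is non-empty and the test job runs.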
if: |
github.event_name != 'push' &&
(
join(fromJSON(inputs.test_matrix).test_suite) != '' ||
join(fromJSON(inputs.test_matrix).include.*.test_suite) != ''
)
defaults:
run:
working-directory: .
shell: bash --noprofile --norc -eo pipefail -x {0}
strategy:
fail-fast: false
matrix: ${{ fromJSON(inputs.test_matrix) }}
env:
# Variables provided via the selected GitHub environment
BALENACLOUD_API_URL: ${{ vars.BALENA_HOST || matrix.environment || 'balena-cloud.com' }}
BALENACLOUD_SSH_PORT: ${{ vars.BALENACLOUD_SSH_PORT || '22' }}
BALENACLOUD_SSH_URL: ${{ vars.BALENACLOUD_SSH_URL || 'ssh.balena-devices.com' }}
# Settings specific to this test run.
# Generally provided via inputs.test_matrix but sane defaults are also provided.
DEVICE_TYPE: ${{ needs.build.outputs.device_slug }}
TEST_SUITE: ${{ matrix.test_suite }}
WORKER_TYPE: ${{ matrix.worker_type || 'testbot' }}
BALENACLOUD_APP_NAME: ${{ matrix.worker_fleets || 'balena/testbot-rig,balena/testbot-rig-partners,balena/testbot-rig-x86,balena/testbot-rig-partners-x86' }}
BALENACLOUD_ORG: ${{ matrix.test_org || 'testbot' }}
# Local directories
WORKSPACE: ${{ github.workspace }}/leviathan-workspace
REPORTS: ${{ github.workspace }}/reports
LEVIATHAN_ROOT: ${{ github.workspace }}/layers/meta-balena/tests/leviathan
SUITES: ${{ github.workspace }}/layers/meta-balena/tests/suites
# QEMU settings
QEMU_CPUS: 4
QEMU_MEMORY: "1G"
steps:
# https://github.com/actions/create-github-app-token
# Owner input to make token valid for all repositories in the org
# This behaviour is required for private submodules
- name: Create GitHub App installation token
uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0
id: app-token
if: vars.FLOWZONE_APP_ID != ''
with:
app-id: ${{ vars.FLOWZONE_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
# Generate another app token for the balena-io organization
# so we can checkout private contracts
# https://github.com/actions/create-github-app-token
- name: Create GitHub App installation token (balena-io)
uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0
id: app-token-balena-io
if: vars.FLOWZONE_APP_ID != ''
with:
app-id: ${{ vars.FLOWZONE_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
owner: balena-io
# Clone the device repository to fetch Leviathan
# https://github.com/actions/checkout
- name: Clone device repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: ${{ inputs.device-repo }}
token: ${{ steps.app-token.outputs.token || secrets.GITHUB_TOKEN }}
ref: ${{ inputs.device-repo-ref }}
submodules: recursive # We need to set this to recursive as leviathan is a submodule nested inside the meta-balena submodule of the device repo
fetch-depth: 0
fetch-tags: true
# Do not persist the app installation token credentials,
# and prefer that each step provide credentials where required
persist-credentials: false
# Check if the repository is a yocto device repository
- name: Device repository check
run: |
if [ "$(yq '.type' repo.yml)" != "yocto-based OS image" ]; then
echo "::error::Repository does not appear to be of type 'yocto-based OS image'"
exit 1
fi
# This is useful as it allows us to try out test suite changes not yet merged into meta-balena
- name: Update meta-balena submodule to ${{ inputs.meta-balena-ref }}
if: inputs.meta-balena-ref != ''
working-directory: ./layers/meta-balena
run: |
git config --add remote.origin.fetch '+refs/pull/*:refs/remotes/origin/pr/*'
git fetch --all
git checkout --force "${{ inputs.meta-balena-ref }}"
git submodule update --init --recursive
# Images need to end up in workspace folder and need to have correct names
- name: Fetch artifacts from build job
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4
with:
name: build-artifacts
path: ${{ env.WORKSPACE }}
- name: Decrypt artifacts
if: |
github.event.repository.private != true &&
(inputs.sign-image == true || needs.build.outputs.is_private == 'true')
working-directory: ${{ env.WORKSPACE }}
run: |
for artifact in *.enc **/*.enc; do
openssl enc -v -d -aes-256-cbc -k '${{ secrets.PBDKF2_PASSPHRASE }}' -pbkdf2 -iter 310000 -md sha256 -salt -in "${artifact}" -out "${artifact/.enc/}"
done
- name: Install gzip
run: |
sudo apt update
sudo apt install -y --no-install-recommends gzip
# Check out private contracts if this is a private device type - as these are required for the tests
- name: Checkout private Contracts
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: needs.build.outputs.is_private == 'true'
with:
repository: balena-io/private-contracts
token: ${{ steps.app-token-balena-io.outputs.token }}
path: ${{ env.LEVIATHAN_ROOT }}/core/private-contracts
# Do not persist the token credentials,
# and prefer that each step provide credentials where required
persist-credentials: false
# Image was uploaded uncompressed and Leviathan test config.js expects the image in a certain place and with a certain name
# The balena.img file is downloaded to ${WORKSPACE}/image/balena.img
# Moving it to where the meta-balena config.js expects
- name: Prepare workspace
run: |
mv "${WORKSPACE}/image/balena.img" "${WORKSPACE}"
gzip "${WORKSPACE}/balena.img"
cp -v "${SUITES}/${TEST_SUITE}/config.js" "${WORKSPACE}/config.js"
mkdir -p "${REPORTS}"
# Two variables are needed for secure boot tests. Check Makefile in Leviathan to trace their usage.
- name: Enable secure boot tests
# Evaluate as truthy
if: matrix.secure_boot
run: |
echo "QEMU_SECUREBOOT=1" >> "${GITHUB_ENV}"
echo "FLASHER_SECUREBOOT=1" >> "${GITHUB_ENV}"
echo "QEMU_MEMORY=4G" >> "${GITHUB_ENV}"
# https://github.com/balena-os/leviathan/blob/master/action.yml
- name: BalenaOS Leviathan Tests
uses: balena-os/leviathan@8234f44f6581cccd1c6635b55426551ff509a661 # v2.31.60
env:
# BALENA_API_TEST_KEY is a secret that should be specific to the runtime environment
# It requires permissions to manage autokit workers, and create test fleets
BALENACLOUD_API_KEY: ${{ secrets.BALENA_API_TEST_KEY }}
# This job always runs and will fail if any of the builds or tests fail.
# This way we can mark this job as required for merging PRs.
# Otherwise we would need to mark each build and test matrix, suite, etc. as required.
all_jobs:
name: All jobs
needs:
- build
- test
runs-on: ubuntu-latest
# The default condition for jobs is success(), which means that this
# job would be skipped if a previous job failed.
# Unfortunately GitHub treats skipped jobs as a pass as far as merge requirements go!
# So we override the conditions of this job to always run, and check
# the results of the previous jobs to return overall success or failure.
if: |
always()
defaults:
run:
working-directory: .
shell: bash --noprofile --norc -eo pipefail -x {0}
strategy:
fail-fast: true
matrix:
include:
- machine: ${{ inputs.machine }}
environment: ${{ inputs.deploy-environment }}
steps:
- name: Reject failed jobs
run: |
if [ "${{ contains(needs.*.result, 'failure') }}" = "true" ]
then
echo "One or more jobs have failed"
exit 1
fi
- name: Reject cancelled jobs
run: |
if [ "${{ contains(needs.*.result, 'cancelled') }}" = "true" ]
then
echo "One or more jobs were cancelled"
exit 1
fi