diff --git a/.github/actions/combine-build/action.yml b/.github/actions/combine-build/action.yml
index c20e6521f5..3c17fff60b 100644
--- a/.github/actions/combine-build/action.yml
+++ b/.github/actions/combine-build/action.yml
@@ -54,9 +54,13 @@ runs:
         username: ${{ inputs.aws_access_key_id }}
         password: ${{ inputs.aws_secret_access_key }}
 
+    - name: Set up Docker Buildx
+      uses: docker/setup-buildx-action@v3
+
     - name: Build The Combine
       run: >
         deploy/scripts/build.py
+        --arch amd64 arm64
        --components ${{ inputs.build_component }}
         --tag ${{ env.IMAGE_TAG }}
         --repo ${{ inputs.image_registry }}${{ inputs.image_registry_alias}}
diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml
index a9f830e103..31b6cdc24e 100644
--- a/.github/workflows/backend.yml
+++ b/.github/workflows/backend.yml
@@ -99,6 +99,9 @@ jobs:
   docker_build:
     if: ${{ github.event.type }} == "PullRequest"
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        arch: ["amd64", "arm64"]
     steps:
       # See https://docs.stepsecurity.io/harden-runner/getting-started/ for instructions on
       # configuring harden-runner and identifying allowed endpoints.
@@ -112,10 +115,14 @@ jobs:
             *.data.mcr.microsoft.com:443
             api.nuget.org:443
             archive.ubuntu.com:80
+            auth.docker.io:443
             dc.services.visualstudio.com:443
             deb.debian.org:80
             github.com:443
             mcr.microsoft.com:443
+            ports.ubuntu.com:80
+            production.cloudflare.docker.com:443
+            registry-1.docker.io:443
             security.ubuntu.com:80
       # For subfolders, currently a full checkout is required.
       # See: https://github.com/marketplace/actions/build-and-push-docker-images#path-context
@@ -123,11 +130,9 @@ jobs:
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           fetch-depth: 0
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
       - name: Build backend
         run: |
-          deploy/scripts/build.py --components backend
-        shell: bash
-      - name: Image digest
-        run: |
-          docker image inspect combine_backend:latest -f '{{json .Id}}'
+          deploy/scripts/build.py --components backend --arch ${{ matrix.arch }}
         shell: bash
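Note on the CI changes above: the combine-build action now does a single multi-platform build and push (--arch amd64 arm64 with --repo set), while the per-component PR workflows build one architecture per matrix leg, with the arm64 leg emulated through QEMU. For reference, with the docker CLI the updated build.py (further below in this diff) is expected to assemble a buildx invocation along these lines for the multi-platform case; the registry, image name, and tag here are placeholders:

    $ docker buildx build --platform linux/amd64,linux/arm64 --push \
          -t public.ecr.aws/thecombine/combine_backend:v1.2.3 .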
diff --git a/.github/workflows/database.yml b/.github/workflows/database.yml
index e0f14e3f6b..dc49cc1a45 100644
--- a/.github/workflows/database.yml
+++ b/.github/workflows/database.yml
@@ -17,6 +17,9 @@ jobs:
   docker_build:
     if: ${{ github.event.type }} == "PullRequest"
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        arch: ["amd64", "arm64"]
     steps:
       # See https://docs.stepsecurity.io/harden-runner/getting-started/ for instructions on
       # configuring harden-runner and identifying allowed endpoints.
@@ -36,11 +39,9 @@ jobs:
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           fetch-depth: 0
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
       - name: Build database image
         run: |
-          deploy/scripts/build.py --components database
-        shell: bash
-      - name: Image digest
-        run: |
-          docker image inspect combine_database:latest -f '{{json .Id}}'
+          deploy/scripts/build.py --components database --arch ${{ matrix.arch }}
         shell: bash
diff --git a/.github/workflows/deploy_qa.yml b/.github/workflows/deploy_qa.yml
index 0decd4877c..745ef48748 100644
--- a/.github/workflows/deploy_qa.yml
+++ b/.github/workflows/deploy_qa.yml
@@ -2,7 +2,7 @@ name: "Deploy Update to QA Server"
 on:
   push:
-    branches: [master]
+    branches: [arm, master]
 
 concurrency:
   cancel-in-progress: true
 
@@ -44,6 +44,7 @@ jobs:
            files.pythonhosted.org:443
            github.com:443
            mcr.microsoft.com:443
+           ports.ubuntu.com:80
            production.cloudflare.docker.com:443
            public.ecr.aws:443
            pypi.org:443
diff --git a/.github/workflows/deploy_release.yml b/.github/workflows/deploy_release.yml
index 511b3d4b6b..56698a6303 100644
--- a/.github/workflows/deploy_release.yml
+++ b/.github/workflows/deploy_release.yml
@@ -38,6 +38,7 @@ jobs:
            github.com:443
            mcr.microsoft.com:443
            production.cloudflare.docker.com:443
+           ports.ubuntu.com:80
            public.ecr.aws:443
            pypi.org:443
            registry-1.docker.io:443
diff --git a/.github/workflows/frontend.yml b/.github/workflows/frontend.yml
index 0b5f301e71..d0d1a2eb0e 100644
--- a/.github/workflows/frontend.yml
+++ b/.github/workflows/frontend.yml
@@ -116,6 +116,9 @@ jobs:
   docker_build:
     if: ${{ github.event.type }} == "PullRequest"
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        arch: ["amd64", "arm64"]
     steps:
       # See https://docs.stepsecurity.io/harden-runner/getting-started/ for instructions on
       # configuring harden-runner and identifying allowed endpoints.
@@ -128,6 +131,7 @@ jobs:
            auth.docker.io:443
            files.pythonhosted.org:443
            github.com:443
+           ports.ubuntu.com:80
            production.cloudflare.docker.com:443
            pypi.org:443
            registry-1.docker.io:443
@@ -136,11 +140,9 @@ jobs:
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           fetch-depth: 0
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
       - name: Build frontend
         run: |
-          deploy/scripts/build.py --components frontend
-        shell: bash
-      - name: Image digest
-        run: |
-          docker image inspect combine_frontend:latest -f '{{json .Id}}'
+          deploy/scripts/build.py --components frontend --arch ${{ matrix.arch }}
         shell: bash
diff --git a/.github/workflows/maintenance.yml b/.github/workflows/maintenance.yml
index 44caa28cab..27af79303d 100644
--- a/.github/workflows/maintenance.yml
+++ b/.github/workflows/maintenance.yml
@@ -17,6 +17,9 @@ jobs:
   docker_build:
     if: ${{ github.event.type }} == "PullRequest"
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        arch: ["amd64", "arm64"]
     steps:
       # See https://docs.stepsecurity.io/harden-runner/getting-started/ for instructions on
       # configuring harden-runner and identifying allowed endpoints.
@@ -31,6 +34,7 @@ jobs:
            auth.docker.io:443
            files.pythonhosted.org:443
            github.com:443
+           ports.ubuntu.com:80
            production.cloudflare.docker.com:443
            public.ecr.aws:443
            pypi.org:443
@@ -42,11 +46,9 @@ jobs:
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           fetch-depth: 0
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
       - name: Build maintenance image
         run: |
-          deploy/scripts/build.py --components maintenance
-        shell: bash
-      - name: Image digest
-        run: |
-          docker image inspect combine_maint:latest -f '{{json .Id}}'
+          deploy/scripts/build.py --components maintenance --arch ${{ matrix.arch }}
         shell: bash
diff --git a/Backend/Dockerfile b/Backend/Dockerfile
index 6135ad3cc0..4a46843e29 100644
--- a/Backend/Dockerfile
+++ b/Backend/Dockerfile
@@ -7,7 +7,7 @@
 ############################################################
 
 # Docker multi-stage build
-FROM mcr.microsoft.com/dotnet/sdk:8.0.409-jammy AS builder
+FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:8.0.409-jammy AS builder
 WORKDIR /app
 
 # Copy csproj and restore (fetch dependencies) as distinct layers.
@@ -46,7 +46,7 @@ RUN usermod --uid 999 --gid app \
     --comment "Docker image user" \
     app
 
-## Set up application install directory.
+# Set up application install directory.
 RUN mkdir $APP_HOME && \
     mkdir $APP_FILES && \
     # Give access to the entire home folder so the backend can create files and folders there.
diff --git a/deploy/Dockerfile b/deploy/Dockerfile
index 9872c1b1ca..188fde6793 100644
--- a/deploy/Dockerfile
+++ b/deploy/Dockerfile
@@ -3,6 +3,7 @@
 #
 # Supported Platforms:
 #   - Intel/AMD 64-bit
+#   - ARM 64-bit
 ############################################################
 
 FROM python:3.12.10-slim-bookworm
@@ -16,7 +17,7 @@ RUN apt-get update && \
     rm -rf /var/lib/apt/lists/*
 
 # Install kubectl and helm
-RUN MACH=amd64 && \
+RUN MACH=$(case $(uname -m) in *86*) echo amd64;; *aarch*) echo arm64;; *arm*) echo arm64;; esac) && \
     curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/${MACH}/kubectl" && \
     install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl && \
     curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && \
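The revised MACH assignment above keys off uname -m. A quick sanity check of the same mapping, run on the target host (the machine names in the comments are the usual kernel values):

    $ uname -m     # typically x86_64 on Intel/AMD hosts, aarch64 on 64-bit ARM
    $ MACH=$(case $(uname -m) in *86*) echo amd64;; *aarch*) echo arm64;; *arm*) echo arm64;; esac)
    $ echo ${MACH}     # prints amd64 or arm64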
diff --git a/deploy/scripts/build.py b/deploy/scripts/build.py
index b090535de0..e8630cfe67 100755
--- a/deploy/scripts/build.py
+++ b/deploy/scripts/build.py
@@ -206,6 +206,13 @@ def parse_args() -> Namespace:
         description="Build containerd container images for project.",
         formatter_class=RawFormatter,
     )
+    parser.add_argument(
+        "--arch",
+        choices=["amd64", "arm64"],
+        default=[],
+        help="Target cpu architecture(s).",
+        nargs="*",
+    )
     parser.add_argument(
         "--build-args", nargs="*", help="Build arguments to pass to the docker build."
     )
@@ -270,10 +277,12 @@ def main() -> None:
             if args.debug:
                 container_cmd.extend(["-D", "-l", "debug"])
             build_cmd = container_cmd + ["buildx", "build"]
-            push_cmd = container_cmd + ["push"]
+            build_cmd.append("--load" if args.repo is None else "--push")
         case _:
             logging.critical(f"Container CLI '{container_cmd[0]}' is not supported.")
             sys.exit(1)
+    if len(args.arch):
+        build_cmd.extend(["--platform", ",".join([f"linux/{arch}" for arch in args.arch])])
 
     # Setup build options
     if args.quiet:
@@ -306,7 +315,7 @@ def main() -> None:
             job_set[component] = JobQueue(component, debug=args.debug)
         logging.debug(f"Adding job {build_cmd + job_opts}")
         job_set[component].add_job(Job(build_cmd + job_opts, spec.dir))
-        if args.repo is not None:
+        if args.repo is not None and container_cmd[0] == "nerdctl":
             logging.debug(f"Adding job {push_cmd + [image_name]}")
             job_set[component].add_job(Job(push_cmd + [image_name], None))
         logging.info(f"Building component {component}")
diff --git a/deploy/scripts/install-combine.sh b/deploy/scripts/install-combine.sh
index 89cfdac9fd..5aac5a7bb3 100755
--- a/deploy/scripts/install-combine.sh
+++ b/deploy/scripts/install-combine.sh
@@ -83,7 +83,10 @@ install-kubernetes () {
   if [ -d "${DEPLOY_DIR}/airgap-images" ] ; then
     EXTRA_VARS="${EXTRA_VARS} -e install_airgap_images=true"
   fi
-
+  if [ $ARM == 1 ] ; then
+    EXTRA_VARS="${EXTRA_VARS} -e cpu_arch=arm64"
+  fi
+
   ansible-playbook playbook_desktop_setup.yml -K ${EXTRA_VARS} $(((DEBUG == 1)) && echo "-vv")
 }
 
@@ -143,6 +146,7 @@ install-the-combine () {
   ./setup_combine.py \
     $(((DEBUG == 1)) && echo "--debug") \
     --repo public.ecr.aws/thecombine \
+    $(((ARM == 1)) && echo "--set global.cpuArch=arm64" ) \
     --tag ${COMBINE_VERSION} \
     --target desktop \
     ${SETUP_OPTS}
@@ -200,6 +204,7 @@ CONFIG_DIR=${HOME}/.config/combine
 mkdir -p ${CONFIG_DIR}
 
 SINGLE_STEP=0
 IS_SERVER=0
+ARM=0
 DEBUG=0
 # See if we need to continue from a previous install
@@ -214,6 +219,9 @@ fi
 while (( "$#" )) ; do
   OPT=$1
   case $OPT in
+    arm)
+      ARM=1
+      ;;
     clean)
       next-state "Pre-reqs"
       if [ -f ${CONFIG_DIR}/env ] ; then
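With the installer-script changes above, arm becomes an additional positional option alongside the existing ones such as clean; when given, it forwards cpu_arch=arm64 to the k3s playbook and --set global.cpuArch=arm64 to setup_combine.py. A usage sketch, assuming the script is invoked directly from deploy/scripts rather than through the packaged installer:

    $ ./install-combine.sh arm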
""" import argparse @@ -39,6 +41,12 @@ def parse_args() -> argparse.Namespace: ) parser.add_argument("output_dir", help="Directory for the collected image files.") # Add Optional arguments + parser.add_argument( + "--arch", + choices=["amd64", "arm64"], + default="amd64", + help="Target cpu architecture.", + ) parser.add_argument( "--config", "-c", @@ -59,26 +67,30 @@ def parse_args() -> argparse.Namespace: return parser.parse_args() -def package_k3s(dest_dir: Path, *, debug: bool = False) -> None: +def package_k3s(dest_dir: Path, *, arch: str = "arm64", debug: bool = False) -> None: logging.info("Packaging k3s images.") ansible_cmd = [ "ansible-playbook", "playbook_k3s_airgapped_files.yml", "--extra-vars", f"package_dir={dest_dir}", + "--extra-vars", + f"cpu_arch={arch}", ] if debug: ansible_cmd.append("-vv") run_cmd(ansible_cmd, cwd=str(ansible_dir), print_cmd=debug, print_output=debug) -def package_images(image_list: List[str], tar_file: Path, *, debug: bool = False) -> None: +def package_images( + image_list: List[str], tar_file: Path, *, arch: str = "amd64", debug: bool = False +) -> None: container_cli_cmd = [os.getenv("CONTAINER_CLI", "docker")] if container_cli_cmd[0] == "nerdctl": container_cli_cmd.extend(["--namespace", "k8s.io"]) # Pull each image - pull_cmd = container_cli_cmd + ["pull"] + pull_cmd = container_cli_cmd + ["pull", f"--platform=linux/{arch}"] for image in image_list: run_cmd(pull_cmd + [image], print_cmd=debug, print_output=debug) @@ -94,7 +106,13 @@ def package_images(image_list: List[str], tar_file: Path, *, debug: bool = False def package_middleware( - config_file: str, *, cluster_type: str, image_dir: Path, chart_dir: Path, debug: bool = False + config_file: str, + *, + cluster_type: str, + image_dir: Path, + chart_dir: Path, + arch: str = "amd64", + debug: bool = False, ) -> None: logging.info("Packaging middleware images.") @@ -149,11 +167,13 @@ def package_middleware( middleware_images.append(match.group(1)) logging.debug(f"Middleware images: {middleware_images}") - out_path = image_dir / "middleware-airgap-images-amd64.tar" - package_images(middleware_images, out_path, debug=debug) + out_path = image_dir / f"middleware-airgap-images-{arch}.tar" + package_images(middleware_images, out_path, arch=arch, debug=debug) -def package_thecombine(tag: str, image_dir: Path, *, debug: bool = False) -> None: +def package_thecombine( + tag: str, image_dir: Path, *, arch: str = "amd64", debug: bool = False +) -> None: logging.info(f"Packaging The Combine version {tag}.") logging.debug("Create helm charts from templates") combine_charts.generate(tag) @@ -185,8 +205,8 @@ def package_thecombine(tag: str, image_dir: Path, *, debug: bool = False) -> Non logging.debug(f"Combine images: {combine_images}") # Logout of AWS to allow pulling the images - out_path = image_dir / "combine-airgap-images-amd64.tar" - package_images(combine_images, out_path, debug=debug) + out_path = image_dir / f"combine-airgap-images-{arch}.tar" + package_images(combine_images, out_path, arch=arch, debug=debug) def main() -> None: @@ -207,15 +227,16 @@ def main() -> None: os.environ["AWS_DEFAULT_REGION"] = "" # Update helm repos - package_k3s(image_dir, debug=args.debug) + package_k3s(image_dir, arch=args.arch, debug=args.debug) package_middleware( args.config, cluster_type="standard", image_dir=image_dir, chart_dir=chart_dir, + arch=args.arch, debug=args.debug, ) - package_thecombine(args.tag, image_dir, debug=args.debug) + package_thecombine(args.tag, image_dir, arch=args.arch, debug=args.debug) if 
__name__ == "__main__": diff --git a/installer/make-combine-installer.sh b/installer/make-combine-installer.sh index a86523f036..1fbc35ad9a 100755 --- a/installer/make-combine-installer.sh +++ b/installer/make-combine-installer.sh @@ -16,12 +16,16 @@ error () { # cd to the directory where the script is installed SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +ARM=0 DEBUG=0 NET_INSTALL=0 # Parse arguments to customize installation while (( "$#" )) ; do OPT=$1 case $OPT in + --arm) + ARM=1 + ;; --debug) DEBUG=1 ;; @@ -64,7 +68,7 @@ if [[ $NET_INSTALL == 0 ]] ; then info "Packaging low-bandwidth installer for $COMBINE_VERSION." TEMP_DIR=/tmp/images-$$ pushd scripts - ./package_images.py ${COMBINE_VERSION} ${TEMP_DIR} $((( DEBUG == 1 )) && echo "--debug") + ./package_images.py ${COMBINE_VERSION} ${TEMP_DIR} $((( ARM == 1 )) && echo "--arch arm64") $((( DEBUG == 1 )) && echo "--debug") INSTALLER_NAME="combine-installer.run" popd else @@ -82,7 +86,7 @@ for DIR in venv scripts/__pycache__ ; do done cd ${SCRIPT_DIR} -makeself $((( DEBUG == 0)) && echo "--tar-quietly" ) ../deploy ${INSTALLER_NAME} "Combine Installer" scripts/install-combine.sh ${COMBINE_VERSION} +makeself $((( DEBUG == 0)) && echo "--tar-quietly" ) ../deploy ${INSTALLER_NAME} "Combine Installer" scripts/install-combine.sh ${COMBINE_VERSION} $((( ARM == 1 )) && echo "arm") if [[ $NET_INSTALL == 0 ]] ; then makeself $((( DEBUG == 0)) && echo "--tar-quietly" ) --append ${TEMP_DIR} ${INSTALLER_NAME} rm -rf ${TEMP_DIR}