diff --git a/.github/workflows/development.yml b/.github/workflows/development.yml index 925ed42..7e8cc3d 100644 --- a/.github/workflows/development.yml +++ b/.github/workflows/development.yml @@ -3,22 +3,19 @@ name: development on: pull_request: push: - branches: + branches: - '!main' # excludes main - '!master' # excludes master env: TARGET_PLATFORMS: linux/amd64,linux/arm64 + COMPOSE_DOCKER_CLI_BUILD: '1' + DOCKER_BUILDKIT: '1' jobs: build_images: name: "Create runtime and buildroot OCI Images" - services: - registry: - image: registry:3 - ports: - - 5000:5000 strategy: fail-fast: true matrix: @@ -34,6 +31,7 @@ jobs: - '3.20' - '3.21' - '3.22' + - '3.23' os: - 'ubuntu-latest' exclude: @@ -42,8 +40,16 @@ jobs: alpine: '3.21' - python: '3.8' alpine: '3.22' + - python: '3.8' + alpine: '3.23' + + - python: '3.9' + alpine: '3.23' + - python: '3.14' alpine: '3.20' + - python: '3.15' + alpine: '3.20' runs-on: ${{ matrix.os }} steps: @@ -51,9 +57,49 @@ jobs: name: Checkout uses: actions/checkout@v4 - + id: setup name: Setup run: | + DEBUG_OUTPUT=/dev/null + if [ x"${DEBUG_DOCKERD:-}" = x1 ]; then + DEBUG_OUTPUT="$GITHUB_STEP_SUMMARY" + fi + + setup_registry () { + >/dev/null docker pull -q docker.io/library/registry:3 + registry_id=$(docker run \ + -p5000:5000 \ + --name registry \ + --rm -d registry:3) + echo 'REGISTRY_CONTAINER_ID='$registry_id + } + setup_containerd_snapshotter () { + local _sudo=sudo + if [ $(id -u) -eq 0 ]; then + _sudo=sudo + fi + if ! [ -f /etc/docker/daemon.json ]; then + eval $_sudo mkdir -p /etc/docker + echo '{}' | >/dev/null $_sudo tee /etc/docker/daemon.json + fi + cat /etc/docker/daemon.json | jq '. | .+{"features": {"containerd-snapshotter": true}}' | >/dev/null eval $_sudo tee /etc/docker/daemon.json + printf '# Docker config\n\n```\n' + cat /etc/docker/daemon.json + printf '\n```\n\n' + eval $_sudo systemctl restart docker + sleep 1 + printf '# Docker Info\n\n```' + docker info + printf '\n```\n' + } + sudo apt-get -y install skopeo + setup_containerd_snapshotter | tee -a "$DEBUG_OUTPUT" + { + printf '## Registry info:\n\n' + setup_registry | tee -a "$GITHUB_OUTPUT" + } | tee -a "$DEBUG_OUTPUT" + - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -61,7 +107,10 @@ jobs: name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 with: + platforms: ${{ env.TARGET_PLATFORMS }} driver-opts: network=host + buildkitd-flags: '--allow-insecure-entitlement security.insecure --allow-insecure-entitlement network.host' + - id: image_env run: | @@ -71,30 +120,36 @@ jobs: '${{ github.repository_owner }}' \ 'localhost:5000' - docker pull "${SOURCE_IMAGE}" || true + docker pull -q "${SOURCE_IMAGE}" || true echo "IMAGE_HOME=$(mktemp -d)" >> "$GITHUB_OUTPUT" - echo ALPINE_VERSION="${ALPINE_VERSION}" >> "$GITHUB_OUTPUT" - echo PYTHON_VERSION="${PYTHON_VERSION}" >> "$GITHUB_OUTPUT" - echo SOURCE_IMAGE="${SOURCE_IMAGE}" >> "$GITHUB_OUTPUT" - echo IMAGE_TAG="${IMAGE_TAG}" >> "$GITHUB_OUTPUT" - echo IMAGE_BUILDROOT_TAG="${IMAGE_TAG}-buildroot" >> "$GITHUB_OUTPUT" - echo IMAGE_TAG_SAFE="$(echo "$IMAGE_TAG" | base64 -w 0 )" >> "$GITHUB_OUTPUT" - echo IMAGE_BUILDROOT_TAG_SAFE="$(echo "${IMAGE_TAG}-buildroot" | base64 -w 0 )" >> "$GITHUB_OUTPUT" - echo REPOSITORY="${REPOSITORY}" >> "$GITHUB_OUTPUT" - echo REPOSITORY_SAFE="$(echo "${REPOSITORY}" | base64 -w 0 )" >> "$GITHUB_OUTPUT" - echo BASE_IMAGE_DIGEST="$(digest_of "$SOURCE_IMAGE")" >> "$GITHUB_OUTPUT" - echo 'IMAGE_DESCRIPTION=${{ github.event.repository.description }}. 
See ${{ github.server_url }}/${{ github.repository }} for more info.' >> "$GITHUB_OUTPUT" + echo ALPINE_VERSION="${ALPINE_VERSION}"| tee -a "$GITHUB_OUTPUT" + echo PYTHON_VERSION="${PYTHON_VERSION}"| tee -a "$GITHUB_OUTPUT" + echo SOURCE_IMAGE="${SOURCE_IMAGE}"| tee -a "$GITHUB_OUTPUT" + echo IMAGE_TAG="${IMAGE_TAG}"| tee -a "$GITHUB_OUTPUT" + echo IMAGE_BUILDROOT_TAG="${IMAGE_TAG}-buildroot"| tee -a "$GITHUB_OUTPUT" + echo IMAGE_TAG_SAFE="$(echo "$IMAGE_TAG" | base64 -w 0 )"| tee -a "$GITHUB_OUTPUT" + echo IMAGE_BUILDROOT_TAG_SAFE="$(echo "${IMAGE_TAG}-buildroot" | base64 -w 0 )"| tee -a "$GITHUB_OUTPUT" + echo 'EXAMPLE_IMAGE_TAG='"${IMAGE_TAG}-example1" | tee -a "$GITHUB_OUTPUT" + echo EXAMPLE_IMAGE_TAG_SAFE="$(echo "${IMAGE_TAG}-example1" | base64 -w 0 )"| tee -a "$GITHUB_OUTPUT" + echo REPOSITORY="${REPOSITORY}"| tee -a "$GITHUB_OUTPUT" + echo REPOSITORY_SAFE="$(echo "${REPOSITORY}" | base64 -w 0 )"| tee -a "$GITHUB_OUTPUT" + echo BASE_IMAGE_DIGEST="$(digest_of "$SOURCE_IMAGE")"| tee -a "$GITHUB_OUTPUT" + echo 'IMAGE_DESCRIPTION=${{ github.event.repository.description }}. See ${{ github.server_url }}/${{ github.repository }} for more info.' | tee -a "$GITHUB_OUTPUT" - name: Create Buildroot uses: docker/build-push-action@v6 id: buildroot + env: + DOCKER_BUILD_SUMMARY: false + DOCKER_BUILD_RECORD_UPLOAD: false with: + allow: network.host,security.insecure # this is for the bind mount push: true platforms: ${{ env.TARGET_PLATFORMS }} context: "." - file: Dockerfile.alpine + file: buildroot/Dockerfile.alpine sbom: true provenance: mode=max target: buildroot @@ -111,7 +166,6 @@ jobs: SOURCE_IMAGE=${{ steps.image_env.outputs.SOURCE_IMAGE }} BUILD_ROOT=/d tags: "${{ steps.image_env.outputs.IMAGE_TAG }}-buildroot" - outputs: type=oci,dest=${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.IMAGE_BUILDROOT_TAG_SAFE }}.tar - name: Create Image @@ -119,11 +173,13 @@ jobs: uses: docker/build-push-action@v6 env: SOURCE_DATE_EPOCH: 0 + DOCKER_BUILD_SUMMARY: false + DOCKER_BUILD_RECORD_UPLOAD: false with: push: true context: "." 
platforms: ${{ env.TARGET_PLATFORMS }} - file: Dockerfile.alpine + file: runtime/Dockerfile.alpine cache-to: | type=gha,mode=max cache-from: | @@ -136,9 +192,9 @@ jobs: BASE_IMAGE_DIGEST=${{ steps.image_env.outputs.BASE_IMAGE_DIGEST }} PYTHON_VERSION=${{ steps.image_env.outputs.PYTHON_VERSION }} SOURCE_IMAGE=${{ steps.image_env.outputs.SOURCE_IMAGE }} + BUILDROOT_IMAGE=${{ steps.image_env.outputs.IMAGE_TAG }}-buildroot@${{ steps.buildroot.outputs.digest }} BUILD_ROOT=/d tags: "${{ steps.image_env.outputs.IMAGE_TAG }}" - outputs: type=oci,dest=${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.IMAGE_TAG_SAFE }}.tar labels: ${{steps.image_env.outputs.IMAGE_LABELS}} sbom: true provenance: mode=max @@ -155,8 +211,13 @@ jobs: - name: examples/simple-flask + id: build_test uses: docker/build-push-action@v6 + env: + DOCKER_BUILD_SUMMARY: false + DOCKER_BUILD_RECORD_UPLOAD: false with: + push: true context: "examples/simple-flask" platforms: ${{ env.TARGET_PLATFORMS }} cache-from: | @@ -166,11 +227,120 @@ jobs: type=registry,ref=${{ steps.image_env.outputs.SOURCE_IMAGE }}@${{ steps.image_env.outputs.BASE_IMAGE_DIGEST }} build-args: | SOURCE_IMAGE=${{ steps.image_env.outputs.IMAGE_TAG }} - tags: "${{ steps.image_env.outputs.IMAGE_TAG }}-example1-amd64" - outputs: type=oci,dest=${{ steps.image_env.outputs.IMAGE_HOME }}/example1-${{ steps.image_env.outputs.IMAGE_TAG_SAFE }}.tar + tags: "${{ steps.image_env.outputs.IMAGE_TAG }}-example1" + - + id: test + name: Test Example + env: + IMAGE_URI: '${{ steps.image_env.outputs.EXAMPLE_IMAGE_TAG }}' + IMAGE_DIGEST: '${{ steps.build_test.outputs.digest }}' + test_command: curl -sLv 'http://localhost:8080/' + run: | + set -o pipefail + + run_test () { + local rv=0 + local T="$(mktemp)" + trap 'rm -f '"$T" RETURN + >"$T" 2>&1 eval "$@" || rv=$? + if [ $rv -eq 0 ] && case "$(tail -1 $T)" in '
<p>Hello, World!</p>') false ;; *) true ;; esac ; then + >&2 echo 'failed! got "'"$(tail -1 $T)"'" !' + rv=1 + fi + printf '`%s` (returned code: ' "$@" + printf '%s)\n```\n' "$rv" + cat "$T" + printf '\n```\n\n' + return "$rv" + } + + rc=0 + REMOTE_DIGEST="$(skopeo inspect --tls-verify=false --raw docker://"$IMAGE_URI" | skopeo manifest-digest /dev/stdin)" + if [ "$REMOTE_DIGEST" != "${IMAGE_DIGEST}" ]; then + echo 'generated a different manifest than '"${IMAGE_DIGEST}"': '"$REMOTE_DIGEST" + exit 2 + fi + docker pull -q "$(echo "$IMAGE_URI" | rev | cut -d: -f2- | rev )@${IMAGE_DIGEST}" + for platform in $(echo $TARGET_PLATFORMS | tr , '\n') + do + docker pull --platform $platform -q "$IMAGE_URI" + >&2 printf 'pulled %s (%s)\n' "$IMAGE_URI" "$platform" + done + docker pull -q "$IMAGE_URI" + { + printf '# Test\n\n' + printf '## Source Images\n\n```' + docker images --tree $IMAGE_URI | sed -r "s/\x1B\[([0-9]{1,3}(;[0-9]{1,2};?)?)?[mGK]//g" + printf '\n```\n' + } | tee -a "$GITHUB_STEP_SUMMARY" + + for platform in $(echo $TARGET_PLATFORMS | tr , '\n') + do + rc=0 + container_id="$(docker run \ + --platform $platform \ + --detach -p 8080:8080 \ + --rm $IMAGE_URI)" || rc=$? + trap 'docker stop $container_id' EXIT + if [ $rc -ne 0 ]; then + printf 'docker run failed (return code '$rc')!\n' | >&2 tee \ + -a "$GITHUB_OUTPUT" + exit $rc + fi + start_ts=$(date +%s) + until [ x$(curl \ + --silent --output /dev/null \ + -w '%{http_code}\n' --fail 'http://localhost:8080/_health') = x200 ] + do + sleep 1 + if [ "$(expr "${start_ts}" \+ 30)" -lt "$(date +%s)" ]; then + >&2 echo 'failed to get a good response in 30 seconds!' + break + fi + done + + rc=0 + printf '## Test (%s)\n\n' "$(curl 'http://localhost:8080/_arch')" | tee -a "$GITHUB_STEP_SUMMARY" + (run_test "$test_command" | tee -a "$GITHUB_STEP_SUMMARY") || rc=$?
+ if [ $rc -ne 0 ]; then + exit $rc + fi + trap -- EXIT + docker stop $container_id + done + + - id: post_build + name: 'download multiplatform images' run: | + skopeo copy \ + --src-no-creds \ + --src-tls-verify=false \ + --quiet \ + --multi-arch all \ + docker://$(echo '${{ steps.image_env.outputs.IMAGE_TAG }}' | rev | cut -d: -f2- | rev)@${{ steps.runtime.outputs.digest }} \ + oci-archive://${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.IMAGE_TAG_SAFE }}.tar \ + & + skopeo copy \ + --src-no-creds \ + --src-tls-verify=false \ + --quiet \ + --multi-arch all \ + docker://$(echo '${{ steps.image_env.outputs.IMAGE_TAG }}-buildroot' | rev | cut -d: -f2- | rev)@${{steps.buildroot.outputs.digest}} \ + oci-archive://${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.IMAGE_BUILDROOT_TAG_SAFE }}.tar \ + & + skopeo copy \ + --src-no-creds \ + --src-tls-verify=false \ + --quiet \ + --multi-arch all \ + docker://$(echo '${{ steps.image_env.outputs.EXAMPLE_IMAGE_TAG }}' | rev | cut -d: -f2- | rev)@${{steps.build_test.outputs.digest}} \ + oci-archive://${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.EXAMPLE_IMAGE_TAG_SAFE }}.tar \ + & + wait + docker stop '${{ steps.setup.outputs.REGISTRY_CONTAINER_ID }}' + IMAGE_DIGEST='${{steps.runtime.outputs.digest}}' IMAGE_BUILDROOT_DIGEST='${{steps.buildroot.outputs.digest}}' echo 'IMAGE_DIGEST='$IMAGE_DIGEST >> "GITHUB_OUTPUT" @@ -178,7 +348,9 @@ jobs: echo '${{ steps.image_env.outputs.IMAGE_TAG }}@'"$IMAGE_DIGEST" | tee -a ${{ steps.image_env.outputs.IMAGE_HOME }}/manifest.txt echo '${{ steps.image_env.outputs.IMAGE_BUILDROOT_TAG }}@'"$IMAGE_BUILDROOT_DIGEST" | tee -a ${{ steps.image_env.outputs.IMAGE_HOME }}/manifest.txt ls ${{ steps.image_env.outputs.IMAGE_HOME }} - - name: upload build + + - + name: upload build uses: actions/upload-artifact@v4 with: if-no-files-found: error @@ -188,7 +360,8 @@ jobs: ${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.IMAGE_TAG_SAFE }}.tar ${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.IMAGE_BUILDROOT_TAG_SAFE }}.tar - - name: upload build info + - + name: upload build info uses: actions/upload-artifact@v4 with: if-no-files-found: error @@ -197,7 +370,8 @@ jobs: path: | ${{ steps.image_env.outputs.IMAGE_HOME }}/*.txt - render-dockerhub-desc: + + post-build: name: "Render metadata" needs: [build_images] runs-on: "ubuntu-latest" @@ -207,12 +381,13 @@ jobs: uses: actions/checkout@v4 - uses: actions/setup-python@v6 with: - python-version: '3.12' + python-version: '3.12' + cache: 'pip' - name: install dependencies run: | python -m pip install jinplate - - + - name: fetch metadata uses: actions/download-artifact@v5 with: @@ -237,7 +412,7 @@ jobs: printf '```\n' } >>"$GITHUB_STEP_SUMMARY" - - + - name: fetch images uses: actions/download-artifact@v5 with: @@ -269,13 +444,12 @@ jobs: cat ./values.json >./README.rst python \ -m jinplate.cli \ - README.rst.tmpl file://$PWD/values.json + docs/README.rst.tmpl file://$PWD/values.json - name: Convert README.rst to markdown uses: docker://pandoc/core:3.8 with: args: >- - -s --wrap=none -t gfm -o README.md @@ -284,3 +458,6 @@ jobs: name: Print out markdown run: | cat README.md >>"$GITHUB_STEP_SUMMARY" + printf '\n\n# README.rst\n\n```\n' >> "$GITHUB_STEP_SUMMARY" + cat README.rst >> "$GITHUB_STEP_SUMMARY" + printf '\n```\n' >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f75ea04..fce4ad6 100644 --- 
a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -6,7 +6,7 @@ on: # https://crontab.guru/once-a-week - cron: "0 0 * * 0" push: - branches: + branches: - 'main' @@ -16,11 +16,6 @@ env: jobs: build_images: name: "Create runtime and buildroot OCI Images" - services: - registry: - image: registry:3 - ports: - - 5000:5000 strategy: fail-fast: true matrix: @@ -36,6 +31,7 @@ jobs: - '3.20' - '3.21' - '3.22' + - '3.23' os: - 'ubuntu-latest' exclude: @@ -44,8 +40,16 @@ jobs: alpine: '3.21' - python: '3.8' alpine: '3.22' + - python: '3.8' + alpine: '3.23' + + - python: '3.9' + alpine: '3.23' + - python: '3.14' alpine: '3.20' + - python: '3.15' + alpine: '3.20' runs-on: ${{ matrix.os }} steps: @@ -53,9 +57,47 @@ jobs: name: Checkout uses: actions/checkout@v4 - + id: setup name: Setup run: | + DEBUG_OUTPUT=/dev/null + if [ x"${DEBUG_DOCKERD:-}" = x1 ]; then + DEBUG_OUTPUT="$GITHUB_STEP_SUMMARY" + fi + + setup_registry () { + registry_id=$(docker run \ + -p5000:5000 \ + --name registry \ + --rm -d registry:3) + echo 'REGISTRY_CONTAINER_ID='$registry_id + } + setup_containerd_snapshotter () { + local _sudo=sudo + if [ $(id -u) -eq 0 ]; then + _sudo=sudo + fi + if ! [ -f /etc/docker/daemon.json ]; then + eval $_sudo mkdir -p /etc/docker + echo '{}' | >/dev/null $_sudo tee /etc/docker/daemon.json + fi + cat /etc/docker/daemon.json | jq '. | .+{"features": {"containerd-snapshotter": true}}' | >/dev/null eval $_sudo tee /etc/docker/daemon.json + printf '# Docker config\n\n```\n' + cat /etc/docker/daemon.json + printf '\n```\n\n' + eval $_sudo systemctl restart docker + sleep 1 + printf '# Docker Info\n\n```' + docker info + printf '\n```\n' + } + sudo apt-get -y install skopeo + setup_containerd_snapshotter | tee -a "$DEBUG_OUTPUT" + { + printf '## Registry info:\n\n' + setup_registry | tee -a "$GITHUB_OUTPUT" + } | tee -a "$DEBUG_OUTPUT " - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -64,6 +106,7 @@ jobs: uses: docker/setup-buildx-action@v3 with: driver-opts: network=host + - id: image_env run: | @@ -73,25 +116,30 @@ jobs: '${{ github.repository_owner }}' \ 'localhost:5000' - docker pull "${SOURCE_IMAGE}" || true + docker pull -q "${SOURCE_IMAGE}" || true echo "IMAGE_HOME=$(mktemp -d)" >> "$GITHUB_OUTPUT" - echo ALPINE_VERSION="${ALPINE_VERSION}" >> "$GITHUB_OUTPUT" - echo PYTHON_VERSION="${PYTHON_VERSION}" >> "$GITHUB_OUTPUT" - echo SOURCE_IMAGE="${SOURCE_IMAGE}" >> "$GITHUB_OUTPUT" - echo IMAGE_TAG="${IMAGE_TAG}" >> "$GITHUB_OUTPUT" - echo IMAGE_BUILDROOT_TAG="${IMAGE_TAG}-buildroot" >> "$GITHUB_OUTPUT" - echo IMAGE_TAG_SAFE="$(echo "$IMAGE_TAG" | base64 -w 0 )" >> "$GITHUB_OUTPUT" - echo IMAGE_BUILDROOT_TAG_SAFE="$(echo "${IMAGE_TAG}-buildroot" | base64 -w 0 )" >> "$GITHUB_OUTPUT" - echo REPOSITORY="${REPOSITORY}" >> "$GITHUB_OUTPUT" - echo REPOSITORY_SAFE="$(echo "${REPOSITORY}" | base64 -w 0 )" >> "$GITHUB_OUTPUT" - echo BASE_IMAGE_DIGEST="$(digest_of "$SOURCE_IMAGE")" >> "$GITHUB_OUTPUT" - echo 'IMAGE_DESCRIPTION=${{ github.event.repository.description }}. See ${{ github.server_url }}/${{ github.repository }} for more info.' 
>> "$GITHUB_OUTPUT" + echo ALPINE_VERSION="${ALPINE_VERSION}"| tee -a "$GITHUB_OUTPUT" + echo PYTHON_VERSION="${PYTHON_VERSION}"| tee -a "$GITHUB_OUTPUT" + echo SOURCE_IMAGE="${SOURCE_IMAGE}"| tee -a "$GITHUB_OUTPUT" + echo IMAGE_TAG="${IMAGE_TAG}"| tee -a "$GITHUB_OUTPUT" + echo IMAGE_BUILDROOT_TAG="${IMAGE_TAG}-buildroot"| tee -a "$GITHUB_OUTPUT" + echo IMAGE_TAG_SAFE="$(echo "$IMAGE_TAG" | base64 -w 0 )"| tee -a "$GITHUB_OUTPUT" + echo IMAGE_BUILDROOT_TAG_SAFE="$(echo "${IMAGE_TAG}-buildroot" | base64 -w 0 )"| tee -a "$GITHUB_OUTPUT" + echo 'EXAMPLE_IMAGE_TAG='"${IMAGE_TAG}-example1" | tee -a "$GITHUB_OUTPUT" + echo EXAMPLE_IMAGE_TAG_SAFE="$(echo "${EXAMPLE_IMAGE_TAG}" | base64 -w 0 )"| tee -a "$GITHUB_OUTPUT" + echo REPOSITORY="${REPOSITORY}"| tee -a "$GITHUB_OUTPUT" + echo REPOSITORY_SAFE="$(echo "${REPOSITORY}" | base64 -w 0 )"| tee -a "$GITHUB_OUTPUT" + echo BASE_IMAGE_DIGEST="$(digest_of "$SOURCE_IMAGE")"| tee -a "$GITHUB_OUTPUT" + echo 'IMAGE_DESCRIPTION=${{ github.event.repository.description }}. See ${{ github.server_url }}/${{ github.repository }} for more info.' | tee -a "$GITHUB_OUTPUT" - name: Create Buildroot uses: docker/build-push-action@v6 id: buildroot + env: + DOCKER_BUILD_SUMMARY: false + DOCKER_BUILD_RECORD_UPLOAD: false with: push: true platforms: ${{ env.TARGET_PLATFORMS }} @@ -121,6 +169,8 @@ jobs: uses: docker/build-push-action@v6 env: SOURCE_DATE_EPOCH: 0 + DOCKER_BUILD_SUMMARY: false + DOCKER_BUILD_RECORD_UPLOAD: false with: push: true context: "." @@ -158,6 +208,9 @@ jobs: - name: examples/simple-flask uses: docker/build-push-action@v6 + env: + DOCKER_BUILD_SUMMARY: false + DOCKER_BUILD_RECORD_UPLOAD: false with: context: "examples/simple-flask" platforms: ${{ env.TARGET_PLATFORMS }} @@ -170,9 +223,118 @@ jobs: SOURCE_IMAGE=${{ steps.image_env.outputs.IMAGE_TAG }} tags: "${{ steps.image_env.outputs.IMAGE_TAG }}-example1-amd64" outputs: type=oci,dest=${{ steps.image_env.outputs.IMAGE_HOME }}/example1-${{ steps.image_env.outputs.IMAGE_TAG_SAFE }}.tar + - + id: test + name: Test Example + env: + IMAGE_URI: '${{ steps.image_env.outputs.EXAMPLE_IMAGE_TAG }}' + IMAGE_DIGEST: '${{ steps.build_test.outputs.digest }}' + test_command: curl -sLv 'http://localhost:8080/' + run: | + set -o pipefail + + run_test () { + local rv=0 + local T="$(mktemp)" + trap 'rm -f '"$T" RETURN + >"$T" 2>&1 eval "$@" || rv=$? + if [ $rv -eq 0 ] && case "$(tail -1 $T)" in '
<p>Hello, World!</p>') false ;; *) true ;; esac ; then + >&2 echo 'failed! got "'"$(tail -1 $T)"'" !' + rv=1 + fi + printf '`%s` (returned code: ' "$@" + printf '%s)\n```\n' "$rv" + cat "$T" + printf '\n```\n\n' + return "$rv" + } + + rc=0 + REMOTE_DIGEST="$(skopeo inspect --tls-verify=false --raw docker://"$IMAGE_URI" | skopeo manifest-digest /dev/stdin)" + if [ "$REMOTE_DIGEST" != "${IMAGE_DIGEST}" ]; then + echo 'generated a different manifest than '"${IMAGE_DIGEST}"': '"$REMOTE_DIGEST" + exit 2 + fi + docker pull -q "$(echo "$IMAGE_URI" | rev | cut -d: -f2- | rev )@${IMAGE_DIGEST}" + for platform in $(echo $TARGET_PLATFORMS | tr , '\n') + do + docker pull --platform $platform -q "$IMAGE_URI" + >&2 printf 'pulled %s (%s)\n' "$IMAGE_URI" "$platform" + done + docker pull -q "$IMAGE_URI" + { + printf '# Test\n\n' + printf '## Source Images\n\n```' + docker images --tree $IMAGE_URI | sed -r "s/\x1B\[([0-9]{1,3}(;[0-9]{1,2};?)?)?[mGK]//g" + printf '\n```\n' + } | tee -a "$GITHUB_STEP_SUMMARY" + + for platform in $(echo $TARGET_PLATFORMS | tr , '\n') + do + rc=0 + container_id="$(docker run \ + --platform $platform \ + --detach -p 8080:8080 \ + --rm $IMAGE_URI)" || rc=$? + trap 'docker stop $container_id' EXIT + if [ $rc -ne 0 ]; then + printf 'docker run failed (return code '$rc')!\n' | >&2 tee \ + -a "$GITHUB_OUTPUT" + exit $rc + fi + start_ts=$(date +%s) + until [ x$(curl \ + --silent --output /dev/null \ + -w '%{http_code}\n' --fail 'http://localhost:8080/_health') = x200 ] + do + sleep 1 + if [ "$(expr "${start_ts}" \+ 30)" -lt "$(date +%s)" ]; then + >&2 echo 'failed to get a good response in 30 seconds!' + break + fi + done + + rc=0 + printf '## Test (%s)\n\n' "$(curl 'http://localhost:8080/_arch')" | tee -a "$GITHUB_STEP_SUMMARY" + (run_test "$test_command" | tee -a "$GITHUB_STEP_SUMMARY") || rc=$?
+ if [ $rc -ne 0 ]; then + exit $rc + fi + trap -- EXIT + docker stop $container_id + done + - id: post_build + name: 'download multiplatform images' run: | + skopeo copy \ + --src-no-creds \ + --src-tls-verify=false \ + --quiet \ + --multi-arch all \ + docker://$(echo '${{ steps.image_env.outputs.IMAGE_TAG }}' | rev | cut -d: -f2- | rev)@${{ steps.runtime.outputs.digest }} \ + oci-archive://${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.IMAGE_TAG_SAFE }}.tar \ + & + skopeo copy \ + --src-no-creds \ + --src-tls-verify=false \ + --quiet \ + --multi-arch all \ + docker://$(echo '${{ steps.image_env.outputs.IMAGE_TAG }}-buildroot' | rev | cut -d: -f2- | rev)@${{steps.buildroot.outputs.digest}} \ + oci-archive://${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.IMAGE_BUILDROOT_TAG_SAFE }}.tar \ + & + skopeo copy \ + --src-no-creds \ + --src-tls-verify=false \ + --quiet \ + --multi-arch all \ + docker://$(echo '${{ steps.image_env.outputs.EXAMPLE_IMAGE_TAG }}' | rev | cut -d: -f2- | rev)@${{steps.build_test.outputs.digest}} \ + oci-archive://${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.EXAMPLE_IMAGE_TAG_SAFE }}.tar \ + & + wait + docker stop '${{ steps.setup.outputs.REGISTRY_CONTAINER_ID }}' + IMAGE_DIGEST='${{steps.runtime.outputs.digest}}' IMAGE_BUILDROOT_DIGEST='${{steps.buildroot.outputs.digest}}' echo 'IMAGE_DIGEST='$IMAGE_DIGEST >> "GITHUB_OUTPUT" @@ -181,7 +343,8 @@ jobs: echo '${{ steps.image_env.outputs.IMAGE_BUILDROOT_TAG }}@'"$IMAGE_BUILDROOT_DIGEST" | tee -a ${{ steps.image_env.outputs.IMAGE_HOME }}/manifest.txt ls ${{ steps.image_env.outputs.IMAGE_HOME }} - - name: upload build + - + name: upload build uses: actions/upload-artifact@v4 with: if-no-files-found: error @@ -191,7 +354,8 @@ jobs: ${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.IMAGE_TAG_SAFE }}.tar ${{ steps.image_env.outputs.IMAGE_HOME }}/image-${{ steps.image_env.outputs.IMAGE_BUILDROOT_TAG_SAFE }}.tar - - name: upload build info + - + name: upload build info uses: actions/upload-artifact@v4 with: if-no-files-found: error @@ -200,61 +364,109 @@ jobs: path: | ${{ steps.image_env.outputs.IMAGE_HOME }}/*.txt - update-dockerhub-desc: + upload: name: "Upload and update metadata" needs: [build_images] - runs-on: "ubuntu-latest" + + strategy: + fail-fast: true + matrix: + registry: + - docker.io + - ghcr.io + python: + - '3.14' + - '3.13' + - '3.12' + - '3.11' + - '3.10' + - '3.9' + - '3.8' + alpine: + - '3.20' + - '3.21' + - '3.22' + - '3.23' + os: + - 'ubuntu-latest' + exclude: + # No tag + - python: '3.8' + alpine: '3.21' + - python: '3.8' + alpine: '3.22' + - python: '3.8' + alpine: '3.23' + + - python: '3.9' + alpine: '3.23' + + - python: '3.14' + alpine: '3.20' + - python: '3.15' + alpine: '3.20' + permissions: packages: write + runs-on: ${{ matrix.os }} steps: - name: Checkout uses: actions/checkout@v4 - - uses: actions/setup-python@v6 + - + uses: actions/setup-python@v6 with: - python-version: '3.12' + python-version: '3.12' + cache: 'pip' - name: install dependencies run: | sudo apt-get -y install skopeo - python -m pip install jinplate - - - - + - name: fetch images uses: actions/download-artifact@v5 with: - pattern: images-* - path: dist-images + name: images-alpine${{ matrix.alpine }}-python${{ matrix.python }} merge-multiple: true - + path: dist-images/ - - name: prep files + name: Rename files run: | - printf "Images:\n\n" >> "$GITHUB_STEP_SUMMARY" + printf 'Images:\n\n' >> "$GITHUB_STEP_SUMMARY" for 
filename in $(find dist-images -type f -name "*.tar" -print) do image_uri="$(basename -s .tar $filename | sed 's/image-//g' | base64 -d)" - for repository in docker.io ghcr.io - do - new_image_uri=$(echo $image_uri | sed 's|localhost:5000|'"$repository"'|g') - new_filename="dist-images/image-$(echo $new_image_uri | base64 -w 0).tar" - cp ${filename} ${new_filename} - echo '* `'${new_filename}'` (`'${new_image_uri}'`)' >> "$GITHUB_STEP_SUMMARY" - done + new_image_uri=$(echo $image_uri | sed 's|localhost:5000|${{ matrix.registry }}|g') + new_filename="dist-images/image-$(echo $new_image_uri | base64 -w 0).tar" + cp ${filename} ${new_filename} + printf '* `'${new_filename}'` (`'${new_image_uri}'`)\n' >> "$GITHUB_STEP_SUMMARY" rm $filename done + printf '\n' >> "$GITHUB_STEP_SUMMARY" - - name: login to registry + - name: login to dockerhub + if: ${{ matrix.registry == 'docker.io' }} env: DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - printf "${GITHUB_TOKEN}\n" | skopeo login --password-stdin --username ${{ github.repository_owner }} ghcr.io - printf "${DOCKERHUB_TOKEN}\n" | skopeo login --password-stdin --username ${{ github.repository_owner }} docker.io + printf "${DOCKERHUB_TOKEN}\n" | skopeo \ + login \ + --password-stdin \ + --username ${{ github.repository_owner }} \ + '${{ matrix.registry }}' + - name: login to ghcr + if: ${{ matrix.registry == 'ghcr.io' }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + printf "${GITHUB_TOKEN}\n" | skopeo \ + login \ + --password-stdin \ + --username ${{ github.repository_owner }} \ + '${{ matrix.registry }}' - name: upload to registry run: | @@ -330,17 +542,15 @@ jobs: -H "Accept: application/vnd.github+json" \ -H 'Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ $url)" - >&2 echo 'versions json from ('"$url"') is: '"${versions_json}" local version_id=$(echo "${versions_json}" | jq -r --arg tag "$tag" '.[] | select(.metadata.container.tags[] == $tag) | .id') local package_url="https://github.com/${repo_name}/pkgs/container/${package_name}/${version_id}?tag=${tag}" - if ! [ x$output = x -o x$output = x- ]; then + if ! [ x$output = x -o x$output = x- -o x$output = x/dev/stdout ]; then >>"$output" echo "${image_tag}@${package_url}" else echo "${image_tag}@${package_url}" fi } - printf '#Upload to registry\n\n' >> "$GITHUB_STEP_SUMMARY" num_outstanding=0 concurrency_limit=8 for filename in $(find dist-images -type f -name "*.tar" -print) do @@ -355,26 +565,12 @@ jobs: done wait echo 'Done uploading' - - - - name: aggregate metadata - run: | { - printf '### manifest.txt\n\n' - printf '```\n' + printf '# Uploaded\n\n```' cat manifest.txt - printf '```\n' + printf '```\n\n' } >>"$GITHUB_STEP_SUMMARY" - { - printf '### packages.txt\n\n' - printf '```\n' - cat packages.txt - printf '```\n' - } >>"$GITHUB_STEP_SUMMARY" - - - name: parse metadata for README - run: | + for filename in $(echo dist-images/image-*.tar) do IMAGE_URI="$(basename -s .tar $filename | sed 's/image-//g' | base64 -d)" @@ -394,16 +590,79 @@ jobs: "package_url": "'$PACKAGE_URL'" }' done | >./values.json jq -s '. 
| {"images": .}' - cat ./values.json + + - + name: Upload metadata + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + retention-days: 1 + name: upload-alpine${{ matrix.alpine }}-python${{ matrix.python }}-metadata + path: | + manifest.txt + packages.txt + values.json + + post-upload: + name: "Post Upload" + needs: [upload] + runs-on: "ubuntu-latest" + + steps: + - + name: Checkout + uses: actions/checkout@v4 + - + name: "Setup Python" + uses: actions/setup-python@v6 + with: + python-version: '3.12' + cache: 'pip' + - + name: install dependencies + run: | + python -m pip install jinplate + - + name: Fetch build metadata + uses: actions/download-artifact@v5 + with: + pattern: upload-*-metadata + path: dist-images/ + merge-multiple: false + - + name: parse metadata for README + run: | + >./manifest.txt cat $(find ./dist-images -type f -name 'manifest.txt' -print) + >./packages.txt cat $(find ./dist-images -type f -name 'packages.txt' -print) + { + printf '# manifest_file.txt\n\n```' + cat ./manifest.txt + printf '```\n\n' + printf '# packages.txt\n\n```' + cat ./packages.txt + printf '```\n\n' + printf '# values.json\n\n```' + cat \ + $(find ./dist-images -type f -name 'values.json' -print) | \ + jq -r --slurp '[.[]] | flatten' | tee ./values.json + printf '```\n\n' + } | tee -a "$GITHUB_STEP_SUMMARY" + find ./dist-images \ + -type \( -name manifest.txt -o -name packages.txt -o -name values.json \) \ + -delete >./README.rst python \ -m jinplate.cli \ - README.rst.tmpl file://$PWD/values.json + docs/README.rst.tmpl file://$PWD/values.json + { + printf "# README.rst\n\n```" + cat README.rst + printf "```\n\n" + } | tee -a "$GITHUB_STEP_SUMMARY" - - name: Convert README.rst to markdown + name: "Convert README.rst to markdown" uses: docker://pandoc/core:3.8 with: args: >- - -s --wrap=none -t gfm -o README.md @@ -412,19 +671,11 @@ jobs: name: Print out markdown run: | cat README.md >>"$GITHUB_STEP_SUMMARY" - - - name: upload build info - uses: actions/upload-artifact@v4 - with: - if-no-files-found: error - retention-days: 1 - name: readmes - path: | - README.rst - README.md - - - - name: Update repo description + printf '\n\n# README.rst\n\n```\n' >> "$GITHUB_STEP_SUMMARY" + cat README.rst >> "$GITHUB_STEP_SUMMARY" + printf '\n```\n' >> "$GITHUB_STEP_SUMMARY" + - + name: Update Dockerhub description uses: peter-evans/dockerhub-description@e98e4d1628a5f3be2be7c231e50981aee98723ae # v4.0.0 with: username: ${{ github.repository_owner }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 31cb6a5..1db0ded 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v6.0.0 hooks: - id: check-ast diff --git a/build.sh b/build.sh index d4de6eb..7892230 100755 --- a/build.sh +++ b/build.sh @@ -24,8 +24,21 @@ if ! >"$log" 2>&1 docker build \ --build-arg "BASE_IMAGE_DIGEST=${BASE_IMAGE_DIGEST}" \ --build-arg "PYTHON_VERSION=${PYTHON_VERSION}" \ --build-arg "BUILD_ROOT=/d" \ - -f Dockerfile.alpine \ - -t "$IMAGE_TAG" \ + -f buildroot/Dockerfile.alpine \ + -t "${IMAGE_TAG}-buildroot" \ + . ; then + >&2 echo 'Unable to build '"${IMAGE_TAG}-buildroot"'!' + >&2 cat "$log" + exit 8; +fi +if ! 
>"$log" 2>&1 docker build \ + --build-arg "ALPINE_VERSION=${ALPINE_VERSION}" \ + --build-arg "BASE_IMAGE_DIGEST=${BASE_IMAGE_DIGEST}" \ + --build-arg "PYTHON_VERSION=${PYTHON_VERSION}" \ + --build-arg "BUILD_ROOT=/d" \ + --build-arg "SOURCE_IMAGE=${IMAGE_TAG}-buildroot" \ + -f buildroot/Dockerfile.alpine \ + -t "${IMAGE_TAG}" \ . ; then >&2 echo 'Unable to build '"$IMAGE_TAG"'!' >&2 cat "$log" diff --git a/Dockerfile.alpine b/buildroot/Dockerfile.alpine similarity index 57% rename from Dockerfile.alpine rename to buildroot/Dockerfile.alpine index 1b86131..5c0b123 100644 --- a/Dockerfile.alpine +++ b/buildroot/Dockerfile.alpine @@ -1,12 +1,16 @@ -#syntax=docker/dockerfile:1 +# syntax=docker/dockerfile:1.20-labs + ARG ALPINE_VERSION=3.20 ARG PYTHON_VERSION=3.12 ARG SOURCE_IMAGE=docker.io/python:${PYTHON_VERSION}-alpine${ALPINE_VERSION} ARG BASE_IMAGE_DIGEST -FROM ${SOURCE_IMAGE}@${BASE_IMAGE_DIGEST} AS buildroot +FROM ${SOURCE_IMAGE}@${BASE_IMAGE_DIGEST} AS source +FROM source AS buildroot ARG PYTHON_VERSION=3.12 +ARG TARGETARCH +ARG TARGETVARIANT ARG BUILD_ROOT='/dest' ARG CACHE_ROOT='/cache' @@ -17,16 +21,17 @@ ENV BUILD_ROOT=$BUILD_ROOT \ _apk_add="/usr/bin/env apk add --root $BUILD_ROOT --no-cache" \ _apk_del="/usr/bin/env apk del --root $BUILD_ROOT --purge" \ _sh="chroot $BUILD_ROOT sh" \ - _ln="chroot $BUILD_ROOT ln" \ + _ln="chroot $BUILD_ROOT /bin/ln" \ _chroot="chroot $BUILD_ROOT" +COPY --from=source /dev /$BUILD_ROOT/dev ADD --chmod=0755 chroot-apk.sh /usr/local/bin/chroot-apk ADD --chmod=0755 chroot-pip.sh /usr/local/bin/chroot-pip ADD --chmod=0755 chroot-ln.sh /usr/local/bin/chroot-ln ADD --chmod=0755 remove-py-if-pyc-exists.sh /usr/local/bin/remove-py-if-pyc-exists -RUN set -eu ; \ - python -m pip install -U pip setuptools ; \ - # Add to buildroot: +ADD --chmod=0755 chroot-exec.sh /usr/local/bin/chroot-exec +RUN \ + set -eu ; \ $_sys_apk_add \ dash \ # TLS certs @@ -36,11 +41,6 @@ RUN set -eu ; \ # be imported from. This makes the stdlib immutable. zip \ ; \ - # remove all ``__pycache__`` directories - find /usr/local/lib/python$PYTHON_VERSION -type d -name '__pycache__' -print0 | xargs -0 rm -rf ; \ - # compile all py to an adjacent pyc and remove the original, leaving only the bytecode - python -m compileall -b /usr/local/lib/python$PYTHON_VERSION ; \ - find -type f -name '*.py' -exec sh -c "remove-py-if-pyc-exists {}" \; ;\ # make the new root: mkdir -p \ $CACHE_ROOT/ \ @@ -48,6 +48,9 @@ RUN set -eu ; \ $BUILD_ROOT/bin \ $BUILD_ROOT/usr/local/lib/python$PYTHON_VERSION/site-packages \ $BUILD_ROOT/usr/local/bin \ + $BUILD_ROOT/proc \ + $BUILD_ROOT/sys \ + $BUILD_ROOT/dev \ ; \ # copy the apk related confs cp -R /etc/apk $BUILD_ROOT/etc/apk ; \ @@ -57,68 +60,73 @@ RUN set -eu ; \ alpine-release \ musl \ libffi \ - dash \ - dash-binsh \ - coreutils-env \ ; \ + cp -p /bin/busybox $BUILD_ROOT/bin/busybox ; \ + chroot $BUILD_ROOT /bin/busybox busybox ln -sf /bin/busybox /bin/ln + +RUN --security=insecure \ + set -eu ; \ + mount -t proc none $BUILD_ROOT/proc ; \ + mount -t sysfs none $BUILD_ROOT/sys ; \ + mount --rbind /dev /$BUILD_ROOT/dev ; \ $_apk_add \ + busybox \ + dash \ + dash-binsh \ + ; \ + T=$(mktemp -d) ; \ + if [ -f $BUILD_ROOT/lib/apk/db/scripts.tar.gz ]; then \ + tar -C "$T" -xzpf $BUILD_ROOT/lib/apk/db/scripts.tar.gz ; \ + rm -f $BUILD_ROOT/lib/apk/db/scripts.tar.gz ; \ + find "$T" -name 'busybox-*' -delete ; \ + tar -C "$T" -cpvzf $BUILD_ROOT/lib/apk/db/scripts.tar.gz . 
; \ + rm -rf "$T" ; \ + fi ; \ + tar -C "$BUILD_ROOT" -cpf - etc/apk bin/ln bin/busybox var/cache/apk usr/share/apk | tar -C "$CACHE_ROOT" -xpf - ; \ + rm -rf $BUILD_ROOT/bin/ln $BUILD_ROOT/bin/busybox $BUILD_ROOT/etc/apk $BUILD_ROOT/var/cache/apk $BUILD_ROOT/usr/share/apk && \ + chroot-apk add \ ca-certificates \ # needed for update-ca-certificates to work: - run-parts \ - # install the runtime dependencies for python + run-parts + +RUN \ + --mount=type=cache,id=pip-cache-${TARGETARCH}${TARGETVARIANT},sharing=shared,target=/root/.cache/pip \ + set -eu ; \ + export SOURCE_DATE_EPOCH=0 ; \ + chroot-apk add \ + coreutils-env \ $(apk info -R .python-rundeps | grep -vE ':$') \ ; \ - cp -p /bin/busybox $BUILD_ROOT/bin/busybox ; \ - ls -lt $BUILD_ROOT/bin/busybox ; \ - chroot $BUILD_ROOT /bin/busybox ln -sf /bin/busybox /bin/ln ; \ - # copy dash into the container so we can use it as the default bin/sh - # tar -C / -cpf - $(\ - # apk info -L \ - # dash \ - # dash-binsh \ - # ca-certificates \ - # | grep -vE ':$' \ - # ) | tar -C $BUILD_ROOT -xpf - ; \ - # $_ln -sf /usr/bin/dash /bin/sh ; \ + python -m pip install -U pip setuptools ; \ + # remove all ``__pycache__`` directories + find /usr/local/lib/python$PYTHON_VERSION -type d -name '__pycache__' -print0 | xargs -0 rm -rf ; \ + # compile all py to an adjacent pyc and remove the original, leaving only the bytecode + python -m compileall -q -b /usr/local/lib/python$PYTHON_VERSION ; \ + find -type f -name '*.py' -exec sh -c "remove-py-if-pyc-exists -q {}" \; ;\ (\ cd /usr/local/lib && \ tar -C /usr/local/lib -cpf - python$PYTHON_VERSION/lib-dynload libpython* | tar -C $BUILD_ROOT/usr/local/lib -xpf - ; \ tar -C /usr/local/bin -cpf - python* | tar -C $BUILD_ROOT/usr/local/bin -xpf -; \ - (cd python$PYTHON_VERSION && zip -9 -X $BUILD_ROOT/usr/local/lib/python$(echo $PYTHON_VERSION | tr -d '.').zip $(\ + (cd python$PYTHON_VERSION && zip -q -9 -X $BUILD_ROOT/usr/local/lib/python$(echo $PYTHON_VERSION | tr -d '.').zip $(\ find . | grep -vE "(__pycache__|^\./(test|site-packages|lib-dynload|idlelib|lib2to3|tkinter|turtle|ensurepip))" \ )); \ cp -p python$PYTHON_VERSION/os.pyc $BUILD_ROOT/usr/local/lib/python$PYTHON_VERSION/os.pyc ; \ touch $BUILD_ROOT/usr/local/lib/python$PYTHON_VERSION/ensurepip.py ; \ rm $BUILD_ROOT/usr/local/lib/python$PYTHON_VERSION/lib-dynload/_tkinter* ; \ ) && \ - $_ln -sf /usr/local/bin/python$PYTHON_VERSION /usr/local/bin/python3 && \ - $_ln -sf /usr/local/bin/python$PYTHON_VERSION /usr/local/bin/python && \ - tar -C "$BUILD_ROOT" -cpf - etc/apk bin/ln bin/busybox var/cache/apk usr/share/apk | tar -C "$CACHE_ROOT" -xpf - ; \ - rm -rf $BUILD_ROOT/bin/ln $BUILD_ROOT/bin/busybox $BUILD_ROOT/etc/apk $BUILD_ROOT/var/cache/apk $BUILD_ROOT/usr/share/apk && \ + chroot-ln -sf /usr/local/bin/python$PYTHON_VERSION /usr/local/bin/python3 && \ + chroot-ln -sf /usr/local/bin/python$PYTHON_VERSION /usr/local/bin/python && \ # regenerate the ca-certs! 
- chroot $BUILD_ROOT update-ca-certificates && \ - chroot-pip install --force-reinstall setuptools + chroot-exec update-ca-certificates && \ + chroot-pip --optimize install --force-reinstall setuptools -FROM scratch AS distroless-python -ARG ALPINE_VERSION=3.20 -ARG PYTHON_VERSION=3.12 -ARG SOURCE_IMAGE=docker.io/python:${PYTHON_VERSION}-alpine${ALPINE_VERSION} -ARG BASE_IMAGE_DIGEST -ARG BUILD_ROOT='/dest' -ENV BUILD_ROOT=$BUILD_ROOT \ - PYTHON_VERSION=$PYTHON_VERSION \ - ALPINE_VERSION=$ALPINE_VERSION -COPY --from=buildroot $BUILD_ROOT / -LABEL \ - org.opencontainers.image.authors="distroless-python image developers " \ - org.opencontainers.image.source="https://github.com/autumnjolitz/distroless-python" \ - org.opencontainers.image.title="Distroless Python ${PYTHON_VERSION} on alpine${ALPINE_VERSION}" \ - org.opencontainers.image.description="Distroless, optimized Python images distilled from the DockerHub official Python images. These images only have a python interpreter and the dash shell." \ - org.opencontainers.image.base.digest="${BASE_IMAGE_DIGEST}" \ - org.opencontainers.image.base.name="$SOURCE_IMAGE" \ - distroless.python-version="${PYTHON_VERSION}" \ - distroless.alpine-version="${ALPINE_VERSION}" \ - distroless.base-image="alpine${ALPINE_VERSION}" +COPY --chmod=755 <&2 echo "Grafting $CACHE_ROOT into $BUILD_ROOT..." - tar -C "$CACHE_ROOT" -cpf - . | tar -C "$BUILD_ROOT" -xpf - + local extra='' + if [ "$DEBUG" = '1' ]; then + extra='-v' + >&2 echo "Grafting $CACHE_ROOT into $BUILD_ROOT..." + fi + tar -C "$CACHE_ROOT" -cpf - . | eval tar -C "$BUILD_ROOT" -xpf $extra - return $? } fini () { - >&2 echo "Removing APK data from $BUILD_ROOT, storing in $CACHE_ROOT" - tar -C "$BUILD_ROOT" -cpf - etc/apk bin/ln bin/busybox var/cache/apk usr/share/apk | tar -C "$CACHE_ROOT" -xpf - + local rc=$? + local extra='' + if [ "$DEBUG" = '1' ]; then + >&2 echo "Removing APK data from $BUILD_ROOT, storing in $CACHE_ROOT" + extra='-v' + fi + local T="$(mktemp -d)" + if [ -f $BUILD_ROOT/lib/apk/db/scripts.tar.gz ]; then + tar -C "$T" -xzpf $BUILD_ROOT/lib/apk/db/scripts.tar.gz + rm -f $BUILD_ROOT/lib/apk/db/scripts.tar.gz + sed -i'' 's|^#!busybox sh|#!/usr/bin/dash|g' $(find "$T" -type f -print) + sed -i'' 's|^#!/bin/sh|#!/usr/bin/dash|g' $(find "$T" -type f -print) + sed -i'' 's|^#!/bin/busybox sh|#!/usr/bin/dash|g' $(find "$T" -type f -print) + cat $(find "$T" -type f -print) + tar -C "$T" -cpvzf $BUILD_ROOT/lib/apk/db/scripts.tar.gz . + rm -rf "$T" + fi + + mkdir -p $BUILD_ROOT/var/cache/apk + tar -C "$BUILD_ROOT" -cpf - etc/apk bin/ln bin/busybox var/cache/apk usr/share/apk | eval tar -C "$CACHE_ROOT" -xpf $extra - $_chroot /bin/ln -sf /usr/bin/dash /bin/sh.bak rm -rf $BUILD_ROOT/bin/ln $BUILD_ROOT/bin/busybox $BUILD_ROOT/etc/apk $BUILD_ROOT/var/cache/apk $BUILD_ROOT/usr/share/apk if $_chroot /usr/bin/dash -c '[ ! -x /bin/sh ]'; then >&2 echo '/bin/sh in chroot failed the vibe check, replacing with a symlink to /usr/bin/dash!' mv $BUILD_ROOT/bin/sh.bak $BUILD_ROOT/bin/sh else - >&2 echo '/bin/sh passed the vibe check' + if [ "$DEBUG" = '1' ]; then + >&2 echo '/bin/sh passed the vibe check' + fi rm $BUILD_ROOT/bin/sh.bak fi - return $? 
+ exit $rc } trap fini EXIT setup -set -x -apk --root "$BUILD_ROOT" $@ \ No newline at end of file +apk --root "$BUILD_ROOT" $@ diff --git a/chroot-exec.sh b/chroot-exec.sh new file mode 100755 index 0000000..84095d3 --- /dev/null +++ b/chroot-exec.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env sh + +if [ "x$CACHE_ROOT" = 'x' ] || [ "x$BUILD_ROOT" = 'x' ]; then + >&2 echo "CACHE_ROOT (${CACHE_ROOT:-not-set}) and/or BUILD_ROOT (${BUILD_ROOT:-not-set}) is not set!" + exit 1 +fi + +DEBUG="${CHROOT_EXEC_DEBUG:-0}" + +set -e +set -o pipefail + +setup () { + local extra= + if [ "$DEBUG" = '1' ]; then + >&2 echo "Grafting $CACHE_ROOT into $BUILD_ROOT..." + extra='-v' + fi + tar -C "$CACHE_ROOT" -cpf - . | eval tar -C "$BUILD_ROOT" $extra -xpf - + return $? +} + +fini () { + local rc=$? + local extra= + if [ "$DEBUG" = '1' ]; then + >&2 echo "Removing APK data from $BUILD_ROOT, storing in $CACHE_ROOT" + extra=-v + fi + mkdir -p $BUILD_ROOT/var/cache/apk + tar -C "$BUILD_ROOT" -cpf - etc/apk bin/ln bin/busybox var/cache/apk usr/share/apk | eval tar -C "$CACHE_ROOT" $extra -xpf - + $_chroot /bin/ln -sf /usr/bin/dash /bin/sh.bak + rm -rf $BUILD_ROOT/bin/ln $BUILD_ROOT/bin/busybox $BUILD_ROOT/etc/apk $BUILD_ROOT/var/cache/apk $BUILD_ROOT/usr/share/apk + if $_chroot /usr/bin/dash -c '[ ! -x /bin/sh ]'; then + >&2 echo '/bin/sh in chroot failed the vibe check, replacing with a symlink to /usr/bin/dash!' + mv $BUILD_ROOT/bin/sh.bak $BUILD_ROOT/bin/sh + else + if [ "$DEBUG" = '1' ]; then + >&2 echo '/bin/sh passed the vibe check' + fi + rm $BUILD_ROOT/bin/sh.bak + fi + exit $rc +} + +trap fini EXIT + +while [ "x$1" = x/usr/bin/env -o "x$1" = xenv ]; do + shift +done + +if [ "x$@" = x -o x"$@" = 'x-h' -o x"$@" = 'x--help' -o x"$@" = 'x-?' ]; then + >&2 echo 'chroot-exec [NAME=VALUE]... [COMMAND [ARG]...] + +let NAME=VALUE denote environment variables + +All file accesses in this command are relative to the '"$BUILD_ROOT"' +' + exit 1 +fi + +setup + +chroot $BUILD_ROOT /usr/bin/env $@ diff --git a/chroot-ln.sh b/chroot-ln.sh index 8ab8fa8..4d00e86 100644 --- a/chroot-ln.sh +++ b/chroot-ln.sh @@ -5,31 +5,44 @@ if [ "x$CACHE_ROOT" = 'x' ] || [ "x$BUILD_ROOT" = 'x' ]; then exit 1 fi +DEBUG="${CHROOT_LN_DEBUG:-0}" + set -e set -o pipefail setup () { - >&2 echo "Grafting $CACHE_ROOT into $BUILD_ROOT..." - tar -C "$CACHE_ROOT" -cpf - . | tar -C "$BUILD_ROOT" -xpf - + local extra= + if [ "$DEBUG" = '1' ]; then + >&2 echo "Grafting $CACHE_ROOT into $BUILD_ROOT..." + extra='-v' + fi + tar -C "$CACHE_ROOT" -cpf - . | eval tar -C "$BUILD_ROOT" $extra -xpf - return $? } fini () { - >&2 echo "Removing APK data from $BUILD_ROOT, storing in $CACHE_ROOT" - tar -C "$BUILD_ROOT" -cpf - etc/apk bin/ln bin/busybox var/cache/apk usr/share/apk | tar -C "$CACHE_ROOT" -xpf - + local rc=$? + local extra= + if [ "$DEBUG" = '1' ]; then + >&2 echo "Removing APK data from $BUILD_ROOT, storing in $CACHE_ROOT" + extra=-v + fi + mkdir -p $BUILD_ROOT/var/cache/apk + tar -C "$BUILD_ROOT" -cpf - etc/apk bin/ln bin/busybox var/cache/apk usr/share/apk | eval tar -C "$CACHE_ROOT" $extra -xpf - $_chroot /bin/ln -sf /usr/bin/dash /bin/sh.bak rm -rf $BUILD_ROOT/bin/ln $BUILD_ROOT/bin/busybox $BUILD_ROOT/etc/apk $BUILD_ROOT/var/cache/apk $BUILD_ROOT/usr/share/apk if $_chroot /usr/bin/dash -c '[ ! -x /bin/sh ]'; then >&2 echo '/bin/sh in chroot failed the vibe check, replacing with a symlink to /usr/bin/dash!' 
mv $BUILD_ROOT/bin/sh.bak $BUILD_ROOT/bin/sh else - >&2 echo '/bin/sh passed the vibe check' + if [ "$DEBUG" = '1' ]; then + >&2 echo '/bin/sh passed the vibe check' + fi rm $BUILD_ROOT/bin/sh.bak fi - return $? + exit $rc } trap fini EXIT setup -set -x -chroot $BUILD_ROOT /usr/bin/env ln $@ \ No newline at end of file +chroot $BUILD_ROOT /usr/bin/env ln $@ diff --git a/chroot-pip.sh b/chroot-pip.sh index f5c3d20..775a820 100644 --- a/chroot-pip.sh +++ b/chroot-pip.sh @@ -5,10 +5,20 @@ if [ "x$CACHE_ROOT" = 'x' ] || [ "x$BUILD_ROOT" = 'x' ]; then exit 1 fi +DEBUG="${CHROOT_PIP_DEBUG:-0}" + +if [ "x${SOURCE_DATE_EPOCH:-}" = x ]; then + SOURCE_DATE_EPOCH=0 +elif [ "x${SOURCE_DATE_EPOCH:-}" = x- ]; then + # ARJ: if the SOURCE_DATE_EPOCH is '-' then it + # will have the effect of clearing the default SOURCE_DATE_EPOCH + SOURCE_DATE_EPOCH= +fi + set -e set -o pipefail -if [ "$1" = '-O' ] || [ "$1" = '--optimize' ]; then +if [ "x${1:-}" = 'x-O' ] || [ "x${1:-}" = 'x--optimize' ]; then PIP_OPTIMIZE='1' shift fi @@ -20,42 +30,100 @@ setup () { } fini () { - if [ $PIP_OPTIMIZE = '1' ]; then + local rv= + local rc=$? + local extra=-q + local new_packages= + if [ "x${DEBUG:-}" = x1 ]; then + extra= + fi + + if [ x${PIP_OPTIMIZE:-} = 'x1' ]; then python -m pip freeze >"$AFTER_PACKAGES" - new_packages="$(diff -Naur "$BEFORE_PACKAGES" "$AFTER_PACKAGES" | grep -vE '^\+\+' | grep -E '^\+' | cut -f2 -d+ | cut -f1 -d= | xargs)" + rv=0 + new_packages="$(diff -Naur "$BEFORE_PACKAGES" "$AFTER_PACKAGES" | grep -vE '^\+\+' | grep -E '^\+' | cut -f2 -d+ | cut -f1 -d= | xargs)" || rv=$? if [ "x$new_packages" != 'x' ]; then - >&2 echo "Optimizing packages (${new_packages})..." + if [ "x${DEBUG:-}" = 'x1' ]; then + >&2 echo "Optimizing packages (${new_packages})..." + fi for package in $new_packages do package_location=$(python -m pip show -f "$package" | grep -E '^Location' | cut -f2 -d: | xargs) for dir in $(python -m pip show -f "$package" | awk 'f;/Files:/{f=1}' | cut -f1 -d/ | sort | uniq | xargs) do - python -m compileall -b "$package_location/$dir" + eval python -m compileall \ + $extra \ + -b "$package_location/$dir" done done else - >&2 echo 'No new packages installed or changed to optimize with.' + if [ "x${DEBUG:-}" = 'x1' ]; then + >&2 echo 'No new packages installed or changed to optimize with.' + fi fi rm -f "$BEFORE_PACKAGES" "$AFTER_PACKAGES" fi export ALLOW_SITE_PACKAGES=1 find $BUILD_ROOT/usr/local/lib/python$PYTHON_VERSION/site-packages \ - -type f -name '*.py' -exec sh -c "remove-py-if-pyc-exists {}" \; ; - return 0 + -type f -name '*.py' -exec sh -c "remove-py-if-pyc-exists $extra {}" \; ; + return $rc } trap fini EXIT setup export PYTHONPATH="${BUILD_ROOT}/usr/local/lib/python${PYTHON_VERSION}/site-packages" export PIP_PREFIX="${BUILD_ROOT}/usr/local" -if [ $PIP_OPTIMIZE = '1' ]; then + +case "$1" in + optimize) + PIP_OPTIMIZE='1' + ;; +esac + +if [ x$PIP_OPTIMIZE = 'x1' ]; then BEFORE_PACKAGES=$(mktemp) AFTER_PACKAGES=$(mktemp) python -m pip freeze >"$BEFORE_PACKAGES" fi -set -x -retval=0 -python -m pip $@ || retval=$? -set +ex -exit $retval +case "$@" in + *'--force-reinstall'*|optimize*) + maybe_packages= + index='-1' + for package_name in $@ + do + if [ x"$package_name" != x ] && ! 
case "$package_name" in '-'*) true ;; *) false ;; esac ; then + index="$(expr $index \+ 1)" || index='0' + if case "$package_name" in *'=='*) true ;; *) false ;; esac ; then + package_name="$(echo "${package_name}" | cut -d'=' -f1 | xargs)" + fi + if [ $index -ne 0 ]; then + maybe_packages="${maybe_packages} ${package_name}" + fi + fi + done + if [ x$maybe_packages != x ]; then + for package_name in $maybe_packages + do + # if the package is already installed, flag it + # as if it wasn't installed so we can optimize it + if [ x$package_name != x ] && pip show "${package_name}" >/dev/null ; then + sed -i'' '/^'"${package_name}"'==/d' $BEFORE_PACKAGES + fi + done + fi + ;; +esac + +if [ x"${1:-}" = xoptimize ]; then + shift + if [ x"$@" = x ]; then + >&2 echo 'optimize [PACKAGE] [PACKAGE2] ... [PACKAGEN] +pass in package names to run optimize on +' + exit 1 + fi + exit +fi + +python -m pip $@ diff --git a/README.rst.tmpl b/docs/README.rst.tmpl similarity index 97% rename from README.rst.tmpl rename to docs/README.rst.tmpl index 990ba17..27fd798 100644 --- a/README.rst.tmpl +++ b/docs/README.rst.tmpl @@ -92,10 +92,7 @@ Given the following ``Dockerfile``, we will add ``httpie`` to the image and refe #syntax=docker/dockerfile:1 FROM autumnjolitz/distroless-python:3.12-alpine3.20-buildroot AS buildroot - RUN python -m pip install \ - --no-cache \ - --prefix "$BUILD_ROOT/usr/local" \ - httpie + RUN chroot-pip install httpie FROM autumnjolitz/distroless-python:3.12-alpine3.20 COPY --from=buildroot \ diff --git a/examples/simple-flask/Dockerfile b/examples/simple-flask/Dockerfile index 88a008b..9a7ce17 100644 --- a/examples/simple-flask/Dockerfile +++ b/examples/simple-flask/Dockerfile @@ -1,13 +1,12 @@ #syntax=docker/dockerfile:1 ARG SOURCE_IMAGE=autumnjolitz/distroless-python:3.12-alpine3.20 -FROM --platform=$TARGETPLATFORM ${SOURCE_IMAGE}-buildroot AS buildroot +FROM ${SOURCE_IMAGE}-buildroot AS buildroot ADD requirements.txt . -RUN python -m pip install \ +RUN chroot-pip install \ --no-cache \ - --prefix "$BUILD_ROOT/usr/local" \ -r requirements.txt ARG SOURCE_IMAGE=autumnjolitz/distroless-python:3.12-alpine3.20 diff --git a/examples/simple-flask/hello.py b/examples/simple-flask/hello.py index 0d2c13a..6f50a38 100644 --- a/examples/simple-flask/hello.py +++ b/examples/simple-flask/hello.py @@ -1,3 +1,5 @@ +import platform + from flask import Flask app = Flask(__name__) @@ -6,3 +8,13 @@ @app.route("/") def hello_world() -> str: return "
<p>Hello, World!</p>
" + + +@app.route("/_health") +def check_health() -> str: + return "ok" + + +@app.route("/_arch") +def show_config() -> str: + return platform.uname().machine diff --git a/remove-py-if-pyc-exists.sh b/remove-py-if-pyc-exists.sh index 7db94f6..ba0fc3a 100644 --- a/remove-py-if-pyc-exists.sh +++ b/remove-py-if-pyc-exists.sh @@ -1,12 +1,21 @@ #!/usr/bin/env sh ALLOW_SITE_PACKAGES=${ALLOW_SITE_PACKAGES:-0} +quiet=0 +if [ $1 = '-q' ]; then + quiet=1 + shift +fi if [ -f "${1}c" ]; then if [ "x$ALLOW_SITE_PACKAGES" = 'x0' ] && ! case "$1" in *site-packages/*) false ;; *) true ;; esac ; then - echo 'Skipping optimization of '"$1" + if [ $quiet -eq 0 ]; then + >&2 echo 'Skipping optimization of '"$1" + fi exit 0 fi - echo 'Removing '"${1}" + if [ $quiet -eq 0 ]; then + >&2 echo 'Removing '"${1}" + fi rm -f "${1}" fi \ No newline at end of file diff --git a/runtime/Dockerfile.alpine b/runtime/Dockerfile.alpine new file mode 100644 index 0000000..c1a449b --- /dev/null +++ b/runtime/Dockerfile.alpine @@ -0,0 +1,30 @@ +# syntax=docker/dockerfile:1 + +ARG ALPINE_VERSION=3.20 +ARG PYTHON_VERSION=3.12 +ARG BUILDROOT_IMAGE=docker.io/autumnjolitz/python:${PYTHON_VERSION}-alpine${ALPINE_VERSION}-buildroot +FROM ${BUILDROOT_IMAGE} AS buildroot + +FROM scratch AS distroless-python +ARG ALPINE_VERSION=3.20 +ARG PYTHON_VERSION=3.12 +ARG SOURCE_IMAGE=docker.io/python:${PYTHON_VERSION}-alpine${ALPINE_VERSION} +ARG BASE_IMAGE_DIGEST +ARG BUILD_ROOT='/dest' +ENV BUILD_ROOT=$BUILD_ROOT \ + PYTHON_VERSION=$PYTHON_VERSION \ + ALPINE_VERSION=$ALPINE_VERSION +COPY --from=buildroot $BUILD_ROOT / +LABEL \ + org.opencontainers.image.authors="distroless-python image developers " \ + org.opencontainers.image.source="https://github.com/autumnjolitz/distroless-python" \ + org.opencontainers.image.title="Distroless Python ${PYTHON_VERSION} on alpine${ALPINE_VERSION}" \ + org.opencontainers.image.description="Distroless, optimized Python images distilled from the DockerHub official Python images. These images only have a python interpreter and the dash shell." \ + org.opencontainers.image.base.digest="${BASE_IMAGE_DIGEST}" \ + org.opencontainers.image.base.name="$SOURCE_IMAGE" \ + distroless.python-version="${PYTHON_VERSION}" \ + distroless.alpine-version="${ALPINE_VERSION}" \ + distroless.base-image="alpine${ALPINE_VERSION}" + +SHELL ["/usr/bin/dash", "-c"] +ENTRYPOINT [ "/usr/local/bin/python" ]