#!/usr/bin/env bash

#
# This function retries Git operations to avoid failure in case remote is borked
#
improved_git() {
	local real_git
	real_git="$(command -v git)"
	local retries=3
	local delay=10
	local count=0
	while [ $count -lt $retries ]; do
		run_host_command_logged_raw "$real_git" --no-pager "$@" && return 0 # this gobbles up errors, but returns if OK, so everything after is error
		count=$((count + 1))
		display_alert "improved_git try $count failed, retrying in ${delay} seconds" "git $*" "warn"
		sleep $delay
	done
	display_alert "improved_git, too many retries" "git $*" "err"
	return 17 # explode with error if this is reached, "too many retries"
}
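
# Illustrative usage only (a sketch, not called from this file); it assumes the logging helpers
# (run_host_command_logged_raw, display_alert) are already sourced, and the URL/path are placeholders:
#   improved_git clone "https://github.com/armbian/build" /tmp/armbian-build
#   improved_git fetch --no-tags origin master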

# Not improved, just regular, but logged "correctly".
regular_git() {
	run_host_command_logged_raw git --no-pager "$@"
}

# avoid repeating myself too much
function improved_git_fetch() {
	improved_git fetch --progress --verbose --no-auto-maintenance "$@"
}

# workaround for the new limitations imposed by the CVE-2022-24765 fix in git; otherwise "fatal: unsafe repository"
function git_ensure_safe_directory() {
	if [[ -n "$(command -v git)" ]]; then
		local git_dir="$1"
		display_alert "git: Marking directory as safe" "$git_dir" "debug"
		run_host_command_logged git config --global --add safe.directory "$git_dir"
	else
		display_alert "git not installed" "a true wonder how you got this far without git - it will be installed for you" "warn"
	fi
}
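
# Illustrative effect only (the path is a placeholder): calling
#   git_ensure_safe_directory "${SRC}/cache/sources/some-repo"
# is roughly equivalent to running, with logging:
#   git config --global --add safe.directory "${SRC}/cache/sources/some-repo"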

# fetch_from_repo <url> <directory> <ref> <ref_subdir>
# <url>: remote repository URL
# <directory>: local directory; subdir for branch/tag will be created
# <ref>:
#	branch:name
#	tag:name
#	head(*)
#	commit:hash
#
# *: Implies ref_subdir=no
#
# <ref_subdir>: "yes" to create subdirectory for tag or branch name
#
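# Illustrative calls (a sketch; URL, directory and refs are placeholders, real call sites live elsewhere):
#   fetch_from_repo "https://github.com/u-boot/u-boot" "u-boot" "tag:v2022.10" "yes"
#     -> working copy ends up in ${SRC}/cache/sources/u-boot/v2022.10
#   fetch_from_repo "https://github.com/armbian/build" "armbian-build" "branch:master" "no"
#     -> working copy ends up in ${SRC}/cache/sources/armbian-build
#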
fetch_from_repo() {
	display_alert "fetch_from_repo" "$*" "git"
	local url=$1
	local dir=$2
	local ref=$3
	local ref_subdir=$4
	local git_work_dir

	# Set GitHub mirror before anything else touches $url
	url=${url//'https://github.com/'/$GITHUB_SOURCE'/'}

	# The 'offline' variable must always be set to 'true' or 'false'
	local offline=false
	if [[ "${OFFLINE_WORK}" == "yes" ]]; then
		offline=true
	fi

	[[ -z $ref || ($ref != tag:* && $ref != branch:* && $ref != head && $ref != commit:*) ]] && exit_with_error "Error in configuration"
	local ref_type=${ref%%:*} ref_name=${ref##*:}
	if [[ $ref_type == head ]]; then
		ref_name=HEAD
	fi
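
	# For reference, the parameter expansions above split ${ref} like this (illustrative values):
	#   ref="branch:master"   -> ref_type="branch", ref_name="master"
	#   ref="tag:v2022.10"    -> ref_type="tag",    ref_name="v2022.10"
	#   ref="commit:0123abcd" -> ref_type="commit", ref_name="0123abcd"
	#   ref="head"            -> ref_type="head",   ref_name="HEAD" (forced just above)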

	display_alert "Getting sources from Git" "$dir $ref_name" "info"

	local workdir=$dir
	if [[ $ref_subdir == yes ]]; then
		workdir=$dir/$ref_name
	fi

	git_work_dir="${SRC}/cache/sources/${workdir}"

	# if GIT_FIXED_WORKDIR is set, ignore the above logic and use it directly.
	if [[ "${GIT_FIXED_WORKDIR}" != "" ]]; then
		display_alert "GIT_FIXED_WORKDIR is set to" "${GIT_FIXED_WORKDIR}" "git"
		git_work_dir="${SRC}/cache/sources/${GIT_FIXED_WORKDIR}"
	fi

	mkdir -p "${git_work_dir}" || exit_with_error "No path or no write permission" "${git_work_dir}"

	cd "${git_work_dir}" || exit

	display_alert "Git working dir" "${git_work_dir}" "git"

	git_ensure_safe_directory "${git_work_dir}"

	local expected_origin_url actual_origin_url
	expected_origin_url="$(echo -n "${url}" | sed 's/^.*@//' | sed 's/^.*\/\///')"

	# Make sure the origin matches what is expected. If it doesn't, clean up and start again.
	if [[ "$(git rev-parse --git-dir)" == ".git" ]]; then
		actual_origin_url="$(git config remote.origin.url | sed 's/^.*@//' | sed 's/^.*\/\///')"
		if [[ "${expected_origin_url}" != "${actual_origin_url}" ]]; then
			display_alert "Remote git URL does not match, deleting working copy" "${git_work_dir} expected: '${expected_origin_url}' actual: '${actual_origin_url}'" "warn"
			cd "${SRC}" || exit 3 # free up cwd
			run_host_command_logged rm -rf "${git_work_dir}" # delete the dir
			mkdir -p "${git_work_dir}" || exit_with_error "No path or no write permission" "${git_work_dir}" # recreate
			cd "${git_work_dir}" || exit # reset cwd
		fi
	fi
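
	# For reference, the sed pipeline above normalizes URLs so that equivalent origins compare equal,
	# stripping credentials and the scheme (illustrative values only):
	#   "https://user:token@github.com/armbian/build" -> "github.com/armbian/build"
	#   "git://github.com/armbian/build"              -> "github.com/armbian/build"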

	local do_add_origin="no"

	if [[ "$(git rev-parse --git-dir)" != ".git" ]]; then
		# Dir is not a git working copy. Make it so;
		# If a callback is defined, call it, giving it the dir as a parameter; it reads the rest from the environment.
		# If no callback is defined, do an init, and schedule a fetch.

		if [[ $(type -t ${GIT_INIT_REPO_HOOK} || true) == function ]]; then
			display_alert "Delegating to ${GIT_INIT_REPO_HOOK}()" "git init: $dir $ref_name" "debug"
			${GIT_INIT_REPO_HOOK} "${git_work_dir}"
		else
			display_alert "Initializing empty git local copy" "git init: $dir $ref_name"
			regular_git init -q --initial-branch="armbian_unused_initial_branch" .
		fi

		offline=false       # Force online, we'll need to fetch.
		do_add_origin="yes" # Just created the repo, it needs an origin later.
	fi

	local changed=false

	# get local hash; might fail
	local local_hash
	local_hash=$(git rev-parse @ 2> /dev/null || true) # Don't fail nor output anything on failure

	# when we work offline we simply return the sources to their original state
	if ! $offline; then

		case $ref_type in
			branch)
				# TODO: grep refs/heads/$name
				local remote_hash
				remote_hash=$(git ls-remote -h "${url}" "$ref_name" | head -1 | cut -f1)
				[[ -z $local_hash || "${local_hash}" != "a${remote_hash}" ]] && changed=true
				;;
			tag)
				local remote_hash
				remote_hash=$(git ls-remote -t "${url}" "$ref_name" | cut -f1)
				if [[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]]; then
					remote_hash=$(git ls-remote -t "${url}" "$ref_name^{}" | cut -f1)
					[[ -z $remote_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true
				fi
				;;
			head)
				local remote_hash
				remote_hash=$(git ls-remote "${url}" HEAD | cut -f1)
				[[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true
				;;
			commit)
				[[ -z $local_hash || $local_hash == "@" ]] && changed=true
				;;
		esac

		display_alert "Git local_hash vs remote_hash" "${local_hash} vs ${remote_hash}" "git"

	fi # offline
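
	# For reference, `git ls-remote` above prints "<hash><TAB><refname>" lines, e.g. (made-up hash):
	#   0123456789abcdef0123456789abcdef01234567    refs/heads/master
	# `cut -f1` keeps just the hash, which is compared against the local hash from `git rev-parse @`.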

	local checkout_from="HEAD" # Probably best to use the local revision?

	if [[ "${changed}" == "true" ]]; then

		if [[ $(type -t ${GIT_PRE_FETCH_HOOK} || true) == function ]]; then
			display_alert "Delegating to ${GIT_PRE_FETCH_HOOK}()" "before git fetch" "debug"
			${GIT_PRE_FETCH_HOOK} "${git_work_dir}" "${url}" "$ref_type" "$ref_name"
		fi

		if [[ "${do_add_origin}" == "yes" ]]; then
			regular_git remote add origin "${url}"
		fi

		# remote was updated, fetch and check out updates, but not tags; tags pull their respective commits too, making it a huge fetch.
		display_alert "Fetching updates from origin" "$dir $ref_name"
		case $ref_type in
			branch | commit) improved_git_fetch --no-tags origin "${ref_name}" ;;
			tag) improved_git_fetch --no-tags origin tags/"${ref_name}" ;;
			head) improved_git_fetch --no-tags origin HEAD ;;
		esac
		display_alert "Origin fetch completed, working copy size" "$(du -h -s | awk '{print $1}')" "git"
		checkout_from="FETCH_HEAD"
	fi

	# not declared local: these should be declared in the outer scope, so they can be read after this function returns.
	checked_out_revision_ts="$(git log -1 --pretty=%ct "${checkout_from}")" # unix timestamp of the commit date
	checked_out_revision_mtime="$(date +%Y%m%d%H%M%S -d "@${checked_out_revision_ts}")" # convert timestamp to local date/time
	display_alert "checked_out_revision_mtime set!" "${checked_out_revision_mtime} - ${checked_out_revision_ts}" "git"

	display_alert "Cleaning git dir" "$(git status -s 2> /dev/null | wc -l) files" # the working directory may not be clean; show how many files differ

	#fasthash_debug "before git checkout of $dir $ref_name" # fasthash interested in this
	regular_git checkout -f -q "${checkout_from}" # Return the files that are tracked by git to the initial state.

	#fasthash_debug "before git clean of $dir $ref_name"
	regular_git clean -q -d -f # Files that are not tracked by git and were added when the patch was applied must be removed.

	if [[ -f .gitmodules ]]; then
		if [[ "${GIT_SKIP_SUBMODULES}" == "yes" ]]; then
			display_alert "Skipping submodules" "GIT_SKIP_SUBMODULES=yes" "debug"
		else
			display_alert "Updating submodules" "" "ext"
			# FML: http://stackoverflow.com/a/17692710
			for i in $(git config -f .gitmodules --get-regexp path | awk '{ print $2 }'); do
				cd "${git_work_dir}" || exit
				local surl sref
				surl=$(git config -f .gitmodules --get "submodule.$i.url")
				sref=$(git config -f .gitmodules --get "submodule.$i.branch" || true)
				if [[ -n $sref ]]; then
					sref="branch:$sref"
				else
					sref="head"
				fi
				# @TODO: in case of the bundle stuff this will fail terribly
				fetch_from_repo "$surl" "$workdir/$i" "$sref"
			done
		fi
	fi
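
	# For reference, the loop above walks .gitmodules entries of this shape (illustrative module name and URL):
	#   [submodule "some-module"]
	#       path = some-module
	#       url = https://github.com/example/some-module
	#       branch = main
	# `git config -f .gitmodules --get-regexp path` yields "submodule.some-module.path some-module";
	# each module is then fetched recursively via fetch_from_repo into "$workdir/some-module".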

	display_alert "Final working copy size" "$(du -h -s | awk '{print $1}')" "git"
	#fasthash_debug "at the end of fetch_from_repo $dir $ref_name"
}