Merge branch 'master' into flatcar-master

This commit is contained in:
Dongsu Park 2018-07-25 12:24:40 +02:00
commit 7990e7234a
46 changed files with 498 additions and 1178 deletions

View File

@ -11,7 +11,7 @@ SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user

View File

@ -13,7 +13,7 @@ SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user

View File

@ -423,6 +423,9 @@ def FormatFat(part, device):
cmd = ['mkfs.vfat']
if 'fs_label' in part:
cmd += ['-n', part['fs_label']]
if part['type'] == 'efi':
# ESP is FAT32 irrespective of size
cmd += ['-F', '32']
Sudo(cmd + [device, vfat_blocks], stdout_null=True)

View File

@ -146,7 +146,7 @@ function gptprio {
fi
}
menuentry "Flatcar default" --id=flatcar {
menuentry "Flatcar default" --id=flatcar --unrestricted {
gptprio
linux$suf $gptprio_kernel $gptprio_cmdline $linux_cmdline
}

View File

@ -218,8 +218,9 @@ case "${VM_BOARD}" in
qemu-system-x86_64 \
-name "$VM_NAME" \
-m ${VM_MEMORY} \
-net nic,vlan=0,model=virtio \
-net user,vlan=0,hostfwd=tcp::"${SSH_PORT}"-:22,hostname="${VM_NAME}" \
-netdev user,id=eth0,hostfwd=tcp::"${SSH_PORT}"-:22,hostname="${VM_NAME}" \
-device virtio-net-pci,netdev=eth0 \
-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0 \
"$@"
;;
arm64-usr)
@ -228,6 +229,7 @@ case "${VM_BOARD}" in
-m ${VM_MEMORY} \
-netdev user,id=eth0,hostfwd=tcp::"${SSH_PORT}"-:22,hostname="${VM_NAME}" \
-device virtio-net-device,netdev=eth0 \
-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0 \
"$@"
;;
*) die "Unsupported arch" ;;

View File

@ -93,7 +93,7 @@
<vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
<vssd:InstanceID>0</vssd:InstanceID>
<vssd:VirtualSystemIdentifier>@@NAME@@</vssd:VirtualSystemIdentifier>
<vssd:VirtualSystemType>vmx-07</vssd:VirtualSystemType>
<vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType>
</System>
<Item>
<rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
@ -191,12 +191,12 @@
<rasd:ResourceType>10</rasd:ResourceType>
<vmw:Config ovf:required="false" vmw:key="wakeOnLanEnabled" vmw:value="true"/>
</Item>
<vmw:Config ovf:required="false" vmw:key="cpuHotAddEnabled" vmw:value="false"/>
<vmw:Config ovf:required="false" vmw:key="cpuHotRemoveEnabled" vmw:value="false"/>
<vmw:Config ovf:required="false" vmw:key="cpuHotAddEnabled" vmw:value="true"/>
<vmw:Config ovf:required="false" vmw:key="cpuHotRemoveEnabled" vmw:value="true"/>
<vmw:Config ovf:required="false" vmw:key="firmware" vmw:value="bios"/>
<vmw:Config ovf:required="false" vmw:key="virtualICH7MPresent" vmw:value="false"/>
<vmw:Config ovf:required="false" vmw:key="virtualSMCPresent" vmw:value="false"/>
<vmw:Config ovf:required="false" vmw:key="memoryHotAddEnabled" vmw:value="false"/>
<vmw:Config ovf:required="false" vmw:key="memoryHotAddEnabled" vmw:value="true"/>
<vmw:Config ovf:required="false" vmw:key="nestedHVEnabled" vmw:value="false"/>
<vmw:Config ovf:required="false" vmw:key="powerOpInfo.powerOffType" vmw:value="preset"/>
<vmw:Config ovf:required="false" vmw:key="powerOpInfo.resetType" vmw:value="preset"/>

View File

@ -6,6 +6,7 @@ GLSA_WHITELIST=(
201412-09 # incompatible CA certificate version numbers
201710-23 # we handle Go differently; drop when 1.9 builds everything
201803-03 # same as above, drop when all Go < 1.9 packages are gone
201804-12 # same as above, except this requires only Go 1.10 or later
)
glsa_image() {

View File

@ -189,8 +189,8 @@ get_cross_pkgs() {
# Get portage arguments restricting toolchains to binary packages only.
get_binonly_args() {
local pkgs=( "${TOOLCHAIN_PKGS[@]}" $(get_cross_pkgs "$@") )
# XXX: Revert this after stable > 1632.
local tmppkgs=( ${pkgs[*]##*/binutils} ) ; tmppkgs=( ${tmppkgs[*]##*/gcc} )
# XXX: Drop this exception after stable > 1729.
local tmppkgs=( ${pkgs[*]##*/glibc} )
echo "${tmppkgs[@]/#/--useoldpkg-atoms=}" "${pkgs[@]/#/--rebuild-exclude=}"
}

View File

@ -12,7 +12,6 @@ VALID_IMG_TYPES=(
iso
openstack
openstack_mini
oracle_oci_qcow
qemu
qemu_uefi
qemu_uefi_secure
@ -53,7 +52,6 @@ VALID_OEM_PACKAGES=(
exoscale
gce
hyperv
oracle-oci
rackspace
rackspace-onmetal
vagrant
@ -245,10 +243,6 @@ IMG_gce_CONF_FORMAT=gce
IMG_gce_OEM_PACKAGE=oem-gce
IMG_gce_OEM_ACI=gce
## Oracle OCI
IMG_oracle_oci_qcow_DISK_FORMAT=qcow2
IMG_oracle_oci_qcow_OEM_PACKAGE=oem-oracle-oci
## rackspace
IMG_rackspace_OEM_PACKAGE=oem-rackspace
IMG_rackspace_vhd_DISK_FORMAT=vhd
@ -794,7 +788,7 @@ _write_vmx_conf() {
#!/usr/bin/vmware
.encoding = "UTF-8"
config.version = "8"
virtualHW.version = "7"
virtualHW.version = "11"
cleanShutdown = "TRUE"
displayName = "${VM_NAME}"
ethernet0.addressType = "generated"
@ -828,6 +822,9 @@ pciBridge6.functions = "8"
pciBridge7.present = "TRUE"
pciBridge7.virtualDev = "pcieRootPort"
pciBridge7.functions = "8"
hpet0.present = "TRUE"
vcpu.hotadd = "TRUE"
mem.hotadd = "TRUE"
EOF
# Only upload the vmx if it won't be bundled
if [[ -z "$(_get_vm_opt BUNDLE_FORMAT)" ]]; then

View File

@ -11,7 +11,7 @@ SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user

View File

@ -7,7 +7,7 @@
. "$(dirname "$0")/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user
@ -86,8 +86,6 @@ fi
check_gsutil_opts
CHROMITE_BIN="${GCLIENT_ROOT}/chromite/bin"
# Before we can run any tools, we need to update chroot or setup_board.
UPDATE_ARGS=( --regen_configs )
if [ "${FLAGS_usepkg}" -eq "${FLAGS_TRUE}" ]; then
@ -170,8 +168,7 @@ fi
# Build cros_workon packages when they are changed.
CROS_WORKON_PKGS=()
if [ "${FLAGS_workon}" -eq "${FLAGS_TRUE}" ]; then
LIST_MODIFIED_PACKAGES="${CHROMITE_BIN}/cros_list_modified_packages"
CROS_WORKON_PKGS+=( $("${LIST_MODIFIED_PACKAGES}" --board=${FLAGS_board}) )
CROS_WORKON_PKGS+=( $(cros_workon list --board=${FLAGS_board}) )
fi
if [[ ${#CROS_WORKON_PKGS[@]} -gt 0 ]]; then

View File

@ -7,7 +7,7 @@
. "$(dirname "$0")/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user
@ -225,14 +225,14 @@ function torcx_package() {
# swapping default package versions for different OS releases by reordering.
DEFAULT_IMAGES=(
=app-torcx/docker-1.12
=app-torcx/docker-18.02
=app-torcx/docker-18.06
)
# This list contains extra images which will be uploaded and included in the
# generated manifest, but won't be included in the vendor store.
EXTRA_IMAGES=(
=app-torcx/docker-17.03
=app-torcx/docker-17.12
=app-torcx/docker-18.03
)
mkdir -p "${BUILD_DIR}"

188
check_out_of_date.py Executable file
View File

@ -0,0 +1,188 @@
#!/usr/bin/python2
# needs to be python2 for portage
# Prints out a list of all packages in portage-stable and how they stand relative to gentoo upstream
import argparse
import json
import os
import subprocess
import sys
import portage.versions
def split_package(p):
    """Split a full atom string into ('cat/pkg', 'ver-rev').

    Delegates to portage.versions.catpkgsplit, which yields the tuple
    (category, package, version, revision).
    """
    category, package, version, revision = portage.versions.catpkgsplit(p.strip())
    return ("%s/%s" % (category, package), "%s-%s" % (version, revision))
def build_pkg_map(pkgs):
    """Group 'cat/pkg-ver' strings into a dict of {'cat/pkg': [versions...]}."""
    package_map = dict()
    for name, version in map(split_package, pkgs):
        # setdefault collapses the original if/else branch into one statement.
        package_map.setdefault(name, []).append(version)
    return package_map
def exec_command_strict(cmd):
    """Run a whitespace-split command line and return its stdout as text.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    NOTE: splitting on whitespace means arguments containing spaces are
    not supported — all call sites in this script pass simple commands.
    """
    raw = subprocess.check_output(cmd.split())
    return raw.decode()
def exec_command(cmd):
    """Run a command like exec_command_strict, but tolerate failure.

    If the command exits non-zero, the captured output is returned anyway
    instead of propagating CalledProcessError.
    """
    try:
        result = exec_command_strict(cmd)
    except subprocess.CalledProcessError as err:
        result = err.output.decode()
    return result
def get_portage_tree_packages(tree_path):
    """ returns a list of all packages in a portage tree/overlay in the form of cat/pkg-ver"""
    # find(1): follow symlinks (-L), look at most 3 levels deep
    # (cat/pkg/pkg-ver.ebuild), skip the skel.ebuild template, and print
    # paths relative to the tree root (-printf %P).
    # NOTE(review): '*.ebuild' is never shell-expanded because
    # exec_command_strict runs the command without a shell, so find
    # receives the glob literally as its -name pattern, as intended.
    pkgs = exec_command_strict("find -L {} -maxdepth 3 -type f -name *.ebuild -not -name skel.ebuild -printf %P\\n".format(tree_path))
    def process_line(line):
        # cat/pkg/pkg-ver.ebuild -> cat/pkg-ver
        chunks = line.split("/")
        end = chunks[2].replace(".ebuild", "")
        return chunks[0] + "/" + end
    return build_pkg_map(map(process_line, pkgs.splitlines()))
def process_emerge_output(eout):
    """Reduce `emerge --unordered-display` output to bare cat/pkg-ver atoms."""
    atoms = []
    for line in eout.splitlines():
        # Header/summary lines never contain a '/', so they are dropped.
        if "/" not in line:
            continue
        # '[ebuild  N ] cat/pkg-ver:slot::repo ...' -> 'cat/pkg-ver'
        atoms.append(line.strip().split("] ")[1].split(":")[0])
    return atoms
def get_board_packages(board):
    """Return the package map used by a board.

    Valid boards are 'amd64-usr', 'arm64-usr', 'sdk', 'bootstrap', and
    'image'. The appropriate emerge invocation is run in pretend mode and
    its output reduced to a {'cat/pkg': [versions...]} map.

    Raises:
        ValueError: if `board` is not one of the recognized names.
    """
    emerge_args = "--emptytree --pretend --verbose --unordered-display"
    if board == "sdk":
        cmd = "emerge {} @system sdk-depends sdk-extras".format(emerge_args)
    elif board == "amd64-usr" or board == "arm64-usr":
        cmd = "emerge-{} {} @system board-packages".format(board, emerge_args)
    elif board == "bootstrap":
        # The catalyst stage1 helper prints the package list used to
        # bootstrap the SDK; feed that list to a plain emerge.
        pkgs = exec_command_strict("/usr/lib64/catalyst/targets/stage1/build.py")
        cmd = "emerge {} {}".format(emerge_args, pkgs)
    elif board == "image":
        cmd = "emerge-amd64-usr {} --usepkgonly board-packages".format(emerge_args)
    else:
        # BUG FIX: the original `raise "invalid board"` raised a bare string,
        # which is a TypeError at runtime (string exceptions were removed in
        # Python 2.6). Raise a proper exception type instead.
        raise ValueError("invalid board: {!r}".format(board))
    return build_pkg_map(process_emerge_output(exec_command(cmd)))
def print_table(report, head, line_head, line_tail, tail, joiner, pkg_joiner):
    """Render the report as delimited rows between `head` and `tail`.

    Scalar fields (name/tag/modified) are emitted as-is; list-valued fields
    are joined with `pkg_joiner`. Each row's fields are joined with `joiner`
    and wrapped in `line_head`/`line_tail`.

    NOTE: a synthetic header row is inserted at the front of `report`; this
    mutation is visible to the caller (preserved from the original code).
    """
    print(head)
    # Metapackage entry that acts as the table header.
    header_row = {"name": "Package",
                  "common": ["Common"],
                  "ours": ["Ours"],
                  "upstream": ["Upstream"],
                  "tag": "Tag",
                  "sdk": ["sdk"],
                  "arm64-usr": ["arm64-usr"],
                  "amd64-usr": ["amd64-usr"],
                  "bootstrap": ["bootstrap"],
                  "modified": "Modified"}
    report.insert(0, header_row)
    scalar_fields = ("name", "tag", "modified")
    field_order = ("name", "common", "ours", "upstream", "tag",
                   "sdk", "arm64-usr", "amd64-usr", "bootstrap", "modified")
    for row in report:
        cells = []
        for field in field_order:
            if field in scalar_fields:
                cells.append(row.get(field, ""))
            else:
                cells.append(pkg_joiner.join(row.get(field, [])))
        print(line_head + joiner.join(cells) + line_tail)
    print(tail)
def print_table_human(report):
    """Print the report as a tab-separated table for terminal reading."""
    print_table(report, "", "", "", "", "\t", " ")
def print_html_table(report):
    """Print the report as a minimal standalone HTML page with one table."""
    print_table(report, "<html><body><table border=1>", "<tr><td>", "</td></tr>", "</table></body></html>", "</td><td>", "<br>")
def get_date(pkg, repo_root, fmt):
    """Return the date of the last git commit touching `pkg` inside `repo_root`.

    `fmt` is handed to git's --date option (e.g. 'relative', 'iso').
    """
    return exec_command_strict("git -C {} --no-pager log -1 --pretty=%ad --date={} {}".format(repo_root, fmt, pkg)).strip()
def main():
    """Compare portage-stable against upstream gentoo and emit a report."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--update-upstream", help="run git-pull in the gentoo mirror repo first", action="store_true")
    parser.add_argument("--upstream-git", help="git uri to clone for upstream", default="https://github.com/gentoo/gentoo.git")
    parser.add_argument("--upstream-path", help="path to gentoo tree", default="/mnt/host/source/src/gentoo-portage")
    parser.add_argument("--portage-stable-path", help="path to portage-stable", default="/mnt/host/source/src/third_party/portage-stable")
    parser.add_argument("--date-fmt", help="format for git-date to use", default="relative")
    parser.add_argument("--output", help="output format, json, table, and html are accepted", default="json")
    args = parser.parse_args()
    # Clone the upstream gentoo tree on first use; otherwise optionally update.
    if not os.path.exists(args.upstream_path):
        os.makedirs(args.upstream_path)
        subprocess.check_call(["git", "clone", args.upstream_git, args.upstream_path])
    elif args.update_upstream:
        # elif to not pull if we just cloned
        subprocess.check_call(["git", "-C", args.upstream_path, "pull"])
    # Gather the package map of every build source (slow: one emerge each).
    pkg_lists = {}
    sources = ["sdk", "bootstrap", "amd64-usr", "arm64-usr", "image"]
    for i in sources:
        pkg_lists[i] = get_board_packages(i)
    gentoo_packages = get_portage_tree_packages(args.upstream_path)
    packages = get_portage_tree_packages(args.portage_stable_path)
    # time to make the report
    report = []
    # iteritems() is Python-2 only; the script is pinned to python2 (shebang)
    # because it imports portage.
    for pkg, vers in packages.iteritems():
        upstream = gentoo_packages.get(pkg, [])
        entry = {
            "name": pkg,
            # Versions present both here and upstream.
            "common": list(set(vers).intersection(upstream)),
            # Versions only we carry.
            "ours": list(set(vers).difference(upstream)),
            # Versions only upstream carries.
            "upstream": list(set(upstream).difference(vers)),
            "modified": get_date(pkg, args.portage_stable_path, args.date_fmt)
        }
        # Classify the package's state relative to upstream.
        if not entry["upstream"]:
            entry["tag"] = "updated"
        elif entry["common"]:
            entry["tag"] = "has_update"
        elif pkg in gentoo_packages:
            entry["tag"] = "no_ebuild_upstream"
        else:
            entry["tag"] = "deleted_upstream"
        # Record which build sources consume this package, and at what versions.
        for src in sources:
            if pkg in pkg_lists[src]:
                entry[src] = pkg_lists[src][pkg]
        report.append(entry)
    # Emit in the requested format.
    if args.output == "json":
        print(json.dumps(report))
    elif args.output == "table":
        print_table_human(report)
    elif args.output == "html":
        print_html_table(report)
    else:
        print("Unknown output type. Dying.")
        sys.exit(2)
if __name__ == "__main__":
    main()

View File

@ -417,16 +417,6 @@ warn_if_nfs() {
fi
}
# Enter a chroot and restart the current script if needed
restart_in_chroot_if_needed() {
# NB: Pass in ARGV: restart_in_chroot_if_needed "$@"
if [[ ${INSIDE_CHROOT} -ne 1 ]]; then
# Get inside_chroot path for script.
local chroot_path="$(reinterpret_path_for_chroot "$0")"
exec ${GCLIENT_ROOT}/chromite/bin/cros_sdk -- "${chroot_path}" "$@"
fi
}
# Fail unless we're inside the chroot. This guards against messing up your
# workstation.
assert_inside_chroot() {
@ -873,72 +863,6 @@ assert_interactive() {
fi
}
# Selection menu with a default option: this is similar to bash's select
# built-in, only that in case of an empty selection it'll return the default
# choice. Like select, it uses PS3 as the prompt.
#
# $1: name of variable to be assigned the selected value; it better not be of
# the form choose_foo to avoid conflict with local variables.
# $2: default value to return in case of an empty user entry.
# $3: value to return in case of an invalid choice.
# $...: options for selection.
#
# Usage example:
#
# PS3="Select one [1]: "
# choose reply "foo" "ERROR" "foo" "bar" "foobar"
#
# This will present the following menu and prompt:
#
# 1) foo
# 2) bar
# 3) foobar
# Select one [1]:
#
# The return value will be stored in a variable named 'reply'. If the input is
# 1, 2 or 3, the return value will be "foo", "bar" or "foobar", respectively.
# If it is empty (i.e. the user clicked Enter) it will be "foo". Anything else
# will return "ERROR".
choose() {
typeset -i choose_i=1
# Retrieve output variable name and default return value.
local choose_reply=$1
local choose_default=$2
local choose_invalid=$3
shift 3
# Select a return value
unset REPLY
if [[ $# -gt 0 ]]; then
assert_interactive
# Actual options provided, present a menu and prompt for a choice.
local choose_opt
for choose_opt in "$@"; do
echo "${choose_i}) ${choose_opt}" >&2
: $(( ++choose_i ))
done
read -p "$PS3"
fi
# Filter out strings containing non-digits.
if [[ ${REPLY} != "${REPLY%%[!0-9]*}" ]]; then
REPLY=0
fi
choose_i="${REPLY}"
if [[ ${choose_i} -ge 1 && ${choose_i} -le $# ]]; then
# Valid choice, return the corresponding value.
eval ${choose_reply}=\""${!choose_i}"\"
elif [[ -z ${REPLY} ]]; then
# Empty choice, return default value.
eval ${choose_reply}=\""${choose_default}"\"
else
# Invalid choice, return corresponding value.
eval ${choose_reply}=\""${choose_invalid}\""
fi
}
# Display --help if requested. This is used to hide options from help
# that are not intended for developer use.
#

86
core_dev_sign_update Executable file
View File

@ -0,0 +1,86 @@
#!/usr/bin/env bash
# Sign downloaded release artifacts (update payload + torcx manifest) with a
# developer GPG key. Meant to run inside the SDK chroot; shflags helpers
# (DEFINE_*, FLAGS) come from common.sh.
SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1
assert_inside_chroot
DEFINE_string data_dir "" "Directory containing downloaded release artifacts"
DEFINE_string board "" "Board to sign artifacts for"
DEFINE_string version "" "Version to sign artifacts for"
DEFINE_integer n_signatures "2" "Number of signatures this release will be signed with"
DEFINE_string output_dir "" "Output directory"
DEFINE_string gpg_key "" "Value for '--default-key' argument to gpg --sign"
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
set -e
# Artifacts are laid out as <dir>/<board>/<version>/.
data_dir="${FLAGS_data_dir}/${FLAGS_board}/${FLAGS_version}"
output_dir="${FLAGS_output_dir}/${FLAGS_board}/${FLAGS_version}"
mkdir -p "$output_dir"
cleanup() {
  # core_sign_update expects to unpack this too, so we'll clean it up.
  rm -f "${data_dir}/coreos_production_update.bin"
  rm -f "${data_dir}/update"
  rm -f "${data_dir}/update.hash"
}
trap cleanup INT TERM EXIT
# delta_generator expects a list of colon-separated sizes for signature hash algorithms in order to
# build the update payload protobuf properly. Since we already assume sha256 elsewhere in
# core_sign_update, do it here as well.
signature_sizes=""
for i in $(seq 1 $FLAGS_n_signatures); do
  signature_sizes="${signature_sizes}:256"
done
# Drop the leading ':' added by the loop above.
signature_sizes="${signature_sizes:1:${#signature_sizes}}"
echo "=== Verifying update payload... ==="
# Verify the upstream signatures before producing anything of our own.
gpg2 --verify "${data_dir}/coreos_production_update.bin.bz2.sig"
gpg2 --verify "${data_dir}/coreos_production_image.vmlinuz.sig"
gpg2 --verify "${data_dir}/coreos_production_update.zip.sig"
echo "=== Decompressing update payload... ==="
bunzip2 --keep "${data_dir}/coreos_production_update.bin.bz2"
echo "=== Creating signable update payload... ==="
# Build the unsigned payload protobuf, then extract the hash to be signed.
delta_generator \
  -new_image "${data_dir}/coreos_production_update.bin" \
  -new_kernel "${data_dir}/coreos_production_image.vmlinuz" \
  -out_file "${data_dir}/update"
delta_generator \
  --signature_size ${signature_sizes} \
  --in_file "${data_dir}/update" \
  --out_hash_file "${data_dir}/update.hash"
echo "=== Signing update payload... ==="
# Sign the payload hash; use the default key unless --gpg_key was given.
if [[ -z "${FLAGS_gpg_key}" ]]; then
  gpg2 \
    --output "${output_dir}/update.sig.$(whoami)" \
    --armor --detach-sign "${data_dir}/update.hash"
else
  gpg2 \
    --local-user "$FLAGS_gpg_key" \
    --output "${output_dir}/update.sig.$(whoami)" \
    --armor --detach-sign "${data_dir}/update.hash"
fi
echo "=== Update payload signed successfully. ==="
echo "=== Verifying torcx manifest... ==="
gpg2 --verify "${data_dir}/torcx_manifest.json.sig"
echo "=== Signing torcx manifest... ==="
# Same key selection logic as for the update payload above.
if [[ -z "${FLAGS_gpg_key}" ]]; then
  gpg2 \
    --output "${output_dir}/torcx_manifest.json.sig.$(whoami)" \
    --detach-sign --armor "${data_dir}/torcx_manifest.json"
else
  gpg2 \
    --local-user "$FLAGS_gpg_key" \
    --output "${output_dir}/torcx_manifest.json.sig.$(whoami)" \
    --detach-sign --armor "${data_dir}/torcx_manifest.json"
fi
echo "=== Torcx manifest signed successfully. ==="

View File

@ -18,9 +18,13 @@ export GCLIENT_ROOT=$(readlink -f "${SCRIPT_ROOT}/../../")
DEFINE_string image "" "The filesystem image of /usr"
DEFINE_string kernel "" "The kernel image"
DEFINE_string output "" "Output file"
DEFINE_string private_keys "" "Path or pkcs11 URI to private keys."
DEFINE_string private_keys "" "Path, pkcs11 URI, or fero:<keyname> for private keys."
DEFINE_string public_keys "" "Path to public keys in .pem format."
DEFINE_string keys_separator ":" "Separator for the above keys"
DEFINE_string user_signatures "" \
"Colon-separated paths to user signatures to provide to signing server"
DEFINE_string signing_server_address "" "Hostname of the signing server"
DEFINE_integer signing_server_port "50051" "Port of the signing server"
# Parse command line
FLAGS "$@" || exit 1
@ -41,6 +45,7 @@ cleanup() {
trap cleanup INT TERM EXIT
echo "=== Creating signable update payload... ==="
delta_generator \
-new_image "$FLAGS_image" \
-new_kernel "$FLAGS_kernel" \
@ -63,6 +68,16 @@ for key in "${private_keys[@]}"; do
done
signature_sizes="${signature_sizes:1:${#signature_sizes}}"
# We don't need to maintain backwards compatibility with old `sign.sh` scripts here, so we only
# allow colon-separated values for user signature files.
IFS=":" read -a user_signatures <<< "$FLAGS_user_signatures"
user_signatures_arg=""
for user_signature in "${user_signatures[@]}"; do
user_signatures_arg="${user_signatures_arg} --signature ${user_signature}"
done
user_signatures_arg="${user_signatures_arg:1:${#user_signatures_arg}}"
delta_generator \
--signature_size ${signature_sizes} \
--in_file update \
@ -116,12 +131,21 @@ cat padding-pkcs11 update.hash > update.pkcs11-padhash
echo "AAH/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////ADAxMA0GCWCGSAFlAwQCAQUABCA=" | base64 -d > padding
cat padding update.hash > update.padhash
echo "=== Signing update payload... ==="
i=1
signature_sizes=""
for key in "${private_keys[@]}"; do
if [[ "${key}" == pkcs11* ]]; then
openssl rsautl -engine pkcs11 -pkcs -sign -inkey ${key} -keyform engine -in update.pkcs11-padhash -out update.sig.${i}
elif [[ "${key}" == fero* ]]; then
fero-client \
--address $FLAGS_signing_server_address \
--port $FLAGS_signing_server_port \
sign --pkcs1 \
--file update.hash \
--output update.sig.${i} \
--secret-key ${key:5:${#key}} \
${user_signatures_arg}
else
openssl rsautl -raw -sign -inkey ${key} -in update.padhash -out update.sig.${i}
fi
@ -148,6 +172,7 @@ for key in "${public_keys[@]}"; do
done
mv update.signed ${FLAGS_output}
echo "=== Update payload signed successfully. ==="
trap - INT TERM EXIT
cleanup noexit

35
find_overlay_dups Executable file
View File

@ -0,0 +1,35 @@
#!/bin/bash
# Prints packages which are in both portage-stable and coreos-overlay.
# Exits 0 when there is no overlap, 1 when duplicates were found.
SCRIPT_ROOT=$(dirname "$(readlink -f "$0")")
. "${SCRIPT_ROOT}/common.sh" || exit 1

DEFINE_string overlay_path "${SRC_ROOT}/third_party/coreos-overlay" \
  "Directory containing the overlay"
DEFINE_string portage_stable_path "${SRC_ROOT}/third_party/portage-stable" \
  "Path to portage-stable"

# Parse flags
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"

# Print the unique category/package directories of every ebuild in a tree,
# one per line.
function get_tree_packages() {
  find "$1" -maxdepth 3 -type f -name "*.ebuild" -printf "%P\n" |
    xargs dirname | sort -u
}

portage_stable_packages=$(get_tree_packages "${FLAGS_portage_stable_path}")
overlay_packages=$(get_tree_packages "${FLAGS_overlay_path}")

# BUG FIX: the two lists must be joined with a newline. Joining with a space
# fused the last package of one list and the first package of the other onto
# a single line, so duplicates involving those two packages went undetected.
all_packages="${portage_stable_packages}"$'\n'"${overlay_packages}"

# Each list is already unique, so any line appearing more than once in the
# combined list is present in both trees. `uniq -d` prints one line per
# duplicated group (equivalent to the original `uniq -D | uniq`).
dups=$(sort <<< "$all_packages" | uniq -d)

if [[ -z "$dups" ]]; then
  info "No duplicate packages, all good!"
  exit 0
fi

warn "Found duplicate package(s):"
warn "$dups"
exit 1

View File

@ -8,7 +8,7 @@ SCRIPT_ROOT=$(dirname "$(readlink -f "$0")")
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user

View File

@ -8,7 +8,7 @@ SCRIPT_ROOT=$(dirname "$(readlink -f "$0")")
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user

View File

@ -13,7 +13,7 @@ SCRIPT_ROOT=$(dirname "$(readlink -f "$0")")
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user

View File

@ -16,7 +16,6 @@ hyperv
niftycloud
openstack
openstack_mini
oracle_oci_qcow
packet
parallels
rackspace

View File

@ -10,7 +10,7 @@ enter() {
sudo ln -f verify.asc chroot/etc/portage/gangue.asc &&
verify_key=--verify-key=/etc/portage/gangue.asc
sudo ln -f "${GS_DEVEL_CREDS}" chroot/etc/portage/gangue.json
bin/cork enter --experimental -- env \
bin/cork enter --bind-gpg-agent=false -- env \
FLATCAR_DEV_BUILDS="${DOWNLOAD_ROOT}" \
{FETCH,RESUME}COMMAND_GS="/usr/bin/gangue get \
--json-key=/etc/portage/gangue.json $verify_key \

View File

@ -12,7 +12,7 @@ enter() {
verify_key=--verify-key=/etc/portage/gangue.asc
sudo ln -f "${GOOGLE_APPLICATION_CREDENTIALS}" \
chroot/etc/portage/gangue.json
bin/cork enter --experimental -- env \
bin/cork enter --bind-gpg-agent=false -- env \
CCACHE_DIR=/mnt/host/source/.cache/ccache \
CCACHE_MAXSIZE=5G \
FLATCAR_DEV_BUILDS="${DOWNLOAD_ROOT}" \

View File

@ -1,7 +1,7 @@
#!/bin/bash -ex
enter() {
bin/cork enter --experimental -- "$@"
bin/cork enter --bind-gpg-agent=false -- "$@"
}
source .repo/manifests/version.txt

View File

@ -1,7 +1,7 @@
#!/bin/bash -ex
enter() {
bin/cork enter --experimental -- "$@"
bin/cork enter --bind-gpg-agent=false -- "$@"
}
source .repo/manifests/version.txt

View File

@ -10,7 +10,7 @@ enter() {
sudo ln -f verify.asc chroot/etc/portage/gangue.asc &&
verify_key=--verify-key=/etc/portage/gangue.asc
sudo ln -f "${GS_DEVEL_CREDS}" chroot/etc/portage/gangue.json
bin/cork enter --experimental -- env \
bin/cork enter --bind-gpg-agent=false -- env \
FLATCAR_DEV_BUILDS="${GS_DEVEL_ROOT}" \
{FETCH,RESUME}COMMAND_GS="/usr/bin/gangue get \
--json-key=/etc/portage/gangue.json $verify_key \

View File

@ -8,7 +8,7 @@ SCRIPT_ROOT=$(dirname "$(readlink -f "$0")")
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user

View File

@ -1,40 +0,0 @@
# Get the tenancy ID, which is also the ID of the root compartment.
# Unconditionally uses the first profile in the conffile.
get_tenancy_id() {
local line=$(grep -m 1 "^tenancy=" "$HOME/.oraclebmc/config")
echo "${line#*=}"
}
# Pick an availability domain by listing them and choosing the first one.
get_availability_domain() {
local compartment="$1"
bmcs iam availability-domain list \
-c "${compartment}" | jq -r ".data[0].name"
}
# Pick a subnet ID by picking the first VCN and then the first subnet in the
# specified availability domain.
get_subnet_id() {
local compartment="$1"
local availability_domain="$2"
local vcn=$(bmcs network vcn list \
-c "${compartment}" | jq -r ".data[0].id")
bmcs network subnet list \
-c "${compartment}" \
--vcn-id "${vcn}" | jq -r ".data[] | select(.[\"availability-domain\"] == \"${availability_domain}\").id"
}
# Get the object storage namespace ID.
get_namespace_id() {
bmcs os ns get | jq -r ".data"
}
# Get the ID of some arbitrary image. Useful for iPXE boot, which requires
# an image ID but doesn't seem to use it.
get_an_image_id() {
local compartment="$1"
bmcs compute image list \
-c "${compartment}" \
--operating-system "CentOS" \
--operating-system-version 7 | jq -r '.data[0].id'
}

View File

@ -1,35 +0,0 @@
#!/bin/bash
set -e -o pipefail
# Parse args
usage="Usage: $0 [args] -i instance-id
Options:
-i INSTANCE-ID Instance ID
-h This ;-)
"
while getopts "i:h" OPTION
do
case "${OPTION}" in
i) instance_id="${OPTARG}" ;;
h) echo "${usage}"; exit 2 ;;
*) exit 2 ;;
esac
done
if [[ -z "${instance_id}" ]]; then
echo "Instance ID is required." >&2
exit 2
fi
id=$(bmcs compute console-history capture --instance-id "${instance_id}" | jq -r .data.id)
trap 'bmcs compute console-history delete --instance-console-history-id "${id}" --force' EXIT
while true; do
state=$(bmcs compute console-history get --instance-console-history-id "${id}" | jq -r '.data["lifecycle-state"]')
if [[ "${state}" = SUCCEEDED ]]; then
break
fi
sleep 1
done
# Default length is 10 KB; maximum is 1 MB. Request at least that much.
bmcs compute console-history get-content --instance-console-history-id "${id}" --file - --length 2000000

View File

@ -1,118 +0,0 @@
#!/bin/bash
set -e -o pipefail
. common.sh
# Initialize vars
compartment=$(get_tenancy_id)
availability_domain=$(get_availability_domain "${compartment}")
image_id=$(get_an_image_id "${compartment}")
subnet_id=$(get_subnet_id "${compartment}" "${availability_domain}")
name=
shape="VM.Standard1.1"
ipxe=0
ignition=
keyfile="$HOME/.ssh/id_rsa.pub"
baseurl="http://alpha.release.flatcar-linux.net/amd64-usr/current"
bucket_base="users.developer.core-os.net/$USER/bmcs"
kargs=
if [[ ! -f "$keyfile" ]]; then
keyfile=""
fi
# Parse args
usage="Usage: $0 [args] -n name
Options:
-c COMPARTMENT Compartment ID (default: ${compartment})
-a AVAIL-DOMAIN Availability domain ID (default: ${availability_domain})
-n NAME Instance name
-s SHAPE Instance shape (default: ${shape})
-S SUBNET-ID Subnet ID to use (default: ${subnet_id})
-i IGNITION Path to Ignition config
-A SSH-KEYS Path to SSH keys (default: ${keyfile})
-p Boot with iPXE
-I IMAGE-ID Image ID to use; only relevant for non-iPXE (default: ${image_id})
-b BASEURL URL to the image mirror; only relevant for iPXE (default: ${baseurl})
-B BUCKET-BASE GS bucket and relative path for iPXE script (default: ${bucket_base})
-k ARGS Additional kernel command line arguments for iPXE script
-h This ;-)
"
while getopts "c:a:n:s:S:i:A:pI:b:B:k:h" OPTION
do
case "${OPTION}" in
c) compartment="${OPTARG}" ;;
a) availability_domain="${OPTARG}" ;;
n) name="${OPTARG}" ;;
s) shape="${OPTARG}" ;;
S) subnet_id="${OPTARG}" ;;
i) ignition="${OPTARG}" ;;
A) keyfile="${OPTARG}" ;;
p) ipxe=1 ;;
I) image_id="${OPTARG}" ;;
b) baseurl="${OPTARG}" ;;
B) bucket_base="${OPTARG}" ;;
k) kargs="${OPTARG}" ;;
h) echo "${usage}"; exit 2 ;;
*) exit 2 ;;
esac
done
if [[ -z "${name}" ]]; then
echo "Instance name is required." >&2
exit 2
fi
launch_args=()
if [[ "${ipxe}" = 1 ]]; then
# Make scratch dir
tmpdir=$(mktemp -d bmcs-XXXXXX)
trap "rm -rf '${tmpdir}'" EXIT
if [[ -n "${ignition}" ]]; then
# Generate OEM image
mkdir -p "${tmpdir}/usr/share/oem"
cp "${ignition}" "${tmpdir}/usr/share/oem/ign.ign"
pushd "${tmpdir}" >/dev/null
find usr | cpio -o -H newc --quiet | gzip -c > oem.cpio.gz
popd >/dev/null
# Upload it. Don't delete it afterward, since the instance could
# reboot at any point and need the OEM image again.
oempath="${bucket_base}/$(mktemp -u XXXXXXX)"
gsutil -q cp "${tmpdir}/oem.cpio.gz" "gs://${oempath}"
ignition_initrd_args="initrd=ignition flatcar.config.url=oem:///ign.ign"
ignition_initrd_cmd="initrd --name ignition http://${oempath}"
fi
# Create iPXE script
cat >"${tmpdir}/ipxe" <<EOF
#!ipxe
kernel ${baseurl}/flatcar_production_pxe.vmlinuz initrd=flatcar_production_pxe_image.cpio.gz flatcar.first_boot=1 ${ignition_initrd_args} console=ttyS0,9600 ${kargs}
initrd ${baseurl}/flatcar_production_pxe_image.cpio.gz
${ignition_initrd_cmd}
boot
EOF
launch_args+=("--ipxe-script-file" "${tmpdir}/ipxe")
fi
# Launch image
if [[ -n "${ignition}" ]]; then
launch_args+=("--user-data-file" "${ignition}")
fi
if [[ -n "${keyfile}" ]]; then
launch_args+=("--ssh-authorized-keys-file" "${keyfile}")
fi
bmcs compute instance launch \
--availability-domain "${availability_domain}" \
--compartment-id "${compartment}" \
--image-id "${image_id}" \
--shape "${shape}" \
--display-name "${name}" \
--hostname-label "${name}" \
--subnet-id "${subnet_id}" \
${launch_args[@]}

View File

@ -1,75 +0,0 @@
#!/bin/bash
set -e -o pipefail
. common.sh
compartment=$(get_tenancy_id)
display_name=
path=
namespace=$(get_namespace_id)
bucket="image-upload"
# Parse args
usage="Usage: $0 [args] -n name -f file
Options:
-c COMPARTMENT Compartment ID (default: ${compartment})
-n DISPLAY-NAME Image display name
-f PATH Image file
-N NAMESPACE Object storage namespace (default: ${namespace})
-B BUCKET Bucket name (default: ${bucket})
-h This ;-)
"
while getopts "c:n:f:N:B:h" OPTION
do
case "${OPTION}" in
c) compartment="${OPTARG}" ;;
n) display_name="${OPTARG}" ;;
f) path="${OPTARG}" ;;
N) namespace="${OPTARG}" ;;
B) bucket="${OPTARG}" ;;
h) echo "${usage}"; exit 2 ;;
*) exit 2 ;;
esac
done
if [[ -z "${display_name}" ]]; then
echo "Display name is required." >&2
exit 2
fi
if [[ -z "${path}" ]]; then
echo "Image file is required." >&2
exit 2
fi
object=$(mktemp -u XXXXXXXXXXXXXXXX)
# Upload object
bmcs os object put \
--namespace "${namespace}" \
--bucket-name "${bucket}" \
--file "${path}" \
--name "${object}"
trap 'bmcs os object delete \
--namespace "${namespace}" \
--bucket-name "${bucket}" \
--name "${object}" \
--force' EXIT
# Initiate import
image_id=$(bmcs compute image import from-object \
--compartment-id "${compartment}" \
--display-name "${display_name}" \
--namespace "${namespace}" \
--bucket-name "${bucket}" \
--name "${object}" | jq -r .data.id)
# Wait for import
echo "Waiting for import..."
state=IMPORTING
while [[ "$state" = IMPORTING ]]; do
sleep 10
state=$(bmcs compute image get --image-id "${image_id}" | jq -r '.data["lifecycle-state"]')
done
echo "${state} ${image_id}"

View File

@ -1,22 +0,0 @@
#!/usr/bin/env bash
# Verify the GPG signatures on a Flatcar update payload and re-sign it with
# core_sign_update (devel key locally, production key via PKCS#11 token).
# Usage: $0 DATA_DIR
set -exo pipefail

# DATA_DIR is mandatory; without this check `readlink -f ""` would silently
# yield the current directory.
if [[ $# -lt 1 ]]; then
    echo "Usage: $0 DATA_DIR" >&2
    exit 1
fi

DATA_DIR="$(readlink -f "$1")"
KEYS_DIR="$(readlink -f "$(dirname "$0")")"

# Abort early (via set -e) if any detached signature fails to verify.
gpg2 --verify "${DATA_DIR}/flatcar_production_update.bin.bz2.sig"
gpg2 --verify "${DATA_DIR}/flatcar_production_image.vmlinuz.sig"
gpg2 --verify "${DATA_DIR}/flatcar_production_update.zip.sig"

bunzip2 --keep "${DATA_DIR}/flatcar_production_update.bin.bz2"
unzip "${DATA_DIR}/flatcar_production_update.zip" -d "${DATA_DIR}"

# core_sign_update is shipped inside the zip that was just extracted.
export PATH="${DATA_DIR}:${PATH}"
cd "${DATA_DIR}"

./core_sign_update \
    --image "${DATA_DIR}/flatcar_production_update.bin" \
    --kernel "${DATA_DIR}/flatcar_production_image.vmlinuz" \
    --output "${DATA_DIR}/flatcar_production_update.gz" \
    --private_keys "${KEYS_DIR}/devel.key.pem+pkcs11:object=CoreOS_Update_Signing_Key;type=private" \
    --public_keys "${KEYS_DIR}/devel.pub.pem+${KEYS_DIR}/prod-2.pub.pem" \
    --keys_separator "+"

View File

@ -8,7 +8,7 @@ SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user

View File

@ -1,437 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to enter the chroot environment
SCRIPT_ROOT=$(readlink -f $(dirname "$0")/..)
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Script must be run outside the chroot and as root.
assert_outside_chroot
assert_root_user
assert_kernel_version
# Define command line flags
# See http://code.google.com/p/shflags/wiki/Documentation10x
DEFINE_string chroot "$DEFAULT_CHROOT_DIR" \
"The destination dir for the chroot environment." "d"
DEFINE_string trunk "$GCLIENT_ROOT" \
"The source trunk to bind mount within the chroot." "s"
DEFINE_string build_number "" \
"The build-bot build number (when called by buildbot only)." "b"
DEFINE_string chrome_root "" \
"The root of your chrome browser source. Should contain a 'src' subdir."
DEFINE_string chrome_root_mount "/home/${SUDO_USER}/chrome_root" \
"The mount point of the chrome broswer source in the chroot."
DEFINE_string cache_dir "" "unused"
DEFINE_boolean official_build $FLAGS_FALSE \
"Set COREOS_OFFICIAL=1 for release builds."
DEFINE_boolean ssh_agent $FLAGS_TRUE "Import ssh agent."
DEFINE_boolean early_make_chroot $FLAGS_FALSE \
"Internal flag. If set, the command is run as root without sudo."
DEFINE_boolean verbose $FLAGS_FALSE "Print out actions taken"
# More useful help
FLAGS_HELP="USAGE: $0 [flags] [VAR=value] [-- command [arg1] [arg2] ...]
One or more VAR=value pairs can be specified to export variables into
the chroot environment. For example:
$0 FOO=bar BAZ=bel
If [-- command] is present, runs the command inside the chroot,
after changing directory to /${SUDO_USER}/trunk/src/scripts. Note that neither
the command nor args should include single quotes. For example:
$0 -- ./build_platform_packages.sh
Otherwise, provides an interactive shell.
"
CROS_LOG_PREFIX=cros_sdk:enter_chroot
SUDO_HOME=$(eval echo ~${SUDO_USER})
# Version of info from common.sh that only echos if --verbose is set.
# Version of info from common.sh that only echos if --verbose is set.
debug() {
  if [ "${FLAGS_verbose}" -ne "${FLAGS_TRUE}" ]; then
    return 0
  fi
  info "$*"
}
# Parse command line flags
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
if [ $FLAGS_official_build -eq $FLAGS_TRUE ]; then
COREOS_OFFICIAL=1
fi
# Only now can we die on error. shflags functions leak non-zero error codes,
# so will die prematurely if 'switch_to_strict_mode' is specified before now.
# TODO: replace shflags with something less error-prone, or contribute a fix.
switch_to_strict_mode
# These config files are to be copied into chroot if they exist in home dir.
FILES_TO_COPY_TO_CHROOT=(
.gdata_cred.txt # User/password for Google Docs on chromium.org
.gdata_token # Auth token for Google Docs on chromium.org
.disable_build_stats_upload # Presence of file disables command stats upload
.netrc # May contain required source fetching credentials
.boto # Auth information for gsutil
.boto-key.p12 # Service account key for gsutil
.ssh/config # User may need this for fetching git over ssh
.ssh/known_hosts # Reuse existing known hosts
)
INNER_CHROME_ROOT=$FLAGS_chrome_root_mount # inside chroot
CHROME_ROOT_CONFIG="/var/cache/chrome_root" # inside chroot
FUSE_DEVICE="/dev/fuse"
# We can't use /var/lock because that might be a symlink to /run/lock outside
# of the chroot. Or /run on the host system might not exist.
LOCKFILE="${FLAGS_chroot}/.enter_chroot.lock"
MOUNTED_PATH=$(readlink -f "$FLAGS_chroot")
# Usage: setup_mount <source> <mount_args> <target>
# Globals: MOUNTED_PATH (read), MOUNT_CACHE (read)
setup_mount() {
  # If necessary, mount $source in the host FS at $target inside the
  # chroot directory with $mount_args. We don't write to /etc/mtab because
  # these mounts are all contained within an unshare and are therefore
  # inaccessible to other namespaces (e.g. the host desktop system).
  local source="$1"
  # -n: do not record the mount in /etc/mtab (see comment above).
  local mount_args="-n $2"
  local target="$3"
  local mounted_path="${MOUNTED_PATH}$target"
  # MOUNT_CACHE is a space-separated list of already-mounted paths (built
  # from /proc/mounts in setup_env); padding both the cache and the path
  # with spaces makes the substring match exact.
  case " ${MOUNT_CACHE} " in
    *" ${mounted_path} "*)
      # Already mounted!
      ;;
    *)
      mkdir -p "${mounted_path}"
      # The args are left unquoted on purpose, so that multi-word values
      # like "-t proc" word-split into separate mount arguments.
      if [[ -n ${source} ]]; then
        mount ${mount_args} "${source}" "${mounted_path}"
      else
        mount ${mount_args} "${mounted_path}"
      fi
      ;;
  esac
}
# Usage: copy_into_chroot_if_exists <host_path> <chroot_relative_path>
# Copy $1 (outside the chroot) to $2 (inside the chroot), preserving mode,
# ownership and timestamps (cp -p). A missing source file is not an error.
copy_into_chroot_if_exists() {
  # $1 is file path outside of chroot to copy to path $2 inside chroot.
  if [ -e "$1" ]; then
    cp -p "$1" "${FLAGS_chroot}/$2"
  fi
  # Always succeed: the previous `[ -e "$1" ] && cp ...` form returned
  # non-zero for a missing source, which can abort callers under `set -e`.
  return 0
}
# Usage: promote_api_keys
# This takes care of getting the developer API keys into the chroot where
# chrome can build with them. It needs to take it from the places a dev
# is likely to put them, and recognize that older chroots may or may not
# have been used since the concept of keys got added, as well as before
# and after the developer deciding to grab his own keys.
# Copy the developer's Google API keys into the chroot home directory:
# verbatim from ~/.googleapikeys if present, otherwise extracted from
# ~/.gyp/include.gypi. Existing keys in the chroot are left untouched.
# Globals: FLAGS_chroot, SUDO_USER, SUDO_HOME (all read).
promote_api_keys() {
  local destination="${FLAGS_chroot}/home/${SUDO_USER}/.googleapikeys"
  # Don't disturb existing keys. They could be set differently
  if [[ -s "${destination}" ]]; then
    return 0
  fi
  if [[ -r "${SUDO_HOME}/.googleapikeys" ]]; then
    cp -p "${SUDO_HOME}/.googleapikeys" "${destination}"
    if [[ -s "${destination}" ]] ; then
      info "Copied Google API keys into chroot."
    fi
  elif [[ -r "${SUDO_HOME}/.gyp/include.gypi" ]]; then
    # Pull google_api_key / google_default_client_{id,secret} assignments out
    # of the gyp include file and rewrite each as a "name: 'value'," line.
    local NAME="('google_(api_key|default_client_(id|secret))')"
    local WS="[[:space:]]*"
    local CONTENTS="('[^\\\\']*')"
    sed -nr -e "/^${WS}${NAME}${WS}[:=]${WS}${CONTENTS}.*/{s//\1: \4,/;p;}" \
      "${SUDO_HOME}/.gyp/include.gypi" | user_clobber "${destination}"
    if [[ -s "${destination}" ]]; then
      info "Put discovered Google API keys into chroot."
    fi
  fi
}
# Run locale-gen inside the chroot for every locale the host environment
# requests (LANG plus all LC_* variables), always including en_US and
# en_US.UTF-8. A no-op when the chroot has no locale-gen binary.
# Globals: FLAGS_chroot (read), LANG and LC_* host vars (read).
generate_locales() {
  # Going forward the SDK will no longer include locale-gen and instead
  # glibc will just install the full locale archive, skipping this goo.
  [[ -x "${FLAGS_chroot}/usr/sbin/locale-gen" ]] || return 0
  # Make sure user's requested locales are available
  # http://crosbug.com/19139
  # And make sure en_US{,.UTF-8} are always available as
  # that's what buildbot forces internally
  local l locales gen_locales=()
  # De-duplicate and drop the "C" locale; the unquoted expansions are
  # intentional so unset variables simply contribute nothing.
  locales=$(printf '%s\n' en_US en_US.UTF-8 ${LANG} \
    $LC_{ADDRESS,ALL,COLLATE,CTYPE,IDENTIFICATION,MEASUREMENT,MESSAGES} \
    $LC_{MONETARY,NAME,NUMERIC,PAPER,TELEPHONE,TIME} | \
    sort -u | sed '/^C$/d')
  for l in ${locales}; do
    # Derive the charset from the "lang.charset" suffix, defaulting to
    # ISO-8859-1, and normalize utf8 spellings to UTF-8.
    if [[ ${l} == *.* ]]; then
      enc=${l#*.}
    else
      enc="ISO-8859-1"
    fi
    case $(echo ${enc//-} | tr '[:upper:]' '[:lower:]') in
      utf8) enc="UTF-8";;
    esac
    gen_locales+=("${l} ${enc}")
  done
  if [[ ${#gen_locales[@]} -gt 0 ]] ; then
    # Force LC_ALL=C to workaround slow string parsing in bash
    # with long multibyte strings. Newer setups have this fixed,
    # but locale-gen doesn't need to be run in any locale in the
    # first place, so just go with C to keep it fast.
    chroot "${FLAGS_chroot}" /usr/bin/env \
      PATH="/usr/sbin:/usr/bin:/sbin:/bin" LC_ALL=C \
      locale-gen -q -u -G "$(printf '%s\n' "${gen_locales[@]}")"
  fi
}
# Prepare the chroot for entry: refresh host config files, build the full
# bind-mount tree (/proc, /sys, /dev, the source trunk, repo alternates,
# ssh/gpg agent sockets, .local_mounts, the chrome tree), copy the
# developer's dot-files in, and seed cros_workon defaults. The whole body
# runs in a subshell holding an exclusive flock (fd 200 on LOCKFILE) so
# concurrent enter_chroot invocations do not race.
setup_env() {
  (
    flock 200
    # Make the lockfile writable for backwards compatibility.
    chown ${SUDO_UID}:${SUDO_GID} "${LOCKFILE}"
    # Refresh system config files in the chroot.
    for copy_file in /etc/{hosts,localtime,resolv.conf}; do
      if [ -f "${copy_file}" ] ; then
        rm -f "${FLAGS_chroot}${copy_file}"
        install -C -m644 "${copy_file}" "${FLAGS_chroot}${copy_file}"
      fi
    done
    fix_mtab "${FLAGS_chroot}"
    debug "Mounting chroot environment."
    # Snapshot of everything currently mounted; setup_mount consults this to
    # avoid re-mounting. The nested $(echo ...) collapses the awk output
    # onto one space-separated line.
    MOUNT_CACHE=$(echo $(awk '{print $2}' /proc/mounts))
    # The cros_sdk script created a new filesystem namespace but the system
    # default (namely on systemd hosts) may be for everything to be shared.
    # Using 'slave' means we see global changes but cannot change global state.
    mount --make-rslave /
    # Make sure the new root directory itself is a mount point. Tools like
    # unshare assume that things like `mount --make-rprivate /` work.
    setup_mount "${MOUNTED_PATH}" "--rbind" /
    setup_mount none "-t proc" /proc
    setup_mount none "-t sysfs" /sys
    setup_mount /dev "--bind" /dev
    setup_mount /dev/pts "--bind" /dev/pts
    setup_mount tmpfs "-t tmpfs -o nosuid,nodev,mode=755" /run
    if [[ -d /run/shm && ! -L /run/shm ]]; then
      setup_mount /run/shm "--bind" /run/shm
    fi
    mkdir -p "${MOUNTED_PATH}/run/user/${SUDO_UID}"
    chown ${SUDO_UID}:${SUDO_GID} "${MOUNTED_PATH}/run/user/${SUDO_UID}"
    # Do this early as it's slow and only needs basic mounts (above).
    generate_locales &
    mkdir -p "${FLAGS_chroot}/${CHROOT_TRUNK_DIR}"
    setup_mount "${FLAGS_trunk}" "--rbind" "${CHROOT_TRUNK_DIR}"
    debug "Setting up referenced repositories if required."
    REFERENCE_DIR=$(git config --file \
      "${FLAGS_trunk}/.repo/manifests.git/config" \
      repo.reference)
    if [ -n "${REFERENCE_DIR}" ]; then
      ALTERNATES="${FLAGS_trunk}/.repo/alternates"
      # Ensure this directory exists ourselves, and has the correct ownership.
      user_mkdir "${ALTERNATES}"
      unset ALTERNATES
      # Newline-split the helper's output into the `required` array.
      IFS=$'\n';
      required=( $( sudo -u "${SUDO_USER}" -- \
        "${FLAGS_trunk}/chromite/lib/rewrite_git_alternates.py" \
        "${FLAGS_trunk}" "${REFERENCE_DIR}" "${CHROOT_TRUNK_DIR}" ) )
      unset IFS
      setup_mount "${FLAGS_trunk}/.repo/chroot/alternates" --bind \
        "${CHROOT_TRUNK_DIR}/.repo/alternates"
      # Note that as we're bringing up each referenced repo, we also
      # mount bind an empty directory over its alternates. This is
      # required to suppress git from tracing through it- we already
      # specify the required alternates for CHROOT_TRUNK_DIR, no point
      # in having git try recursing through each on their own.
      #
      # Finally note that if you're unfamiliar w/ chroot/vfs semantics,
      # the bind is visible only w/in the chroot.
      user_mkdir ${FLAGS_trunk}/.repo/chroot/empty
      position=1
      for x in "${required[@]}"; do
        base="${CHROOT_TRUNK_DIR}/.repo/chroot/external${position}"
        setup_mount "${x}" "--bind" "${base}"
        if [ -e "${x}/.repo/alternates" ]; then
          setup_mount "${FLAGS_trunk}/.repo/chroot/empty" "--bind" \
            "${base}/.repo/alternates"
        fi
        position=$(( ${position} + 1 ))
      done
      unset required position base
    fi
    unset REFERENCE_DIR
    user_mkdir "${FLAGS_chroot}/home/${SUDO_USER}/.ssh"
    if [ $FLAGS_ssh_agent -eq $FLAGS_TRUE ]; then
      # Clean up previous ssh agents.
      rmdir "${FLAGS_chroot}"/tmp/ssh-* 2>/dev/null
      if [ -n "${SSH_AUTH_SOCK}" -a -d "${SUDO_HOME}/.ssh" ]; then
        # Don't try to bind mount the ssh agent dir if it has gone stale.
        ASOCK=${SSH_AUTH_SOCK%/*}
        if [ -d "${ASOCK}" ]; then
          setup_mount "${ASOCK}" "--bind" "${ASOCK}"
        fi
      fi
    fi
    # Mount GnuPG's data directory for signing uploads
    : ${GNUPGHOME:="$SUDO_HOME/.gnupg"}
    if [[ -d "${GNUPGHOME}" ]]; then
      debug "Mounting GnuPG"
      setup_mount "${GNUPGHOME}" "--bind" "/home/${SUDO_USER}/.gnupg"
      # bind mount the gpg agent dir if available
      GPG_AGENT_DIR="${GPG_AGENT_INFO%/*}"
      if [[ -d "$GPG_AGENT_DIR" ]]; then
        setup_mount "$GPG_AGENT_DIR" "--bind" "$GPG_AGENT_DIR"
      fi
    fi
    unset GNUPGHOME
    # Mount additional directories as specified in .local_mounts file.
    local local_mounts="${FLAGS_trunk}/src/scripts/.local_mounts"
    if [[ -f ${local_mounts} ]]; then
      info "Mounting local folders (read-only for safety concern)"
      # format: mount_source
      # or mount_source mount_point
      # or # comments
      local mount_source mount_point
      while read mount_source mount_point; do
        if [[ -z ${mount_source} ]]; then
          continue
        fi
        # if only source is assigned, use source as mount point.
        : ${mount_point:=${mount_source}}
        debug " mounting ${mount_source} on ${mount_point}"
        setup_mount "${mount_source}" "--bind" "${mount_point}"
        # --bind can't initially be read-only so we have to do it via remount.
        setup_mount "" "-o remount,ro" "${mount_point}"
      done < <(sed -e 's:#.*::' "${local_mounts}")
    fi
    # Resolve the chrome source dir, falling back to the path cached in the
    # chroot by a previous run (CHROME_ROOT_AUTO marks the fallback case,
    # which is not fatal when the tree is missing).
    CHROME_ROOT="$(readlink -f "$FLAGS_chrome_root" || :)"
    if [ -z "$CHROME_ROOT" ]; then
      CHROME_ROOT="$(cat "${FLAGS_chroot}${CHROME_ROOT_CONFIG}" \
        2>/dev/null || :)"
      CHROME_ROOT_AUTO=1
    fi
    if [[ -n "$CHROME_ROOT" ]]; then
      if [[ ! -d "${CHROME_ROOT}/src" ]]; then
        error "Not mounting chrome source"
        rm -f "${FLAGS_chroot}${CHROME_ROOT_CONFIG}"
        if [[ ! "$CHROME_ROOT_AUTO" ]]; then
          exit 1
        fi
      else
        debug "Mounting chrome source at: $INNER_CHROME_ROOT"
        echo $CHROME_ROOT > "${FLAGS_chroot}${CHROME_ROOT_CONFIG}"
        setup_mount "$CHROME_ROOT" --bind "$INNER_CHROME_ROOT"
      fi
    fi
    # Install fuse module. Skip modprobe when possible for slight
    # speed increase when initializing the env.
    if [ -c "${FUSE_DEVICE}" ] && ! grep -q fuse /proc/filesystems; then
      modprobe fuse 2> /dev/null ||\
        warn "-- Note: modprobe fuse failed. gmergefs will not work"
    fi
    # Certain files get copied into the chroot when entering.
    for fn in "${FILES_TO_COPY_TO_CHROOT[@]}"; do
      copy_into_chroot_if_exists "${SUDO_HOME}/${fn}" "/home/${SUDO_USER}/${fn}"
    done
    promote_api_keys
    # Fix permissions on shared memory to allow non-root users access to POSIX
    # semaphores.
    chmod -R 777 "${FLAGS_chroot}/dev/shm"
    # Have found a few chroots where ~/.gsutil is owned by root:root, probably
    # as a result of old gsutil or tools. This causes permission errors when
    # gsutil cp tries to create its cache files, so ensure the user can
    # actually write to their directory.
    gsutil_dir="${FLAGS_chroot}/home/${SUDO_USER}/.gsutil"
    if [ -d "${gsutil_dir}" ]; then
      chown -R ${SUDO_UID}:${SUDO_GID} "${gsutil_dir}"
    fi
    # The SDK should track mantle's master branch by default.
    workon_dir="${FLAGS_trunk}/.config/cros_workon"
    if [ ! -e "${workon_dir}" ]; then
      mkdir -p "${workon_dir}"
      echo '=coreos-devel/mantle-9999' > "${workon_dir}/host"
      echo '<coreos-devel/mantle-9999' > "${workon_dir}/host.mask"
      chown -R ${SUDO_UID}:${SUDO_GID} "${FLAGS_trunk}/.config"
    fi
  ) 200>>"$LOCKFILE" || die "setup_env failed"
}
setup_env

# Environment variables to inject into the chroot session.
CHROOT_PASSTHRU=(
  "BUILDBOT_BUILD=$FLAGS_build_number"
  "CHROMEOS_RELEASE_APPID=${CHROMEOS_RELEASE_APPID:-{DEV-BUILD}}"
  "EXTERNAL_TRUNK_PATH=${FLAGS_trunk}"
)

# Add the whitelisted environment variables to CHROOT_PASSTHRU.
load_environment_whitelist
for var in "${ENVIRONMENT_WHITELIST[@]}" ; do
  # skip empty/unset values
  [[ "${!var+set}" == "set" ]] || continue
  # skip values that aren't actually exported
  [[ $(declare -p "${var}") == "declare -x ${var}="* ]] || continue
  CHROOT_PASSTHRU+=( "${var}=${!var}" )
done

# Set up GIT_PROXY_COMMAND so git:// URLs automatically work behind a proxy.
if [[ -n "${all_proxy}" || -n "${https_proxy}" || -n "${http_proxy}" ]]; then
  CHROOT_PASSTHRU+=(
    "GIT_PROXY_COMMAND=${CHROOT_TRUNK_DIR}/src/scripts/bin/proxy-gw"
  )
fi

# Run command or interactive shell. Also include the non-chrooted path to
# the source trunk for scripts that may need to print it (e.g.
# build_image.sh).
cmd=( /usr/bin/env PATH="/usr/sbin:/usr/bin:/sbin:/bin" LC_ALL=C )
if [ $FLAGS_early_make_chroot -eq $FLAGS_TRUE ]; then
  # Bootstrap mode: sudo is not installed yet, so run directly as root
  # through a login shell.
  cmd+=( /bin/bash -l -c 'env "$@"' -- )
elif [ ! -x "${FLAGS_chroot}/usr/bin/sudo" ]; then
  # Complain that sudo is missing.
  error "Failing since the chroot lacks sudo."
  error "Requested enter_chroot command was: $@"
  exit 127
else
  cmd+=( sudo -i -u "${SUDO_USER}" )
fi
cmd+=( "${CHROOT_PASSTHRU[@]}" "$@" )
# Replace this process with the chrooted command; never returns.
exec chroot "${FLAGS_chroot}" "${cmd[@]}"

View File

@ -1,311 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script sets up a Gentoo chroot environment. The script is passed the
# path to an empty folder, which will be populated with a Gentoo stage3 and
# setup for development. Once created, the password is set to PASSWORD (below).
# One can enter the chrooted environment for work by running enter_chroot.sh.
SCRIPT_ROOT=$(readlink -f $(dirname "$0")/..)
. "${SCRIPT_ROOT}/common.sh" || exit 1
ENTER_CHROOT=$(readlink -f $(dirname "$0")/enter_chroot.sh)
if [ -n "${USE}" ]; then
echo "$SCRIPT_NAME: Building with a non-empty USE: ${USE}"
echo "This modifies the expected behaviour and can fail."
fi
# Check if the host machine architecture is supported.
ARCHITECTURE="$(uname -m)"
if [[ "$ARCHITECTURE" != "x86_64" ]]; then
echo "$SCRIPT_NAME: $ARCHITECTURE is not supported as a host machine architecture."
exit 1
fi
# Script must be run outside the chroot and as root.
assert_outside_chroot
assert_root_user
assert_kernel_version
# Define command line flags.
# See http://code.google.com/p/shflags/wiki/Documentation10x
DEFINE_string chroot "$DEFAULT_CHROOT_DIR" \
"Destination dir for the chroot environment."
DEFINE_boolean usepkg $FLAGS_TRUE "Use binary packages to bootstrap."
DEFINE_boolean getbinpkg $FLAGS_TRUE \
"Download binary packages from remote repository."
DEFINE_boolean delete $FLAGS_FALSE "Delete an existing chroot."
DEFINE_boolean replace $FLAGS_FALSE "Overwrite existing chroot, if any."
DEFINE_integer jobs "${NUM_JOBS}" \
"How many packages to build in parallel at maximum."
DEFINE_string stage3_path "" \
"Use the stage3 located on this path."
DEFINE_string cache_dir "" "unused"
# Parse command line flags.
FLAGS_HELP="usage: $SCRIPT_NAME [flags]"
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
check_flags_only_and_allow_null_arg "$@" && set --
CROS_LOG_PREFIX=cros_sdk:make_chroot
SUDO_HOME=$(eval echo ~${SUDO_USER})
# Set the right umask for chroot creation.
umask 022
# Only now can we die on error. shflags functions leak non-zero error codes,
# so will die prematurely if 'switch_to_strict_mode' is specified before now.
# TODO: replace shflags with something less error-prone, or contribute a fix.
switch_to_strict_mode
ENTER_CHROOT_ARGS=(
CROS_WORKON_SRCROOT="$CHROOT_TRUNK"
PORTAGE_USERNAME="${SUDO_USER}"
)
# Invoke enter_chroot. This can only be used after sudo has been installed.
enter_chroot() {
  # Re-invoke enter_chroot.sh against the chroot being built, forwarding the
  # standard environment (CROS_WORKON_SRCROOT, PORTAGE_USERNAME) plus "$@".
  "$ENTER_CHROOT" --chroot "$FLAGS_chroot" -- "${ENTER_CHROOT_ARGS[@]}" "$@"
}
# Invoke enter_chroot running the command as root, and w/out sudo.
# This should be used prior to sudo being merged.
early_enter_chroot() {
  # Same as enter_chroot, but with --early_make_chroot so the command runs
  # as root without sudo — needed before sudo is merged into the chroot.
  "$ENTER_CHROOT" --chroot "$FLAGS_chroot" --early_make_chroot \
    -- "${ENTER_CHROOT_ARGS[@]}" "$@"
}
# Run a command within the chroot. The main usage of this is to avoid the
# overhead of enter_chroot for commands that do not need access to the source
# tree or the chroot profile env, and that can run as root.
bare_chroot() {
  # Minimal, fixed PATH inside the chroot; no profile, no sudo.
  local clean_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
  chroot "${FLAGS_chroot}" /usr/bin/env PATH="${clean_path}" "$@"
}
cleanup() {
  # Clean up mounts
  # safe_umount_tree (presumably provided by common.sh — verify) unmounts
  # everything still mounted beneath the chroot path.
  safe_umount_tree "${FLAGS_chroot}"
}
# Remove a pre-existing chroot directory, unmounting anything still mounted
# beneath it first. A no-op when the chroot does not exist.
delete_existing() {
  # Delete old chroot dir — nothing to do when none is present.
  [[ -e "$FLAGS_chroot" ]] || return 0
  info "Cleaning up old mount points..."
  cleanup
  info "Deleting $FLAGS_chroot..."
  rm -rf "$FLAGS_chroot"
  info "Done."
}
# Create the invoking developer's user and group inside the chroot with the
# same UID/GID numbering as the host, and add the user to the kvm and
# portage groups. Skipped entirely for the Flatcar "core" system user.
# Globals: FLAGS_chroot, SUDO_USER, SUDO_UID, SUDO_GID (all read).
init_users () {
  # make sure user/group database files exist
  touch "${FLAGS_chroot}/etc/"{group,gshadow,passwd,shadow}
  chmod 640 "${FLAGS_chroot}/etc/"{gshadow,shadow}
  # do nothing with the Flatcar system user
  if [[ "${SUDO_USER}" == core ]]; then
    return
  fi
  local baselayout="${FLAGS_chroot}/usr/share/baselayout"
  # Look up the developer's full name (GECOS, passwd field 5) and the name
  # of their primary group on the host.
  local full_name=$(getent passwd "${SUDO_USER}" | cut -d: -f5)
  local group_name=$(getent group "${SUDO_GID}" | cut -d: -f1)
  [[ -n "${group_name}" ]] || die "Looking up gid $SUDO_GID failed."
  if ! grep -q "^${group_name}:" "${baselayout}/group"; then
    info "Adding group ${group_name}..."
    # -o permits a non-unique GID so the chroot mirrors the host numbering.
    bare_chroot groupadd -o -g "${SUDO_GID}" "${group_name}"
  fi
  info "Adding user ${SUDO_USER}..."
  bare_chroot useradd -o -g "${SUDO_GID}" -u "${SUDO_UID}" \
    -s /bin/bash -m -c "${full_name}" "${SUDO_USER}"
  # copy and update other system groups the developer should be in
  local group
  for group in kvm portage; do
    grep "^${group}:" "${baselayout}/group" >> "${FLAGS_chroot}/etc/group"
    bare_chroot gpasswd -a "${SUDO_USER}" "${group}"
  done
}
# One-time chroot initialization after the stage3 is unpacked: sudoers,
# global PATH entries, chromite python path, ~/trunk symlink, shell
# profile hooks, and the developer's git configuration.
# Globals: FLAGS_chroot, SUDO_USER, SUDO_UID, SUDO_GID, SUDO_HOME,
#          CHROOT_TRUNK_DIR, ENVIRONMENT_WHITELIST (all read).
init_setup () {
  info "Running init_setup()..."
  # clean up old catalyst configs to avoid error from env-update
  # TODO(marineam): remove repos.conf bit in a week or so
  rm -f "${FLAGS_chroot}/etc/portage/make.conf" \
    "${FLAGS_chroot}/etc/portage/repos.conf/coreos.conf"
  # Set up sudoers. Inside the chroot, the user can sudo without a password.
  # (Safe enough, since the only way into the chroot is to 'sudo chroot', so
  # the user's already typed in one sudo password...)
  # Setup proxied vars.
  load_environment_whitelist
  local extended_whitelist=(
    "${ENVIRONMENT_WHITELIST[@]}"
    CROS_WORKON_SRCROOT
    PORTAGE_USERNAME
  )
  cat > "${FLAGS_chroot}/etc/sudoers.d/90_cros" <<EOF
Defaults env_keep += "${extended_whitelist[*]}"
${SUDO_USER} ALL=NOPASSWD: ALL
EOF
  chmod 0440 "${FLAGS_chroot}/etc/sudoers.d/90_cros"
  # Add chromite/bin into the path globally
  # We rely on 'env-update' getting called below.
  target="${FLAGS_chroot}/etc/env.d/99flatcar"
  cat <<EOF > "${target}"
PATH=${CHROOT_TRUNK_DIR}/chromite/bin
ROOTPATH=${CHROOT_TRUNK_DIR}/chromite/bin
CROS_WORKON_SRCROOT="${CHROOT_TRUNK_DIR}"
PORTAGE_USERNAME=${SUDO_USER}
EOF
  early_enter_chroot env-update
  # Add chromite into python path.
  for python_path in "${FLAGS_chroot}/usr/lib/"python2.*; do
    sudo mkdir -p "${python_path}"
    sudo ln -s "${CHROOT_TRUNK_DIR}"/chromite "${python_path}"
  done
  # Create ~/trunk symlink, it must point to CHROOT_TRUNK_DIR
  ln -sfT "${CHROOT_TRUNK_DIR}" "$FLAGS_chroot/home/${SUDO_USER}/trunk"
  # Automatically change to scripts directory.
  echo 'cd ${CHROOT_CWD:-~/trunk/src/scripts}' \
    | user_append "$FLAGS_chroot/home/${SUDO_USER}/.bash_profile"
  # Enable bash completion for build scripts.
  echo ". ~/trunk/src/scripts/bash_completion" \
    | user_append "$FLAGS_chroot/home/${SUDO_USER}/.bashrc"
  if [[ -f ${SUDO_HOME}/.gitconfig ]]; then
    # Copy .gitconfig into chroot so repo and git can be used from inside.
    # This is required for repo to work since it validates the email address.
    echo "Copying ~/.gitconfig into chroot"
    user_cp "${SUDO_HOME}/.gitconfig" "$FLAGS_chroot/home/${SUDO_USER}/"
  fi
  # If the user didn't set up their username in their gitconfig, look
  # at the default git settings for the user.
  if ! git config -f "${SUDO_HOME}/.gitconfig" user.email >& /dev/null; then
    # Parse "Name <email> ..." out of git's committer identity.
    ident=$(cd /; sudo -u ${SUDO_USER} -- git var GIT_COMMITTER_IDENT || :)
    ident_name=${ident%% <*}
    ident_email=${ident%%>*}; ident_email=${ident_email##*<}
    gitconfig=${FLAGS_chroot}/home/${SUDO_USER}/.gitconfig
    git config -f ${gitconfig} --replace-all user.name "${ident_name}" || :
    git config -f ${gitconfig} --replace-all user.email "${ident_email}" || :
    chown ${SUDO_UID}:${SUDO_GID} ${FLAGS_chroot}/home/${SUDO_USER}/.gitconfig
  fi
  # Per-user hook: run the developer's own chroot init script if present.
  if [[ -f ${SUDO_HOME}/.cros_chroot_init ]]; then
    sudo -u ${SUDO_USER} -- /bin/bash "${SUDO_HOME}/.cros_chroot_init" \
      "${FLAGS_chroot}"
  fi
}
# Handle deleting an existing environment.
if [[ $FLAGS_delete -eq $FLAGS_TRUE || \
  $FLAGS_replace -eq $FLAGS_TRUE ]]; then
  delete_existing
  # --delete stops here; --replace falls through and rebuilds from scratch.
  [[ $FLAGS_delete -eq $FLAGS_TRUE ]] && exit 0
fi

CHROOT_TRUNK="${CHROOT_TRUNK_DIR}"
PORTAGE_STABLE_OVERLAY="/usr/local/portage/stable"
CROSSDEV_OVERLAY="/usr/local/portage/crossdev"
CHROOT_OVERLAY="/usr/local/portage/coreos"
# Marker file; its presence means the stage3 has already been unpacked.
CHROOT_STATE="${FLAGS_chroot}/etc/debian_chroot"

# Pass proxy variables into the environment.
for type in http ftp all; do
  value=$(env | grep ${type}_proxy || true)
  if [ -n "${value}" ]; then
    CHROOT_PASSTHRU+=("$value")
  fi
done

if [ ! -f "${FLAGS_stage3_path}" ]; then
  error "Invalid stage3!"
  exit 1;
fi
STAGE3="${FLAGS_stage3_path}"

# Create the destination directory.
mkdir -p "$FLAGS_chroot"
echo
if [ -f $CHROOT_STATE ]
then
  info "STAGE3 already set up. Skipping..."
else
  info "Unpacking STAGE3..."
  # Pick a decompressor from the tarball suffix; prefer parallel lbzip2
  # when it is installed.
  case ${STAGE3} in
    *.tbz2|*.tar.bz2) DECOMPRESS=$(type -p lbzip2 || echo bzip2) ;;
    *.tar.xz) DECOMPRESS="xz" ;;
    *) die "Unknown tarball compression: ${STAGE3}";;
  esac
  ${DECOMPRESS} -dc "${STAGE3}" | \
    tar -xp -C "${FLAGS_chroot}"
  rm -f "$FLAGS_chroot/etc/"make.{globals,conf.user}
  # Set up users, if needed, before mkdir/mounts below.
  init_users
  # Run all the init stuff to setup the env.
  init_setup
fi
# Add file to indicate that it is a chroot.
echo STAGE3=$STAGE3 > $CHROOT_STATE

# Update chroot.
# Translate --usepkg/--getbinpkg flags into update_chroot arguments.
UPDATE_ARGS=()
if [[ ${FLAGS_usepkg} -eq ${FLAGS_TRUE} ]]; then
  UPDATE_ARGS+=( --usepkg )
  if [[ ${FLAGS_getbinpkg} -eq ${FLAGS_TRUE} ]]; then
    UPDATE_ARGS+=( --getbinpkg )
  else
    UPDATE_ARGS+=( --nogetbinpkg )
  fi
else
  UPDATE_ARGS+=( --nousepkg )
fi
if [[ "${FLAGS_jobs}" -ne -1 ]]; then
  UPDATE_ARGS+=( --jobs=${FLAGS_jobs} )
fi
enter_chroot "${CHROOT_TRUNK_DIR}/src/scripts/update_chroot" "${UPDATE_ARGS[@]}"

CHROOT_EXAMPLE_OPT=""
if [[ "$FLAGS_chroot" != "$DEFAULT_CHROOT_DIR" ]]; then
  CHROOT_EXAMPLE_OPT="--chroot=$FLAGS_chroot"
fi

command_completed
# Final usage hints for the developer; the heredoc expands the variables.
cat <<EOF
${CROS_LOG_PREFIX:-cros_sdk}: All set up. To enter the chroot, run:
$ cros_sdk --enter $CHROOT_EXAMPLE_OPT
CAUTION: Do *NOT* rm -rf the chroot directory; if there are stale bind
mounts you may end up deleting your source tree too. To unmount and
delete the chroot cleanly, use:
$ cros_sdk --delete $CHROOT_EXAMPLE_OPT
EOF
warn_if_nfs "${SUDO_HOME}"

View File

@ -8,7 +8,7 @@ SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user

View File

@ -8,7 +8,7 @@
. "${BUILD_LIBRARY_DIR}/toolchain_util.sh" || exit 1
# Script must run inside the chroot
restart_in_chroot_if_needed "$@"
assert_inside_chroot
assert_not_root_user

9
signing/prod-2.pub.pem Normal file
View File

@ -0,0 +1,9 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA7pO21yN+b6yx9P+wHzS2
clxGs18eWxfoleETLbFVmcXu783rgBP0bFjYfqrNZNaI1Ip6bxEYkPacg0xFg7ri
lNdO/cxJV5Ltj40wFlpmzJOAH8hx5SF8KWg2NV1I6TS8pp+CQqcvvOKu6AIcWfeY
11V7eJ8rWcDsnqpTg8T1VRxytsg2UjTMfQwzcGLTb8cQ8AV39ED5WC5NdS9Bld4h
XqS9Dx6Pe3JOQLZze6XIIwWuB2jxGpM1GWfRNm5nxvne3l7ggC970482a7STGK10
fD8//k8myVxleMAeQoMRXoRq9p3C84H4Bw8v2dX13kFFCgfEQj6SOZ5huXZKLPpB
LwIDAQAB
-----END PUBLIC KEY-----

62
signing/sign.sh Executable file
View File

@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Verify a CoreOS update payload, re-sign it through a fero signing server
# using the developers' collected signatures, and sign the torcx manifest.
# Usage: sign.sh DATA_DIR SIGS_DIR [SERVER_ADDR [SERVER_PORT]]
set -exo pipefail

if [[ $# -lt 2 ]]; then
    echo "Usage: $0 DATA_DIR SIGS_DIR [SERVER_ADDR [SERVER_PORT]]" >&2
    exit 1
fi

DATA_DIR="$(readlink -f "$1")"
KEYS_DIR="$(readlink -f "$(dirname "$0")")"
SIGS_DIR="$(readlink -f "$2")"
SERVER_ADDR="${3:-10.7.16.138}"
SERVER_PORT="${4:-50051}"

echo "=== Verifying update payload... ==="
gpg2 --verify "${DATA_DIR}/coreos_production_update.bin.bz2.sig"
gpg2 --verify "${DATA_DIR}/coreos_production_image.vmlinuz.sig"
gpg2 --verify "${DATA_DIR}/coreos_production_update.zip.sig"

echo "=== Decompressing update payload... ==="
bunzip2 --keep "${DATA_DIR}/coreos_production_update.bin.bz2"
unzip "${DATA_DIR}/coreos_production_update.zip" -d "${DATA_DIR}"

# Collect the developers' payload signatures. nullglob lets us detect the
# "no signatures" case instead of passing the unexpanded glob pattern on
# to core_sign_update.
shopt -s nullglob
payload_sigs=( "${SIGS_DIR}"/update.sig.* )
if [[ ${#payload_sigs[@]} -eq 0 ]]; then
    echo "No update.sig.* files found in ${SIGS_DIR}" >&2
    exit 1
fi
# core_sign_update expects a single colon-separated list.
payload_signature_files=$(IFS=:; echo "${payload_sigs[*]}")

pushd "${DATA_DIR}"
./core_sign_update \
    --image "${DATA_DIR}/coreos_production_update.bin" \
    --kernel "${DATA_DIR}/coreos_production_image.vmlinuz" \
    --output "${DATA_DIR}/coreos_production_update.gz" \
    --private_keys "${KEYS_DIR}/devel.key.pem+fero:coreos-update-prod" \
    --public_keys "${KEYS_DIR}/devel.pub.pem+${KEYS_DIR}/prod-2.pub.pem" \
    --keys_separator "+" \
    --signing_server_address "$SERVER_ADDR" \
    --signing_server_port "$SERVER_PORT" \
    --user_signatures "${payload_signature_files}"
popd

echo "=== Signing torcx manifest... ==="
# Build one "--signature <file>" pair per developer manifest signature as an
# array, so filenames survive quoting intact (the old string-concatenation
# form relied on unquoted word splitting).
torcx_signature_args=()
for torcx_signature in "${SIGS_DIR}"/torcx_manifest.json.sig.*; do
    torcx_signature_args+=( --signature "${torcx_signature}" )
done
if [[ ${#torcx_signature_args[@]} -eq 0 ]]; then
    echo "No torcx_manifest.json.sig.* files found in ${SIGS_DIR}" >&2
    exit 1
fi
fero-client \
    --address "$SERVER_ADDR" \
    --port "$SERVER_PORT" \
    sign \
    --file "${DATA_DIR}/torcx_manifest.json" \
    --output "${DATA_DIR}/torcx_manifest.json.sig-fero" \
    --secret-key coreos-torcx \
    "${torcx_signature_args[@]}"
# Convert the binary fero signature to ASCII-armored form and drop the
# intermediate binary file.
gpg2 --enarmor \
    --output "${DATA_DIR}/torcx_manifest.json.asc" \
    "${DATA_DIR}/torcx_manifest.json.sig-fero"
echo "=== Torcx manifest signed successfully. ==="
rm -f "${DATA_DIR}/torcx_manifest.json.sig-fero"

View File

@ -93,23 +93,22 @@ upload() {
fi
}
roll() {
ready() {
local channel="$1"
local version="$2"
local board="$3"
# Only ramp rollouts on AMD64; ARM64 is too small
if [[ "$board" = "amd64-usr" ]]; then
updateservicectl \
--server="https://public.update.core-os.net" \
--user="${ROLLER_USERNAME}" \
--key="${ROLLER_API_KEY}" \
group update \
--app-id="${APPID[${board}]}" \
--group-id="${channel}" \
--update-count=3 \
--update-interval=60
fi
# setting the percent will deactivate (not delete) any existing rollouts for
# this specific group.
echo "Rollout set to 0% for ${board}"
updateservicectl \
--server="https://public.update.core-os.net" \
--user="${ROLLER_USERNAME}" \
--key="${ROLLER_API_KEY}" \
group percent \
--app-id="${APPID[${board}]}" \
--group-id="${channel}" \
--update-percent=0
# FIXME(bgilbert): We set --publish=true because there's no way to
# say --publish=unchanged
@ -124,9 +123,44 @@ roll() {
--version="${version}"
}
roll() {
local channel="$1"
local hours="$2"
local board="$3"
local seconds=$((${hours} * 3600))
# Only ramp rollouts on AMD64; ARM64 is too small
if [[ "$board" = "arm64-usr" ]]; then
echo "Setting rollout for arm64-usr to 100%"
updateservicectl \
--server="https://public.update.core-os.net" \
--user="${ROLLER_USERNAME}" \
--key="${ROLLER_API_KEY}" \
group percent \
--app-id="${APPID[${board}]}" \
--group-id="${channel}" \
--update-percent=100
else
# creating a new rollout deletes any existing rollout for this group and
# automatically activates the new one.
echo "Creating linear rollout for ${board} that will get to 100% in ${hours}h"
updateservicectl \
--server="https://public.update.core-os.net" \
--user="${ROLLER_USERNAME}" \
--key="${ROLLER_API_KEY}" \
rollout create linear \
--app-id="${APPID[${board}]}" \
--group-id="${channel}" \
--duration="${seconds}" \
--frame-size="60"
fi
}
usage() {
echo "Usage: $0 {download|upload} <ARTIFACT-DIR> [{-a|-b|-s} <VERSION>]..." >&2
echo "Usage: $0 roll [{-a|-b|-s} <VERSION>]..." >&2
echo "Usage: $0 ready [{-a|-b|-s} <VERSION>]..." >&2
echo "Usage: $0 roll [{-a|-b|-s} <HOURS-TO-100-PERCENT>]..." >&2
exit 1
}
@ -136,7 +170,7 @@ shift ||:
case "${CMD}" in
download)
;;
upload|roll)
upload|ready|roll)
if [[ -e "${HOME}/.config/roller.conf" ]]; then
. "${HOME}/.config/roller.conf"
fi
@ -170,7 +204,7 @@ esac
# Walk argument pairs.
while [[ $# > 0 ]]; do
c="$1"
v="${2?Must provide a version (e.g. 1234.0.0)}"
v="${2?Must provide an argument for each channel (see usage)}"
shift 2
case "${c}" in

View File

@ -10,7 +10,7 @@ SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
# Script must be run inside the chroot if not in 'always serve' mode.
if [[ "$1" != "--archive_dir" ]]; then
restart_in_chroot_if_needed "$@"
assert_inside_chroot
fi
# This is the location of the script now.

View File

@ -227,9 +227,8 @@ if [[ "${FLAGS_skip_toolchain_update}" -eq "${FLAGS_FALSE}" && \
fi
# Build cros_workon packages when they are changed.
CHROMITE_BIN="${GCLIENT_ROOT}/chromite/bin"
if [ "${FLAGS_workon}" -eq "${FLAGS_TRUE}" ]; then
for pkg in $("${CHROMITE_BIN}/cros_list_modified_packages" --host); do
for pkg in $(cros_workon list --host); do
EMERGE_FLAGS+=( "--reinstall-atoms=${pkg}" "--usepkg-exclude=${pkg}" )
done
fi
@ -241,9 +240,6 @@ sudo -E ${EMERGE_CMD} "${EMERGE_FLAGS[@]}" \
coreos-devel/sdk-depends world
info "Removing obsolete packages"
# XXX: Remove these next two lines after stable > 1632.
cats=( '<=sys-devel' "${BOARD_CHOSTS[@]/#/<=cross-}" )
sudo -E emerge --quiet --unmerge "${cats[@]/%//binutils-2.29.0}" "${cats[@]/%//gcc-7.2.0}" 2>/dev/null || :
sudo -E ${EMERGE_CMD} --quiet --depclean @unavailable
if portageq list_preserved_libs / >/dev/null; then