From 51484a96e9867957507f7bfecd31fabc8ce2e190 Mon Sep 17 00:00:00 2001 From: Hung-Te Lin Date: Sat, 13 Nov 2010 02:43:14 +0800 Subject: [PATCH] crosutils: refine memento image / factory package creation This CL improves creation of memento / factory image payloads by: - verbose progress report - allowing compression with pigz, the parallel version of gzip - preventing unpacking of the entire image if partition tools (cgpt/parted) are available. BUG=chromium-os:6536,chromium-os:5208 TEST=Verified executing "time ./make_factory_package.sh ..." for ToT factory bundle: - before this CL (memento gzip param is not -9): 3m53.126s - after this CL, without pigz, with cgpt: 2m34.897s - after this CL, with pigz+cgpt, memento_gz=-9: 0m45.603s - after this CL, with pigz, without cgpt/parted, memento_gz=-9: 1m49.748s Also verified such bundle can be installed on a real netbook device. Change-Id: Ie182844ea5482d6d321b9549fa584377edf7dfe3 Review URL: http://codereview.chromium.org/4824003 --- image_common.sh | 106 ++++++++++++++++++++++++++++++++++++++++ make_factory_package.sh | 79 ++++++++++++++++++++++++------ mk_memento_images.sh | 98 +++++++++++++++++++++++++------------ 3 files changed, 237 insertions(+), 46 deletions(-) create mode 100644 image_common.sh diff --git a/image_common.sh b/image_common.sh new file mode 100644 index 0000000000..0ad9d0bbcd --- /dev/null +++ b/image_common.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +# Copyright (c) 2009 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This script contains common utility functions to deal with disk images, +# especially for being redistributed into platforms without a complete Chromium OS +# development environment. + +# Check if given command is available in current system +has_command() { + type "$1" >/dev/null 2>&1 +} + +err_die() { + echo "ERROR: $@" >&2 + exit 1 +} + +# Finds the best gzip compressor and invokes it. 
+gzip_compress() { + if has_command pigz; then + # echo " ** Using parallel gzip **" >&2 + # Tested with -b 32, 64, 128(default), 256, 1024, 16384, and -b 32 (max + # window size of Deflate) seems to be the best in output size. + pigz -b 32 "$@" + else + gzip "$@" + fi +} + +# Checks if the current system has tools for the part_* commands +has_part_tools() { + has_command cgpt || has_command parted +} + +# Finds the best partition tool and prints the partition offset +part_offset() { + local file="$1" + local partno="$2" + + if has_command cgpt; then + cgpt show -b -i "$partno" "$file" + elif has_command parted; then + parted -m "$file" unit s print | + grep "^$partno:" | cut -d ':' -f 2 | sed 's/s$//' + else + exit 1 + fi +} + +# Finds the best partition tool and prints the partition size +part_size() { + local file="$1" + local partno="$2" + + if has_command cgpt; then + cgpt show -s -i "$partno" "$file" + elif has_command parted; then + parted -m "$file" unit s print | + grep "^$partno:" | cut -d ':' -f 4 | sed 's/s$//' + else + exit 1 + fi +} + +# Dumps a file by given offset and size (in sectors) +dump_partial_file() { + local file="$1" + local offset="$2" + local sectors="$3" + local bs=512 + + # Try to use larger buffer if offset/size can be re-aligned. 
+ # 2M / 512 = 4096 + local buffer_ratio=4096 + if [ $((offset % buffer_ratio)) -eq 0 -a \ + $((sectors % buffer_ratio)) -eq 0 ]; then + offset=$((offset / buffer_ratio)) + sectors=$((sectors / buffer_ratio)) + bs=$((bs * buffer_ratio)) + fi + + if has_command pv; then + dd if="$file" bs=$bs skip="$offset" count="$sectors" \ + oflag=sync status=noxfer 2>/dev/null | + pv -ptreb -B 4m -s $((sectors * $bs)) + else + dd if="$file" bs=$bs skip="$offset" count="$sectors" \ + oflag=sync status=noxfer 2>/dev/null + fi +} + +# Dumps a specific partition from given image file +dump_partition() { + local file="$1" + local part_num="$2" + local offset="$(part_offset "$file" "$part_num")" || + err_die "failed to dump partition #$part_num from: $file" + local size="$(part_size "$file" "$part_num")" || + err_die "failed to dump partition #$part_num from: $file" + + dump_partial_file "$file" "$offset" "$size" +} + diff --git a/make_factory_package.sh b/make_factory_package.sh index 9a44f71dd2..a251980a12 100755 --- a/make_factory_package.sh +++ b/make_factory_package.sh @@ -18,6 +18,9 @@ # Load functions and constants for chromeos-install . "$(dirname "$0")/chromeos-common.sh" +# Load functions designed for image processing +. 
"$(dirname "$0")/image_common.sh" + get_default_board # Flags @@ -95,6 +98,50 @@ prepare_dir() { rm -rf state.gz } +compress_and_hash_memento_image() { + local input_file="$1" + + if has_part_tools; then + sudo "${SCRIPTS_DIR}/mk_memento_images.sh" "$input_file" 2 3 | + grep hash | + awk '{print $4}' + else + sudo "${SCRIPTS_DIR}/mk_memento_images.sh" part_2 part_3 | + grep hash | + awk '{print $4}' + fi +} + +compress_and_hash_file() { + local input_file="$1" + local output_file="$2" + + if [ -z "$input_file" ]; then + # Runs as a pipe processor + gzip_compress -c -9 | + tee "$output_file" | + openssl sha1 -binary | + openssl base64 + else + gzip_compress -c -9 "$input_file" | + tee "$output_file" | + openssl sha1 -binary | + openssl base64 + fi +} + +compress_and_hash_partition() { + local input_file="$1" + local part_num="$2" + local output_file="$3" + + if has_part_tools; then + dump_partition "$input_file" "$part_num" | + compress_and_hash_file "" "$output_file" + else + compress_and_hash_file "part_$part_num" "$output_file" + fi +} # Clean up stale config and data files. prepare_omaha @@ -108,21 +155,25 @@ echo "Output omaha config to ${OMAHA_DIR}/miniomaha.conf" prepare_dir -sudo ./unpack_partitions.sh ${RELEASE_IMAGE} &> /dev/null -release_hash=`sudo ${SCRIPTS_DIR}/mk_memento_images.sh part_2 part_3 \ - | grep hash | awk '{print $4}'` +if ! has_part_tools; then + #TODO(hungte) we can still avoid running unpack_partitions.sh + # by $(cat unpack_partitions.sh | grep Label | sed "s/#//" | grep ${name}" | + # awk '{ print $1}') to fetch offset/size. + echo "Unpacking image ${RELEASE_IMAGE} ..." 
>&2 + sudo ./unpack_partitions.sh "${RELEASE_IMAGE}" 2>/dev/null +fi + +release_hash="$(compress_and_hash_memento_image "${RELEASE_IMAGE}")" sudo chmod a+rw update.gz mv update.gz rootfs-release.gz mv rootfs-release.gz ${OMAHA_DATA_DIR} echo "release: ${release_hash}" -cat part_8 | gzip -9 > oem.gz -oem_hash=`cat oem.gz | openssl sha1 -binary | openssl base64` +oem_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 8 "oem.gz")" mv oem.gz ${OMAHA_DATA_DIR} echo "oem: ${oem_hash}" -cat part_12 | gzip -9 > efi.gz -efi_hash=`cat efi.gz | openssl sha1 -binary | openssl base64` +efi_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 12 "efi.gz")" mv efi.gz ${OMAHA_DATA_DIR} echo "efi: ${efi_hash}" @@ -132,17 +183,18 @@ popd > /dev/null pushd ${FACTORY_DIR} > /dev/null prepare_dir +if ! has_part_tools; then + echo "Unpacking image ${FACTORY_IMAGE} ..." >&2 + sudo ./unpack_partitions.sh "${FACTORY_IMAGE}" 2>/dev/null +fi -sudo ./unpack_partitions.sh ${FACTORY_IMAGE} &> /dev/null -test_hash=`sudo ${SCRIPTS_DIR}//mk_memento_images.sh part_2 part_3 \ - | grep hash | awk '{print $4}'` +test_hash="$(compress_and_hash_memento_image "${FACTORY_IMAGE}")" sudo chmod a+rw update.gz mv update.gz rootfs-test.gz mv rootfs-test.gz ${OMAHA_DATA_DIR} echo "test: ${test_hash}" -cat part_1 | gzip -9 > state.gz -state_hash=`cat state.gz | openssl sha1 -binary | openssl base64` +state_hash="$(compress_and_hash_partition "${FACTORY_IMAGE}" 1 "state.gz")" mv state.gz ${OMAHA_DATA_DIR} echo "state: ${state_hash}" @@ -155,8 +207,7 @@ if [ ! 
-z ${FLAGS_firmware_updater} ] ; then exit 1 fi - cat $SHELLBALL | gzip -9 > firmware.gz - firmware_hash=`cat firmware.gz | openssl sha1 -binary | openssl base64` + firmware_hash="$(compress_and_hash_file "$SHELLBALL" "firmware.gz")" mv firmware.gz ${OMAHA_DATA_DIR} echo "firmware: ${firmware_hash}" fi diff --git a/mk_memento_images.sh b/mk_memento_images.sh index de921bb579..b5d786135c 100755 --- a/mk_memento_images.sh +++ b/mk_memento_images.sh @@ -10,25 +10,64 @@ set -e +LIB_IMAGE_COMMON="$(dirname "$0")/image_common.sh" +if ! . "$LIB_IMAGE_COMMON"; then + echo "Missing required library: $LIB_IMAGE_COMMON. Cannot continue." + exit 1 +fi + if [ -z "$2" -o -z "$1" ]; then echo "usage: $0 path/to/kernel_partition_img path/to/rootfs_partition_img" + echo " or $0 path/to/chromiumos_img kern_part_no rootfs_part_no" exit 1 fi if [ "$CROS_GENERATE_UPDATE_PAYLOAD_CALLED" != "1" ]; then echo "WARNING:" - echo "This script should only be called from cros_generate_update_payload" - echo "Please run that script with --help to see how to use it." + echo " This script should only be called from cros_generate_update_payload" + echo " Please run that script with --help to see how to use it." +fi + +if ! has_command pigz; then + (echo "WARNING:" + echo " Your system does not have pigz (parallel gzip) installed." + echo " COMPRESSING WILL BE VERY SLOW. It is recommended to install pigz" + if has_command apt-get; then + echo " by 'sudo apt-get install pigz'." + elif has_command emerge; then + echo " by 'sudo emerge pigz'." 
+ fi) >&2 fi if [ $(whoami) = "root" ]; then echo "running $0 as root which is unneccessary" fi -KPART="$1" -ROOT_PART="$2" - -KPART_SIZE=$(stat -c%s "$KPART") +# Determine the offset, size, and file name from the parameters +if [ -z "$3" ]; then + # kernel_img rootfs_img + KPART="$1" + ROOT_PART="$2" + KPART_SIZE=$(stat -c%s "$KPART") + ROOT_PART_SIZE=$(stat -c%s "$ROOT_PART") + KPART_OFFSET=0 + KPART_SECTORS=$((KPART_SIZE / 512)) + ROOT_OFFSET=0 + ROOT_SECTORS=$((ROOT_PART_SIZE / 512)) +else + # chromiumos_img kern_part_no rootfs_part_no + KPART="$1" + ROOT_PART="$1" + KPART_OFFSET="$(part_offset "$KPART" "$2")" || + err_die "cannot retrieve kernel partition offset" + KPART_SECTORS="$(part_size "$KPART" "$2")" || + err_die "cannot retrieve kernel partition size" + ROOT_OFFSET="$(part_offset "$ROOT_PART" "$3")" || + err_die "cannot retrieve root partition offset" + ROOT_SECTORS="$(part_size "$ROOT_PART" "$3")" || + err_die "cannot retrieve root partition size" + KPART_SIZE=$((KPART_SECTORS * 512)) +fi # Sanity check size. if [ "$KPART_SIZE" -gt $((16 * 1024 * 1024)) ]; then @@ -38,34 +77,31 @@ fi FINAL_OUT_FILE=$(dirname "$1")/update.gz -UNCOMPRESSED_OUT_FILE="$FINAL_OUT_FILE.uncompressed" -# First, write size of kernel partition in big endian as uint64 to out file -# printf converts it to a number like 00000000003d0900. sed converts it to: -# \\x00\\x00\\x00\\x00\\x00\\x3d\\x09\\x00, then xargs converts it to binary -# with echo. -printf %016x "$KPART_SIZE" | \ - sed 's/\([0-9a-f][0-9a-f]\)/\\\\x\1/g' | \ - xargs echo -ne > "$UNCOMPRESSED_OUT_FILE" +# Update payload format: +# [kernel_size: big-endian uint64][kernel_blob][rootfs_blob] -# Next, write kernel partition to the out file -cat "$KPART" >> "$UNCOMPRESSED_OUT_FILE" +# Prepare kernel_size by using printf to format it as a number like 00000000003d0900, then +# sed to convert as: \x00\x00\x00\x00\x00\x3d\x09\x00, finally echo -e to +# convert into binary. 
+KPART_SIZE_SIGNATURE="$(printf "%016x" "$KPART_SIZE" | + sed 's/\([0-9a-f][0-9a-f]\)/\\x\1/g')" -# Sanity check size of output file now -if [ $(stat -c%s "$UNCOMPRESSED_OUT_FILE") -ne $((8 + $KPART_SIZE)) ]; then - echo "Kernel partition changed size during image generation. Aborting." - exit 1 -fi +# Build the blob! +CS_AND_RET_CODES="$( + (echo -en "$KPART_SIZE_SIGNATURE" + echo "Compressing kernel..." >&2 + dump_partial_file "$KPART" "$KPART_OFFSET" "$KPART_SECTORS" + echo "Compressing rootfs..." >&2 + dump_partial_file "$ROOT_PART" "$ROOT_OFFSET" "$ROOT_SECTORS") | + gzip_compress -9 -c | + tee "$FINAL_OUT_FILE" | + openssl sha1 -binary | + openssl base64 | + tr '\n' ' ' + echo ${PIPESTATUS[*]})" -# Put rootfs into the out file -cat "$ROOT_PART" >> "$UNCOMPRESSED_OUT_FILE" - -# compress and hash -CS_AND_RET_CODES=$(gzip -c "$UNCOMPRESSED_OUT_FILE" | \ - tee "$FINAL_OUT_FILE" | openssl sha1 -binary | \ - openssl base64 | tr '\n' ' '; \ - echo ${PIPESTATUS[*]}) -EXPECTED_RET_CODES="0 0 0 0 0" +EXPECTED_RET_CODES="0 0 0 0 0 0" set -- $CS_AND_RET_CODES CALC_CS="$1" shift @@ -75,6 +111,4 @@ if [ "$RET_CODES" != "$EXPECTED_RET_CODES" ]; then exit 1 fi -rm "$UNCOMPRESSED_OUT_FILE" - echo Success. hash is "$CALC_CS"