Merge branch 'master' of ssh://gitrw.chromium.org:9222/crosutils

Scott Zawalski, 2010-11-15 09:44:13 -08:00
commit f057702603
35 changed files with 1859 additions and 565 deletions


@ -37,6 +37,11 @@ CONFIG_SITE=/usr/share/config.site
function setup_ssh() {
eval $(ssh-agent) > /dev/null
# TODO(jrbarnette): This is a temporary hack, slated for removal
# before it was ever created. It's a bug, and you should fix it
# right away!
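# ssh refuses to use a private key that is group- or world-readable, so
# tighten the permissions before handing the key to ssh-add.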
chmod 400 \
${CHROMEOS_ROOT}/src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa
ssh-add \
${CHROMEOS_ROOT}/src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa
}


@ -1,229 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# A python wrapper to call autotest ebuild.
import commands, logging, optparse, os, subprocess, sys
def run(cmd):
return subprocess.call(cmd, stdout=sys.stdout, stderr=sys.stderr)
class MyOptionParser(optparse.OptionParser):
"""Override python's builtin OptionParser to accept any undefined args."""
help = False
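# Class-level flag: print_help() below sets this to True so callers can
# tell whether help output was requested.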
def _process_args(self, largs, rargs, values):
# see /usr/lib64/python2.6/optparse.py line 1414-1463
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
try:
self._process_long_opt(rargs, values)
except optparse.BadOptionError:
largs.append(arg)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
try:
self._process_short_opts(rargs, values)
except optparse.BadOptionError:
largs.append(arg)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
def print_help(self, file=None):
optparse.OptionParser.print_help(self, file)
MyOptionParser.help = True
parser = MyOptionParser()
parser.allow_interspersed_args = True
DEFAULT_BOARD = os.environ.get('DEFAULT_BOARD', '')
parser.add_option('--args', dest='args', action='store',
default='',
help='The arguments to pass to the test control file.')
parser.add_option('--autox', dest='autox', action='store_true',
default=True,
help='Build autox along with autotest [default].')
parser.add_option('--noautox', dest='autox', action='store_false',
help='Don\'t build autox along with autotest.')
parser.add_option('--board', dest='board', action='store',
default=DEFAULT_BOARD,
help='The board for which you are building autotest.')
parser.add_option('--build', dest='build', action='store',
help='Only prebuild client tests, do not run tests.')
parser.add_option('--buildcheck', dest='buildcheck', action='store_true',
default=True,
help='Fail if tests fail to build [default].')
parser.add_option('--nobuildcheck', dest='buildcheck', action='store_false',
help='Ignore test build failures.')
parser.add_option('--jobs', dest='jobs', action='store', type=int,
default=-1,
help='Maximum number of packages to build in parallel.')
parser.add_option('--noprompt', dest='noprompt', action='store_true',
help='Don\'t prompt user when building all tests.')
AUTOSERV='../third_party/autotest/files/server/autoserv'
AUTOTEST_CLIENT='../third_party/autotest/files/client/bin/autotest_client'
def parse_args_and_help():
def nop(_):
pass
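# Temporarily neuter sys.exit so parse_args() can't terminate the process
# on --help; we still need to print the wrapped tool's help below. The
# real sys.exit is restored immediately after parsing.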
sys_exit = sys.exit
sys.exit = nop
options, args = parser.parse_args()
sys.exit = sys_exit
if not args and not options.build:
parser.print_help()
if MyOptionParser.help:
if options.build:
print
print 'Options inherited from autotest_client, which is used in build',
print 'only mode.'
run([AUTOTEST_CLIENT, '--help'])
else:
print
print 'Options inherited from autoserv:'
run([AUTOSERV, '--help'])
sys.exit(0)
return options, args
def assert_inside_chroot(common_sh):
status, output = commands.getstatusoutput('/bin/bash -c ". %s && '
'assert_inside_chroot"' % common_sh)
if status != 0:
print >> sys.stderr, output
sys.exit(status)
def set_common_env(common_sh, env_var):
env_value = commands.getoutput('/bin/bash -c \'. %s && echo $%s\'' %
(common_sh, env_var))
os.environ[env_var] = env_value
def die(common_sh, msg):
output = commands.getoutput('/bin/bash -c \'. %s && die "%s"\'' %
(common_sh, msg))
print >> sys.stderr, output
sys.exit(1)
def build_autotest(options):
environ = os.environ
if options.jobs != -1:
emerge_jobs = '--jobs=%d' % options.jobs
else:
emerge_jobs = ''
# Decide on USE flags based on options
use_flag = environ.get('USE', '')
if not options.autox:
use_flag = use_flag + ' -autox'
if options.buildcheck:
use_flag = use_flag + ' buildcheck'
board_blacklist_file = ('%s/src/overlays/overlay-%s/autotest-blacklist' %
(os.environ['GCLIENT_ROOT'], options.board))
if os.path.exists(board_blacklist_file):
blacklist = [line.strip()
for line in open(board_blacklist_file).readlines()]
else:
blacklist = []
all_tests = ('compilebench,dbench,disktest,fsx,hackbench,iperf,ltp,netperf2,'
'netpipe,unixbench')
site_tests = '../third_party/autotest/files/client/site_tests'
for site_test in os.listdir(site_tests):
test_path = os.path.join(site_tests, site_test)
test_py = os.path.join(test_path, '%s.py' % site_test)
if (os.path.exists(test_path) and os.path.isdir(test_path) and
os.path.exists(test_py) and os.path.isfile(test_py) and
site_test not in blacklist):
all_tests += ',' + site_test
if 'all' == options.build.lower():
if not options.noprompt:
print 'Pre-building all client tests may take a long time to finish.'
print 'Are you sure you want to continue? (N/y)',
answer = sys.stdin.readline()
if not answer or 'y' != answer[0].lower():
print 'Use --build to specify the tests you\'d like to pre-compile.'
print 'E.g.: ./autotest --build=disktest,hardware_SAT'
sys.exit(0)
test_list = all_tests
else:
test_list = options.build
environ['FEATURES'] = ('%s -buildpkg -collision-protect' %
environ.get('FEATURES', ''))
environ['TEST_LIST'] = test_list
environ['USE'] = use_flag
emerge_cmd = ['emerge-%s' % options.board,
'chromeos-base/autotest']
if emerge_jobs:
emerge_cmd.append(emerge_jobs)
return run(emerge_cmd)
def run_autoserv(options, args):
environ = os.environ
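# Pass the test arguments through the environment; autotest_run.sh (and
# the ebuild it drives) is expected to pick these up.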
environ['AUTOSERV_TEST_ARGS'] = options.args
environ['AUTOSERV_ARGS'] = ' '.join(args)
environ['FEATURES'] = ('%s -buildpkg -digest noauto' %
environ.get('FEATURES', ''))
ebuild_cmd = [ './autotest_run.sh', '--board=%s' % options.board]
run(ebuild_cmd)
def main():
me = sys.argv[0]
common_sh = os.path.join(os.path.dirname(me), 'common.sh')
assert_inside_chroot(common_sh)
set_common_env(common_sh, 'GCLIENT_ROOT')
options, args = parse_args_and_help()
if not options.board:
die(common_sh, 'Missing --board argument.')
if options.build:
status = build_autotest(options)
if status:
die(common_sh, 'build_autotest failed.')
else:
ssh_key_file = os.path.join(os.path.dirname(me),
'mod_for_test_scripts/ssh_keys/testing_rsa')
os.chmod(ssh_key_file, 0400)
run_autoserv(options, args)
if __name__ == '__main__':
main()


@ -27,7 +27,8 @@ class ParallelTestRunner(object):
"""
def __init__(self, tests, base_ssh_port=_DEFAULT_BASE_SSH_PORT, board=None,
image_path=None, order_output=False, results_dir_root=None):
image_path=None, order_output=False, results_dir_root=None,
use_emerged=False):
"""Constructs and initializes the test runner class.
Args:
@ -50,6 +51,7 @@ class ParallelTestRunner(object):
self._image_path = image_path
self._order_output = order_output
self._results_dir_root = results_dir_root
self._use_emerged = use_emerged
def _SpawnTests(self):
"""Spawns VMs and starts the test runs on them.
@ -64,10 +66,6 @@ class ParallelTestRunner(object):
"""
ssh_port = self._base_ssh_port
spawned_tests = []
# Test runs shouldn't need anything from stdin. However, it seems that
# running with stdin leaves the terminal in a bad state so redirect from
# /dev/null.
dev_null = open('/dev/null')
for test in self._tests:
args = [ os.path.join(os.path.dirname(__file__), 'cros_run_vm_test'),
'--snapshot', # The image is shared so don't modify it.
@ -79,13 +77,13 @@ class ParallelTestRunner(object):
if self._results_dir_root:
args.append('--results_dir_root=%s/%s.%d' %
(self._results_dir_root, test, ssh_port))
if self._use_emerged: args.append('--use_emerged')
Info('Running %r...' % args)
output = None
if self._order_output:
output = tempfile.NamedTemporaryFile(prefix='parallel_vm_test_')
Info('Piping output to %s.' % output.name)
proc = subprocess.Popen(args, stdin=dev_null, stdout=output,
stderr=output)
proc = subprocess.Popen(args, stdout=output, stderr=output)
test_info = { 'test': test,
'proc': proc,
'output': output }
@ -147,6 +145,8 @@ def main():
parser.add_option('--results_dir_root',
help='Root results directory. If none specified, each test '
'will store its results in a separate /tmp directory.')
parser.add_option('--use_emerged', action='store_true', default=False,
help='Force use of emerged autotest packages')
(options, args) = parser.parse_args()
if not args:
@ -155,7 +155,7 @@ def main():
runner = ParallelTestRunner(args, options.base_ssh_port, options.board,
options.image_path, options.order_output,
options.results_dir_root)
options.results_dir_root, options.use_emerged)
runner.Run()


@ -15,16 +15,17 @@ MAX_RETRIES=3
get_default_board
DEFINE_string board "$DEFAULT_BOARD" \
"The board for which you built autotest."
"The board for which you built autotest." b
DEFINE_string image_path "" "Full path of the VM image"
DEFINE_string results_dir_root "" "alternate root results directory"
DEFINE_string test_case "" "Name of the test case to run"
DEFINE_boolean use_emerged ${FLAGS_FALSE} \
"Force use of emerged autotest packages"
set -e
# Parse command line.
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
# Use latest if not specified.
if [ -z "${FLAGS_image_path}" ]; then
@ -36,7 +37,25 @@ fi
[ -e "${FLAGS_image_path}" ] || die "Image ${FLAGS_image_path} does not exist."
[ -n "${FLAGS_test_case}" ] || die "You must specify a test case."
if [ -n "${FLAGS_test_case}" ]; then
warn "Use of --test_case=<test> is being deprecated. Just pass test names \
as separate command line arguments."
fi
if [ -z "${FLAGS_test_case}" ] && [ -z "${FLAGS_ARGV}" ]; then
die "You must specify a test case."
fi
USE_EMERGED=
if [[ ${FLAGS_use_emerged} -eq ${FLAGS_TRUE} ]]; then
USE_EMERGED="--use_emerged"
fi
tests=( )
[ -n "${FLAGS_test_case}" ] && tests=( "${FLAGS_test_case}" )
for test in ${FLAGS_ARGV}; do
tests=( "${tests[@]}" "$(remove_quotes "${test}")" )
done
trap stop_kvm EXIT
start_kvm "${FLAGS_image_path}"
@ -47,4 +66,5 @@ retry_until_ssh ${MAX_RETRIES}
--ssh_port=${FLAGS_ssh_port} \
--remote=127.0.0.1 \
--results_dir_root="${FLAGS_results_dir_root}" \
"${FLAGS_test_case}"
${USE_EMERGED} \
"${tests[@]}"


@ -10,6 +10,10 @@
. "$(dirname $0)/../lib/cros_vm_lib.sh"
. "$(dirname "$0")/../lib/cros_vm_constants.sh"
get_default_board
DEFINE_string board "${DEFAULT_BOARD}" \
"Board for VM image (unnecessary if path given)"
DEFINE_string image_path "" "Full path of the VM image"
set -e
@ -20,7 +24,8 @@ eval set -- "${FLAGS_ARGV}"
# Use latest if not specified.
if [ -z "${FLAGS_image_path}" ]; then
LATEST_IMAGE="$(${SCRIPTS_DIR}/get_latest_image.sh)/${DEFAULT_QEMU_IMAGE}"
LATEST_IMAGE="$(${SCRIPTS_DIR}/get_latest_image.sh \
--board=${FLAGS_board})/${DEFAULT_QEMU_IMAGE}"
info "Using latest vm image ${LATEST_IMAGE}"
FLAGS_image_path=${LATEST_IMAGE}
fi

bootperf-bin/__init__.py (new file, empty)

bootperf-bin/bootperf (new executable file, 213 lines)

@ -0,0 +1,213 @@
#!/bin/bash
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Wrapper to run the platform_BootPerfServer autotest, and store the
# results for later analysis by the 'showbootdata' script.
#
# NOTE: This script must be run from inside the chromeos build
# chroot environment.
#
# SCRIPT_DIR="$(cd "$(dirname $0)/.." ; pwd)"
SCRIPT_DIR=$HOME/trunk/src/scripts
. "$SCRIPT_DIR/common.sh"
DEFINE_string output_dir "" "output directory for results" o
DEFINE_boolean keep_logs "$FLAGS_FALSE" "keep autotest results" k
RUN_TEST="$SCRIPT_DIR/run_remote_tests.sh"
TEST=server/site_tests/platform_BootPerfServer/control
TMP_RESULTS="/tmp/bootperf.$(date '+%Y%j%H%M').$$"
RESULTS_KEYVAL=platform_BootPerfServer/platform_BootPerfServer/results/keyval
RESULTS_SUMMARY_FILES=(
$RESULTS_KEYVAL
platform_BootPerfServer/keyval
platform_BootPerfServer/platform_BootPerfServer/keyval
platform_BootPerfServer/platform_BootPerfServer/platform_BootPerf/keyval
platform_BootPerfServer/platform_BootPerfServer/status
platform_BootPerfServer/platform_BootPerfServer/status.log
platform_BootPerfServer/status
platform_BootPerfServer/status.log
platform_BootPerfServer/sysinfo/cmdline
platform_BootPerfServer/sysinfo/cpuinfo
platform_BootPerfServer/sysinfo/modules
platform_BootPerfServer/sysinfo/uname
platform_BootPerfServer/sysinfo/version
)
# Structure of a results directory:
# $RUNDIR.$ITER/ - directory
# $RUNDIR_LOG - file
# $RUNDIR_SUMMARY/ - directory
# $RUNDIR_ALL_RESULTS/ - optional directory
# $KEYVAL_SUMMARY - file
# If you add any other content under the results directory, you'll
# probably need to change extra_files(), below.
RUNDIR=run
RUNDIR_LOG=log.txt
RUNDIR_SUMMARY=summary
RUNDIR_ALL_RESULTS=logs
KEYVAL_SUMMARY=results_keyval
# Usage/help function. This function is known to the shflags library,
# and mustn't be renamed.
flags_help() {
cat <<END_USAGE >&2
usage: $(basename $0) [ <options> ] <ip-address> [ <count> ]
Options:
--output_dir <directory>
-o <directory> Specify output directory for results
--[no]keep_logs
-k Keep [don't keep] autotest log files
Summary:
Run the platform_BootPerfServer autotest, and store results in the
given destination directory. The test target is specified by
<ip-address>.
By default, the test is run once; if <count> is given, the test is
run that many times. Note that the platform_BootPerfServer test
reboots the target 10 times, so the total number of reboots will
be 10*<count>.
If the destination directory doesn't exist, it is created. If the
destination directory already holds test results, additional
results are added in without overwriting earlier results.
If no destination is specified, the current directory is used,
provided that the directory is empty, or has been previously used
as a destination directory for this command.
By default, only a summary subset of the log files created by
autotest is preserved; with --keep_logs the (potentially large)
autotest logs are preserved with the test results.
END_USAGE
return $FLAGS_TRUE
}
usage() {
if [ $# -gt 0 ]; then
error "$(basename $0): $*"
echo >&2
fi
flags_help
exit 1
}
# List any files in the current directory not created as output
# from running this script.
extra_files() {
ls | grep -v "^$RUNDIR[.]...\$" |
grep -v $KEYVAL_SUMMARY
}
# Main function to run the boot performance test. Run the boot
# performance test for the given count, putting output into the
# current directory.
#
# Arguments are <ip-address> and <count> arguments, as for the main
# command.
#
# We terminate test runs if "run_remote_tests" ever fails to produce
# the results file; generally this is the result of a serious error
# (e.g. disk full) that won't go away if we just plow on.
run_boot_test() {
local remote="$1"
local count="${2:-1}"
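# Determine the next run number: the glob expands to the existing run.NNN
# directories, and expr keeps the last three characters (the suffix of the
# highest-numbered run). If no run directory exists yet, the glob stays
# literal and we start at 000.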
local iter=$(expr "$(echo $RUNDIR.???)" : '.*\(...\)')
if [ "$iter" != "???" ]; then
iter=$(echo $iter | awk '{printf "%03d\n", $1 + 1}')
else
iter=000
fi
i=0
while [ $i -lt $count ]; do
local iter_rundir=$RUNDIR.$iter
local logfile=$iter_rundir/$RUNDIR_LOG
local summary_dir=$iter_rundir/$RUNDIR_SUMMARY
local all_results_dir=$iter_rundir/$RUNDIR_ALL_RESULTS
mkdir $iter_rundir
echo "run $iter start at $(date)"
$RUN_TEST --results_dir_root="$TMP_RESULTS" \
--remote="$remote" $TEST >$logfile 2>&1
if [ ! -e "$TMP_RESULTS/$RESULTS_KEYVAL" ]; then
error "No results file; terminating test runs."
error "Check $logfile for output from the test run,"
error "and see $TMP_RESULTS for full test logs and output."
break
fi
mkdir $summary_dir
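# tar-to-tar pipe: copy just the summary files out of $TMP_RESULTS,
# preserving their relative paths under $summary_dir.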
tar cf - -C $TMP_RESULTS "${RESULTS_SUMMARY_FILES[@]}" |
tar xf - -C $summary_dir
if [ $FLAGS_keep_logs -eq $FLAGS_TRUE ]; then
mv $TMP_RESULTS $all_results_dir
chmod 755 $all_results_dir
else
rm -rf $TMP_RESULTS
fi
i=$(expr $i + 1)
iter=$(echo $iter | awk '{printf "%03d\n", $1 + 1}')
done
# "run 000 start at $(date)"
echo " ... end at $(date)"
cat $RUNDIR.???/$RUNDIR_SUMMARY/$RESULTS_KEYVAL >$KEYVAL_SUMMARY
}
# Main routine - check validity of the (already parsed) command line
# options. 'cd' to the results directory, if it was specified. If
# all the arguments checks pass, hand control to run_boot_test
main() {
if [ $# -lt 1 ]; then
usage "Missing target host address"
elif [ $# -gt 2 ]; then
usage "Too many arguments"
fi
if [ -n "${FLAGS_output_dir}" ]; then
if [ ! -d "${FLAGS_output_dir}" ]; then
if ! mkdir "${FLAGS_output_dir}"; then
usage "Unable to create ${FLAGS_output_dir}"
fi
fi
cd "${FLAGS_output_dir}" ||
usage "No permissions to chdir to ${FLAGS_output_dir}"
elif [ -n "$(extra_files)" ]; then
error "No results directory specified, and current directory"
error "contains contents other than run results."
error "You can override this error by using the --output_dir option"
usage
fi
# Check the count argument.
# N.B. the test [ "$2" -eq "$2" ] tests whether "$2" is valid as a
# number; when it fails it will also report a syntax error (which
# we suppress).
if [ -n "$2" ]; then
if ! [ "$2" -eq "$2" ] 2>/dev/null || [ "$2" -le 0 ]; then
usage "<count> argument must be a positive number"
fi
fi
run_boot_test "$@"
}
# shflags defines --help implicitly; if it's used on the command
# line FLAGS will invoke flags_help, set FLAGS_help to TRUE, and
# then return false. To avoid printing help twice, we have to check
# for that case here.
if ! FLAGS "$@"; then
if [ ${FLAGS_help} -eq ${FLAGS_TRUE} ]; then
exit 0
else
usage
fi
fi
eval main "${FLAGS_ARGV}"

bootperf-bin/perfprinter.py (new file, 132 lines)

@ -0,0 +1,132 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Routines for printing boot time performance test results."""
import fnmatch
import os
import os.path
import re
import resultset
_PERF_KEYVAL_PATTERN = re.compile("(.*){perf}=(.*)\n")
def ReadKeyvalFile(results, file_):
"""Read an autotest keyval file, and process the results.
The `file_` parameter is a file object with contents in autotest
perf keyval format:
<keyname>{perf}=<value>
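for example: seconds_kernel_to_login{perf}=8.42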
Each iteration of the test is terminated with a single blank line,
including the last iteration. Each iteration's results are added
to the `results` parameter, which should be an instance of
TestResultSet.
"""
kvd = {}
for line in iter(file_):
if line == "\n":
results.AddIterationResults(kvd)
kvd = {}
continue
m = _PERF_KEYVAL_PATTERN.match(line)
if m is None:
continue
kvd[m.group(1)] = m.group(2)
_RESULTS_PATH = (
"summary/platform_BootPerfServer/platform_BootPerfServer/results/keyval")
def ReadResultsDirectory(dir_):
"""Process results from a 'bootperf' output directory.
The accumulated results are returned in a newly created
TestResultSet object.
"""
res_set = resultset.TestResultSet(dir_)
dirlist = fnmatch.filter(os.listdir(dir_), "run.???")
dirlist.sort()
for run in dirlist:
keyval_path = os.path.join(dir_, run, _RESULTS_PATH)
try:
kvf = open(keyval_path)
except IOError:
continue
ReadKeyvalFile(res_set, kvf)
res_set.FinalizeResults()
return res_set
def PrintRawData(dirlist, use_timestats, keylist):
"""Print 'bootperf' results in "raw data" format."""
for dir_ in dirlist:
if use_timestats:
keyset = ReadResultsDirectory(dir_).TimeKeySet()
else:
keyset = ReadResultsDirectory(dir_).DiskKeySet()
for i in range(0, keyset.num_iterations):
if len(dirlist) > 1:
line = "%s %3d" % (dir_, i)
else:
line = "%3d" % i
if keylist is not None:
markers = keylist
else:
markers = keyset.markers
for stat in markers:
(_, v) = keyset.PrintableStatistic(keyset.RawData(stat)[i])
line += " %5s" % str(v)
print line
def PrintStatisticsSummary(dirlist, use_timestats, keylist):
"""Print 'bootperf' results in "summary of averages" format."""
if use_timestats:
header = "%5s %3s %5s %3s %s" % (
"time", "s%", "dt", "s%", "event")
format = "%5s %2d%% %5s %2d%% %s"
else:
header = "%6s %3s %6s %3s %s" % (
"diskrd", "s%", "delta", "s%", "event")
format = "%6s %2d%% %6s %2d%% %s"
havedata = False
for dir_ in dirlist:
if use_timestats:
keyset = ReadResultsDirectory(dir_).TimeKeySet()
else:
keyset = ReadResultsDirectory(dir_).DiskKeySet()
if keylist is not None:
markers = keylist
else:
markers = keyset.markers
if havedata:
print
if len(dirlist) > 1:
print "%s" % dir_,
print "(on %d cycles):" % keyset.num_iterations
print header
prevvalue = 0
prevstat = None
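# Each output row shows the cumulative statistic at an event plus the
# delta from the previous event; the first event's delta is measured
# from zero.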
for stat in markers:
(valueavg, valuedev) = keyset.Statistics(stat)
valuepct = int(100 * valuedev / valueavg + 0.5)
if prevstat:
(deltaavg, deltadev) = keyset.DeltaStatistics(prevstat, stat)
deltapct = int(100 * deltadev / deltaavg + 0.5)
else:
deltapct = valuepct
(valstring, val_printed) = keyset.PrintableStatistic(valueavg)
delta = val_printed - prevvalue
(deltastring, _) = keyset.PrintableStatistic(delta)
print format % (valstring, valuepct, "+" + deltastring, deltapct, stat)
prevvalue = val_printed
prevstat = stat
havedata = True

bootperf-bin/resultset.py (new file, 274 lines)

@ -0,0 +1,274 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Classes and functions for managing platform_BootPerf results.
Results from the platform_BootPerf test in the ChromiumOS autotest
package are stored as performance 'keyvals', that is, a mapping
of names to numeric values. For each iteration of the test, one
set of keyvals is recorded.
This module currently tracks two kinds of keyval results, the boot
time results, and the disk read results. These results are stored
with keyval names such as 'seconds_kernel_to_login' and
'rdbytes_kernel_to_login'. Additionally, some older versions of the
test produced keyval names such as 'sectors_read_kernel_to_login'.
These keyvals record an accumulated total measured from a fixed
time in the past (kernel startup), e.g. 'seconds_kernel_to_login'
records the total seconds from kernel startup to login screen
ready.
The boot time keyval names all start with the prefix
'seconds_kernel_to_', and record time in seconds since kernel
startup.
The disk read keyval names all start with the prefix
'rdbytes_kernel_to_', and record bytes read from the boot device
since kernel startup. The obsolete disk keyvals start with the
prefix 'sectors_read_kernel_to_' and record the same statistic
measured in 512-byte sectors.
Boot time and disk keyval values have a consistent ordering
across iterations. For instance, if in one iteration the value of
'seconds_kernel_to_login' is greater than the value of
'seconds_kernel_to_x_started', then it will be greater in *all*
iterations. This property is a consequence of the underlying
measurement procedure; it is not enforced by this module.
"""
import math
def _ListStats(list_):
# Utility function - calculate the average and (sample) standard
# deviation of a list of numbers. Result is float, even if the
# input list is full of int's
sum_ = 0.0
sumsq = 0.0
for v in list_:
sum_ += v
sumsq += v * v
n = len(list_)
avg = sum_ / n
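# Computational formula for the sample variance:
# (sum(v^2) - n*avg^2) / (n - 1). Floating-point rounding can push
# this slightly negative, hence the clamp below.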
var = (sumsq - sum_ * avg) / (n - 1)
if var < 0.0:
var = 0.0
dev = math.sqrt(var)
return (avg, dev)
def _DoCheck(dict_):
# Utility function - check that all keyvals occur the same
# number of times. On success, return the number of occurrences;
# on failure return None
check = map(len, dict_.values())
if not check:
return None
for i in range(1, len(check)):
if check[i] != check[i-1]:
return None
return check[0]
def _KeyDelta(dict_, key0, key1):
# Utility function - return a list of the vector difference between
# two keyvals.
return map(lambda a, b: b - a, dict_[key0], dict_[key1])
class TestResultSet(object):
"""A set of boot time and disk usage result statistics.
Objects of this class consist of two sets of result statistics:
the boot time statistics and the disk statistics.
Class TestResultSet does not interpret or store keyval mappings
directly; iteration results are processed by attached _KeySet
objects, one for boot time (`_timekeys`), one for disk read
(`_diskkeys`). These attached _KeySet objects can be obtained
with appropriate methods; various methods on these objects will
calculate statistics on the results, and provide the raw data.
"""
def __init__(self, name):
self.name = name
self._timekeys = _TimeKeySet()
self._diskkeys = _DiskKeySet()
self._olddiskkeys = _OldDiskKeySet()
def AddIterationResults(self, runkeys):
"""Add keyval results from a single iteration.
A TestResultSet is constructed by repeatedly calling
AddIterationResults(), iteration by iteration. Iteration results are
passed in as a dictionary mapping keyval attributes to values.
When all iteration results have been added, FinalizeResults()
makes the results available for analysis.
"""
self._timekeys.AddRunResults(runkeys)
self._diskkeys.AddRunResults(runkeys)
self._olddiskkeys.AddRunResults(runkeys)
def FinalizeResults(self):
"""Make results available for analysis.
A TestResultSet is constructed by repeatedly feeding it results,
iteration by iteration. Iteration results are passed in as a
dictionary mapping keyval attributes to values. When all iteration
results have been added, FinalizeResults() makes the results
available for analysis.
"""
self._timekeys.FinalizeResults()
if not self._diskkeys.FinalizeResults():
self._olddiskkeys.FinalizeResults()
self._diskkeys = self._olddiskkeys
self._olddiskkeys = None
def TimeKeySet(self):
"""Return the boot time statistics result set."""
return self._timekeys
def DiskKeySet(self):
"""Return the disk read statistics result set."""
return self._diskkeys
class _KeySet(object):
"""Container for a set of related statistics.
_KeySet is an abstract superclass for containing collections of
either boot time or disk read statistics. Statistics are stored
as a dictionary (`_keyvals`) mapping keyval names to lists of
values.
The mapped keyval names are shortened by stripping the prefix
that identifies the type of statistic (keyvals that don't start with
the proper prefix are ignored). So, for example, with boot time
keyvals, 'seconds_kernel_to_login' becomes 'login' (and
'rdbytes_kernel_to_login' is ignored).
A list of all valid keyval names is stored in the `markers`
instance variable. The list is sorted by the natural ordering of
the underlying values (see the module comments for more details).
The list of values associated with a given keyval name is indexed
in the order in which they were added. So, all values for a given
iteration are stored at the same index.
"""
def __init__(self):
self._keyvals = {}
def AddRunResults(self, runkeys):
"""Add results for one iteration."""
for key, value in runkeys.iteritems():
if not key.startswith(self.PREFIX):
continue
shortkey = key[len(self.PREFIX):]
keylist = self._keyvals.setdefault(shortkey, [])
keylist.append(self._ConvertVal(value))
def FinalizeResults(self):
"""Finalize this object's results.
This method makes available the `markers` and `num_iterations`
instance variables. It also ensures that every keyval occurred
in every iteration by requiring that all keyvals have the same
number of data points.
"""
count = _DoCheck(self._keyvals)
if count is None:
self.num_iterations = 0
self.markers = []
return False
self.num_iterations = count
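# Order the marker names by their value in the first iteration; as the
# module comments explain, this ordering is consistent across all
# iterations.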
keylist = map(lambda k: (self._keyvals[k][0], k),
self._keyvals.keys())
keylist.sort(key=lambda tp: tp[0])
self.markers = map(lambda tp: tp[1], keylist)
return True
def RawData(self, key):
"""Return the list of values for the given marker key."""
return self._keyvals[key]
def DeltaData(self, key0, key1):
"""Return vector difference of the values of the given keys."""
return _KeyDelta(self._keyvals, key0, key1)
def Statistics(self, key):
"""Return the average and standard deviation of the key's values."""
return _ListStats(self._keyvals[key])
def DeltaStatistics(self, key0, key1):
"""Return the average and standard deviation of the differences
between two keys.
"""
return _ListStats(self.DeltaData(key0, key1))
class _TimeKeySet(_KeySet):
"""Concrete subclass of _KeySet for boot time statistics."""
# TIME_KEY_PREFIX = 'seconds_kernel_to_'
PREFIX = 'seconds_kernel_to_'
# Time-based keyvals are reported in seconds and get converted to
# milliseconds
TIME_SCALE = 1000
def _ConvertVal(self, value):
# We use a "round to nearest int" formula here to make sure we
# don't lose anything in the conversion from decimal.
return int(self.TIME_SCALE * float(value) + 0.5)
def PrintableStatistic(self, value):
v = int(value + 0.5)
return ("%d" % v, v)
class _DiskKeySet(_KeySet):
"""Concrete subclass of _KeySet for disk read statistics."""
PREFIX = 'rdbytes_kernel_to_'
# Disk read keyvals are reported in bytes and get converted to
# MBytes (1 MByte = 1 million bytes, not 2**20)
DISK_SCALE = 1.0e-6
def _ConvertVal(self, value):
return self.DISK_SCALE * float(value)
def PrintableStatistic(self, value):
v = round(value, 1)
return ("%.1fM" % v, v)
class _OldDiskKeySet(_DiskKeySet):
"""Concrete subclass of _KeySet for the old-style disk read statistics."""
# Older versions of platform_BootPerf reported total sectors read
# using names of the form sectors_read_kernel_to_* (instead of the
# more recent rdbytes_kernel_to_*), but some of those names
# exceeded the 30-character limit in the MySQL database schema.
PREFIX = 'sectors_read_kernel_to_'
# Old style disk read keyvals are reported in 512-byte sectors and
# get converted to MBytes (1 MByte = 1 million bytes, not 2**20)
SECTOR_SCALE = 512 * _DiskKeySet.DISK_SCALE
def _ConvertVal(self, value):
return self.SECTOR_SCALE * float(value)

bootperf-bin/showbootdata (new executable file, 116 lines)

@ -0,0 +1,116 @@
#!/usr/bin/python
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A command to display summary statistics from runs of 'bootperf'.
Command line options allow selecting from one of two sets of
performance statistics: The boot time statistics (selected by
--timestats) measure time spent from kernel startup in milliseconds.
The disk statistics (selected by --diskstats) measure total bytes
read from the boot device since kernel startup.
Boot time statistics are recorded as cumulative time (or disk read)
since kernel startup, measured when specific events occur during
boot. Events include such things as 'startup', the moment when the
upstart 'startup' job begins running, and 'login', when the
Chrome OS login screen is displayed. By default, all recorded events
are included in the output; command line options allow restricting
the view to a selected subset of events.
Separate command line options allow selecting from one of two
different display modes. When --averages is selected, the display
shows the average value and sample standard deviation (as a percent
of the average) for all selected events. The --averages display also
calculates the time (or bytes) between adjacent events, and shows
the average and sample standard deviation of the differences.
The --rawdata display shows the raw data value associated with each
event for each boot: Each line of output represents the event values
for one boot cycle.
"""
import sys
import optparse
import perfprinter
_usage = "%prog [options] [results-directory ...]"
_description = """\
Summarize boot time performance results. The result directory
arguments are directories previously specified as output for the
'bootperf' script.
"""
optparser = optparse.OptionParser(usage=_usage, description=_description)
optgroup = optparse.OptionGroup(
optparser, "Selecting boot time or disk statistics (choose one)")
optgroup.add_option(
"-d", "--diskstats", action="store_true",
dest="use_diskstats",
help="use statistics for bytes read since kernel startup")
optgroup.add_option(
"-t", "--timestats", action="store_true",
dest="use_timestats",
help="use statistics for time since kernel startup (default)")
optparser.add_option_group(optgroup)
optparser.set_defaults(use_diskstats=False)
optparser.set_defaults(use_timestats=False)
optgroup = optparse.OptionGroup(optparser, "Event selection")
optgroup.add_option(
"-e", "--event", action="append",
dest="eventnames",
help="restrict statistics to the comma-separated list of events")
optparser.add_option_group(optgroup)
optgroup = optparse.OptionGroup(
optparser, "Display mode selection (choose one)")
optgroup.add_option(
"-a", "--averages", action="store_true",
dest="print_averages",
help="display a summary of the averages of chosen statistics (default)")
optgroup.add_option(
"-r", "--rawdata", action="store_true",
dest="print_raw",
help="display raw data from all boot iterations")
optparser.add_option_group(optgroup)
optparser.set_defaults(print_averages=False)
optparser.set_defaults(print_raw=False)
def main(argv):
(options, args) = optparser.parse_args(argv)
if options.print_averages and options.print_raw:
print >>sys.stderr, "Can't use -a and -r together.\n"
optparser.print_help()
sys.exit(1)
elif options.print_raw:
printfunc = perfprinter.PrintRawData
else:
printfunc = perfprinter.PrintStatisticsSummary
if options.use_timestats and options.use_diskstats:
print >>sys.stderr, "Can't use -t and -d together.\n"
optparser.print_help()
sys.exit(1)
elif options.use_diskstats:
use_timestats = False
else:
use_timestats = True
if options.eventnames:
keylist = []
for kl in options.eventnames:
keylist.extend(kl.split(','))
else:
keylist = None
printfunc(args, use_timestats, keylist)
if __name__ == "__main__":
if len(sys.argv) > 1:
main(sys.argv[1:])
else:
main(["."])


@ -72,8 +72,8 @@ DEFINE_string usb_disk /dev/sdb3 \
DEFINE_boolean enable_rootfs_verification ${FLAGS_TRUE} \
"Default all bootloaders to use kernel-based root fs integrity checking."
DEFINE_integer verity_error_behavior 1 \
"Kernel verified boot error behavior (0: I/O errors, 1: panic, 2: nothing) \
Default: 1"
"Kernel verified boot error behavior (0: I/O errors, 1: panic, 2: nothing, \
3: cros) Default: 1"
DEFINE_integer verity_depth 1 \
"Kernel verified boot hash tree depth. Default: 1"
DEFINE_integer verity_max_ios -1 \


@ -26,7 +26,7 @@ export CHROMEOS_VERSION_MINOR=9
# Increment by 2 in trunk after making a release branch.
# Does not reset on a major/minor change (always increases).
# (Trunk is always odd; branches are always even).
export CHROMEOS_VERSION_BRANCH=111
export CHROMEOS_VERSION_BRANCH=113
# Patch number.
# Increment by 1 each release on a branch.
@ -84,8 +84,8 @@ export CHROMEOS_VERSION_STRING=\
# Set CHROME values (Used for releases) to pass to chromeos-chrome-bin ebuild
# URL to chrome archive
export CHROME_BASE=
# directory containing chrome-chromeos.zip - an svn rev or a full version
export CHROME_BUILD=
# export CHROME_VERSION from incoming value or NULL and let ebuild default
export CHROME_VERSION="$CHROME_VERSION"
# Print (and remember) version info.
echo "ChromeOS version information:"


@ -52,6 +52,7 @@ def _GrabOutput(cmd):
def _GrabTags():
"""Returns list of tags from current git repository."""
# TODO(dianders): replace this with the python equivalent.
cmd = ("git for-each-ref refs/tags | awk '{print $3}' | "
"sed 's,refs/tags/,,g' | sort -t. -k3,3rn -k4,4rn")
return _GrabOutput(cmd).split()
@ -129,7 +130,20 @@ class Commit(object):
def __init__(self, commit, projectname, commit_email, commit_date, subject,
body, tracker_acc):
"""Create commit logs."""
"""Create commit logs.
Args:
commit: The commit hash (sha) from git.
projectname: The project name, from:
git config --get remote.cros.projectname
commit_email: The email address associated with the commit (%ce in git
log)
commit_date: The date of the commit, like "Mon Nov 1 17:34:14 2010 -0500"
(%cd in git log)
subject: The subject of the commit (%s in git log)
body: The body of the commit (%b in git log)
tracker_acc: A tracker_access.TrackerAccess object.
"""
self.commit = commit
self.projectname = projectname
self.commit_email = commit_email
@ -149,7 +163,12 @@ class Commit(object):
Returns:
A list of Issue objects, each of which holds info about a bug.
"""
# NOTE: most of this code is copied from bugdroid:
# <http://src.chromium.org/viewvc/chrome/trunk/tools/bugdroid/bugdroid.py?revision=59229&view=markup>
# Get a list of bugs. Handle lots of possibilities:
# - Multiple "BUG=" lines, with varying amounts of whitespace.
# - For each BUG= line, bugs can be split by commas _or_ by whitespace (!)
entries = []
for line in self.body.split('\n'):
match = re.match(r'^ *BUG *=(.*)', line)
@ -157,6 +176,13 @@ class Commit(object):
for i in match.group(1).split(','):
entries.extend(filter(None, [x.strip() for x in i.split()]))
# Try to parse the bugs. Handle lots of different formats:
# - The whole URL, from which we parse the project and bug.
# - A simple string that looks like "project:bug"
# - A string that looks like "bug", which will always refer to the previous
# tracker referenced (defaulting to the default tracker).
#
# We will create an "Issue" object for each bug.
issues = []
last_tracker = DEFAULT_TRACKER
regex = (r'http://code.google.com/p/(\S+)/issues/detail\?id=([0-9]+)'
@ -174,6 +200,7 @@ class Commit(object):
elif bug_tuple[4]:
issues.append(Issue(last_tracker, bug_tuple[4], self._tracker_acc))
# Sort the issues and return...
issues.sort()
return issues
@ -205,12 +232,12 @@ class Commit(object):
bug_str = '<font color="red">none</font>'
cols = [
cgi.escape(self.projectname),
str(self.commit_date),
commit_desc,
cgi.escape(self.commit_email),
bug_str,
cgi.escape(self.subject[:100]),
cgi.escape(self.projectname),
str(self.commit_date),
commit_desc,
cgi.escape(self.commit_email),
bug_str,
cgi.escape(self.subject[:100]),
]
return '<tr><td>%s</td></tr>' % ('</td><td>'.join(cols))
@ -221,7 +248,17 @@ class Commit(object):
def _GrabChanges(path, tag1, tag2, tracker_acc):
"""Return list of commits to path between tag1 and tag2."""
"""Return list of commits to path between tag1 and tag2.
Args:
path: One of the directories managed by repo.
tag1: The first of the two tags to pass to git log.
tag2: The second of the two tags to pass to git log.
tracker_acc: A tracker_access.TrackerAccess object.
Returns:
A list of "Commit" objects.
"""
cmd = 'cd %s && git config --get remote.cros.projectname' % path
projectname = _GrabOutput(cmd).strip()
@ -239,19 +276,24 @@ def _GrabChanges(path, tag1, tag2, tracker_acc):
def _ParseArgs():
"""Parse command-line arguments.
Returns:
The (options, args) tuple returned by optparse.OptionParser.parse_args().
"""
parser = optparse.OptionParser()
parser.add_option(
"--sort-by-date", dest="sort_by_date", default=False,
action='store_true', help="Sort commits by date.")
'--sort-by-date', dest='sort_by_date', default=False,
action='store_true', help='Sort commits by date.')
parser.add_option(
"--tracker-user", dest="tracker_user", default=None,
help="Specify a username to login to code.google.com.")
'--tracker-user', dest='tracker_user', default=None,
help='Specify a username to login to code.google.com.')
parser.add_option(
"--tracker-pass", dest="tracker_pass", default=None,
help="Specify a password to go w/ user.")
'--tracker-pass', dest='tracker_pass', default=None,
help='Specify a password to go w/ user.')
parser.add_option(
"--tracker-passfile", dest="tracker_passfile", default=None,
help="Specify a file containing a password to go w/ user.")
'--tracker-passfile', dest='tracker_passfile', default=None,
help='Specify a file containing a password to go w/ user.')
return parser.parse_args()
@ -264,7 +306,11 @@ def main():
elif len(args) == 1:
tag2, = args
if tag2 in tags:
tag1 = tags[tags.index(tag2) + 1]
tag2_index = tags.index(tag2)
if tag2_index == len(tags) - 1:
print >>sys.stderr, 'No previous tag for %s' % tag2
sys.exit(1)
tag1 = tags[tag2_index + 1]
else:
print >>sys.stderr, 'Unrecognized tag: %s' % tag2
sys.exit(1)
@ -287,7 +333,7 @@ def main():
print >>sys.stderr, INSTRS_FOR_GDATA
sys.exit(1)
if options.tracker_passfile is not None:
options.tracker_pass = open(options.tracker_passfile, "r").read().strip()
options.tracker_pass = open(options.tracker_passfile, 'r').read().strip()
tracker_acc = tracker_access.TrackerAccess(options.tracker_user,
options.tracker_pass)
else:
@ -307,7 +353,7 @@ def main():
print '<table border="1" cellpadding="4">'
print '<tr><th>%s</th>' % ('</th><th>'.join(cols))
if options.sort_by_date:
changes.sort(key=operator.attrgetter("commit_date"))
changes.sort(key=operator.attrgetter('commit_date'))
else:
changes.sort()
for change in changes:


@ -104,7 +104,9 @@ DEFAULT_CHROOT_DIR=${CHROMEOS_CHROOT_DIR:-"$GCLIENT_ROOT/chroot"}
DEFAULT_BUILD_ROOT=${CHROMEOS_BUILD_ROOT:-"$SRC_ROOT/build"}
# Set up a global ALL_BOARDS value
ALL_BOARDS=$(cd $SRC_ROOT/overlays;ls -1d overlay-* 2>&-|sed 's,overlay-,,g')
if [ -d $SRC_ROOT/overlays ]; then
ALL_BOARDS=$(cd $SRC_ROOT/overlays;ls -1d overlay-* 2>&-|sed 's,overlay-,,g')
fi
# Strip CR
ALL_BOARDS=$(echo $ALL_BOARDS)
# Set a default BOARD
@ -129,6 +131,7 @@ fi
CHROOT_TRUNK_DIR="/home/$USER/trunk"
# Install make for portage ebuilds. Used by build_image and gmergefs.
# TODO: Is /usr/local/autotest-chrome still used by anyone?
DEFAULT_INSTALL_MASK="/usr/include /usr/man /usr/share/man /usr/share/doc \
/usr/share/gtk-doc /usr/share/gtk-2.0 /usr/lib/gtk-2.0/include \
/usr/share/info /usr/share/aclocal /usr/lib/gcc /usr/lib/pkgconfig \
@ -143,7 +146,7 @@ FACTORY_INSTALL_MASK="/opt/google/chrome /opt/google/o3d /opt/netscape \
/usr/share/ibus-pinyin /usr/share/libhangul /usr/share/locale \
/usr/share/m17n /usr/share/mime /usr/share/sounds /usr/share/tts \
/usr/share/X11 /usr/share/zoneinfo /usr/lib/debug
/usr/local/autotest /usr/local/autotest-chrome"
/usr/local/autotest /usr/local/autotest-chrome /usr/local/autotest-pkgs"
# Check to ensure not running old scripts
V_REVERSE=''


@ -191,11 +191,11 @@ menuentry "local image B" {
}
menuentry "verified image A" {
linux \$grubpartA/boot/vmlinuz ${common_args} ${verity_common} i915.modeset=1 cros_efi root=/dev/dm-0 dm="DMTABLEA"
linux \$grubpartA/boot/vmlinuz ${common_args} ${verity_common} i915.modeset=1 cros_efi root=/dev/dm-0 dm=\\"DMTABLEA\\"
}
menuentry "verified image B" {
linux \$grubpartB/boot/vmlinuz ${common_args} ${verity_common} i915.modeset=1 cros_efi root=/dev/dm-0 dm="DMTABLEB"
linux \$grubpartB/boot/vmlinuz ${common_args} ${verity_common} i915.modeset=1 cros_efi root=/dev/dm-0 dm=\\"DMTABLEB\\"
}
# FIXME: usb doesn't support verified boot for now


@ -0,0 +1 @@
cros_generate_stateful_update_payload.py


@ -0,0 +1,95 @@
#!/usr/bin/python2.6
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module is responsible for generate a stateful update payload."""
import logging
import optparse
import os
import subprocess
import tempfile
STATEFUL_FILE = 'stateful.tgz'
def GenerateStatefulPayload(image_path, output_directory, logger):
"""Generates a stateful update payload given a full path to an image.
Args:
image_path: Full path to the image.
output_directory: Path to the directory to leave the resulting output.
logger: logging instance.
"""
logger.info('Generating stateful update file.')
from_dir = os.path.dirname(image_path)
image = os.path.basename(image_path)
output_gz = os.path.join(output_directory, STATEFUL_FILE)
crosutils_dir = os.path.dirname(__file__)
# Temporary directories for this function.
rootfs_dir = tempfile.mkdtemp(suffix='rootfs', prefix='tmp')
stateful_dir = tempfile.mkdtemp(suffix='stateful', prefix='tmp')
# Mount the image to pull out the important directories.
try:
# Only need stateful partition, but this saves us having to manage our
# own loopback device.
subprocess.check_call(['%s/mount_gpt_image.sh' % crosutils_dir,
'--from=%s' % from_dir,
'--image=%s' % image,
'--read_only',
'--rootfs_mountpt=%s' % rootfs_dir,
'--stateful_mountpt=%s' % stateful_dir,
])
logger.info('Tarring up /usr/local and /var!')
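# The --transform rules below stage the copies as dev_image_new/var_new
# inside the tarball, presumably so the target can swap them into place
# on its next boot.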
subprocess.check_call(['sudo',
'tar',
'-czf',
output_gz,
'--directory=%s' % stateful_dir,
'--hard-dereference',
'--transform=s,^dev_image,dev_image_new,',
'--transform=s,^var,var_new,',
'dev_image',
'var',
])
except:
logger.error('Failed to create stateful update file')
raise
finally:
# Unmount best effort regardless.
subprocess.call(['%s/mount_gpt_image.sh' % crosutils_dir,
'--unmount',
'--rootfs_mountpt=%s' % rootfs_dir,
'--stateful_mountpt=%s' % stateful_dir,
])
# Clean up our directories.
os.rmdir(rootfs_dir)
os.rmdir(stateful_dir)
logger.info('Successfully generated %s' % output_gz)
def main():
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(os.path.basename(__file__))
parser = optparse.OptionParser()
parser.add_option('-i', '--image_path',
help='The image to generate the stateful update for.')
parser.add_option('-o', '--output_dir',
help='The path to the directory to output the update file.')
options, unused_args = parser.parse_args()
if not options.image_path:
parser.error('Missing image for stateful payload generator')
if not options.output_dir:
parser.error('Missing output directory for the payload generator')
GenerateStatefulPayload(os.path.abspath(options.image_path),
options.output_dir, logger)
if __name__ == '__main__':
main()


@ -87,7 +87,7 @@ patch_kernel() {
offset=$(($offset * 512))
sudo losetup -o "$offset" "$STATE_LOOP_DEV" "$IMAGE"
STATE_MNT=$(mktemp -d /tmp/state.XXXXXX)
sudo mount "$STATE_LOOP_DEV" "$STATE_MNT"
sudo mount --read-only "$STATE_LOOP_DEV" "$STATE_MNT"
dd if="$STATE_MNT"/vmlinuz_hd.vblock of="$KERN_FILE" conv=notrunc
sudo umount "$STATE_MNT"
STATE_MNT=""


@ -93,8 +93,12 @@ def _BestEBuild(ebuilds):
return winner
def _FindStableEBuilds(files):
"""Return a list of stable ebuilds from specified list of files.
def _FindUprevCandidates(files):
"""Return a list of uprev candidates from specified list of files.
Usually an uprev candidate is the stable ebuild in a cros_workon directory.
However, if no such stable ebuild exists (someone just checked in the 9999
ebuild), this is the unstable ebuild.
Args:
files: List of files.
@ -131,7 +135,8 @@ def _FindStableEBuilds(files):
if not unstable_ebuilds:
Die('Missing 9999 ebuild in %s' % os.path.dirname(path))
if not stable_ebuilds:
Die('Missing stable ebuild in %s' % os.path.dirname(path))
Warning('Missing stable ebuild in %s' % os.path.dirname(path))
return unstable_ebuilds[0]
if stable_ebuilds:
return stable_ebuilds[0]
@ -153,7 +158,7 @@ def _BuildEBuildDictionary(overlays, all, packages):
for package_dir, dirs, files in os.walk(overlay):
# Add stable ebuilds to overlays[overlay].
paths = [os.path.join(package_dir, path) for path in files]
ebuild = _FindStableEBuilds(paths)
ebuild = _FindUprevCandidates(paths)
# If the --all option isn't used, we only want to update packages that
# are in packages.
@ -377,8 +382,8 @@ class _EBuild(object):
else:
# Has no revision so we stripped the version number instead.
ebuild_no_version = ebuild_no_rev
ebuild_no_rev = ebuild_path.rpartition('.ebuild')[0]
rev_string = "0"
ebuild_no_rev = ebuild_path.rpartition('9999.ebuild')[0] + '0.0.1'
rev_string = '0'
revision = int(rev_string)
return (ebuild_no_rev, ebuild_no_version, revision)
@ -448,8 +453,10 @@ class EBuildStableMarker(object):
_Print('Adding new stable ebuild to git')
_SimpleRunCommand('git add %s' % new_ebuild_path)
_Print('Removing old ebuild from git')
_SimpleRunCommand('git rm %s' % old_ebuild_path)
if self._ebuild.is_stable:
_Print('Removing old ebuild from git')
_SimpleRunCommand('git rm %s' % old_ebuild_path)
return True
def CommitChange(self, message):


@ -125,7 +125,7 @@ class EBuildTest(mox.MoxTestBase):
def testParseEBuildPathNoRevisionNumber(self):
# Test with ebuild without revision number.
no_rev, no_version, revision = cros_mark_as_stable._EBuild._ParseEBuildPath(
'/path/test_package-0.0.1.ebuild')
'/path/test_package-9999.ebuild')
self.assertEquals(no_rev, '/path/test_package-0.0.1')
self.assertEquals(no_version, '/path/test_package')
self.assertEquals(revision, 0)
@ -139,6 +139,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
self.mox.StubOutWithMock(cros_mark_as_stable, 'RunCommand')
self.mox.StubOutWithMock(os, 'unlink')
self.m_ebuild = self.mox.CreateMock(cros_mark_as_stable._EBuild)
self.m_ebuild.is_stable = True
self.m_ebuild.package = 'test_package'
self.m_ebuild.current_revision = 1
self.m_ebuild.ebuild_path_no_revision = '/path/test_package-0.0.1'
@ -281,13 +282,13 @@ class BuildEBuildDictionaryTest(mox.MoxTestBase):
self.package_path = self.root + '/test_package-0.0.1.ebuild'
paths = [[self.root, [], []]]
cros_mark_as_stable.os.walk("/overlay").AndReturn(paths)
self.mox.StubOutWithMock(cros_mark_as_stable, '_FindStableEBuilds')
self.mox.StubOutWithMock(cros_mark_as_stable, '_FindUprevCandidates')
def testWantedPackage(self):
overlays = {"/overlay": []}
package = _Package(self.package)
cros_mark_as_stable._FindStableEBuilds([]).AndReturn(package)
cros_mark_as_stable._FindUprevCandidates([]).AndReturn(package)
self.mox.ReplayAll()
cros_mark_as_stable._BuildEBuildDictionary(overlays, False, [self.package])
self.mox.VerifyAll()
@ -297,7 +298,7 @@ class BuildEBuildDictionaryTest(mox.MoxTestBase):
def testUnwantedPackage(self):
overlays = {"/overlay": []}
package = _Package(self.package)
cros_mark_as_stable._FindStableEBuilds([]).AndReturn(package)
cros_mark_as_stable._FindUprevCandidates([]).AndReturn(package)
self.mox.ReplayAll()
cros_mark_as_stable._BuildEBuildDictionary(overlays, False, [])
self.assertEquals(len(overlays), 1)


@ -12,7 +12,7 @@
. "$(dirname $0)/common.sh"
. "$(dirname $0)/remote_access.sh"
restart_in_chroot_if_needed $*
assert_inside_chroot
MINIDUMP_DUMP=/usr/bin/minidump_dump
MINIDUMP_STACKWALK=/usr/bin/minidump_stackwalk

generate_au_zip.py (new executable file, 263 lines)

@ -0,0 +1,263 @@
#!/usr/bin/python
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script to generate a zip file of delta-generator and its dependencies.
"""
import logging.handlers
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
# GLOBALS
logging_format = '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s'
date_format = '%Y/%m/%d %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=logging_format,
datefmt=date_format)
def CreateTempDir():
"""Creates a tempdir and returns the name of the tempdir."""
temp_dir = tempfile.mkdtemp(suffix='au', prefix='tmp')
logging.info('Using tempdir = %s', temp_dir)
return temp_dir
def _SplitAndStrip(data):
"""Prunes the ldd output, and return a list of needed library names
Example of data:
linux-vdso.so.1 => (0x00007ffffc96a000)
libbz2.so.1 => /lib/libbz2.so.1 (0x00007f3ff8782000)
libc.so.6 => /lib/libc.so.6 (0x00007f3ff83ff000)
/lib64/ld-linux-x86-64.so.2 (0x00007f3ff89b3000)
Args:
data: list of libraries from ldd output
Returns:
list of libraries that we should copy
"""
return_list = []
for line in data.split('\n'):
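# Strip the "not a dynamic executable" notice, the "name =>" prefix,
# and the "(0x...)" load address, keeping only the resolved library path.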
line = re.sub('.*not a dynamic executable.*', '', line)
line = re.sub('.* =>\s+', '', line)
line = re.sub('\(0x.*\)\s?', '', line)
line = line.strip()
if not len(line):
continue
logging.debug('MATCHED line = %s', line)
return_list.append(line)
return return_list
def DepsToCopy(ldd_files, black_list):
"""Returns a list of deps for a given dynamic executables list.
Args:
ldd_files: List of dynamic files that needs to have the deps evaluated
black_list: List of files that we should ignore
Returns:
library_list: List of files that are dependencies
"""
for file_name in ldd_files:
logging.info('Running ldd on %s', file_name)
cmd = ['/usr/bin/ldd', file_name]
stdout_data = ''
stderr_data = ''
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout_data, stderr_data) = proc.communicate(input=None)
except subprocess.CalledProcessError, e:
logging.error('Command %s failed', cmd)
logging.error('error code %s', e.returncode)
logging.error('output %s', e.output)
raise
library_list = []
if not stdout_data:
return library_list
logging.debug('ldd for %s = stdout = %s stderr =%s', file_name,
stdout_data, stderr_data)
library_list = _SplitAndStrip(stdout_data)
return _ExcludeBlacklist(library_list, black_list)
def CopyRequiredFiles(dest_files_root):
"""Generates a list of files that are required for au-generator zip file
Args:
dest_files_root: location of the directory where we should copy the files
"""
if not dest_files_root:
logging.error('Invalid option passed for dest_files_root')
sys.exit(1)
# Files that need to go through ldd
ldd_files = ['/usr/bin/delta_generator', '/usr/bin/bsdiff',
'/usr/bin/bspatch', '/usr/bin/cgpt']
# statically linked files and scripts etc.,
static_files = ['~/trunk/src/scripts/common.sh',
'~/trunk/src/scripts/cros_generate_update_payload',
'~/trunk/src/scripts/chromeos-common.sh']
# We need directories to be copied recursively to a destination within tempdir
recurse_dirs = {'~/trunk/src/scripts/lib/shflags': 'lib/shflags'}
black_list = [
'linux-vdso.so',
'libgcc_s.so',
'libgthread-2.0.so',
'libpthread.so',
'librt.so',
'libstdc',
'libgcc_s.so',
'libc.so',
'ld-linux-x86-64',
'libm.so',
'libdl.so',
'libresolv.so',
]
all_files = ldd_files + static_files
all_files = map(os.path.expanduser, all_files)
for file_name in all_files:
if not os.path.isfile(file_name):
logging.error('file = %s does not exist', file_name)
sys.exit(1)
logging.debug('Given files that need to be copied = %s', ' '.join(all_files))
all_files += DepsToCopy(ldd_files=ldd_files, black_list=black_list)
for file_name in all_files:
logging.info('Copying file %s to %s', file_name, dest_files_root)
shutil.copy2(file_name, dest_files_root)
for source_dir, target_dir in recurse_dirs.iteritems():
logging.info('Processing directory %s', source_dir)
full_path = os.path.expanduser(source_dir)
if not os.path.isdir(full_path):
logging.error("Directory given for %s expanded to %s doens't exist.",
source_dir, full_path)
sys.exit(1)
dest = os.path.join(dest_files_root, target_dir)
logging.info('Copying directory %s to %s.', full_path, target_dir)
shutil.copytree(full_path, dest)
def CleanUp(temp_dir):
"""Cleans up the tempdir
Args:
temp_dir: name of the directory to clean up
"""
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir, ignore_errors=True)
logging.info('Removed tempdir = %s', temp_dir)
def GenerateZipFile(base_name, root_dir):
"""Returns true if able to generate zip file
Args:
base_name: name of the zip file
root_dir: location of the directory that we should zip
Returns:
True if successfully generates the zip file otherwise False
"""
logging.info('Generating zip file %s with contents from %s', base_name,
root_dir)
current_dir = os.getcwd()
os.chdir(root_dir)
try:
subprocess.Popen(['zip', '-r', '-9', base_name, '.'],
stdout=subprocess.PIPE).communicate()[0]
except OSError, e:
logging.error('Execution failed:%s', e.strerror)
return False
finally:
os.chdir(current_dir)
return True
def _ExcludeBlacklist(library_list, black_list=[]):
"""Deletes the set of files from black_list from the library_list
Args:
library_list: List of the library names to filter through black_list
black_list: List of the black listed names to filter
Returns:
Filtered library_list
"""
return_list = []
if not black_list:
# Joining an empty blacklist would yield the empty pattern, which
# matches (and thus excludes) every library.
return list(library_list)
pattern = re.compile(r'|'.join(black_list))
logging.debug('PATTERN: %s=', pattern)
for library in library_list:
if pattern.search(library):
logging.debug('BLACK-LISTED = %s=', library)
continue
return_list.append(library)
logging.debug('Returning return_list=%s=', return_list)
return return_list
def CopyZipToFinalDestination(output_dir, zip_file_name):
"""Copies the generated zip file to a final destination
Args:
output_dir: Directory where the file should be copied to
zip_file_name: name of the zip file that should be copied
Returns:
True on Success False on Failure
"""
if not os.path.isfile(zip_file_name):
logging.error("Zip file %s doesn't exist. Returning False", zip_file_name)
return False
if not os.path.isdir(output_dir):
logging.debug('Creating %s', output_dir)
os.makedirs(output_dir)
logging.info('Copying %s to %s', zip_file_name, output_dir)
shutil.copy2(zip_file_name, output_dir)
return True


def main():
  """Main function to start the script."""
  parser = optparse.OptionParser()
  parser.add_option('-d', '--debug', dest='debug', action='store_true',
                    default=False, help='Verbose output. Default: False')
  parser.add_option('-o', '--output-dir', dest='output_dir',
                    default='/tmp/au-generator',
                    help='Specify the output location for copying the zipfile')
  parser.add_option('-z', '--zip-name', dest='zip_name',
                    default='au-generator.zip', help='Name of the zip file')
  parser.add_option('-k', '--keep-temp', dest='keep_temp', default=False,
                    action='store_true', help='Keep the temp files')
  (options, args) = parser.parse_args()
  if options.debug:
    logging.getLogger().setLevel(logging.DEBUG)
  logging.debug('Options are %s', options)
  temp_dir = CreateTempDir()
  dest_files_root = os.path.join(temp_dir, 'au-generator')
  os.makedirs(dest_files_root)
  CopyRequiredFiles(dest_files_root=dest_files_root)
  zip_file_name = os.path.join(temp_dir, options.zip_name)
  GenerateZipFile(zip_file_name, dest_files_root)
  CopyZipToFinalDestination(options.output_dir, zip_file_name)
  if not options.keep_temp:
    CleanUp(temp_dir)


if __name__ == '__main__':
  main()
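
For reference, a typical invocation of this wrapper might look like the following (the script's file name is not shown in this hunk; cros_generate_au_zip.py is assumed here purely for illustration):

  ./cros_generate_au_zip.py --debug --output-dir /tmp/au-generator -z au-generator.zip

which builds the zip in a tempdir, copies it to --output-dir, and removes the tempdir unless --keep-temp is given.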


@@ -30,6 +30,7 @@ DEFINE_boolean verify ${FLAGS_TRUE} "Verify image on device after update."
# Flags for devserver.
DEFINE_string archive_dir "" \
"Update using the test image in the image.zip in this directory." a
DEFINE_string board "" "Override the board reported by the target"
DEFINE_integer devserver_port 8080 \
"Port to use for devserver."
DEFINE_boolean for_vm ${FLAGS_FALSE} "Image is for a vm."
@@ -124,10 +125,11 @@ function start_dev_server {
else
# IMAGE_PATH should be the newest image and learn the board from
# the target.
FLAGS_board=""
learn_board
IMAGE_PATH="$($(dirname "$0")/get_latest_image.sh --board="${FLAGS_board}")"
IMAGE_PATH="${IMAGE_PATH}/chromiumos_image.bin"
devserver_flags="${devserver_flags} \
--image $(reinterpret_path_for_chroot ${IMAGE_PATH})"
fi
[ ${FLAGS_for_vm} -eq ${FLAGS_TRUE} ] && \
@@ -139,6 +141,7 @@ function start_dev_server {
info "Starting devserver with flags ${devserver_flags}"
./enter_chroot.sh "sudo ./start_devserver ${devserver_flags} \
--client_prefix=ChromeOSUpdateEngine \
--board=${FLAGS_board} \
--port=${FLAGS_devserver_port} > ${FLAGS_server_log} 2>&1" &
info "Waiting on devserver to start"
@@ -147,6 +150,10 @@ function start_dev_server {
do
sleep 5
echo -n "."
if ! pgrep -f start_devserver > /dev/null; then
echo "Devserver failed, see dev_server.log."
exit 1
fi
done
echo ""
}


@@ -37,7 +37,7 @@ DEFINE_integer rootfs_partition_size 1024 \
"rootfs partition size in MBs."
DEFINE_string state_image "" \
"Stateful partition image (defaults to creating a new stateful partition)"
DEFINE_integer statefulfs_size -1 \
DEFINE_integer statefulfs_size 2048 \
"Stateful partition size in MBs."
DEFINE_boolean test_image "${FLAGS_FALSE}" \
"Copies normal image to chromiumos_test_image.bin, modifies it for test."
@@ -285,6 +285,6 @@ fi
if [ "${FLAGS_format}" == "qemu" ]; then
echo "If you have qemu-kvm installed, you can start the image by:"
echo "sudo kvm -m ${FLAGS_mem} -vga std -pidfile /tmp/kvm.pid -net nic,model=e1000 " \
"-net user,hostfwd=tcp::922-:22 \\"
"-net user,hostfwd=tcp::9222-:22 \\"
echo " -hda ${FLAGS_to}/${DEFAULT_QEMU_IMAGE}"
fi

lib/cros_image_common.sh (new file, 106 lines)

@@ -0,0 +1,106 @@
#!/bin/bash
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script contains common utility functions for dealing with disk images,
# especially for redistribution onto platforms without a complete Chromium OS
# development environment.
# Checks if the given command is available on the current system.
has_command() {
  type "$1" >/dev/null 2>&1
}

err_die() {
  echo "ERROR: $@" >&2
  exit 1
}

# Finds the best gzip compressor and invokes it.
gzip_compress() {
  if has_command pigz; then
    # echo " ** Using parallel gzip **" >&2
    # Tested with -b 32, 64, 128 (default), 256, 1024, 16384; -b 32 (the max
    # window size of Deflate) seems to give the best output size.
    pigz -b 32 "$@"
  else
    gzip "$@"
  fi
}

# Checks whether the current system has the tools for the part_* commands.
has_part_tools() {
  has_command cgpt || has_command parted
}
# Finds the best partition tool and prints the partition offset (in sectors).
part_offset() {
  local file="$1"
  local partno="$2"

  if has_command cgpt; then
    cgpt show -b -i "$partno" "$file"
  elif has_command parted; then
    parted -m "$file" unit s print |
      grep "^$partno:" | cut -d ':' -f 2 | sed 's/s$//'
  else
    exit 1
  fi
}

# Finds the best partition tool and prints the partition size (in sectors).
part_size() {
  local file="$1"
  local partno="$2"

  if has_command cgpt; then
    cgpt show -s -i "$partno" "$file"
  elif has_command parted; then
    parted -m "$file" unit s print |
      grep "^$partno:" | cut -d ':' -f 4 | sed 's/s$//'
  else
    exit 1
  fi
}
# Dumps a file by the given offset and size (in sectors).
dump_partial_file() {
  local file="$1"
  local offset="$2"
  local sectors="$3"
  local bs=512

  # Try to use a larger buffer if offset/size can be re-aligned.
  # 2M / 512 = 4096
  local buffer_ratio=4096
  if [ $((offset % buffer_ratio)) -eq 0 -a \
       $((sectors % buffer_ratio)) -eq 0 ]; then
    offset=$((offset / buffer_ratio))
    sectors=$((sectors / buffer_ratio))
    bs=$((bs * buffer_ratio))
  fi

  if has_command pv; then
    dd if="$file" bs=$bs skip="$offset" count="$sectors" \
        oflag=sync status=noxfer 2>/dev/null |
      pv -ptreb -B 4m -s $((sectors * bs))
  else
    dd if="$file" bs=$bs skip="$offset" count="$sectors" \
        oflag=sync status=noxfer 2>/dev/null
  fi
}
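
To make the re-alignment above concrete, here is the same integer arithmetic sketched in Python with made-up numbers:

  offset, sectors, bs, ratio = 40960, 8192, 512, 4096
  if offset % ratio == 0 and sectors % ratio == 0:
    offset, sectors, bs = offset / ratio, sectors / ratio, bs * ratio
  # Now offset=10, sectors=2, bs=2097152: two 2MiB reads replace 8192
  # 512-byte reads while covering exactly the same bytes.

If either value is not a multiple of 4096 sectors, the function falls back to plain 512-byte blocks.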
# Dumps a specific partition from the given image file.
dump_partition() {
  local file="$1"
  local part_num="$2"
  local offset="$(part_offset "$file" "$part_num")" ||
    err_die "failed to dump partition #$part_num from: $file"
  local size="$(part_size "$file" "$part_num")" ||
    err_die "failed to dump partition #$part_num from: $file"

  dump_partial_file "$file" "$offset" "$size"
}
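
For example, dump_partition chromiumos_image.bin 3 | gzip_compress -9 -c > rootfs.gz would compress a single partition straight out of the image (file name and partition number are hypothetical), which is how the factory-packaging changes further down put these helpers to use.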


@@ -10,6 +10,7 @@ DEFINE_boolean no_graphics ${FLAGS_FALSE} "Runs the KVM instance silently."
DEFINE_boolean persist "${FLAGS_FALSE}" "Persist vm."
DEFINE_boolean snapshot ${FLAGS_FALSE} "Don't commit changes to image."
DEFINE_integer ssh_port 9222 "Port to tunnel ssh traffic over."
DEFINE_string vnc "" "VNC Server to display to instead of SDL."
KVM_PID_FILE=/tmp/kvm.$$.pid
LIVE_VM_IMAGE=
@@ -40,7 +41,10 @@ function start_kvm() {
local nographics=""
local usesnapshot=""
if [ ${FLAGS_no_graphics} -eq ${FLAGS_TRUE} ]; then
nographics="-nographic"
nographics="-nographic -serial none"
fi
if [ -n "${FLAGS_vnc}" ]; then
nographics="-vnc ${FLAGS_vnc}"
fi
if [ ${FLAGS_snapshot} -eq ${FLAGS_TRUE} ]; then


@@ -18,6 +18,10 @@
# Load functions and constants for chromeos-install
. "$(dirname "$0")/chromeos-common.sh"
# Load functions designed for image processing
. "$(dirname "$0")/lib/cros_image_common.sh" ||
die "Cannot load required library: lib/cros_image_common.sh; Abort."
get_default_board
# Flags
@@ -95,6 +99,50 @@ prepare_dir() {
rm -rf state.gz
}
compress_and_hash_memento_image() {
  local input_file="$1"

  if has_part_tools; then
    sudo "${SCRIPTS_DIR}/mk_memento_images.sh" "$input_file" 2 3 |
      grep hash |
      awk '{print $4}'
  else
    sudo "${SCRIPTS_DIR}/mk_memento_images.sh" part_2 part_3 |
      grep hash |
      awk '{print $4}'
  fi
}

compress_and_hash_file() {
  local input_file="$1"
  local output_file="$2"

  if [ -z "$input_file" ]; then
    # Runs as a pipe processor
    gzip_compress -c -9 |
      tee "$output_file" |
      openssl sha1 -binary |
      openssl base64
  else
    gzip_compress -c -9 "$input_file" |
      tee "$output_file" |
      openssl sha1 -binary |
      openssl base64
  fi
}

compress_and_hash_partition() {
  local input_file="$1"
  local part_num="$2"
  local output_file="$3"

  if has_part_tools; then
    dump_partition "$input_file" "$part_num" |
      compress_and_hash_file "" "$output_file"
  else
    compress_and_hash_file "part_$part_num" "$output_file"
  fi
}
# Clean up stale config and data files.
prepare_omaha
@@ -108,21 +156,25 @@ echo "Output omaha config to ${OMAHA_DIR}/miniomaha.conf"
prepare_dir
sudo ./unpack_partitions.sh ${RELEASE_IMAGE} &> /dev/null
release_hash=`sudo ${SCRIPTS_DIR}/mk_memento_images.sh part_2 part_3 \
| grep hash | awk '{print $4}'`
if ! has_part_tools; then
  # TODO(hungte) we can still avoid running unpack_partitions.sh
  # by $(cat unpack_partitions.sh | grep Label | sed "s/#//" | grep "${name}" |
  # awk '{ print $1 }') to fetch offset/size.
  echo "Unpacking image ${RELEASE_IMAGE} ..." >&2
  sudo ./unpack_partitions.sh "${RELEASE_IMAGE}" 2>/dev/null
fi
release_hash="$(compress_and_hash_memento_image "${RELEASE_IMAGE}")"
sudo chmod a+rw update.gz
mv update.gz rootfs-release.gz
mv rootfs-release.gz ${OMAHA_DATA_DIR}
echo "release: ${release_hash}"
cat part_8 | gzip -9 > oem.gz
oem_hash=`cat oem.gz | openssl sha1 -binary | openssl base64`
oem_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 8 "oem.gz")"
mv oem.gz ${OMAHA_DATA_DIR}
echo "oem: ${oem_hash}"
cat part_12 | gzip -9 > efi.gz
efi_hash=`cat efi.gz | openssl sha1 -binary | openssl base64`
efi_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 12 "efi.gz")"
mv efi.gz ${OMAHA_DATA_DIR}
echo "efi: ${efi_hash}"
@@ -132,17 +184,18 @@ popd > /dev/null
pushd ${FACTORY_DIR} > /dev/null
prepare_dir
if ! has_part_tools; then
  echo "Unpacking image ${FACTORY_IMAGE} ..." >&2
  sudo ./unpack_partitions.sh "${FACTORY_IMAGE}" 2>/dev/null
fi
sudo ./unpack_partitions.sh ${FACTORY_IMAGE} &> /dev/null
test_hash=`sudo ${SCRIPTS_DIR}//mk_memento_images.sh part_2 part_3 \
| grep hash | awk '{print $4}'`
test_hash="$(compress_and_hash_memento_image "${FACTORY_IMAGE}")"
sudo chmod a+rw update.gz
mv update.gz rootfs-test.gz
mv rootfs-test.gz ${OMAHA_DATA_DIR}
echo "test: ${test_hash}"
cat part_1 | gzip -9 > state.gz
state_hash=`cat state.gz | openssl sha1 -binary | openssl base64`
state_hash="$(compress_and_hash_partition "${FACTORY_IMAGE}" 1 "state.gz")"
mv state.gz ${OMAHA_DATA_DIR}
echo "state: ${state_hash}"
@@ -155,8 +208,7 @@ if [ ! -z ${FLAGS_firmware_updater} ] ; then
exit 1
fi
cat $SHELLBALL | gzip -9 > firmware.gz
firmware_hash=`cat firmware.gz | openssl sha1 -binary | openssl base64`
firmware_hash="$(compress_and_hash_file "$SHELLBALL" "firmware.gz")"
mv firmware.gz ${OMAHA_DATA_DIR}
echo "firmware: ${firmware_hash}"
fi
@@ -166,7 +218,7 @@ fi
if [ -n "${FLAGS_subfolder}" ] && \
   [ -f "${OMAHA_DIR}/miniomaha.conf" ] ; then
# Remove the ']' from the last line of the file so we can add another config.
sed -i '$d' < ${OMAHA_DIR}/miniomaha.conf
sed -i '$d' ${OMAHA_DIR}/miniomaha.conf
else
echo -e "config = [" > ${OMAHA_DIR}/miniomaha.conf
fi
@@ -190,7 +242,7 @@ echo -n "{
if [ ! -z "${FLAGS_firmware_updater}" ] ; then
echo -n "
'firmware_image': 'firmware.gz',
'firmware_image': '"${subfolder}"firmware.gz',
'firmware_checksum': '${firmware_hash}'," >> ${OMAHA_DIR}/miniomaha.conf
fi


@@ -10,25 +10,64 @@
set -e
if [ -z "$2" -o -z "$1" ]; then
# Load functions designed for image processing
if ! . "$(dirname "$0")/lib/cros_image_common.sh"; then
echo "ERROR: Cannot load required library: lib/cros_image_common.sh; Abort."
exit 1
fi
if [ -z "$2" -o -z "$1" ] || [ "${#@}" -ne 2 -a "${#@}" -ne 3 ]; then
echo "usage: $0 path/to/kernel_partition_img path/to/rootfs_partition_img"
echo " or $0 path/to/chromiumos_img kern_part_no rootfs_part_no"
exit 1
fi
if [ "$CROS_GENERATE_UPDATE_PAYLOAD_CALLED" != "1" ]; then
echo "WARNING:"
echo "This script should only be called from cros_generate_update_payload"
echo "Please run that script with --help to see how to use it."
echo " This script should only be called from cros_generate_update_payload"
echo " Please run that script with --help to see how to use it."
fi
if ! has_command pigz; then
  (echo "WARNING:"
   echo " Your system does not have pigz (parallel gzip) installed."
   echo " COMPRESSING WILL BE VERY SLOW. It is recommended to install pigz"
   if has_command apt-get; then
     echo " by 'sudo apt-get install pigz'."
   elif has_command emerge; then
     echo " by 'sudo emerge pigz'."
   fi) >&2
fi
if [ "$(whoami)" = "root" ]; then
  echo "Running $0 as root is unnecessary."
fi
KPART="$1"
ROOT_PART="$2"
KPART_SIZE=$(stat -c%s "$KPART")
# Determine the offset, size, and file name parameters.
if [ -z "$3" ]; then
  # kernel_img rootfs_img
  KPART="$1"
  ROOT_PART="$2"
  KPART_SIZE=$(stat -c%s "$KPART")
  ROOT_PART_SIZE=$(stat -c%s "$ROOT_PART")
  KPART_OFFSET=0
  KPART_SECTORS=$((KPART_SIZE / 512))
  ROOT_OFFSET=0
  ROOT_SECTORS=$((ROOT_PART_SIZE / 512))
else
  # chromiumos_img kern_part_no rootfs_part_no
  KPART="$1"
  ROOT_PART="$1"
  KPART_OFFSET="$(part_offset "$KPART" "$2")" ||
    err_die "cannot retrieve kernel partition offset"
  KPART_SECTORS="$(part_size "$KPART" "$2")" ||
    err_die "cannot retrieve kernel partition size"
  ROOT_OFFSET="$(part_offset "$ROOT_PART" "$3")" ||
    err_die "cannot retrieve root partition offset"
  ROOT_SECTORS="$(part_size "$ROOT_PART" "$3")" ||
    err_die "cannot retrieve root partition size"
  KPART_SIZE=$((KPART_SECTORS * 512))
fi
# Sanity check size.
if [ "$KPART_SIZE" -gt $((16 * 1024 * 1024)) ]; then
@@ -38,34 +77,31 @@ if [ "$KPART_SIZE" -gt $((16 * 1024 * 1024)) ]; then
fi
FINAL_OUT_FILE=$(dirname "$1")/update.gz
UNCOMPRESSED_OUT_FILE="$FINAL_OUT_FILE.uncompressed"
# First, write size of kernel partition in big endian as uint64 to out file
# printf converts it to a number like 00000000003d0900. sed converts it to:
# \\x00\\x00\\x00\\x00\\x00\\x3d\\x09\\x00, then xargs converts it to binary
# with echo.
printf %016x "$KPART_SIZE" | \
sed 's/\([0-9a-f][0-9a-f]\)/\\\\x\1/g' | \
xargs echo -ne > "$UNCOMPRESSED_OUT_FILE"
# Update payload format:
# [kernel_size: big-endian uint64][kernel_blob][rootfs_blob]
# Next, write kernel partition to the out file
cat "$KPART" >> "$UNCOMPRESSED_OUT_FILE"
# Prepare kernel_size: printf renders it as a 16-digit hex string like
# 00000000003d0900, then sed rewrites that as \x00\x00\x00\x00\x00\x3d\x09\x00,
# which the echo -en below converts into the actual eight binary bytes.
KPART_SIZE_SIGNATURE="$(printf "%016x" "$KPART_SIZE" |
  sed 's/\([0-9a-f][0-9a-f]\)/\\x\1/g')"
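
For comparison, the same eight big-endian bytes in Python (a sketch; 0x3d0900 matches the example value above):

  import struct
  assert struct.pack('>Q', 0x3d0900) == '\x00\x00\x00\x00\x00\x3d\x09\x00'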
# Sanity check size of output file now
if [ $(stat -c%s "$UNCOMPRESSED_OUT_FILE") -ne $((8 + $KPART_SIZE)) ]; then
echo "Kernel partition changed size during image generation. Aborting."
exit 1
fi
# Build the blob!
CS_AND_RET_CODES="$(
(echo -en "$KPART_SIZE_SIGNATURE"
echo "Compressing kernel..." >&2
dump_partial_file "$KPART" "$KPART_OFFSET" "$KPART_SECTORS"
echo "Compressing rootfs..." >&2
dump_partial_file "$ROOT_PART" "$ROOT_OFFSET" "$ROOT_SECTORS") |
gzip_compress -9 -c |
tee "$FINAL_OUT_FILE" |
openssl sha1 -binary |
openssl base64 |
tr '\n' ' '
echo ${PIPESTATUS[*]})"
# Put rootfs into the out file
cat "$ROOT_PART" >> "$UNCOMPRESSED_OUT_FILE"
# compress and hash
CS_AND_RET_CODES=$(gzip -c "$UNCOMPRESSED_OUT_FILE" | \
tee "$FINAL_OUT_FILE" | openssl sha1 -binary | \
openssl base64 | tr '\n' ' '; \
echo ${PIPESTATUS[*]})
EXPECTED_RET_CODES="0 0 0 0 0"
EXPECTED_RET_CODES="0 0 0 0 0 0"
set -- $CS_AND_RET_CODES
CALC_CS="$1"
shift
@@ -75,6 +111,4 @@ if [ "$RET_CODES" != "$EXPECTED_RET_CODES" ]; then
exit 1
fi
rm "$UNCOMPRESSED_OUT_FILE"
echo Success. hash is "$CALC_CS"
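
Since the payload is just a gzipped [kernel_size: big-endian uint64][kernel_blob][rootfs_blob] stream, a receiver can split it apart again; a minimal Python sketch (the file name is assumed):

  import gzip, struct

  def split_payload(path):
    data = gzip.open(path).read()
    (kernel_size,) = struct.unpack('>Q', data[:8])  # big-endian uint64 header
    return data[8:8 + kernel_size], data[8 + kernel_size:]

  kernel_blob, rootfs_blob = split_payload('update.gz')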


@@ -27,7 +27,7 @@ get_default_board
DEFINE_string board "$DEFAULT_BOARD" "Board for which the image was built" b
DEFINE_integer statefulfs_sectors 4096 \
"Number of sectors to use for the stateful filesystem"
"Number of sectors to use for the stateful filesystem when minimizing"
# Skips the build steps and just does the kernel swap.
DEFINE_string kernel_image "" \
"Path to a pre-built recovery kernel"
@@ -42,6 +42,12 @@ DEFINE_boolean kernel_image_only $FLAGS_FALSE \
"Emit the recovery kernel image only"
DEFINE_boolean sync_keys $FLAGS_TRUE \
"Update the kernel to be installed with the vblock from stateful"
DEFINE_boolean minimize_image $FLAGS_TRUE \
"Decides if the original image is used or a minimal recovery image is \
created."
DEFINE_boolean modify_in_place $FLAGS_FALSE \
"Modifies the source image in place. This cannot be used with \
--minimize_image."
DEFINE_integer jobs -1 \
"How many packages to build in parallel at maximum." j
DEFINE_string build_root "/build" \
@@ -50,6 +56,9 @@ DEFINE_string build_root "/build" \
DEFINE_string rootfs_hash "/tmp/rootfs.hash" \
"Path where the rootfs hash should be stored."
DEFINE_boolean verbose $FLAGS_FALSE \
"Log all commands to stdout." v
# Keep in sync with build_image.
DEFINE_string keys_dir "/usr/share/vboot/devkeys" \
"Directory containing the signing keys."
@@ -58,6 +67,17 @@ DEFINE_string keys_dir "/usr/share/vboot/devkeys" \
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
if [ $FLAGS_verbose -eq $FLAGS_FALSE ]; then
  exec 2>/dev/null
  # Redirecting to stdout instead of stderr since
  # we silence stderr above.
  die() {
    echo -e "${V_BOLD_RED}ERROR : $1${V_VIDOFF}"
    exit 1
  }
fi
set -x # Make debugging with -v easy.
EMERGE_CMD="emerge"
EMERGE_BOARD_CMD="emerge-${FLAGS_board}"
@@ -178,10 +198,11 @@ create_recovery_kernel_image() {
local kern_tmp=$(mktemp)
local kern_hash=
dd if="$FLAGS_image" bs=512 count=$kern_size skip=$kern_offset of="$kern_tmp"
dd if="$FLAGS_image" bs=512 count=$kern_size \
skip=$kern_offset of="$kern_tmp" 1>&2
# We're going to use the real signing block.
if [ $FLAGS_sync_keys -eq $FLAGS_TRUE ]; then
dd if="$INSTALL_VBLOCK" of="$kern_tmp" conv=notrunc
dd if="$INSTALL_VBLOCK" of="$kern_tmp" conv=notrunc 1>&2
fi
local kern_hash=$(sha1sum "$kern_tmp" | cut -f1 -d' ')
rm "$kern_tmp"
@@ -200,7 +221,7 @@ create_recovery_kernel_image() {
--root=${cros_root} \
--keys_dir="${FLAGS_keys_dir}" \
--nouse_dev_keys \
${verity_args}
${verity_args} 1>&2
sudo rm "$FLAGS_rootfs_hash"
sudo losetup -d "$root_dev"
trap - RETURN
@@ -236,6 +257,13 @@ install_recovery_kernel() {
local kern_a_size=$(partsize "$RECOVERY_IMAGE" 2)
local kern_b_offset=$(partoffset "$RECOVERY_IMAGE" 4)
local kern_b_size=$(partsize "$RECOVERY_IMAGE" 4)
  if [ $kern_b_size -eq 1 ]; then
    echo "Image was created with no KERN-B partition reserved!" 1>&2
    echo "Cannot proceed." 1>&2
    return 1
  fi
# Backup original kernel to KERN-B
dd if="$RECOVERY_IMAGE" of="$RECOVERY_IMAGE" bs=512 \
count=$kern_a_size \
@@ -277,14 +305,22 @@ install_recovery_kernel() {
}
maybe_resize_stateful() {
  # If we're not minimizing, then just copy and go.
  if [ $FLAGS_minimize_image -eq $FLAGS_FALSE ]; then
    if [ "$FLAGS_image" != "$RECOVERY_IMAGE" ]; then
      cp "$FLAGS_image" "$RECOVERY_IMAGE"
    fi
    return 0
  fi

  # Rebuild the image with a minimal stateful partition
  # (FLAGS_statefulfs_sectors sectors).
  local err=0
local small_stateful=$(mktemp)
dd if=/dev/zero of="$small_stateful" bs=512 \
count=${FLAGS_statefulfs_sectors}
count=${FLAGS_statefulfs_sectors} 1>&2
trap "rm $small_stateful" RETURN
# Don't bother with ext3 for such a small image.
/sbin/mkfs.ext2 -F -b 4096 "$small_stateful"
/sbin/mkfs.ext2 -F -b 4096 "$small_stateful" 1>&2
# If it exists, we need to copy the vblock over to stateful
# This is the real vblock and not the recovery vblock.
@@ -301,14 +337,20 @@ maybe_resize_stateful() {
# Create a recovery image of the right size
# TODO(wad) Make the developer script case create a custom GPT with
# just the kernel image and stateful.
update_partition_table "$FLAGS_image" "$small_stateful" 4096 "$RECOVERY_IMAGE"
update_partition_table "$FLAGS_image" "$small_stateful" 4096 \
"$RECOVERY_IMAGE" 1>&2
return $err
}
# main process begins here.
cleanup() {
  set +e
  if [ "$FLAGS_image" != "$RECOVERY_IMAGE" ]; then
    rm "$RECOVERY_IMAGE"
  fi
  rm "$INSTALL_VBLOCK"
}
# Make sure this is really what the user wants, before nuking the device
echo "Creating recovery image ${FLAGS_to} from ${FLAGS_image} . . . "
# main process begins here.
set -e
set -u
@@ -335,6 +377,15 @@ if [ $FLAGS_kernel_image_only -eq $FLAGS_TRUE -a \
die "Cannot use --kernel_image_only with --kernel_image"
fi
if [ $FLAGS_modify_in_place -eq $FLAGS_TRUE ]; then
  if [ $FLAGS_minimize_image -eq $FLAGS_TRUE ]; then
    die "Cannot use --modify_in_place and --minimize_image together."
  fi
  RECOVERY_IMAGE="${FLAGS_image}"
fi
echo "Creating recovery image from ${FLAGS_image}"
INSTALL_VBLOCK=$(get_install_vblock)
if [ -z "$INSTALL_VBLOCK" ]; then
die "Could not copy the vblock from stateful."
@@ -343,10 +394,10 @@ fi
if [ -z "$FLAGS_kernel_image" ]; then
emerge_recovery_kernel
create_recovery_kernel_image
echo "Recovery kernel created at $RECOVERY_KERNEL_IMAGE"
else
RECOVERY_KERNEL_IMAGE="$FLAGS_kernel_image"
fi
echo "Kernel emitted: $RECOVERY_KERNEL_IMAGE."
if [ $FLAGS_kernel_image_only -eq $FLAGS_TRUE ]; then
echo "Kernel emitted. Stopping there."
@@ -354,13 +405,16 @@ if [ $FLAGS_kernel_image_only -eq $FLAGS_TRUE ]; then
exit 0
fi
rm "$RECOVERY_IMAGE" || true # Start fresh :)
if [ $FLAGS_modify_in_place -eq $FLAGS_FALSE ]; then
  rm "$RECOVERY_IMAGE" || true # Start fresh :)
fi
trap "rm \"$RECOVERY_IMAGE\" && rm \"$INSTALL_VBLOCK\"" EXIT
trap cleanup EXIT
maybe_resize_stateful # Also copies the image
maybe_resize_stateful # Also copies the image if needed.
install_recovery_kernel
echo "Recovery image created at $RECOVERY_IMAGE"
print_time_elapsed
trap - EXIT


@@ -139,8 +139,10 @@ if [ ${FLAGS_most_recent} -eq ${FLAGS_TRUE} ] ; then
FLAGS_from="$(./get_latest_image.sh --board="${FLAGS_board}")"
fi
# Turn path into an absolute path.
# Turn paths into absolute paths.
FLAGS_from=`eval readlink -f ${FLAGS_from}`
FLAGS_rootfs_mountpt=`eval readlink -f ${FLAGS_rootfs_mountpt}`
FLAGS_stateful_mountpt=`eval readlink -f ${FLAGS_stateful_mountpt}`
# Perform desired operation.
if [ ${FLAGS_unmount} -eq ${FLAGS_TRUE} ] ; then


@@ -7,7 +7,7 @@
Usage:
./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps]
[emerge args] package"
[--force-remote-binary=PKGS] [emerge args] package
Basic operation:
Runs 'emerge -p --debug' to display dependencies, and stores a
@@ -84,6 +84,7 @@ from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
import portage
import portage.debug
import portage.versions
def Usage():
@@ -218,7 +219,8 @@ class DepGraphGenerator(object):
"""
__slots__ = ["board", "emerge", "mandatory_source", "no_workon_deps",
"nomerge", "package_db", "rebuild", "show_output"]
"nomerge", "package_db", "rebuild", "show_output",
"force_remote_binary", "forced_remote_binary_packages"]
def __init__(self):
self.board = None
@@ -229,6 +231,8 @@ class DepGraphGenerator(object):
self.package_db = {}
self.rebuild = False
self.show_output = False
self.force_remote_binary = set()
self.forced_remote_binary_packages = set()
def ParseParallelEmergeArgs(self, argv):
"""Read the parallel emerge arguments from the command-line.
@@ -251,6 +255,11 @@ class DepGraphGenerator(object):
workon_str = arg.replace("--workon=", "")
package_list = shlex.split(" ".join(shlex.split(workon_str)))
self.mandatory_source.update(package_list)
elif arg.startswith("--force-remote-binary="):
force_remote_binary = arg.replace("--force-remote-binary=", "")
force_remote_binary = \
shlex.split(" ".join(shlex.split(force_remote_binary)))
self.force_remote_binary.update(force_remote_binary)
elif arg.startswith("--nomerge="):
nomerge_str = arg.replace("--nomerge=", "")
package_list = shlex.split(" ".join(shlex.split(nomerge_str)))
@@ -455,12 +464,11 @@ class DepGraphGenerator(object):
forced_flags = set(pkgsettings.useforce).union(pkgsettings.usemask)
depgraph = self.emerge.depgraph
flags = depgraph._reinstall_for_flags(forced_flags, cur_use,
cur_iuse, now_use, now_iuse)
return not flags
def GenDependencyTree(self):
def GenDependencyTree(self, remote_pkgs):
"""Get dependency tree info from emerge.
TODO(): Update cros_extract_deps to also use this code.
@@ -479,22 +487,39 @@ class DepGraphGenerator(object):
# --workon and the dependencies have changed.
emerge = self.emerge
emerge_opts = emerge.opts.copy()
emerge_opts.pop("--getbinpkg", None)
if "--usepkgonly" not in emerge_opts:
emerge_opts.pop("--usepkg", None)
if self.mandatory_source or self.rebuild:
# Enable --emptytree so that we get the full tree, which we need for
# dependency analysis. By default, with this option, emerge optimizes
# the graph by removing uninstall instructions from the graph. By
# specifying --tree as well, we tell emerge that it's not safe to remove
# uninstall instructions because we're planning on analyzing the output.
emerge_opts["--tree"] = True
emerge_opts["--emptytree"] = True
# Enable --emptytree so that we get the full tree, which we need for
# dependency analysis. By default, with this option, emerge optimizes
# the graph by removing uninstall instructions from the graph. By
# specifying --tree as well, we tell emerge that it's not safe to remove
# uninstall instructions because we're planning on analyzing the output.
emerge_opts["--tree"] = True
emerge_opts["--emptytree"] = True
# Tell emerge not to worry about use flags yet. We handle those inside
# parallel_emerge itself. Further, when we use the --force-remote-binary
# flag, we don't want emerge to reject a package just because it was built
# with different use flags.
emerge_opts.pop("--newuse", None)
emerge_opts.pop("--reinstall", None)
# Create a list of packages to merge
packages = set(emerge.cmdline_packages[:])
if self.mandatory_source:
packages.update(self.mandatory_source)
if self.force_remote_binary:
forced_pkgs = {}
for pkg in remote_pkgs:
category, pkgname, _, _ = portage.catpkgsplit(pkg)
full_pkgname = "%s/%s" % (category, pkgname)
if (pkgname in self.force_remote_binary or
full_pkgname in self.force_remote_binary):
forced_pkgs.setdefault(full_pkgname, []).append(pkg)
for pkgs in forced_pkgs.values():
forced_package = portage.versions.best(pkgs)
packages.add("=%s" % forced_package)
self.forced_remote_binary_packages.add(forced_package)
# Tell emerge to be quiet. We print plenty of info ourselves so we don't
# need any extra output from portage.
@@ -567,9 +592,25 @@ class DepGraphGenerator(object):
frozen_config = depgraph._frozen_config
vardb = frozen_config.trees[root]["vartree"].dbapi
pkgsettings = frozen_config.pkgsettings[root]
# It's time to start worrying about use flags, if necessary.
for flag in ("--newuse", "--reinstall"):
if flag in emerge.opts:
emerge_opts[flag] = emerge.opts[flag]
deps_info = {}
for pkg in depgraph.altlist():
if isinstance(pkg, Package):
# If we're not using --force-remote-binary, check what flags are being
# used by the real package.
if "--usepkgonly" not in emerge.opts:
try:
pkg = emerge.depgraph._pkg(pkg.cpv, "ebuild", emerge.root_config)
except portage.exception.PackageNotFound:
# This is a --force-remote-binary package.
pass
self.package_db[pkg.cpv] = pkg
# If we're not in emptytree mode, and we're going to replace a package
# that is already installed, then this operation is possibly optional.
# ("--selective" mode is handled later, in RemoveInstalledPackages())
@@ -580,9 +621,6 @@ class DepGraphGenerator(object):
optional = True
break
# Add the package to our database.
self.package_db[str(pkg.cpv)] = pkg
# Save off info about the package
deps_info[str(pkg.cpv)] = {"idx": len(deps_info),
"optional": optional}
@@ -611,7 +649,65 @@ class DepGraphGenerator(object):
print "%s %s (%s)" % (depth, entry, action)
self.PrintTree(deps[entry]["deps"], depth=depth + " ")
def GenDependencyGraph(self, deps_tree, deps_info):
  def RemotePackageDatabase(self, binhost_url):
    """Grab the latest binary package database from the prebuilt server.

    We need to know the modification times of the prebuilt packages so that we
    know when it is OK to use these packages and when we should rebuild them
    instead.

    Args:
      binhost_url: Base URL of remote packages (PORTAGE_BINHOST).

    Returns:
      A dict mapping package identifiers to modification times.
    """
    if not binhost_url:
      return {}

    def retry_urlopen(url, tries=3):
      """Open the specified url, retrying if we run into network errors.

      We do not retry for HTTP errors.

      Args:
        url: The specified url.
        tries: The number of times to try.

      Returns:
        The result of urllib2.urlopen(url).
      """
      for i in range(tries):
        try:
          return urllib2.urlopen(url)
        except urllib2.HTTPError as e:
          raise
        except urllib2.URLError as e:
          if i + 1 == tries:
            raise
          else:
            print "Cannot GET %s: %s" % (url, e)

    url = os.path.join(binhost_url, "Packages")
    try:
      f = retry_urlopen(url)
    except urllib2.HTTPError as e:
      if e.code == 404:
        return {}
      else:
        raise
    prebuilt_pkgs = {}
    for line in f:
      if line.startswith("CPV: "):
        pkg = line.replace("CPV: ", "").rstrip()
      elif line.startswith("MTIME: "):
        prebuilt_pkgs[pkg] = int(line[:-1].replace("MTIME: ", ""))
    f.close()

    return prebuilt_pkgs
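
The Packages index fetched above interleaves CPV: and MTIME: lines; a minimal sketch of the parse with made-up entries:

  sample = ("CPV: chromeos-base/libcros-0.0.69\n"
            "MTIME: 1289779200\n"
            "CPV: dev-util/hdctools-0.0.1\n"
            "MTIME: 1289692800\n")
  pkgs = {}
  for line in sample.splitlines():
    if line.startswith("CPV: "):
      pkg = line[len("CPV: "):]
    elif line.startswith("MTIME: "):
      pkgs[pkg] = int(line[len("MTIME: "):])
  # pkgs now maps each CPV to its mtime, mirroring the return value above.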
def GenDependencyGraph(self, deps_tree, deps_info, remote_pkgs):
"""Generate a doubly linked dependency graph.
Args:
@@ -660,6 +756,10 @@ class DepGraphGenerator(object):
# If true, indicates that this package must be installed. We don't care
# whether it's binary or source, unless the mandatory_source flag is
# also set.
# - force_remote_binary:
# If true, indicates that we want to update to the latest remote prebuilt
# of this package. Packages that depend on this package should be built
# from source.
#
deps_map = {}
@@ -678,7 +778,8 @@ class DepGraphGenerator(object):
# Create an entry for the package
action = packages[pkg]["action"]
default_pkg = {"needs": {}, "provides": set(), "action": action,
"mandatory_source": False, "mandatory": False}
"mandatory_source": False, "mandatory": False,
"force_remote_binary": False}
this_pkg = deps_map.setdefault(pkg, default_pkg)
# Create entries for dependencies of this package first.
@@ -909,64 +1010,6 @@ class DepGraphGenerator(object):
if this_pkg["action"] == "nomerge":
this_pkg["action"] = "merge"
def RemotePackageDatabase(binhost_url):
  """Grab the latest binary package database from the prebuilt server.

  We need to know the modification times of the prebuilt packages so that we
  know when it is OK to use these packages and when we should rebuild them
  instead.

  Args:
    binhost_url: Base URL of remote packages (PORTAGE_BINHOST).

  Returns:
    A dict mapping package identifiers to modification times.
  """
  if not binhost_url:
    return {}

  def retry_urlopen(url, tries=3):
    """Open the specified url, retrying if we run into network errors.

    We do not retry for HTTP errors.

    Args:
      url: The specified url.
      tries: The number of times to try.

    Returns:
      The result of urllib2.urlopen(url).
    """
    for i in range(tries):
      try:
        return urllib2.urlopen(url)
      except urllib2.HTTPError as e:
        raise
      except urllib2.URLError as e:
        if i + 1 == tries:
          raise
        else:
          print "Cannot GET %s: %s" % (url, e)

  url = binhost_url + "/Packages"
  try:
    f = retry_urlopen(url)
  except urllib2.HTTPError as e:
    if e.code == 404:
      return {}
    else:
      raise
  prebuilt_pkgs = {}
  for line in f:
    if line.startswith("CPV: "):
      pkg = line.replace("CPV: ", "").rstrip()
    elif line.startswith("MTIME: "):
      prebuilt_pkgs[pkg] = int(line[:-1].replace("MTIME: ", ""))
  f.close()
  return prebuilt_pkgs
def LocalPackageDatabase():
"""Get the modification times of the packages in the local database.
@@ -1019,7 +1062,7 @@ class DepGraphGenerator(object):
"""
if pkg in cache:
return cache[pkg]
if pkg not in pkg_db:
if pkg not in pkg_db and pkg not in self.forced_remote_binary_packages:
cache[pkg] = False
else:
cache[pkg] = True
@@ -1081,7 +1124,7 @@ class DepGraphGenerator(object):
else:
MergeChildren(pkg, "mandatory_source")
def UsePrebuiltPackages():
def UsePrebuiltPackages(remote_pkgs):
"""Update packages that can use prebuilts to do so."""
start = time.time()
@@ -1099,13 +1142,18 @@ class DepGraphGenerator(object):
# Build list of prebuilt packages
for pkg, info in deps_map.iteritems():
if info and not info["mandatory_source"] and info["action"] == "merge":
if info and info["action"] == "merge":
if (not info["force_remote_binary"] and info["mandatory_source"] or
"--usepkgonly" not in emerge.opts and pkg not in remote_pkgs):
continue
db_keys = list(bindb._aux_cache_keys)
try:
db_vals = bindb.aux_get(pkg, db_keys + ["MTIME"])
except KeyError:
# No binary package
continue
mtime = int(db_vals.pop() or 0)
metadata = zip(db_keys, db_vals)
db_pkg = Package(built=True, cpv=pkg, installed=False,
@@ -1116,15 +1164,14 @@ class DepGraphGenerator(object):
# Calculate what packages need to be rebuilt due to changes in use flags.
for pkg, db_pkg in prebuilt_pkgs.iteritems():
db_pkg_src = self.package_db[pkg]
if not self.CheckUseFlags(pkgsettings, db_pkg, db_pkg_src):
if not self.CheckUseFlags(pkgsettings, db_pkg, self.package_db[pkg]):
MergeChildren(pkg, "mandatory_source")
# Convert eligible packages to binaries.
for pkg, info in deps_map.iteritems():
if (info and not info["mandatory_source"] and
info["action"] == "merge" and pkg in prebuilt_pkgs):
self.package_db[pkg] = prebuilt_pkgs[pkg]
if info and info["action"] == "merge" and pkg in prebuilt_pkgs:
if not info["mandatory_source"] or info["force_remote_binary"]:
self.package_db[pkg] = prebuilt_pkgs[pkg]
seconds = time.time() - start
if "--quiet" not in emerge.opts:
@@ -1132,28 +1179,22 @@ class DepGraphGenerator(object):
return prebuilt_pkgs
def AddRemainingPackages():
"""Fill in packages that don't have entries in the package db.
Every package we are installing needs an entry in the package db.
This function should only be called after we have removed the
packages that are not being merged from our deps_map.
"""
for pkg in deps_map:
if pkg not in self.package_db:
if deps_map[pkg]["action"] != "merge":
# We should only fill in packages that are being merged. If
# there's any other packages here, something funny is going on.
print "Missing entry for %s in package db" % pkg
sys.exit(1)
db_pkg = emerge.depgraph._pkg(pkg, "ebuild", emerge.root_config)
self.package_db[pkg] = db_pkg
ReverseTree(deps_tree)
BuildFinalPackageSet()
AddSecretDeps()
# Mark that we want to use remote binaries only for a particular package.
vardb = emerge.depgraph._frozen_config.trees[root]["vartree"].dbapi
for pkg in self.force_remote_binary:
for db_pkg in final_db.match_pkgs(pkg):
match = deps_map.get(str(db_pkg.cpv))
if match:
match["force_remote_binary"] = True
rebuild_blacklist.add(str(db_pkg.cpv))
if not vardb.match_pkgs(db_pkg.cpv):
MergeChildren(str(db_pkg.cpv), "mandatory")
if self.no_workon_deps:
for pkg in self.mandatory_source.copy():
for db_pkg in final_db.match_pkgs(pkg):
@@ -1166,10 +1207,6 @@ class DepGraphGenerator(object):
cycles = FindCycles()
if self.rebuild:
local_pkgs = LocalPackageDatabase()
remote_pkgs = {}
if "--getbinpkg" in emerge.opts:
binhost = emerge.settings["PORTAGE_BINHOST"]
remote_pkgs = RemotePackageDatabase(binhost)
AutoRebuildDeps(local_pkgs, remote_pkgs, cycles)
# We need to remove installed packages so that we can use the dependency
@@ -1180,8 +1217,7 @@ class DepGraphGenerator(object):
SanitizeTree()
if deps_map:
if "--usepkg" in emerge.opts:
UsePrebuiltPackages()
AddRemainingPackages()
UsePrebuiltPackages(remote_pkgs)
return deps_map
def PrintInstallPlan(self, deps_map):
@@ -1734,13 +1770,18 @@ def main():
print " Skipping package %s on %s" % (nomerge_packages,
deps.board or "root")
deps_tree, deps_info = deps.GenDependencyTree()
remote_pkgs = {}
if "--getbinpkg" in emerge.opts:
binhost = emerge.settings["PORTAGE_BINHOST"]
remote_pkgs = deps.RemotePackageDatabase(binhost)
deps_tree, deps_info = deps.GenDependencyTree(remote_pkgs)
# You want me to be verbose? I'll give you two trees! Twice as much value.
if "--tree" in emerge.opts and "--verbose" in emerge.opts:
deps.PrintTree(deps_tree)
deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
deps_graph = deps.GenDependencyGraph(deps_tree, deps_info, remote_pkgs)
# OK, time to print out our progress so far.
deps.PrintInstallPlan(deps_graph)


@@ -92,8 +92,8 @@ def UpdateLocalFile(filename, value, key='PORTAGE_BINHOST'):
# Strip newlines from end of line. We already add newlines below.
line = line.rstrip("\n")
if '=' not in line:
# Skip any line without an equal in it and just write it out.
if len(line.split('=')) != 2:
# Skip any line that doesn't fit key=val.
file_lines.append(line)
continue
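
The new guard is easy to check interactively; the COMPILE_FLAGS line added to the unit-test fixture below splits into three fields, so it is passed through untouched rather than treated as a key=val setting:

  >>> 'COMPILE_FLAGS="some_value=some_other"'.split('=')
  ['COMPILE_FLAGS', '"some_value', 'some_other"']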


@@ -17,7 +17,9 @@ class TestUpdateFile(unittest.TestCase):
self.contents_str = ['# comment that should be skipped',
'PKGDIR="/var/lib/portage/pkgs"',
'PORTAGE_BINHOST="http://no.thanks.com"',
'portage portage-20100310.tar.bz2']
'portage portage-20100310.tar.bz2',
'COMPILE_FLAGS="some_value=some_other"',
]
temp_fd, self.version_file = tempfile.mkstemp()
os.write(temp_fd, '\n'.join(self.contents_str))
os.close(temp_fd)


@@ -18,42 +18,20 @@ DEFINE_string args "" \
"Command line arguments for test. Quoted and space separated if multiple." a
DEFINE_string board "$DEFAULT_BOARD" \
"The board for which you are building autotest"
DEFINE_boolean build ${FLAGS_FALSE} "Build tests while running" b
DEFINE_string chroot "${DEFAULT_CHROOT_DIR}" "alternate chroot location" c
DEFINE_boolean cleanup ${FLAGS_FALSE} "Clean up temp directory"
DEFINE_integer iterations 1 "Iterations to run every top level test" i
DEFINE_string results_dir_root "" "alternate root results directory"
DEFINE_boolean verbose ${FLAGS_FALSE} "Show verbose autoserv output" v
DEFINE_boolean use_emerged ${FLAGS_FALSE} \
"Force use of emerged autotest packages"
RAN_ANY_TESTS=${FLAGS_FALSE}
# Check if our stdout is a tty
function is_a_tty() {
local stdout=$(readlink /proc/$$/fd/1)
[[ "${stdout#/dev/tty}" != "${stdout}" ]] && return 0
[[ "${stdout#/dev/pts}" != "${stdout}" ]] && return 0
return 1
}
# Writes out text in specified color if stdout is a tty
# Arguments:
# $1 - color
# $2 - text to color
# $3 - text following colored text (default colored)
# Returns:
# None
function echo_color() {
local color=0
[[ "$1" == "red" ]] && color=31
[[ "$1" == "green" ]] && color=32
[[ "$1" == "yellow" ]] && color=33
if is_a_tty; then
echo -e "\033[1;${color}m$2\033[0m$3"
else
echo "$2$3"
fi
}
function cleanup() {
# Always remove the build path in case it was used.
[[ -n "${BUILD_DIR}" ]] && sudo rm -rf "${BUILD_DIR}"
if [[ $FLAGS_cleanup -eq ${FLAGS_TRUE} ]] || \
[[ ${RAN_ANY_TESTS} -eq ${FLAGS_FALSE} ]]; then
rm -rf "${TMP}"
@@ -63,27 +41,6 @@ function cleanup() {
cleanup_remote_access
}
# Adds attributes to all tests run
# Arguments:
# $1 - results directory
# $2 - attribute name (key)
# $3 - attribute value (value)
function add_test_attribute() {
local results_dir="$1"
local attribute_name="$2"
local attribute_value="$3"
if [[ -z "$attribute_value" ]]; then
return;
fi
for status_file in $(echo "${results_dir}"/*/status); do
local keyval_file=$(dirname $status_file)/keyval
echo "Updating ${keyval_file}"
echo "${attribute_name}=${attribute_value}" >> "${keyval_file}"
done
}
# Determine if a control is for a client or server test. Echos
# either "server" or "client".
# Arguments:
@@ -94,17 +51,83 @@ function read_test_type() {
local type=$(egrep -m1 \
'^[[:space:]]*TEST_TYPE[[:space:]]*=' "${control_file}")
if [[ -z "${type}" ]]; then
echo_color "red" ">>> Unable to find TEST_TYPE line in ${control_file}"
exit 1
die "Unable to find TEST_TYPE line in ${control_file}"
fi
type=$(python -c "${type}; print TEST_TYPE.lower()")
if [[ "${type}" != "client" ]] && [[ "${type}" != "server" ]]; then
echo_color "red" ">>> Unknown type of test (${type}) in ${control_file}"
exit 1
die "Unknown type of test (${type}) in ${control_file}"
fi
echo ${type}
}
function create_tmp() {
  # Set global TMP for remote_access.sh's sake
  # and if --results_dir_root is specified,
  # set TMP and create dir appropriately
  if [[ ${INSIDE_CHROOT} -eq 0 ]]; then
    if [[ -n "${FLAGS_results_dir_root}" ]]; then
      TMP=${FLAGS_chroot}${FLAGS_results_dir_root}
      mkdir -p -m 777 ${TMP}
    else
      TMP=$(mktemp -d ${FLAGS_chroot}/tmp/run_remote_tests.XXXX)
    fi
    TMP_INSIDE_CHROOT=$(echo ${TMP#${FLAGS_chroot}})
  else
    if [[ -n "${FLAGS_results_dir_root}" ]]; then
      TMP=${FLAGS_results_dir_root}
      mkdir -p -m 777 ${TMP}
    else
      TMP=$(mktemp -d /tmp/run_remote_tests.XXXX)
    fi
    TMP_INSIDE_CHROOT=${TMP}
  fi
}
function prepare_build_dir() {
  local autotest_dir="$1"
  INSIDE_BUILD_DIR="${TMP_INSIDE_CHROOT}/build"
  BUILD_DIR="${TMP}/build"

  info "Copying autotest tree into ${BUILD_DIR}."
  sudo mkdir -p "${BUILD_DIR}"
  sudo rsync -rl --chmod=ugo=rwx "${autotest_dir}"/ "${BUILD_DIR}"

  info "Pilfering toolchain shell environment from Portage."
  local outside_ebuild_dir="${TMP}/chromeos-base/autotest-build"
  local inside_ebuild_dir="${TMP_INSIDE_CHROOT}/chromeos-base/autotest-build"
  mkdir -p "${outside_ebuild_dir}"
  local E_only="autotest-build-9999.ebuild"
  cat > "${outside_ebuild_dir}/${E_only}" <<EOF
inherit toolchain-funcs
SLOT="0"
EOF
  local E="chromeos-base/autotest-build/${E_only}"
  ${ENTER_CHROOT} "ebuild-${FLAGS_board}" "${inside_ebuild_dir}/${E_only}" \
      clean unpack > /dev/null 2>&1
  local P_tmp="${FLAGS_chroot}/build/${FLAGS_board}/tmp/portage/"
  local E_dir="${E%%/*}/${E_only%.*}"
  sudo cp "${P_tmp}/${E_dir}/temp/environment" "${BUILD_DIR}"
}
function autodetect_build() {
  if [ ${FLAGS_use_emerged} -eq ${FLAGS_TRUE} ]; then
    info \
      "As requested, using emerged autotests already installed in your sysroot."
    FLAGS_build=${FLAGS_FALSE}
    return
  fi
  if ${ENTER_CHROOT} ./cros_workon --board=${FLAGS_board} list | \
      grep -q autotest; then
    info \
      "Detected cros_workon autotests, building your sources instead of emerged \
autotest. To use installed autotest, pass --use_emerged."
    FLAGS_build=${FLAGS_TRUE}
  else
    info \
      "Using emerged autotests already installed in your sysroot. To build \
autotests directly from your source directory instead, pass --build."
    FLAGS_build=${FLAGS_FALSE}
  fi
}
function main() {
cd $(dirname "$0")
@@ -130,26 +153,7 @@ function main() {
set -e
# Set global TMP for remote_access.sh's sake
# and if --results_dir_root is specified,
# set TMP and create dir appropriately
if [[ ${INSIDE_CHROOT} -eq 0 ]]; then
if [[ -n "${FLAGS_results_dir_root}" ]]; then
TMP=${FLAGS_chroot}${FLAGS_results_dir_root}
mkdir -p -m 777 ${TMP}
else
TMP=$(mktemp -d ${FLAGS_chroot}/tmp/run_remote_tests.XXXX)
fi
TMP_INSIDE_CHROOT=$(echo ${TMP#${FLAGS_chroot}})
else
if [[ -n "${FLAGS_results_dir_root}" ]]; then
TMP=${FLAGS_results_dir_root}
mkdir -p -m 777 ${TMP}
else
TMP=$(mktemp -d /tmp/run_remote_tests.XXXX)
fi
TMP_INSIDE_CHROOT=${TMP}
fi
create_tmp
trap cleanup EXIT
@@ -158,6 +162,23 @@ function main() {
learn_board
autotest_dir="${FLAGS_chroot}/build/${FLAGS_board}/usr/local/autotest"
ENTER_CHROOT=""
if [[ ${INSIDE_CHROOT} -eq 0 ]]; then
ENTER_CHROOT="./enter_chroot.sh --chroot ${FLAGS_chroot} --"
fi
if [ ${FLAGS_build} -eq ${FLAGS_FALSE} ]; then
autodetect_build
fi
if [ ${FLAGS_build} -eq ${FLAGS_TRUE} ]; then
autotest_dir="${SRC_ROOT}/third_party/autotest/files"
else
if [ ! -d "${autotest_dir}" ]; then
die "You need to emerge autotest-tests (or use --build)"
fi
fi
local control_files_to_run=""
local chrome_autotests="${CHROME_ROOT}/src/chrome/test/chromeos/autotest/files"
# Now search for tests which unambiguously include the given identifier
@@ -172,8 +193,7 @@ function main() {
! finds=$(find ${search_path} -maxdepth 2 -type f \( -name control.\* -or \
-name control \) | egrep -v "~$" | egrep "${test_request}")
if [[ -z "${finds}" ]]; then
echo_color "red" ">>> Cannot find match for \"${test_request}\""
exit 1
die "Cannot find match for \"${test_request}\""
fi
local matches=$(echo "${finds}" | wc -l)
if [[ ${matches} -gt 1 ]]; then
@@ -193,13 +213,14 @@ function main() {
echo ""
if [[ -z "${control_files_to_run}" ]]; then
echo_color "red" ">>> Found no control files"
exit 1
die "Found no control files"
fi
echo_color "yellow" ">>> Running the following control files:"
[ ${FLAGS_build} -eq ${FLAGS_TRUE} ] && prepare_build_dir "${autotest_dir}"
info "Running the following control files:"
for CONTROL_FILE in ${control_files_to_run}; do
echo_color "yellow" " * " "${CONTROL_FILE}"
info " * ${CONTROL_FILE}"
done
for control_file in ${control_files_to_run}; do
@@ -217,7 +238,7 @@ function main() {
option="-s"
fi
echo ""
echo_color "yellow" ">>> Running ${type} test " ${control_file}
info "Running ${type} test ${control_file}"
local control_file_name=$(basename "${control_file}")
local short_name=$(basename $(dirname "${control_file}"))
@@ -243,31 +264,49 @@ function main() {
RAN_ANY_TESTS=${FLAGS_TRUE}
local enter_chroot=""
local autotest="${GCLIENT_ROOT}/src/scripts/autotest_workon"
if [[ ${INSIDE_CHROOT} -eq 0 ]]; then
enter_chroot="./enter_chroot.sh --chroot ${FLAGS_chroot} --"
autotest="./autotest_workon"
fi
# Remove chrome autotest location prefix from control_file if needed
if [[ ${control_file:0:${#chrome_autotests}} == \
"${chrome_autotests}" ]]; then
control_file="${control_file:${#chrome_autotests}+1}"
echo_color "yellow" ">>> Running chrome autotest " ${control_file}
fi
if [[ -n "${FLAGS_args}" ]]; then
passthrough_args="--args=${FLAGS_args}"
info "Running chrome autotest ${control_file}"
fi
${enter_chroot} ${autotest} --board "${FLAGS_board}" -m "${FLAGS_remote}" \
--ssh-port ${FLAGS_ssh_port} \
"${option}" "${control_file}" -r "${results_dir}" ${verbose} \
"${passthrough_args}" >&2
export AUTOSERV_TEST_ARGS="${FLAGS_args}"
export AUTOSERV_ARGS="-m ${FLAGS_remote} \
--ssh-port ${FLAGS_ssh_port} \
${option} ${control_file} -r ${results_dir} ${verbose}"
if [ ${FLAGS_build} -eq ${FLAGS_FALSE} ]; then
cat > "${TMP}/run_test.sh" <<EOF
export AUTOSERV_TEST_ARGS="${AUTOSERV_TEST_ARGS}"
export AUTOSERV_ARGS="${AUTOSERV_ARGS}"
cd /home/${USER}/trunk/src/scripts
./autotest_run.sh --board "${FLAGS_board}"
EOF
chmod a+rx "${TMP}/run_test.sh"
${ENTER_CHROOT} ${TMP_INSIDE_CHROOT}/run_test.sh >&2
else
cp "${BUILD_DIR}/environment" "${TMP}/run_test.sh"
GRAPHICS_BACKEND=${GRAPHICS_BACKEND:-OPENGL}
if [ -n "${AUTOSERV_TEST_ARGS}" ]; then
AUTOSERV_TEST_ARGS="-a \"${AUTOSERV_TEST_ARGS}\""
fi
cat >> "${TMP}/run_test.sh" <<EOF
export GCLIENT_ROOT=/home/${USER}/trunk
export GRAPHICS_BACKEND=${GRAPHICS_BACKEND}
export SSH_AUTH_SOCK=${SSH_AUTH_SOCK} TMPDIR=/tmp SSH_AGENT_PID=${SSH_AGENT_PID}
export SYSROOT=/build/${FLAGS_board}
tc-export CC CXX PKG_CONFIG
cd ${INSIDE_BUILD_DIR}
./server/autoserv ${AUTOSERV_ARGS} ${AUTOSERV_TEST_ARGS}
EOF
sudo cp "${TMP}/run_test.sh" "${BUILD_DIR}"
sudo chmod a+rx "${BUILD_DIR}/run_test.sh"
${ENTER_CHROOT} sudo bash -c "${INSIDE_BUILD_DIR}/run_test.sh" >&2
fi
done
echo ""
echo_color "yellow" ">>> Test results:"
info "Test results:"
./generate_test_report "${TMP}" --strip="${TMP}/"
print_time_elapsed


@@ -37,7 +37,7 @@ FLAGS = gflags.FLAGS
gflags.DEFINE_string('board', None, 'Platform to build.')
gflags.DEFINE_string('base_image', None, 'Path to base image.')
gflags.DEFINE_string('firmware_updater', None, 'Path to firmware updater.')
gflags.DEFINE_boolean('start_devserver', False, 'Start devserver.')
class KillableProcess():
"""A killable process.
@@ -77,7 +77,7 @@ class KillableProcess():
def start_devserver():
"""Starts devserver."""
cmd = 'python devserver.py'
cmd = 'python devserver.py --factory_config miniomaha.conf'
print 'Running command: %s' % cmd
devserver_process = KillableProcess(cmd, cwd=DEVSERVER_DIR)
devserver_process.start(wait=False)
@@ -174,7 +174,8 @@ def main(argv):
FLAGS.firmware_updater,
folder=FLAGS.board, board=FLAGS.board)
start_devserver()
if FLAGS.start_devserver:
start_devserver()
if __name__ == '__main__':