Merge branch 'master' of ssh://gitrw.chromium.org:9222/crosutils

This commit is contained in:
Scott Zawalski 2010-12-08 13:57:10 -08:00
commit 78bd3bdff8
31 changed files with 1259 additions and 732 deletions

232
autotest
View File

@ -1,232 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# A python wrapper to call autotest ebuild.
import commands, logging, optparse, os, subprocess, sys
def run(cmd):
  """Execute cmd as a subprocess, forwarding its output to our own streams.

  Args:
    cmd: Command to run, as an argv-style list of strings.

  Returns:
    The subprocess's exit status (0 on success).
  """
  status = subprocess.call(cmd, stdout=sys.stdout, stderr=sys.stderr)
  return status
class MyOptionParser(optparse.OptionParser):
  """Override python's builtin OptionParser to accept any undefined args."""

  # Class-level flag flipped whenever print_help() runs, so callers can tell
  # that help output was produced and append the wrapped tools' help too.
  help = False

  def _process_args(self, largs, rargs, values):
    """Like the base implementation, but unknown options go into largs
    instead of raising, so they can be forwarded to autoserv/autotest_client.
    """
    # see /usr/lib64/python2.6/optparse.py line 1414-1463
    while rargs:
      arg = rargs[0]
      # We handle bare "--" explicitly, and bare "-" is handled by the
      # standard arg handler since the short arg case ensures that the
      # len of the opt string is greater than 1.
      if arg == "--":
        del rargs[0]
        return
      elif arg[0:2] == "--":
        # process a single long option (possibly with value(s))
        try:
          self._process_long_opt(rargs, values)
        except optparse.BadOptionError:
          # Unknown long option: keep it for the wrapped tool.
          largs.append(arg)
      elif arg[:1] == "-" and len(arg) > 1:
        # process a cluster of short options (possibly with
        # value(s) for the last one only)
        try:
          self._process_short_opts(rargs, values)
        except optparse.BadOptionError:
          # Unknown short option cluster: keep it for the wrapped tool.
          largs.append(arg)
      elif self.allow_interspersed_args:
        largs.append(arg)
        del rargs[0]
      else:
        return  # stop now, leave this arg in rargs

  def print_help(self, file=None):
    """Print our help, then record that help was requested."""
    optparse.OptionParser.print_help(self, file)
    MyOptionParser.help = True
# Option parser that tolerates flags it does not define: unrecognized flags
# fall through to the wrapped autoserv / autotest_client invocations.
parser = MyOptionParser()
parser.allow_interspersed_args = True

# Default board, taken from the caller's environment when present.
DEFAULT_BOARD = os.environ.get('DEFAULT_BOARD', '')

parser.add_option('--args', dest='args', action='store',
                  default='',
                  help='The arguments to pass to the test control file.')
parser.add_option('--autox', dest='autox', action='store_true',
                  default=True,
                  help='Build autox along with autotest [default].')
parser.add_option('--noautox', dest='autox', action='store_false',
                  help='Don\'t build autox along with autotest.')
parser.add_option('--board', dest='board', action='store',
                  default=DEFAULT_BOARD,
                  help='The board for which you are building autotest.')
parser.add_option('--build', dest='build', action='store',
                  help='Only prebuild client tests, do not run tests.')
parser.add_option('--buildcheck', dest='buildcheck', action='store_true',
                  default=True,
                  help='Fail if tests fail to build [default].')
parser.add_option('--nobuildcheck', dest='buildcheck', action='store_false',
                  help='Ignore test build failures.')
parser.add_option('--jobs', dest='jobs', action='store', type=int,
                  default=-1,
                  help='How many packages to build in parallel at maximum.')
# Bug fix: help text previously read 'Prompt user when building all tests.',
# the inverse of what the flag actually does (it suppresses the prompt).
parser.add_option('--noprompt', dest='noprompt', action='store_true',
                  help='Don\'t prompt user when building all tests.')

# Entry points of the wrapped autotest tools; parse_args_and_help() appends
# their --help output to ours.
AUTOSERV = '../third_party/autotest/files/server/autoserv'
AUTOTEST_CLIENT = '../third_party/autotest/files/client/bin/autotest_client'
def parse_args_and_help():
  """Parse the command line; on help, also show the wrapped tools' help.

  Temporarily replaces sys.exit with a no-op so optparse's automatic exit on
  --help does not terminate us before we can append the help output of
  autoserv (or autotest_client in --build mode).

  Returns:
    (options, args) as produced by the module-level parser.
  """
  def nop(_):
    pass
  # Stub out sys.exit while optparse runs; restored immediately afterwards.
  sys_exit = sys.exit
  sys.exit = nop
  options, args = parser.parse_args()
  sys.exit = sys_exit
  if not args and not options.build:
    # Nothing to do: force help output (this also sets MyOptionParser.help).
    parser.print_help()
  if MyOptionParser.help:
    if options.build:
      print
      print 'Options inherited from autotest_client, which is used in build',
      print 'only mode.'
      run([AUTOTEST_CLIENT, '--help'])
    else:
      print
      print 'Options inherited from autoserv:'
      run([AUTOSERV, '--help'])
    sys.exit(0)
  return options, args
def assert_inside_chroot(common_sh):
  """Exit unless we are running inside the chroot.

  Sources common.sh in a subshell and runs its assert_inside_chroot helper;
  on failure, echoes the helper's output to stderr and exits with the same
  status.

  Args:
    common_sh: Path to the common.sh script to source.
  """
  status, output = commands.getstatusoutput('/bin/bash -c ". %s && '
                                            'assert_inside_chroot"' % common_sh)
  # Bug fix: was 'status is not 0', an identity (not value) comparison that
  # only works by CPython's small-int caching; compare values instead.
  if status != 0:
    print >> sys.stderr, output
    sys.exit(status)
def set_common_env(common_sh, env_var):
  """Copy a variable defined by common.sh into this process's environment.

  Sources common_sh in a subshell, echoes the value of env_var, and stores
  the captured value into os.environ under the same name.

  Args:
    common_sh: Path to the common.sh script to source.
    env_var: Name of the environment variable to import.
  """
  cmd = '/bin/bash -c \'. %s && echo $%s\'' % (common_sh, env_var)
  os.environ[env_var] = commands.getoutput(cmd)
def die(common_sh, msg):
  """Print msg via common.sh's die helper (for consistent formatting), exit 1.

  Args:
    common_sh: Path to the common.sh script to source.
    msg: Error message to report.
  """
  output = commands.getoutput('/bin/bash -c \'. %s && die "%s"\'' %
                              (common_sh, msg))
  print >> sys.stderr, output
  sys.exit(1)
def build_autotest(options):
  """Prebuild client tests by emerging chromeos-base/autotest.

  Args:
    options: Parsed command-line options (uses jobs, autox, buildcheck,
      board, build, noprompt).

  Returns:
    The exit status of the emerge command (0 on success).
  """
  environ = os.environ
  if options.jobs != -1:
    emerge_jobs = '--jobs=%d' % options.jobs
  else:
    emerge_jobs = ''
  # Decide on USE flags based on options
  use_flag = environ.get('USE', '')
  if not options.autox:
    use_flag = use_flag + ' -autox'
  if options.buildcheck:
    use_flag = use_flag + ' buildcheck'
  # A board overlay may blacklist individual site tests, one name per line.
  board_blacklist_file = ('%s/src/overlays/overlay-%s/autotest-blacklist' %
                          (os.environ['GCLIENT_ROOT'], options.board))
  if os.path.exists(board_blacklist_file):
    blacklist = [line.strip()
                 for line in open(board_blacklist_file).readlines()]
  else:
    blacklist = []
  # Base set of generic tests, extended below with every non-blacklisted
  # site test that has a matching <name>/<name>.py control module.
  all_tests = ('compilebench,dbench,disktest,fsx,hackbench,iperf,netperf2,'
               'netpipe,unixbench')
  site_tests = '../third_party/autotest/files/client/site_tests'
  for site_test in os.listdir(site_tests):
    test_path = os.path.join(site_tests, site_test)
    test_py = os.path.join(test_path, '%s.py' % site_test)
    if (os.path.exists(test_path) and os.path.isdir(test_path) and
        os.path.exists(test_py) and os.path.isfile(test_py) and
        site_test not in blacklist):
      all_tests += ',' + site_test
  if 'all' == options.build.lower():
    # Building everything is slow; confirm unless --noprompt was given.
    if options.noprompt is not True:
      print 'You want to pre-build all client tests and it may take a long',
      print 'time to finish.'
      print 'Are you sure you want to continue?(N/y)',
      answer = sys.stdin.readline()
      if 'y' != answer[0].lower():
        print 'Use --build to specify tests you like to pre-compile. '
        print 'E.g.: ./autotest --build=disktest,hardware_SAT'
        sys.exit(0)
    test_list = all_tests
  else:
    test_list = options.build
  # Pass the build configuration to the ebuild via the environment.
  environ['FEATURES'] = ('%s -buildpkg -collision-protect' %
                         environ.get('FEATURES', ''))
  environ['TEST_LIST'] = test_list
  environ['USE'] = use_flag
  emerge_cmd = ['emerge-%s' % options.board,
                'chromeos-base/autotest']
  if emerge_jobs:
    emerge_cmd.append(emerge_jobs)
  return run(emerge_cmd)
def run_autoserv(options, args):
  """Run the autotest ebuild's test phase, forwarding args to autoserv.

  Args:
    options: Parsed command-line options (uses args and board).
    args: Leftover command-line arguments, passed through to autoserv.
  """
  env = os.environ
  # Communicate the autoserv invocation to the ebuild via the environment.
  env['AUTOSERV_TEST_ARGS'] = options.args
  env['AUTOSERV_ARGS'] = ' '.join(args)
  env['FEATURES'] = ('%s -buildpkg -digest noauto' %
                     env.get('FEATURES', ''))
  ebuild = ('../third_party/chromiumos-overlay/chromeos-base/'
            'autotest/autotest-0.0.1.ebuild')
  run(['ebuild-%s' % options.board, ebuild, 'clean', 'unpack', 'test'])
def main():
  """Entry point: build client tests (--build) or run tests via autoserv."""
  me = sys.argv[0]
  # common.sh lives next to this script; it provides assert_inside_chroot,
  # die, and the GCLIENT_ROOT value imported below.
  common_sh = os.path.join(os.path.dirname(me), 'common.sh')
  assert_inside_chroot(common_sh)
  set_common_env(common_sh, 'GCLIENT_ROOT')
  options, args = parse_args_and_help()
  if not options.board:
    die(common_sh, 'Missing --board argument.')
  if options.build:
    status = build_autotest(options)
    if status:
      die(common_sh, 'build_autotest failed.')
  else:
    # ssh refuses group/world-readable private keys; force mode 0400 on the
    # shared testing key before autoserv tries to use it.
    ssh_key_file = os.path.join(os.path.dirname(me),
                                'mod_for_test_scripts/ssh_keys/testing_rsa')
    os.chmod(ssh_key_file, 0400)
    run_autoserv(options, args)

if __name__ == '__main__':
  main()

View File

@ -1,56 +0,0 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Provides common commands for dealing running/building autotest
# Load the shared shell helpers and discover the default board.
. "$(dirname "$0")/common.sh"
get_default_board

DEFINE_string board "$DEFAULT_BOARD" \
  "The board for which you are building autotest"
# Verify that ${FLAGS_board} (ignoring any _variant suffix) names an overlay
# under ${SRC_ROOT}/overlays; otherwise print the supported boards and exit.
function check_board() {
  local board_names=""
  local index=1
  local found=0
  local board_basename=$(echo "${FLAGS_board}" |cut -d '_' -f 1)
  for overlay_path in "${SRC_ROOT}"/overlays/overlay-*
  do
    local overlay=$(basename "${overlay_path}")
    local board="${overlay#overlay-}"
    board_names[index]="${board}"
    # Bug fix: 'index+=1' on a non-integer variable appends text ("1" ->
    # "11" -> "111"), producing sparse array indices; use arithmetic so the
    # array is indexed 1,2,3,...
    : $(( index += 1 ))
    if [ "${board_basename}" == "${board}" ]
    then
      found=1
    fi
  done
  if [ ${found} -eq 0 ]
  then
    echo "You are required to specify a supported board from the command line."
    echo "Supported boards are:"
    for board in ${board_names[@]}
    do
      echo ${board}
    done
    # NOTE(review): exits 0 even though this is an error path, so callers
    # cannot detect the failure -- consider 'exit 1'. TODO: confirm intent.
    exit 0
  fi
}
# Populates the chroot's /usr/local/autotest/$FLAGS_board directory based on
# the given source directory.
# args:
#   $1 - original source directory
#   $2 - target directory
function update_chroot_autotest() {
  local original=$1
  local target=$2
  echo "Updating chroot Autotest from ${original} to ${target}..."
  sudo mkdir -p "${target}"
  # NOTE(review): world-writable so later non-root copies/updates succeed --
  # confirm 777 is intentional.
  sudo chmod 777 "${target}"
  # -u copies only newer files; the brace list must stay unquoted so the
  # shell expands it.
  cp -fpru ${original}/{client,conmux,server,tko,utils,global_config.ini,shadow_config.ini} ${target}
}

View File

@ -26,6 +26,13 @@ _PACKAGE_FILE = '%(buildroot)s/src/scripts/cbuildbot_package.list'
ARCHIVE_BASE = '/var/www/archive'
ARCHIVE_COUNT = 10
# Currently, both the full buildbot and the preflight buildbot store their
# data in a variable named PORTAGE_BINHOST, but they're in different files.
# We're planning on joining the two files soon and renaming the full binhost
# to FULL_BINHOST.
_FULL_BINHOST = 'PORTAGE_BINHOST'
_PREFLIGHT_BINHOST = 'PORTAGE_BINHOST'
# ======================== Utility functions ================================
def MakeDir(path, parents=False):
@ -305,6 +312,28 @@ def _MakeChroot(buildroot):
RunCommand(['./make_chroot', '--fast'], cwd=cwd)
def _GetPortageEnvVar(buildroot, board, envvar):
  """Get a portage environment variable for the specified board, if any.

  Args:
    buildroot: The root directory where the build occurs. Must be an absolute
      path.
    board: Board type that was built on this machine. E.g. x86-generic. If this
      is None, get the env var from the host.
    envvar: The environment variable to get. E.g. 'PORTAGE_BINHOST'.

  Returns:
    The value of the environment variable, as a string. If no such variable
    can be found, return the empty string.
  """
  cwd = os.path.join(buildroot, 'src', 'scripts')
  # portageq is board-suffixed inside the chroot (e.g. portageq-x86-generic);
  # the bare name queries the host.
  portageq = 'portageq'
  if board:
    portageq += '-%s' % board
  binhost = RunCommand([portageq, 'envvar', envvar], cwd=cwd,
                       redirect_stdout=True, enter_chroot=True, error_ok=True)
  return binhost.rstrip('\n')
def _SetupBoard(buildroot, board='x86-generic'):
"""Wrapper around setup_board."""
cwd = os.path.join(buildroot, 'src', 'scripts')
@ -368,8 +397,8 @@ def _RunSmokeSuite(buildroot, results_dir):
cwd = os.path.join(buildroot, 'src', 'scripts')
RunCommand(['bin/cros_run_vm_test',
'--no_graphics',
'--test_case=suite_Smoke',
'--results_dir_root=%s' % results_dir,
'suite_Smoke',
], cwd=cwd, error_ok=False)
@ -503,10 +532,45 @@ def _ResolveOverlays(buildroot, overlays):
elif overlays == 'both':
paths = [public_overlay, private_overlay]
else:
Die('Incorrect overlay configuration: %s' % overlays)
Info('No overlays found.')
paths = []
return paths
def _UploadPrebuilts(buildroot, board, overlay_config, binhosts):
  """Upload prebuilts.

  Args:
    buildroot: The root directory where the build occurs.
    board: Board type that was built on this machine
    overlay_config: A string describing which overlays you want.
      'private': Just the private overlay.
      'public': Just the public overlay.
      'both': Both the public and private overlays.
    binhosts: The URLs of the current binhosts. Binaries that are already
      present will not be uploaded twice. Empty URLs will be ignored.
  """
  cwd = os.path.join(buildroot, 'src', 'scripts')
  cmd = [os.path.join(cwd, 'prebuilt.py'),
         '--sync-binhost-conf',
         '--build-path', buildroot,
         '--board', board,
         '--prepend-version', 'preflight',
         '--key', _PREFLIGHT_BINHOST]
  # Skip empty/None binhost URLs rather than passing empty flags along.
  for binhost in binhosts:
    if binhost:
      cmd.extend(['--previous-binhost-url', binhost])
  if overlay_config == 'public':
    # Public prebuilts go straight to Google Storage.
    cmd.extend(['--upload', 'gs://chromeos-prebuilt'])
  else:
    assert overlay_config in ('private', 'both')
    # Private (or mixed) prebuilts go to the internal prebuilt server.
    cmd.extend(['--upload', 'chromeos-images:/var/www/prebuilt/',
                '--binhost-base-url', 'http://chromeos-prebuilt'])
  RunCommand(cmd, cwd=cwd)
def main():
# Parse options
usage = "usage: %prog [options] cbuildbot_config"
@ -558,28 +622,41 @@ def main():
parser.print_usage()
sys.exit(1)
# Calculate list of overlay directories.
overlays = _ResolveOverlays(buildroot, buildconfig['overlays'])
try:
_PreFlightRinse(buildroot, buildconfig['board'], tracking_branch, overlays)
# Calculate list of overlay directories.
rev_overlays = _ResolveOverlays(buildroot, buildconfig['rev_overlays'])
push_overlays = _ResolveOverlays(buildroot, buildconfig['push_overlays'])
# We cannot push to overlays that we don't rev.
assert set(push_overlays).issubset(set(rev_overlays))
# Either has to be a master or not have any push overlays.
assert buildconfig['master'] or not push_overlays
board = buildconfig['board']
old_binhost = None
_PreFlightRinse(buildroot, buildconfig['board'], tracking_branch,
rev_overlays)
chroot_path = os.path.join(buildroot, 'chroot')
boardpath = os.path.join(chroot_path, 'build', board)
if options.sync:
if options.clobber or not os.path.isdir(buildroot):
_FullCheckout(buildroot, tracking_branch, url=options.url)
else:
old_binhost = _GetPortageEnvVar(buildroot, board, _FULL_BINHOST)
_IncrementalCheckout(buildroot)
new_binhost = _GetPortageEnvVar(buildroot, board, _FULL_BINHOST)
if old_binhost and old_binhost != new_binhost:
RunCommand(['sudo', 'rm', '-rf', boardpath])
# Check that all overlays can be found.
for path in overlays:
assert ':' not in path, 'Overlay must not contain colons: %s' % path
for path in rev_overlays:
if not os.path.isdir(path):
Die('Missing overlay: %s' % path)
chroot_path = os.path.join(buildroot, 'chroot')
if not os.path.isdir(chroot_path):
_MakeChroot(buildroot)
boardpath = os.path.join(chroot_path, 'build', buildconfig['board'])
if not os.path.isdir(boardpath):
_SetupBoard(buildroot, board=buildconfig['board'])
@ -589,7 +666,7 @@ def main():
options.chrome_rev)
elif buildconfig['uprev']:
_UprevPackages(buildroot, tracking_branch, revisionfile,
buildconfig['board'], overlays)
buildconfig['board'], rev_overlays)
_EnableLocalAccount(buildroot)
# Doesn't rebuild without acquiring more source.
@ -611,7 +688,7 @@ def main():
_RunSmokeSuite(buildroot, test_results_dir)
finally:
if not options.debug:
archive_full_path=os.path.join(options.gsutil_archive,
archive_full_path = os.path.join(options.gsutil_archive,
str(options.buildnumber))
_ArchiveTestResults(buildroot, buildconfig['board'],
test_results_dir=test_results_dir,
@ -624,8 +701,11 @@ def main():
if buildconfig['master']:
# Master bot needs to check if the other slaves completed.
if cbuildbot_comm.HaveSlavesCompleted(config):
if not options.debug:
_UploadPrebuilts(buildroot, board, buildconfig['rev_overlays'],
[new_binhost])
_UprevPush(buildroot, tracking_branch, buildconfig['board'],
overlays, options.debug)
push_overlays, options.debug)
else:
Die('CBUILDBOT - One of the slaves has failed!!!')

View File

@ -19,9 +19,12 @@ hostname -- Needed for 'important' slaves. The hostname of the bot. Should
match hostname in slaves.cfg in buildbot checkout.
unittests -- Runs unittests for packages.
smoke_bvt -- Runs the test smoke suite in a qemu-based VM using KVM.
overlays -- Select what overlays to look at. This can be 'public', 'private'
or 'both'. There should only be one master bot pushing changes to
each overlay per branch.
rev_overlays -- Select what overlays to look at for revving. This can be
'public', 'private' or 'both'.
push_overlays -- Select what overlays to push to. This should be a subset of
rev_overlays for the particular builder. Must be None if
not a master. There should only be one master bot pushing
changes to each overlay per branch.
"""
@ -33,7 +36,8 @@ config['default'] = {
'important' : False,
'unittests' : False,
'smoke_bvt' : False,
'overlays': 'public',
'rev_overlays': 'public',
'push_overlays': None,
}
config['x86-generic-pre-flight-queue'] = {
'board' : 'x86-generic',
@ -43,7 +47,8 @@ config['x86-generic-pre-flight-queue'] = {
'hostname' : 'chromeosbuild2',
'unittests' : True,
'smoke_bvt' : True,
'overlays': 'public',
'rev_overlays': 'public',
'push_overlays': 'public',
}
config['x86-mario-pre-flight-queue'] = {
'board' : 'x86-mario',
@ -52,7 +57,8 @@ config['x86-mario-pre-flight-queue'] = {
'important' : False,
'unittests' : True,
'smoke_bvt' : True,
'overlays': 'private',
'rev_overlays': 'both',
'push_overlays': 'private',
}
config['x86-mario-pre-flight-branch'] = {
'board' : 'x86-mario',
@ -61,7 +67,8 @@ config['x86-mario-pre-flight-branch'] = {
'important' : False,
'unittests' : True,
'smoke_bvt' : True,
'overlays': 'both',
'rev_overlays': 'both',
'push_overlays': 'both',
}
config['x86_agz_bin'] = {
'board' : 'x86-agz',
@ -70,7 +77,8 @@ config['x86_agz_bin'] = {
'important' : False,
'unittests' : True,
'smoke_bvt' : True,
'overlays': 'private',
'rev_overlays': 'both',
'push_overlays': None,
}
config['x86_dogfood_bin'] = {
'board' : 'x86-dogfood',
@ -79,7 +87,8 @@ config['x86_dogfood_bin'] = {
'important' : False,
'unittests' : True,
'smoke_bvt' : True,
'overlays': 'private',
'rev_overlays': 'both',
'push_overlays': None,
}
config['x86_pineview_bin'] = {
'board' : 'x86-pineview',
@ -87,7 +96,8 @@ config['x86_pineview_bin'] = {
'master' : False,
'important' : False,
'unittests': True,
'overlays': 'public',
'rev_overlays': 'public',
'push_overlays': None,
}
config['arm_tegra2_bin'] = {
'board' : 'tegra2',
@ -95,7 +105,8 @@ config['arm_tegra2_bin'] = {
'master' : False,
'important' : False,
'unittests' : False,
'overlays': 'public',
'rev_overlays': 'public',
'push_overlays': None,
}
config['arm_generic_bin'] = {
'board' : 'arm-generic',
@ -103,5 +114,6 @@ config['arm_generic_bin'] = {
'master' : False,
'important' : False,
'unittests' : False,
'overlays': 'public',
'rev_overlays': 'public',
'push_overlays': None,
}

View File

@ -151,10 +151,13 @@ class CBuildBotTest(mox.MoxTestBase):
m_file.read().AndReturn(self._test_string)
m_file.close()
drop_file = cbuildbot._PACKAGE_FILE % {'buildroot': self._buildroot}
cbuildbot.RunCommand(['./cros_mark_as_stable', '--all',
'--board=%s' % self._test_board,
'--overlays=%s' % ':'.join(self._chroot_overlays),
'--tracking_branch=cros/master', 'commit'],
'--tracking_branch=cros/master',
'--drop_file=%s' % ReinterpretPathForChroot(drop_file),
'commit'],
cwd='%s/src/scripts' % self._buildroot,
enter_chroot=True)
@ -174,10 +177,13 @@ class CBuildBotTest(mox.MoxTestBase):
m_file.read().AndReturn('None')
m_file.close()
drop_file = cbuildbot._PACKAGE_FILE % {'buildroot': self._buildroot}
cbuildbot.RunCommand(['./cros_mark_as_stable', '--all',
'--board=%s' % self._test_board,
'--overlays=%s' % ':'.join(self._chroot_overlays),
'--tracking_branch=cros/master', 'commit'],
'--tracking_branch=cros/master',
'--drop_file=%s' % ReinterpretPathForChroot(drop_file),
'commit'],
cwd='%s/src/scripts' % self._buildroot,
enter_chroot=True)
@ -187,6 +193,43 @@ class CBuildBotTest(mox.MoxTestBase):
self._overlays)
self.mox.VerifyAll()
def testGetPortageEnvVar(self):
"""Basic test case for _GetPortageEnvVar function."""
envvar = 'EXAMPLE'
cbuildbot.RunCommand(mox.And(mox.IsA(list), mox.In(envvar)),
cwd='%s/src/scripts' % self._buildroot,
redirect_stdout=True, enter_chroot=True,
error_ok=True).AndReturn('RESULT\n')
self.mox.ReplayAll()
result = cbuildbot._GetPortageEnvVar(self._buildroot, self._test_board,
envvar)
self.mox.VerifyAll()
self.assertEqual(result, 'RESULT')
def testUploadPublicPrebuilts(self):
"""Test _UploadPrebuilts with a public location."""
binhost = 'http://www.example.com'
binhosts = [binhost, None]
check = mox.And(mox.IsA(list), mox.In(binhost), mox.Not(mox.In(None)),
mox.In('gs://chromeos-prebuilt'))
cbuildbot.RunCommand(check, cwd='%s/src/scripts' % self._buildroot)
self.mox.ReplayAll()
cbuildbot._UploadPrebuilts(self._buildroot, self._test_board, 'public',
binhosts)
self.mox.VerifyAll()
def testUploadPrivatePrebuilts(self):
"""Test _UploadPrebuilts with a private location."""
binhost = 'http://www.example.com'
binhosts = [binhost, None]
check = mox.And(mox.IsA(list), mox.In(binhost), mox.Not(mox.In(None)),
mox.In('chromeos-images:/var/www/prebuilt/'))
cbuildbot.RunCommand(check, cwd='%s/src/scripts' % self._buildroot)
self.mox.ReplayAll()
cbuildbot._UploadPrebuilts(self._buildroot, self._test_board, 'private',
binhosts)
self.mox.VerifyAll()
if __name__ == '__main__':
unittest.main()

View File

@ -6,14 +6,17 @@
import optparse
import os
import re
import sys
import unittest
import urllib
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from cros_build_lib import Die
from cros_build_lib import Info
from cros_build_lib import ReinterpretPathForChroot
from cros_build_lib import RunCommand
from cros_build_lib import RunCommandCaptureOutput
from cros_build_lib import Warning
# VM Constants.
@ -29,6 +32,11 @@ global remote
global target_image_path
global vm_graphics_flag
class UpdateException(Exception):
  """Exception thrown when UpdateImage or UpdateUsingPayload fail.

  Attributes:
    code: Exit status of the failed update command.
    stdout: Captured output of the failed update command.
  """

  def __init__(self, code, stdout):
    # Initialize the base class so str(err) carries the captured output
    # instead of being empty.
    Exception.__init__(self, stdout)
    self.code = code
    self.stdout = stdout
class AUTest(object):
"""Abstract interface that defines an Auto Update test."""
@ -40,6 +48,9 @@ class AUTest(object):
# Set these up as they are used often.
self.crosutils = os.path.join(os.path.dirname(__file__), '..')
self.crosutilsbin = os.path.join(os.path.dirname(__file__))
self.download_folder = os.path.join(self.crosutils, 'latest_download')
if not os.path.exists(self.download_folder):
os.makedirs(self.download_folder)
def GetStatefulChangeFlag(self, stateful_change):
"""Returns the flag to pass to image_to_vm for the stateful change."""
@ -74,11 +85,36 @@ class AUTest(object):
Warning('Delta update failed, disabling delta updates and retrying.')
self.use_delta_updates = False
self.source_image = ''
self.UpdateImage(image)
self._UpdateImageReportError(image)
else:
self.UpdateImage(image)
self._UpdateImageReportError(image)
def PrepareBase(self):
def _UpdateImageReportError(self, image_path, stateful_change='old'):
"""Calls UpdateImage and reports any error to the console.
Still throws the exception.
"""
try:
self.UpdateImage(image_path, stateful_change)
except UpdateException as err:
# If the update fails, print it out
Warning(err.stdout)
raise
def _AttemptUpdateWithPayloadExpectedFailure(self, payload, expected_msg):
# This update is expected to fail...
try:
self.UpdateUsingPayload(payload)
except UpdateException as err:
# Will raise ValueError if expected is not found.
if re.search(re.escape(expected_msg), err.stdout, re.MULTILINE):
return
Warning("Didn't find '%s' in:" % expected_msg)
Warning(err.stdout)
self.fail('We managed to update when failure was expected')
def PrepareBase(self, image_path):
"""Prepares target with base_image_path."""
pass
@ -95,6 +131,15 @@ class AUTest(object):
"""
pass
def UpdateUsingPayload(self, update_path, stateful_change='old'):
"""Updates target with the pre-generated update stored in update_path
Args:
update_path: Path to the image to update with. This directory should
contain both update.gz, and stateful.image.gz
"""
pass
def VerifyImage(self, percent_required_to_pass):
"""Verifies the image with tests.
@ -140,7 +185,7 @@ class AUTest(object):
"""
# Just make sure some tests pass on original image. Some old images
# don't pass many tests.
self.PrepareBase()
self.PrepareBase(image_path=base_image_path)
# TODO(sosa): move to 100% once we start testing using the autotest paired
# with the dev channel.
percent_passed = self.VerifyImage(10)
@ -163,7 +208,7 @@ class AUTest(object):
"""
# Just make sure some tests pass on original image. Some old images
# don't pass many tests.
self.PrepareBase()
self.PrepareBase(image_path=base_image_path)
# TODO(sosa): move to 100% once we start testing using the autotest paired
# with the dev channel.
percent_passed = self.VerifyImage(10)
@ -178,6 +223,40 @@ class AUTest(object):
self.TryDeltaAndFallbackToFull(target_image_path, base_image_path, 'clean')
self.VerifyImage(percent_passed)
def testPartialUpdate(self):
"""Tests what happens if we attempt to update with a truncated payload."""
# Preload with the version we are trying to test.
self.PrepareBase(image_path=target_image_path)
# Image can be updated at:
# ~chrome-eng/chromeos/localmirror/autest-images
url = 'http://gsdview.appspot.com/chromeos-localmirror/' \
'autest-images/truncated_image.gz'
payload = os.path.join(self.download_folder, 'truncated_image.gz')
# Read from the URL and write to the local file
urllib.urlretrieve(url, payload)
expected_msg='download_hash_data == update_check_response_hash failed'
self._AttemptUpdateWithPayloadExpectedFailure(payload, expected_msg)
def testCorruptedUpdate(self):
"""Tests what happens if we attempt to update with a corrupted payload."""
# Preload with the version we are trying to test.
self.PrepareBase(image_path=target_image_path)
# Image can be updated at:
# ~chrome-eng/chromeos/localmirror/autest-images
url = 'http://gsdview.appspot.com/chromeos-localmirror/' \
'autest-images/corrupted_image.gz'
payload = os.path.join(self.download_folder, 'corrupted.gz')
# Read from the URL and write to the local file
urllib.urlretrieve(url, payload)
# This update is expected to fail...
expected_msg='zlib inflate() error:-3'
self._AttemptUpdateWithPayloadExpectedFailure(payload, expected_msg)
class RealAUTest(unittest.TestCase, AUTest):
"""Test harness for updating real images."""
@ -185,23 +264,40 @@ class RealAUTest(unittest.TestCase, AUTest):
def setUp(self):
AUTest.setUp(self)
def PrepareBase(self):
def PrepareBase(self, image_path):
"""Auto-update to base image to prepare for test."""
self.UpdateImage(base_image_path)
self._UpdateImageReportError(image_path)
def UpdateImage(self, image_path, stateful_change='old'):
"""Updates a remote image using image_to_live.sh."""
stateful_change_flag = self.GetStatefulChangeFlag(stateful_change)
RunCommand([
(code, stdout, stderr) = RunCommandCaptureOutput([
'%s/image_to_live.sh' % self.crosutils,
'--image=%s' % image_path,
'--remote=%s' % remote,
stateful_change_flag,
'--verify',
'--src_image=%s' % self.source_image,
], enter_chroot=False)
'--src_image=%s' % self.source_image
])
if code != 0:
raise UpdateException(code, stdout)
def UpdateUsingPayload(self, update_path, stateful_change='old'):
"""Updates a remote image using image_to_live.sh."""
stateful_change_flag = self.GetStatefulChangeFlag(stateful_change)
(code, stdout, stderr) = RunCommandCaptureOutput([
'%s/image_to_live.sh' % self.crosutils,
'--payload=%s' % update_path,
'--remote=%s' % remote,
stateful_change_flag,
'--verify',
])
if code != 0:
raise UpdateException(code, stdout)
def VerifyImage(self, percent_required_to_pass):
"""Verifies an image using run_remote_tests.sh with verification suite."""
@ -233,17 +329,19 @@ class VirtualAUTest(unittest.TestCase, AUTest):
AUTest.setUp(self)
self._KillExistingVM(_KVM_PID_FILE)
def PrepareBase(self):
def PrepareBase(self, image_path):
"""Creates an update-able VM based on base image."""
self.vm_image_path = '%s/chromiumos_qemu_image.bin' % os.path.dirname(
base_image_path)
image_path)
Info('Creating: %s' % self.vm_image_path)
if not os.path.exists(self.vm_image_path):
Info('Qemu image %s not found, creating one.' % self.vm_image_path)
RunCommand(['%s/image_to_vm.sh' % self.crosutils,
'--full',
'--from=%s' % ReinterpretPathForChroot(
os.path.dirname(base_image_path)),
os.path.dirname(image_path)),
'--vdisk_size=%s' % _FULL_VDISK_SIZE,
'--statefulfs_size=%s' % _FULL_STATEFULFS_SIZE,
'--board=%s' % board,
@ -251,6 +349,9 @@ class VirtualAUTest(unittest.TestCase, AUTest):
else:
Info('Using existing VM image %s' % self.vm_image_path)
Info('Testing for %s' % self.vm_image_path)
self.assertTrue(os.path.exists(self.vm_image_path))
def UpdateImage(self, image_path, stateful_change='old'):
@ -259,16 +360,41 @@ class VirtualAUTest(unittest.TestCase, AUTest):
if self.source_image == base_image_path:
self.source_image = self.vm_image_path
RunCommand(['%s/cros_run_vm_update' % self.crosutilsbin,
'--update_image_path=%s' % image_path,
'--vm_image_path=%s' % self.vm_image_path,
'--snapshot',
vm_graphics_flag,
'--persist',
'--kvm_pid=%s' % _KVM_PID_FILE,
stateful_change_flag,
'--src_image=%s' % self.source_image,
], enter_chroot=False)
(code, stdout, stderr) = RunCommandCaptureOutput([
'%s/cros_run_vm_update' % self.crosutilsbin,
'--update_image_path=%s' % image_path,
'--vm_image_path=%s' % self.vm_image_path,
'--snapshot',
vm_graphics_flag,
'--persist',
'--kvm_pid=%s' % _KVM_PID_FILE,
stateful_change_flag,
'--src_image=%s' % self.source_image,
])
if code != 0:
raise UpdateException(code, stdout)
def UpdateUsingPayload(self, update_path, stateful_change='old'):
"""Updates a remote image using image_to_live.sh."""
stateful_change_flag = self.GetStatefulChangeFlag(stateful_change)
if self.source_image == base_image_path:
self.source_image = self.vm_image_path
(code, stdout, stderr) = RunCommandCaptureOutput([
'%s/cros_run_vm_update' % self.crosutilsbin,
'--payload=%s' % update_path,
'--vm_image_path=%s' % self.vm_image_path,
'--snapshot',
vm_graphics_flag,
'--persist',
'--kvm_pid=%s' % _KVM_PID_FILE,
stateful_change_flag,
'--src_image=%s' % self.source_image,
])
if code != 0:
raise UpdateException(code, stdout)
def VerifyImage(self, percent_required_to_pass):
"""Runs vm smoke suite to verify image."""

View File

@ -31,7 +31,7 @@ from cros_build_lib import RunCommand, Info, Warning
BASE_CHROME_SVN_URL = 'http://src.chromium.org/svn'
# Command for which chrome ebuild to uprev.
TIP_OF_TRUNK, LATEST_RELEASE, STICKY = 'tot', 'latest_release', 'sticky_release'
TIP_OF_TRUNK, LATEST_RELEASE, STICKY = 'tot', 'latest_release', 'stable_release'
CHROME_REV = [TIP_OF_TRUNK, LATEST_RELEASE, STICKY]
# Helper regex's for finding ebuilds.

View File

@ -15,6 +15,8 @@ get_default_board
# Flags
DEFINE_string board "$DEFAULT_BOARD" "The name of the board to set up."
DEFINE_string board_overlay "" "Location of the board overlay."
DEFINE_boolean primary_only ${FLAGS_FALSE} \
"Only return the path to the board's primary overlay. (Default: false)"
DEFINE_string variant "" "Board variant."
# Parse command line flags
@ -40,6 +42,53 @@ if [[ $FLAGS_variant =~ [_\ ] ]] ; then
exit 1
fi
#
# Check that the provided variant overlay name is valid.
#
if [ -n "$FLAGS_variant" ] ; then
VARIANT_NAME="overlay-variant-${FLAGS_board}-${FLAGS_variant}"
VARIANT_OVERLAY="${SRC_ROOT}/overlays/${VARIANT_NAME}"
PRIVATE_VARIANT_NAME="overlay-variant-${FLAGS_board}-${FLAGS_variant}-private"
PRIVATE_VARIANT_OVERLAY="${SRC_ROOT}/private-overlays/${PRIVATE_VARIANT_NAME}"
if [ ! -d "${VARIANT_OVERLAY}" ] && \
[ ! -d "${PRIVATE_VARIANT_OVERLAY}" ] ; then
error "There is no variant overlay called '${FLAGS_variant}'"
exit 1
fi
fi
function is_primary_overlay() {
local directory=$1
[ -f "${directory}/make.conf" ] || return 1
[ -f "${directory}/toolchain.conf" ] || return 1
return 0
}
BOARD_OVERLAY="${SRC_ROOT}/overlays/overlay-${FLAGS_board}"
PRIVATE_OVERLAY_NAME="overlay-${FLAGS_board}-private"
PRIVATE_BOARD_OVERLAY="${SRC_ROOT}/private-overlays/${PRIVATE_OVERLAY_NAME}"
#
# Identify the primary board overlay or die.
#
if is_primary_overlay ${BOARD_OVERLAY}; then
PRIMARY_OVERLAY="${BOARD_OVERLAY}"
elif is_primary_overlay "${PRIVATE_BOARD_OVERLAY}"; then
PRIMARY_OVERLAY="${PRIVATE_BOARD_OVERLAY}"
fi
if [ ! -n "${PRIMARY_OVERLAY}" ]; then
error "There is no primary board overlay for ${FLAGS_board}"
exit 1
fi
#
# If only the primary overlay is needed, provide it and exit.
#
if [ "${FLAGS_primary_only}" -eq "${FLAGS_TRUE}" ]; then
echo "${PRIMARY_OVERLAY}"
exit 0
fi
#
# Check for chromeos-overlay.
#
@ -50,50 +99,38 @@ if [ -d "${CHROMEOS_OVERLAY}" ]; then
fi
#
# Check if there are any board overlays. There should be at least a top
# level board specific overlay.
# Check if there are any public board overlays.
#
PRIMARY_BOARD_OVERLAY="${SRC_ROOT}/overlays/overlay-${FLAGS_board}"
if [ -d "${PRIMARY_BOARD_OVERLAY}" ]; then
echo "${PRIMARY_BOARD_OVERLAY}"
if [ -d "${BOARD_OVERLAY}" ]; then
echo "${BOARD_OVERLAY}"
#
# Add the public variant overlay
# Add the public variant overlay if it exists.
#
if [ -n "$FLAGS_variant" ] ; then
VARIANT_NAME="overlay-variant-${FLAGS_board}-${FLAGS_variant}"
VARIANT_OVERLAY="${SRC_ROOT}/overlays/${VARIANT_NAME}"
if [ ! -d "$VARIANT_OVERLAY" ] ; then
error "Can't find variant overlay directory $VARIANT_OVERLAY"
exit 1
if [ -d "$VARIANT_OVERLAY" ] ; then
echo "${VARIANT_OVERLAY}"
fi
fi
fi
echo "${VARIANT_OVERLAY}"
#
# Add any private overlays and private variant overlays for this board.
#
if [ -d "${SRC_ROOT}/private-overlays" ] ; then
OVERLAY_NAME="overlay-${FLAGS_board}-private"
PRIVATE_OVERLAY="${SRC_ROOT}/private-overlays/${OVERLAY_NAME}"
if [ -d "${PRIVATE_OVERLAY}" ] ; then
echo "${PRIVATE_OVERLAY}"
fi
#
# Add any private overlays and variant overlays for this board.
# Add the private variant overlay if it exists.
#
if [ -d "${SRC_ROOT}/private-overlays" ] ; then
OVERLAY_NAME="overlay-${FLAGS_board}-private"
PRIVATE_OVERLAY="${SRC_ROOT}/private-overlays/${OVERLAY_NAME}"
if [ -d "${PRIVATE_OVERLAY}" ] ; then
echo "${PRIVATE_OVERLAY}"
fi
#
# Add the public and private variant overlays
#
if [ -n "$FLAGS_variant" ] ; then
VARIANT_NAME="overlay-variant-${FLAGS_board}-${FLAGS_variant}-private"
PRIVATE_VARIANT_OVERLAY="${SRC_ROOT}/private-overlays/${VARIANT_NAME}"
if [ -d "${PRIVATE_VARIANT_OVERLAY}" ] ; then
echo "${PRIVATE_VARIANT_OVERLAY}"
fi
if [ -n "$FLAGS_variant" ] ; then
if [ -d "${PRIVATE_VARIANT_OVERLAY}" ] ; then
echo "${PRIVATE_VARIANT_OVERLAY}"
fi
fi
fi

View File

@ -70,14 +70,14 @@ class ParallelTestRunner(object):
args = [ os.path.join(os.path.dirname(__file__), 'cros_run_vm_test'),
'--snapshot', # The image is shared so don't modify it.
'--no_graphics',
'--ssh_port=%d' % ssh_port,
'--test_case=%s' % test ]
'--ssh_port=%d' % ssh_port ]
if self._board: args.append('--board=%s' % self._board)
if self._image_path: args.append('--image_path=%s' % self._image_path)
if self._results_dir_root:
args.append('--results_dir_root=%s/%s.%d' %
(self._results_dir_root, test, ssh_port))
if self._use_emerged: args.append('--use_emerged')
args.append(test)
Info('Running %r...' % args)
output = None
if self._order_output:

View File

@ -77,7 +77,7 @@ if [ -n "${FLAGS_verify_chrome_version}" ]; then
--remote=127.0.0.1 \
--ssh_port=${FLAGS_ssh_port})
[[ ${chrome_version_on_vm} == ${FLAGS_verify_chrome_version} ]] || \
die "Chrome version mismatch. VM reported ${chrome_version_on_vm}"
warn "CHROME_VERSION is no longer set.This check will be removed"
else
warn "${FLAGS_verify_chrome_version} is not a valid Chrome version"
fi

View File

@ -9,6 +9,7 @@
. "$(dirname $0)/../common.sh"
. "$(dirname $0)/../lib/cros_vm_lib.sh"
DEFINE_string payload "" "Full name of the payload to update with."
DEFINE_string src_image "" \
"Create a delta update by passing in the image on the remote machine."
DEFINE_string stateful_update_flag "" "Flags to pass to stateful update." s
@ -21,7 +22,7 @@ set -e
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
[ -n "${FLAGS_update_image_path}" ] || \
[ -n "${FLAGS_update_image_path}" ] || [ -n "${FLAGS_payload}" ] || \
die "You must specify a path to an image to use as an update."
[ -n "${FLAGS_vm_image_path}" ] || \
die "You must specify a path to a vm image."
@ -29,6 +30,14 @@ eval set -- "${FLAGS_ARGV}"
trap stop_kvm EXIT
start_kvm "${FLAGS_vm_image_path}"
if [ -n "${FLAGS_update_image_path}" ]; then
IMAGE_ARGS="--image=$(readlink -f ${FLAGS_update_image_path})"
fi
if [ -n "${FLAGS_payload}" ]; then
IMAGE_ARGS="--payload="${FLAGS_payload}""
fi
$(dirname $0)/../image_to_live.sh \
--remote=127.0.0.1 \
--ssh_port=${FLAGS_ssh_port} \
@ -36,5 +45,5 @@ $(dirname $0)/../image_to_live.sh \
--src_image="${FLAGS_src_image}" \
--verify \
--for_vm \
--image=$(readlink -f ${FLAGS_update_image_path})
${IMAGE_ARGS}

View File

@ -10,7 +10,7 @@
. "$(dirname $0)/../common.sh"
# Script must be run inside the chroot.
assert_inside_chroot
restart_in_chroot_if_needed "$@"
get_default_board
@ -58,12 +58,18 @@ if ! pkgfile=$("${EQUERYCMD}" which "${workon_name}" 2> /dev/null); then
fi
if [ "${FLAGS_scrub}" = "${FLAGS_TRUE}" ]; then
eval $(${EBUILDCMD} $(${EQUERYCMD} which ${workon_name}) info)
srcdir=$(readlink -m ${CROS_WORKON_SRCDIR})
trunkdir=$(readlink -m ${CHROOT_TRUNK_DIR})
project_path=${srcdir#${trunkdir}/}
if ! (cd "${GCLIENT_ROOT}/${project_path}" && git clean -xf); then
die "Could not scrub source directory"
warn "--scrub will destroy ALL FILES unknown to git!"
read -p "Are you sure you want to do this? [y|N]" resp
if egrep -qi "^y(es)?$" <(echo -n "${resp}"); then
eval $(${EBUILDCMD} $(${EQUERYCMD} which ${workon_name}) info)
srcdir=$(readlink -m ${CROS_WORKON_SRCDIR})
trunkdir=$(readlink -m ${CHROOT_TRUNK_DIR})
project_path=${srcdir#${trunkdir}/}
if ! (cd "${GCLIENT_ROOT}/${project_path}" && git clean -xf); then
die "Could not scrub source directory"
fi
else
info "Not scrubbing; exiting gracefully"
fi
exit 0
fi
@ -79,7 +85,8 @@ if [ "${FLAGS_test}" = "${FLAGS_TRUE}" ]; then
emerge_features="test"
fi
if [ "${FLAGS_install}" = "${FLAGS_TRUE}" ]; then
FEATURES="${emerge_features}" "${EMERGECMD}" "${1}"
SANDBOX_WRITE=~/trunk CROS_WORKON_INPLACE=1 \
FEATURES="${emerge_features} ${FEATURES}" "${EMERGECMD}" "${1}"
exit $?
fi

View File

@ -167,7 +167,11 @@ def GetLatestZipUrl(board, channel, latest_url_base, zip_server_base):
Warning(('Could not use latest link provided, defaulting to parsing'
' latest from zip url base.'))
return GetNewestLinkFromZipBase(board, channel, zip_server_base)
try:
return GetNewestLinkFromZipBase(board, channel, zip_server_base)
except:
Warning('Failed to get url from standard zip base. Trying rc.')
return GetNewestLinkFromZipBase(board + '-rc', channel, zip_server_base)
def GrabZipAndExtractImage(zip_url, download_folder, image_name) :

View File

@ -1,32 +0,0 @@
#!/bin/bash
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script makes autotest client tests inside a chroot environment. The idea
# is to compile any platform-dependent autotest client tests in the build
# environment, since client systems under test lack the proper toolchain.
#
# The user can later run autotest against an ssh enabled test client system, or
# install the compiled client tests directly onto the rootfs image.
. "$(dirname "$0")/common.sh"
get_default_board
DEFINE_string board "$DEFAULT_BOARD" \
"The board for which you are building autotest"
FLAGS "$@" || exit 1
if [[ -n "${CROS_WORKON_SRCROOT}" ]]; then
if [[ -z "${FLAGS_board}" ]]; then
setup_board_warning
exit 1
fi
emerge-${FLAGS_board} autotest-all
else
./autotest --noprompt --build=all --board="${FLAGS_board}" $@
fi

View File

@ -26,7 +26,7 @@ export CHROMEOS_VERSION_MINOR=9
# Increment by 2 in trunk after making a release branch.
# Does not reset on a major/minor change (always increases).
# (Trunk is always odd; branches are always even).
export CHROMEOS_VERSION_BRANCH=129
export CHROMEOS_VERSION_BRANCH=131
# Patch number.
# Increment by 1 each release on a branch.

309
chromite/lib/binpkg.py Normal file
View File

@ -0,0 +1,309 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Adapted from portage/getbinpkg.py -- Portage binary-package helper functions
# Copyright 2003-2004 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import operator
import os
import tempfile
import time
import urllib
import urllib2
class PackageIndex(object):
  """A parser for the Portage Packages index file.

  The Portage Packages index file serves to keep track of what packages are
  included in a tree. It contains the following sections:
    1) The header. The header tracks general key/value pairs that don't apply
       to any specific package. E.g., it tracks the base URL of the packages
       file, and the number of packages included in the file. The header is
       terminated by a blank line.
    2) The body. The body is a list of packages. Each package contains a list
       of key/value pairs. Packages are either terminated by a blank line or
       by the end of the file. Every package has a CPV entry, which serves as
       a unique identifier for the package.
  """

  def __init__(self):
    """Constructor."""

    # The header tracks general key/value pairs that don't apply to any
    # specific package. E.g., it tracks the base URL of the packages.
    self.header = {}

    # A list of packages (stored as a list of dictionaries).
    self.packages = []

    # Whether or not the PackageIndex has been modified since the last time it
    # was written.
    self.modified = False

  def _PopulateDuplicateDB(self, db):
    """Populate db with SHA1 -> URL mapping for packages.

    Args:
      db: Dictionary to populate with SHA1 -> URL mapping for packages.
    """
    uri = self.header['URI']
    for pkg in self.packages:
      cpv, sha1 = pkg['CPV'], pkg.get('SHA1')
      if sha1:
        # PATH is optional in an entry; fall back to the conventional
        # <CPV>.tbz2 location relative to the base URI.
        path = pkg.get('PATH', urllib.quote(cpv + '.tbz2'))
        db[sha1] = '%s/%s' % (uri.rstrip('/'), path)

  def _ReadPkgIndex(self, pkgfile):
    """Read a list of key/value pairs from the Packages file into a dictionary.

    Both header entries and package entries are lists of key/value pairs, so
    they can both be read by this function. Entries can be terminated by empty
    lines or by the end of the file.

    This function will read lines from the specified file until it encounters
    a blank line or the end of the file.

    Keys and values in the Packages file are separated by a colon and a space.
    Keys may contain capital letters, numbers, and underscores, but may not
    contain colons. Values may contain any character except a newline. In
    particular, it is normal for values to contain colons.

    Lines that have content, and do not contain a valid key/value pair, are
    ignored. This is for compatibility with the Portage package parser, and
    to allow for future extensions to the Packages file format.

    All entries must contain at least one key/value pair. If the end of the
    file is reached, an empty dictionary is returned.

    Args:
      pkgfile: A python file object.

    Returns:
      The dictionary of key-value pairs that was read from the file.
    """
    d = {}
    for line in pkgfile:
      line = line.rstrip('\n')
      if not line:
        # A blank line terminates an entry; an entry must not be empty.
        assert d, 'Packages entry must contain at least one key/value pair'
        break
      line = line.split(': ', 1)
      if len(line) == 2:
        k, v = line
        d[k] = v
    return d

  def _WritePkgIndex(self, pkgfile, entry):
    """Write header entry or package entry to packages file.

    The keys and values will be separated by a colon and a space. The entry
    will be terminated by a blank line.

    Args:
      pkgfile: A python file object.
      entry: A dictionary of the key/value pairs to write.
    """
    # Keys are emitted in sorted order; pairs with empty values are skipped.
    lines = ['%s: %s' % (k, v) for k, v in sorted(entry.items()) if v]
    pkgfile.write('%s\n\n' % '\n'.join(lines))

  def _ReadHeader(self, pkgfile):
    """Read header of packages file.

    Args:
      pkgfile: A python file object.
    """
    assert not self.header, 'Should only read header once.'
    self.header = self._ReadPkgIndex(pkgfile)

  def _ReadBody(self, pkgfile):
    """Read body of packages file.

    Before calling this function, you must first read the header (using
    _ReadHeader).

    Args:
      pkgfile: A python file object.
    """
    assert self.header, 'Should read header first.'
    assert not self.packages, 'Should only read body once.'

    # Read all of the sections in the body by looping until we reach the end
    # of the file.
    while True:
      d = self._ReadPkgIndex(pkgfile)
      if not d:
        break
      # Only entries with a CPV key are packages; anything else is ignored,
      # for forward compatibility with extensions to the file format.
      if 'CPV' in d:
        self.packages.append(d)

  def Read(self, pkgfile):
    """Read the entire packages file.

    Args:
      pkgfile: A python file object.
    """
    self._ReadHeader(pkgfile)
    self._ReadBody(pkgfile)

  def RemoveFilteredPackages(self, filter_fn):
    """Remove packages which match filter_fn.

    Sets the 'modified' flag if any package was actually removed.

    Args:
      filter_fn: A function which operates on packages. If it returns True,
        the package should be removed.
    """
    filtered = [p for p in self.packages if not filter_fn(p)]
    if filtered != self.packages:
      self.modified = True
      self.packages = filtered

  def ResolveDuplicateUploads(self, pkgindexes):
    """Point packages at files that have already been uploaded.

    For each package in our index, check if there is an existing package that
    has already been uploaded to the same base URI. If so, point that package
    at the existing file, so that we don't have to upload the file.

    Args:
      pkgindexes: A list of PackageIndex objects containing info about packages
        that have already been uploaded.

    Returns:
      A list of the packages that still need to be uploaded.
    """
    db = {}
    for pkgindex in pkgindexes:
      pkgindex._PopulateDuplicateDB(db)
    uploads = []
    base_uri = self.header['URI']
    for pkg in self.packages:
      sha1 = pkg.get('SHA1')
      uri = db.get(sha1)
      # Reuse the already-uploaded copy only when it lives under our own base
      # URI; otherwise the package still needs to be uploaded.
      if sha1 and uri and uri.startswith(base_uri):
        pkg['PATH'] = uri[len(base_uri):].lstrip('/')
      else:
        uploads.append(pkg)
    return uploads

  def SetUploadLocation(self, base_uri, path_prefix):
    """Set upload location to base_uri + path_prefix.

    Args:
      base_uri: Base URI for all packages in the file. We set
        self.header['URI'] to this value, so all packages must live under
        this directory.
      path_prefix: Path prefix to use for all current packages in the file.
        This will be added to the beginning of the path for every package.
    """
    self.header['URI'] = base_uri
    for pkg in self.packages:
      path = urllib.quote(pkg['CPV'] + '.tbz2')
      pkg['PATH'] = '%s/%s' % (path_prefix.rstrip('/'), path)

  def Write(self, pkgfile):
    """Write a packages file to disk.

    If 'modified' flag is set, the TIMESTAMP and PACKAGES fields in the header
    will be updated before writing to disk.

    Args:
      pkgfile: A python file object.
    """
    if self.modified:
      # Refresh the bookkeeping fields before serializing. (long() is the
      # Python 2 integer type used here.)
      self.header['TIMESTAMP'] = str(long(time.time()))
      self.header['PACKAGES'] = str(len(self.packages))
      self.modified = False
    self._WritePkgIndex(pkgfile, self.header)
    # Packages are written in sorted CPV order for deterministic output.
    for metadata in sorted(self.packages, key=operator.itemgetter('CPV')):
      self._WritePkgIndex(pkgfile, metadata)

  def WriteToNamedTemporaryFile(self):
    """Write this package index to a temporary file.

    Returns:
      A temporary file containing the serialized index. The file is flushed
      and rewound to the beginning, ready for reading.
    """
    f = tempfile.NamedTemporaryFile()
    self.Write(f)
    f.flush()
    f.seek(0)
    return f
def _RetryUrlOpen(url, tries=3):
  """Open the specified url, retrying if we run into temporary errors.

  We retry for both network errors and 5xx Server Errors. We do not retry
  for HTTP errors with a non-5xx code.

  Args:
    url: The specified url.
    tries: The number of times to try.

  Returns:
    The result of urllib2.urlopen(url).

  Raises:
    urllib2.HTTPError: For non-5xx HTTP errors, or 5xx errors that persist
      after all retries.
    urllib2.URLError: For network errors that persist after all retries.
  """
  for i in range(tries):
    try:
      return urllib2.urlopen(url)
    except urllib2.HTTPError as e:
      # 5xx errors are presumed transient and retried; anything below 500
      # (e.g. 404) is permanent, so re-raise immediately.
      if i + 1 >= tries or e.code < 500:
        raise
      else:
        print 'Cannot GET %s: %s' % (url, str(e))
    except urllib2.URLError as e:
      # Network-level failures (DNS, connection refused, ...) are retried
      # until we run out of attempts.
      if i + 1 >= tries:
        raise
      else:
        print 'Cannot GET %s: %s' % (url, str(e))
    # Back off briefly before the next attempt.
    print 'Sleeping for 10 seconds before retrying...'
    time.sleep(10)
def GrabRemotePackageIndex(binhost_url):
  """Grab the latest binary package database from the specified URL.

  Args:
    binhost_url: Base URL of remote packages (PORTAGE_BINHOST).

  Returns:
    A PackageIndex object, if the Packages file can be retrieved. If the
    server returns status code 404, None is returned.
  """
  url = '%s/Packages' % binhost_url.rstrip('/')
  try:
    f = _RetryUrlOpen(url)
  except urllib2.HTTPError as e:
    # A missing Packages file just means no prebuilts have been published
    # at this URL yet; any other HTTP error is unexpected and propagated.
    if e.code == 404:
      return None
    raise
  pkgindex = PackageIndex()
  pkgindex.Read(f)
  # Record where the packages live in case the served index did not include
  # a URI header of its own.
  pkgindex.header.setdefault('URI', binhost_url)
  f.close()
  return pkgindex
def GrabLocalPackageIndex(package_path):
  """Read a local packages file from disk into a PackageIndex() object.

  Args:
    package_path: Directory containing Packages file.

  Returns:
    A PackageIndex object.

  Raises:
    IOError: If the Packages file does not exist or cannot be read.
  """
  # Use open() rather than the deprecated file() builtin, and a context
  # manager so the file is closed even if parsing raises.
  with open(os.path.join(package_path, 'Packages')) as packages_file:
    pkgindex = PackageIndex()
    pkgindex.Read(packages_file)
  return pkgindex

View File

@ -78,9 +78,8 @@ def RunCommand(cmd, print_cmd=True, error_ok=False, error_message=None,
cmd_result.cmd = cmd_str
try:
proc = subprocess.Popen(cmd_str, cwd=cwd, stdin=stdin,
stdout=stdout, stderr=stderr,
shell=shell)
proc = subprocess.Popen(cmd, cwd=cwd, stdin=stdin, stdout=stdout,
stderr=stderr, shell=shell)
(cmd_result.output, cmd_result.error) = proc.communicate(input)
if exit_code:
cmd_result.returncode = proc.returncode

View File

@ -106,18 +106,15 @@ DEFAULT_BUILD_ROOT=${CHROMEOS_BUILD_ROOT:-"$SRC_ROOT/build"}
# Set up a global ALL_BOARDS value
if [ -d $SRC_ROOT/overlays ]; then
ALL_BOARDS=$(cd $SRC_ROOT/overlays;ls -1d overlay-* 2>&-|sed 's,overlay-,,g')
fi
fi
# Strip CR
ALL_BOARDS=$(echo $ALL_BOARDS)
# Set a default BOARD
#DEFAULT_BOARD=x86-generic # or...
DEFAULT_BOARD=$(echo $ALL_BOARDS | awk '{print $NF}')
# Enable --fast by default on non-official builds
# Enable --fast by default.
DEFAULT_FAST="${FLAGS_TRUE}"
if [ "${CHROMEOS_OFFICIAL:-0}" = "1" ]; then
DEFAULT_FAST="${FLAGS_FALSE}"
fi
# Detect whether we're inside a chroot or not
if [ -e /etc/debian_chroot ]
@ -234,11 +231,15 @@ function make_pkg_common {
# Enter a chroot and restart the current script if needed
function restart_in_chroot_if_needed {
# NB: Pass in ARGV: restart_in_chroot_if_needed "$@"
if [ $INSIDE_CHROOT -ne 1 ]
then
# Equivalent to enter_chroot.sh -- <current command>
local abspath=$(readlink -f "$0")
# strip everything up to (and including) /src/scripts/ from abspath
local path_from_scripts="${abspath##*/src/scripts/}"
exec $SCRIPTS_DIR/enter_chroot.sh -- \
$CHROOT_TRUNK_DIR/src/scripts/$(basename $0) $*
"$CHROOT_TRUNK_DIR/src/scripts/$path_from_scripts" "$@"
exit
fi
}

View File

@ -77,11 +77,12 @@ function verify_not_64b_elf() {
function dump_file() {
local debug_file="$1"
local text_file="$2"
local debug_directory="$(dirname "${debug_file}")"
# 64b ELF files may be installed on the target in PERL directories
verify_not_64b_elf "${debug_file}" || return 1
verify_not_64b_elf "${text_file}" || return 1
# Dump symbols as root in order to read all files.
if ! sudo "${DUMP_SYMS}" "${debug_file}" "${text_file}" > "${SYM_FILE}" \
if ! sudo "${DUMP_SYMS}" "${text_file}" "${debug_directory}" > "${SYM_FILE}" \
2> "${ERR_FILE}"; then
# A lot of files (like kernel files) contain no debug information, do
# not consider such occurrences as errors.

View File

@ -291,31 +291,6 @@ function run_auto_update {
fi
}
function remote_reboot {
info "Rebooting."
remote_sh "touch /tmp/awaiting_reboot; reboot"
local output_file
output_file="${TMP}/output"
while true; do
REMOTE_OUT=""
# This may fail while the machine is down so generate output and a
# boolean result to distinguish between down/timeout and real failure
! remote_sh_allow_changed_host_key \
"echo 0; [ -e /tmp/awaiting_reboot ] && echo '1'; true"
echo "${REMOTE_OUT}" > "${output_file}"
if grep -q "0" "${output_file}"; then
if grep -q "1" "${output_file}"; then
info "Not yet rebooted"
else
info "Rebooted and responding"
break
fi
fi
sleep .5
done
}
function verify_image {
info "Verifying image."
"${SCRIPTS_DIR}/mount_gpt_image.sh" --from "$(dirname ${IMAGE_PATH})" \

View File

@ -284,7 +284,7 @@ fi
if [ "${FLAGS_format}" == "qemu" ]; then
echo "If you have qemu-kvm installed, you can start the image by:"
echo "sudo kvm -m ${FLAGS_mem} -vga std -pidfile /tmp/kvm.pid -net nic,model=e1000 " \
echo "sudo kvm -m ${FLAGS_mem} -vga std -pidfile /tmp/kvm.pid -net nic,model=virtio " \
"-net user,hostfwd=tcp::9222-:22 \\"
echo " -hda ${FLAGS_to}/${DEFAULT_QEMU_IMAGE}"
fi

View File

@ -93,6 +93,51 @@ def RunCommand(cmd, print_cmd=True, error_ok=False, error_message=None,
return output
def RunCommandCaptureOutput(cmd, print_cmd=True, cwd=None, input=None,
enter_chroot=False,
combine_stdout_stderr=True):
"""Runs a shell command. Differs from RunCommand, because it allows
you to run a command and capture the exit code, output, and stderr
all at the same time.
Arguments:
cmd: cmd to run. Should be input to subprocess.POpen. If a string,
converted to an array using split().
print_cmd: prints the command before running it.
cwd: the working directory to run this cmd.
input: input to pipe into this command through stdin.
enter_chroot: this command should be run from within the chroot. If set,
cwd must point to the scripts directory.
combine_stdout_stderr -- combine outputs together.
Returns:
Returns a tuple: (exit_code, stdout, stderr) (integer, string, string)
stderr is None if combine_stdout_stderr is True
"""
# Set default for variables.
stdout = subprocess.PIPE
stderr = subprocess.PIPE
stdin = None
# Modify defaults based on parameters.
if input: stdin = subprocess.PIPE
if combine_stdout_stderr: stderr = subprocess.STDOUT
if enter_chroot: cmd = ['./enter_chroot.sh', '--'] + cmd
# Print out the command before running.
if print_cmd:
Info('PROGRAM(%s) -> RunCommand: %r in dir %s' %
(GetCallerName(), cmd, cwd))
proc = subprocess.Popen(cmd, cwd=cwd, stdin=stdin,
stdout=stdout, stderr=stderr)
(output, error) = proc.communicate(input)
# Error is None if stdout, stderr are combined.
return proc.returncode, output, error
class Color(object):
"""Conditionally wraps text in ANSI color escape sequences."""
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
@ -191,4 +236,3 @@ def ReinterpretPathForChroot(path):
new_path = os.path.join('/home', os.getenv('USER'), 'trunk', relative_path)
return new_path

View File

@ -55,7 +55,7 @@ function start_kvm() {
-vga std \
-pidfile "${KVM_PID_FILE}" \
-daemonize \
-net nic,model=e1000 \
-net nic,model=virtio \
${nographics} \
${snapshot} \
-net user,hostfwd=tcp::${FLAGS_ssh_port}-:22 \

View File

@ -13,10 +13,14 @@ if [ -d "${TEST_DIR}" ]; then
KEEPDB="data_${BOARD}"
ls -d data_* 2>/dev/null | grep -v "${KEEPDB}" | xargs rm -fr
# Ensure there is DB directory in x86-agz and x86-mario.
if [ ! -d "${KEEPDB}" ] &&
[ "${BOARD}" = "x86-agz" -o "${BOARD}" = "x86-mario" ]; then
echo "No component DB directory found at: ${KEEPDB}"
if [ "${BOARD}" = "x86-agz" -o "${BOARD}" = "x86-mario" ]; then
# Ensure there is a DB directory in x86-agz or x86-mario.
if [ ! -d "${KEEPDB}" ]; then
echo "No component DB directory found at: ${KEEPDB}"
fi
# Remove the default DB since it is unnecessary.
DEFAULTDB="approved_components.default"
rm -f "${DEFAULTDB}"
fi
popd >/dev/null

View File

@ -292,15 +292,20 @@ install_recovery_kernel() {
# Replace vmlinuz.A with the recovery version
local sysroot="${FLAGS_build_root}/${FLAGS_board}"
local vmlinuz="$sysroot/boot/vmlinuz"
local esp_offset=$(partoffset "$RECOVERY_IMAGE" 12)
local esp_mnt=$(mktemp -d)
set +e
local failed=0
sudo mount -o loop,offset=$((esp_offset * 512)) "$RECOVERY_IMAGE" "$esp_mnt"
sudo cp "$vmlinuz" "$esp_mnt/syslinux/vmlinuz.A" || failed=1
sudo umount -d "$esp_mnt"
rmdir "$esp_mnt"
set -e
if [ "$ARCH" = "x86" ]; then
# There is no syslinux on ARM, so this copy only makes sense for x86.
set +e
local esp_offset=$(partoffset "$RECOVERY_IMAGE" 12)
local esp_mnt=$(mktemp -d)
sudo mount -o loop,offset=$((esp_offset * 512)) "$RECOVERY_IMAGE" "$esp_mnt"
sudo cp "$vmlinuz" "$esp_mnt/syslinux/vmlinuz.A" || failed=1
sudo umount -d "$esp_mnt"
rmdir "$esp_mnt"
set -e
fi
if [ $failed -eq 1 ]; then
echo "Failed to copy recovery kernel to ESP"
return 1

View File

@ -297,11 +297,23 @@ class DepGraphGenerator(object):
os.environ["PORTAGE_SYSROOT"] = "/build/" + self.board
os.environ["SYSROOT"] = "/build/" + self.board
scripts_dir = os.path.dirname(os.path.realpath(__file__))
toolchain_path = "%s/../overlays/overlay-%s/toolchain.conf"
# Strip the variant out of the board name to look for the toolchain. This
# is similar to what setup_board does.
board_no_variant = self.board.split('_')[0]
f = open(toolchain_path % (scripts_dir, board_no_variant))
public_toolchain_path = ("%s/../overlays/overlay-%s/toolchain.conf" %
(scripts_dir, board_no_variant))
private_toolchain_path = (
"%s/../private-overlays/overlay-%s-private/toolchain.conf" %
(scripts_dir, board_no_variant))
if os.path.isfile(public_toolchain_path):
toolchain_path = public_toolchain_path
elif os.path.isfile(private_toolchain_path):
toolchain_path = private_toolchain_path
else:
print "Not able to locate toolchain.conf in board overlays"
sys.exit(1)
f = open(toolchain_path)
os.environ["CHOST"] = f.readline().strip()
f.close()
@ -310,13 +322,6 @@ class DepGraphGenerator(object):
# will be going away soon as we migrate to CROS_WORKON_SRCROOT.
os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")
# Modify the environment to disable locking by default.
# TODO(davidjames): This option can cause problems if packages muck
# with each other during the post-install step. There are a few host
# packages that do this, so we only do this environment modification for
# board builds.
os.environ.setdefault("PORTAGE_LOCKS", "false")
# Turn off interactive delays
os.environ["EBEEP_IGNORE"] = "1"
os.environ["EPAUSE_IGNORE"] = "1"
@ -357,11 +362,17 @@ class DepGraphGenerator(object):
# TODO(davidjames): Look for a better solution.
features = os.environ.get("FEATURES", "") + " -collision-protect"
# If we're cross-compiling, updating the environment every time we install
# a package isn't necessary, and leads to race conditions when
# PORTAGE_LOCKS is false. In this case, do environment updates at the end,
# instead.
if self.board and os.environ.get("PORTAGE_LOCKS") == "false":
# If we're installing packages to the board, and we're not using the
# official flag, we can enable the following optimizations:
# 1) Don't lock during install step. This allows multiple packages to be
# installed at once. This is safe because our board packages do not
# muck with each other during the post-install step.
# 2) Don't update the environment until the end of the build. This is
# safe because board packages don't need to run during the build --
# they're cross-compiled, so our CPU architecture doesn't support them
# anyway.
if self.board and os.environ.get("CHROMEOS_OFFICIAL") != "1":
os.environ.setdefault("PORTAGE_LOCKS", "false")
features = features + " no-env-update"
os.environ["FEATURES"] = features

View File

@ -13,6 +13,8 @@ import tempfile
import time
from chromite.lib import cros_build_lib
from chromite.lib.binpkg import (GrabLocalPackageIndex, GrabRemotePackageIndex,
PackageIndex)
"""
This script is used to upload host prebuilts as well as board BINHOSTS.
@ -53,12 +55,12 @@ _REL_HOST_PATH = 'host/%(target)s/%(version)s/packages'
# relative to build path
_PRIVATE_OVERLAY_DIR = 'src/private-overlays'
_BINHOST_BASE_DIR = 'src/overlays'
#_BINHOST_BASE_URL = 'http://commondatastorage.googleapis.com/chromeos-prebuilt'
_BINHOST_BASE_URL = 'http://gsdview.appspot.com/chromeos-prebuilt'
_BINHOST_BASE_URL = 'http://commondatastorage.googleapis.com/chromeos-prebuilt'
_PREBUILT_BASE_DIR = 'src/third_party/chromiumos-overlay/chromeos/config/'
# Created in the event of new host targets becoming available
_PREBUILT_MAKE_CONF = {'amd64': os.path.join(_PREBUILT_BASE_DIR,
'make.conf.amd64-host')}
_BINHOST_CONF_DIR = 'src/third_party/chromiumos-overlay/chromeos/binhost'
class FiltersEmpty(Exception):
@ -92,6 +94,7 @@ def UpdateLocalFile(filename, value, key='PORTAGE_BINHOST'):
file_fh = open(filename)
file_lines = []
found = False
keyval_str = '%(key)s=%(value)s'
for line in file_fh:
# Strip newlines from end of line. We already add newlines below.
line = line.rstrip("\n")
@ -102,7 +105,6 @@ def UpdateLocalFile(filename, value, key='PORTAGE_BINHOST'):
continue
file_var, file_val = line.split('=')
keyval_str = '%(key)s=%(value)s'
if file_var == key:
found = True
print 'Updating %s=%s to %s="%s"' % (file_var, file_val, key, value)
@ -117,7 +119,7 @@ def UpdateLocalFile(filename, value, key='PORTAGE_BINHOST'):
file_fh.close()
# write out new file
new_file_fh = open(filename, 'w')
new_file_fh.write('\n'.join(file_lines))
new_file_fh.write('\n'.join(file_lines) + '\n')
new_file_fh.close()
@ -143,7 +145,7 @@ def RevGitPushWithRetry(retries=5):
raise GitPushFailed('Failed to push change after %s retries' % retries)
def RevGitFile(filename, value, retries=5):
def RevGitFile(filename, value, retries=5, key='PORTAGE_BINHOST'):
"""Update and push the git file.
Args:
@ -151,6 +153,8 @@ def RevGitFile(filename, value, retries=5):
value: string representing the version of the prebuilt that has been
uploaded.
retries: The number of times to retry before giving up, default: 5
key: The variable key to update in the git file.
(Default: PORTAGE_BINHOST)
"""
prebuilt_branch = 'prebuilt_branch'
old_cwd = os.getcwd()
@ -162,10 +166,10 @@ def RevGitFile(filename, value, retries=5):
'git config url.ssh://git@gitrw.chromium.org:9222.pushinsteadof '
'http://git.chromium.org/git')
cros_build_lib.RunCommand(git_ssh_config_cmd, shell=True)
description = 'Update PORTAGE_BINHOST="%s" in %s' % (value, filename)
description = 'Update %s="%s" in %s' % (key, value, filename)
print description
try:
UpdateLocalFile(filename, value)
UpdateLocalFile(filename, value, key)
cros_build_lib.RunCommand('git config push.default tracking', shell=True)
cros_build_lib.RunCommand('git commit -am "%s"' % description, shell=True)
RevGitPushWithRetry(retries)
@ -225,70 +229,14 @@ def ShouldFilterPackage(file_path):
return False
def _ShouldFilterPackageFileSection(section):
"""Return whether an section in the package file should be filtered out.
Args:
section: The section, as a list of strings.
Returns:
True if the section should be excluded.
"""
for line in section:
if line.startswith("CPV: "):
package = line.replace("CPV: ", "").rstrip()
if ShouldFilterPackage(package):
return True
else:
return False
def FilterPackagesFile(packages_filename):
"""Read a portage Packages file and filter out private packages.
The new, filtered packages file is written to a temporary file.
Args:
packages_filename: The filename of the Packages file.
Returns:
filtered_packages: A filtered Packages file, as a NamedTemporaryFile.
"""
packages_file = open(packages_filename)
filtered_packages = tempfile.NamedTemporaryFile()
section = []
for line in packages_file:
if line == "\n":
if not _ShouldFilterPackageFileSection(section):
# Looks like this section doesn't contain a private package. Write it
# out.
filtered_packages.write("".join(section))
# Start next section.
section = []
section.append(line)
else:
if not _ShouldFilterPackageFileSection(section):
filtered_packages.write("".join(section))
packages_file.close()
# Flush contents to disk.
filtered_packages.flush()
filtered_packages.seek(0)
return filtered_packages
def _RetryRun(cmd, print_cmd=True, shell=False):
def _RetryRun(cmd, print_cmd=True, shell=False, cwd=None):
"""Run the specified command, retrying if necessary.
Args:
cmd: The command to run.
print_cmd: Whether to print out the cmd.
shell: Whether to treat the command as a shell.
cwd: Working directory to run command in.
Returns:
True if the command succeeded. Otherwise, returns False.
@ -298,7 +246,8 @@ def _RetryRun(cmd, print_cmd=True, shell=False):
# cros_build_lib.
for attempt in range(_RETRIES):
try:
output = cros_build_lib.RunCommand(cmd, print_cmd=print_cmd, shell=shell)
output = cros_build_lib.RunCommand(cmd, print_cmd=print_cmd, shell=shell,
cwd=cwd)
return True
except cros_build_lib.RunCommandError:
print 'Failed to run %s' % cmd
@ -317,12 +266,6 @@ def _GsUpload(args):
Return the arg tuple of two if the upload failed
"""
(local_file, remote_file) = args
if ShouldFilterPackage(local_file):
return
if local_file.endswith("/Packages"):
filtered_packages_file = FilterPackagesFile(local_file)
local_file = filtered_packages_file.name
cmd = '%s cp -a public-read %s %s' % (_GSUTIL_BIN, local_file, remote_file)
if not _RetryRun(cmd, print_cmd=False, shell=True):
@ -356,22 +299,24 @@ def RemoteUpload(files, pool=10):
pass
def GenerateUploadDict(local_path, gs_path):
"""Build a dictionary of local remote file key pairs for gsutil to upload.
def GenerateUploadDict(base_local_path, base_remote_path, pkgs):
"""Build a dictionary of local remote file key pairs to upload.
Args:
local_path: A path to the file on the local hard drive.
gs_path: Path to upload in Google Storage.
base_local_path: The base path to the files on the local hard drive.
remote_path: The base path to the remote paths.
pkgs: The packages to upload.
Returns:
Returns a dictionary of file path/gs_dest_path pairs
Returns a dictionary of local_path/remote_path pairs
"""
files_to_sync = cros_build_lib.ListFiles(local_path)
upload_files = {}
for file_path in files_to_sync:
filename = file_path.replace(local_path, '').lstrip('/')
gs_file_path = os.path.join(gs_path, filename)
upload_files[file_path] = gs_file_path
for pkg in pkgs:
suffix = pkg['CPV'] + '.tbz2'
local_path = os.path.join(base_local_path, suffix)
assert os.path.exists(local_path)
remote_path = '%s/%s' % (base_remote_path.rstrip('/'), suffix)
upload_files[local_path] = remote_path
return upload_files
@ -405,20 +350,50 @@ def DetermineMakeConfFile(target):
return os.path.join(make_path)
def UpdateBinhostConfFile(path, key, value):
"""Update binhost config file file with key=value.
Args:
path: Filename to update.
key: Key to update.
value: New value for key.
"""
cwd = os.path.dirname(os.path.abspath(path))
filename = os.path.basename(path)
if not os.path.isdir(cwd):
os.makedirs(cwd)
if not os.path.isfile(path):
config_file = file(path, 'w')
config_file.write('FULL_BINHOST="$PORTAGE_BINHOST"\n')
config_file.close()
UpdateLocalFile(path, value, key)
cros_build_lib.RunCommand('git add %s' % filename, cwd=cwd, shell=True)
description = 'Update %s=%s in %s' % (key, value, filename)
cros_build_lib.RunCommand('git commit -m "%s"' % description, cwd=cwd,
shell=True)
def UploadPrebuilt(build_path, upload_location, version, binhost_base_url,
board=None, git_sync=False, git_sync_retries=5):
board=None, git_sync=False, git_sync_retries=5,
key='PORTAGE_BINHOST', pkg_indexes=[],
sync_binhost_conf=False):
"""Upload Host prebuilt files to Google Storage space.
Args:
build_path: The path to the root of the chroot.
upload_location: The upload location.
board: The board to upload to Google Storage, if this is None upload
board: The board to upload to Google Storage. If this is None, upload
host packages.
git_sync: If set, update make.conf of target to reference the latest
prebuilt packages genereated here.
prebuilt packages generated here.
git_sync_retries: How many times to retry pushing when updating git files.
This helps avoid failures when multiple bots are modifying the same Repo.
default: 5
key: The variable key to update in the git file. (Default: PORTAGE_BINHOST)
pkg_indexes: Old uploaded prebuilts to compare against. Instead of
uploading duplicate files, we just link to the old files.
sync_binhost_conf: If set, update binhost config file in chromiumos-overlay
for the current board or host.
"""
if not board:
@ -428,16 +403,32 @@ def UploadPrebuilt(build_path, upload_location, version, binhost_base_url,
url_suffix = _REL_HOST_PATH % {'version': version, 'target': _HOST_TARGET}
package_string = _HOST_TARGET
git_file = os.path.join(build_path, _PREBUILT_MAKE_CONF[_HOST_TARGET])
binhost_conf = os.path.join(build_path, _BINHOST_CONF_DIR, 'host',
'%s.conf' % _HOST_TARGET)
else:
board_path = os.path.join(build_path, _BOARD_PATH % {'board': board})
package_path = os.path.join(board_path, 'packages')
package_string = board
url_suffix = _REL_BOARD_PATH % {'board': board, 'version': version}
git_file = os.path.join(build_path, DetermineMakeConfFile(board))
remote_location = os.path.join(upload_location, url_suffix)
binhost_conf = os.path.join(build_path, _BINHOST_CONF_DIR, 'target',
'%s.conf' % board)
remote_location = '%s/%s' % (upload_location.rstrip('/'), url_suffix)
# Process Packages file, removing duplicates and filtered packages.
pkg_index = GrabLocalPackageIndex(package_path)
pkg_index.SetUploadLocation(binhost_base_url, url_suffix)
pkg_index.RemoveFilteredPackages(lambda pkg: ShouldFilterPackage(pkg))
uploads = pkg_index.ResolveDuplicateUploads(pkg_indexes)
# Write Packages file.
tmp_packages_file = pkg_index.WriteToNamedTemporaryFile()
if upload_location.startswith('gs://'):
upload_files = GenerateUploadDict(package_path, remote_location)
# Build list of files to upload.
upload_files = GenerateUploadDict(package_path, remote_location, uploads)
remote_file = '%s/Packages' % remote_location.rstrip('/')
upload_files[tmp_packages_file.name] = remote_file
print 'Uploading %s' % package_string
failed_uploads = RemoteUpload(upload_files)
@ -445,17 +436,29 @@ def UploadPrebuilt(build_path, upload_location, version, binhost_base_url,
error_msg = ['%s -> %s\n' % args for args in failed_uploads]
raise UploadFailed('Error uploading:\n%s' % error_msg)
else:
pkgs = ' '.join(p['CPV'] + '.tbz2' for p in uploads)
ssh_server, remote_path = remote_location.split(':', 1)
cmds = ['ssh %s mkdir -p %s' % (ssh_server, remote_path),
'rsync -av %s/ %s/' % (package_path, remote_location)]
d = { 'pkg_index': tmp_packages_file.name,
'pkgs': pkgs,
'remote_packages': '%s/Packages' % remote_location.rstrip('/'),
'remote_path': remote_path,
'remote_location': remote_location,
'ssh_server': ssh_server }
cmds = ['ssh %(ssh_server)s mkdir -p %(remote_path)s' % d,
'rsync -av --chmod=a+r %(pkg_index)s %(remote_packages)s' % d]
if pkgs:
cmds.append('rsync -Rav %(pkgs)s %(remote_location)s/' % d)
for cmd in cmds:
if not _RetryRun(cmd, shell=True):
if not _RetryRun(cmd, shell=True, cwd=package_path):
raise UploadFailed('Could not run %s' % cmd)
if git_sync:
url_value = '%s/%s/' % (binhost_base_url, url_suffix)
RevGitFile(git_file, url_value, retries=git_sync_retries)
url_value = '%s/%s/' % (binhost_base_url, url_suffix)
if git_sync:
RevGitFile(git_file, url_value, retries=git_sync_retries, key=key)
if sync_binhost_conf:
UpdateBinhostConfFile(binhost_conf, key, url_value)
def usage(parser, msg):
"""Display usage message and parser help then exit with 1."""
@ -469,6 +472,9 @@ def main():
parser.add_option('-H', '--binhost-base-url', dest='binhost_base_url',
default=_BINHOST_BASE_URL,
help='Base URL to use for binhost in make.conf updates')
parser.add_option('', '--previous-binhost-url', action='append',
default=[], dest='previous_binhost_url',
help='Previous binhost URL')
parser.add_option('-b', '--board', dest='board', default=None,
help='Board type that was built on this machine')
parser.add_option('-p', '--build-path', dest='build_path',
@ -488,6 +494,12 @@ def main():
parser.add_option('-f', '--filters', dest='filters', action='store_true',
default=False,
help='Turn on filtering of private ebuild packages')
parser.add_option('-k', '--key', dest='key',
default='PORTAGE_BINHOST',
help='Key to update in make.conf / binhost.conf')
parser.add_option('', '--sync-binhost-conf', dest='sync_binhost_conf',
default=False, action='store_true',
help='Update binhost.conf')
options, args = parser.parse_args()
# Setup boto environment for gsutil to use
@ -499,24 +511,30 @@ def main():
usage(parser, 'Error: you need to provide an upload location using -u')
if options.filters:
# TODO(davidjames): It might be nice to be able to filter private ebuilds
# from rsync uploads as well, some day. But for now it's not needed.
if not options.upload.startswith("gs://"):
usage(parser, 'Error: filtering only works with gs:// paths')
LoadPrivateFilters(options.build_path)
version = GetVersion()
if options.prepend_version:
version = '%s-%s' % (options.prepend_version, version)
pkg_indexes = []
for url in options.previous_binhost_url:
pkg_index = GrabRemotePackageIndex(url)
if pkg_index:
pkg_indexes.append(pkg_index)
if options.sync_host:
UploadPrebuilt(options.build_path, options.upload, version,
options.binhost_base_url, git_sync=options.git_sync)
options.binhost_base_url, git_sync=options.git_sync,
key=options.key, pkg_indexes=pkg_indexes,
sync_binhost_conf=options.sync_binhost_conf)
if options.board:
UploadPrebuilt(options.build_path, options.upload, version,
options.binhost_base_url, board=options.board,
git_sync=options.git_sync)
git_sync=options.git_sync, key=options.key,
pkg_indexes=pkg_indexes,
sync_binhost_conf=options.sync_binhost_conf)
if __name__ == '__main__':

View File

@ -3,13 +3,31 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import mox
import os
import prebuilt
import shutil
import tempfile
import unittest
import urllib
from chromite.lib import cros_build_lib
from chromite.lib.binpkg import PackageIndex
PUBLIC_PACKAGES = [{'CPV': 'gtk+/public1', 'SHA1': '1'},
{'CPV': 'gtk+/public2', 'SHA1': '2',
'PATH': 'gtk%2B/foo.tgz'}]
PRIVATE_PACKAGES = [{'CPV': 'private', 'SHA1': '3'}]
def SimplePackageIndex(header=True, packages=True):
pkgindex = PackageIndex()
if header:
pkgindex.header['URI'] = 'http://www.example.com'
if packages:
pkgindex.packages = copy.deepcopy(PUBLIC_PACKAGES + PRIVATE_PACKAGES)
return pkgindex
class TestUpdateFile(unittest.TestCase):
@ -137,14 +155,6 @@ class TestPrebuiltFilters(unittest.TestCase):
class TestPrebuilt(unittest.TestCase):
fake_path = '/b/cbuild/build/chroot/build/x86-dogfood/'
bin_package_mock = ['packages/x11-misc/shared-mime-info-0.70.tbz2',
'packages/x11-misc/util-macros-1.5.0.tbz2',
'packages/x11-misc/xbitmaps-1.1.0.tbz2',
'packages/x11-misc/read-edid-1.4.2.tbz2',
'packages/x11-misc/xdg-utils-1.0.2-r3.tbz2']
files_to_sync = [os.path.join(fake_path, file) for file in bin_package_mock]
def setUp(self):
self.mox = mox.Mox()
@ -153,23 +163,17 @@ class TestPrebuilt(unittest.TestCase):
self.mox.UnsetStubs()
self.mox.VerifyAll()
def _generate_dict_results(self, gs_bucket_path):
"""
Generate a dictionary result similar to GenerateUploadDict
"""
results = {}
for entry in self.files_to_sync:
results[entry] = os.path.join(
gs_bucket_path, entry.replace(self.fake_path, '').lstrip('/'))
return results
def testGenerateUploadDict(self):
base_local_path = '/b/cbuild/build/chroot/build/x86-dogfood/'
gs_bucket_path = 'gs://chromeos-prebuilt/host/version'
self.mox.StubOutWithMock(cros_build_lib, 'ListFiles')
cros_build_lib.ListFiles(self.fake_path).AndReturn(self.files_to_sync)
local_path = os.path.join(base_local_path, 'public1.tbz2')
self.mox.StubOutWithMock(prebuilt.os.path, 'exists')
prebuilt.os.path.exists(local_path).AndReturn(True)
self.mox.ReplayAll()
result = prebuilt.GenerateUploadDict(self.fake_path, gs_bucket_path)
self.assertEqual(result, self._generate_dict_results(gs_bucket_path))
pkgs = [{ 'CPV': 'public1' }]
result = prebuilt.GenerateUploadDict(base_local_path, gs_bucket_path, pkgs)
expected = { local_path: gs_bucket_path + '/public1.tbz2' }
self.assertEqual(result, expected)
def testFailonUploadFail(self):
"""Make sure we fail if one of the upload processes fail."""
@ -195,6 +199,91 @@ class TestPrebuilt(unittest.TestCase):
class TestPackagesFileFiltering(unittest.TestCase):
def testFilterPkgIndex(self):
pkgindex = SimplePackageIndex()
pkgindex.RemoveFilteredPackages(lambda pkg: pkg in PRIVATE_PACKAGES)
self.assertEqual(pkgindex.packages, PUBLIC_PACKAGES)
self.assertEqual(pkgindex.modified, True)
class TestPopulateDuplicateDB(unittest.TestCase):
def testEmptyIndex(self):
pkgindex = SimplePackageIndex(packages=False)
db = {}
pkgindex._PopulateDuplicateDB(db)
self.assertEqual(db, {})
def testNormalIndex(self):
pkgindex = SimplePackageIndex()
db = {}
pkgindex._PopulateDuplicateDB(db)
self.assertEqual(len(db), 3)
self.assertEqual(db['1'], 'http://www.example.com/gtk%2B/public1.tbz2')
self.assertEqual(db['2'], 'http://www.example.com/gtk%2B/foo.tgz')
self.assertEqual(db['3'], 'http://www.example.com/private.tbz2')
def testMissingSHA1(self):
db = {}
pkgindex = SimplePackageIndex()
del pkgindex.packages[0]['SHA1']
pkgindex._PopulateDuplicateDB(db)
self.assertEqual(len(db), 2)
self.assertEqual(db['2'], 'http://www.example.com/gtk%2B/foo.tgz')
self.assertEqual(db['3'], 'http://www.example.com/private.tbz2')
def testFailedPopulate(self):
db = {}
pkgindex = SimplePackageIndex(header=False)
self.assertRaises(KeyError, pkgindex._PopulateDuplicateDB, db)
pkgindex = SimplePackageIndex()
del pkgindex.packages[0]['CPV']
self.assertRaises(KeyError, pkgindex._PopulateDuplicateDB, db)
class TestResolveDuplicateUploads(unittest.TestCase):
def testEmptyList(self):
pkgindex = SimplePackageIndex()
pristine = SimplePackageIndex()
uploads = pkgindex.ResolveDuplicateUploads([])
self.assertEqual(uploads, pristine.packages)
self.assertEqual(pkgindex.packages, pristine.packages)
self.assertEqual(pkgindex.modified, False)
def testEmptyIndex(self):
pkgindex = SimplePackageIndex()
pristine = SimplePackageIndex()
empty = SimplePackageIndex(packages=False)
uploads = pkgindex.ResolveDuplicateUploads([empty])
self.assertEqual(uploads, pristine.packages)
self.assertEqual(pkgindex.packages, pristine.packages)
self.assertEqual(pkgindex.modified, False)
def testDuplicates(self):
pkgindex = SimplePackageIndex()
dup_pkgindex = SimplePackageIndex()
expected_pkgindex = SimplePackageIndex()
for pkg in expected_pkgindex.packages:
pkg.setdefault('PATH', urllib.quote(pkg['CPV'] + '.tbz2'))
uploads = pkgindex.ResolveDuplicateUploads([dup_pkgindex])
self.assertEqual(pkgindex.packages, expected_pkgindex.packages)
def testMissingSHA1(self):
db = {}
pkgindex = SimplePackageIndex()
dup_pkgindex = SimplePackageIndex()
expected_pkgindex = SimplePackageIndex()
del pkgindex.packages[0]['SHA1']
del expected_pkgindex.packages[0]['SHA1']
for pkg in expected_pkgindex.packages[1:]:
pkg.setdefault('PATH', pkg['CPV'] + '.tbz2')
uploads = pkgindex.ResolveDuplicateUploads([dup_pkgindex])
self.assertEqual(pkgindex.packages, expected_pkgindex.packages)
class TestWritePackageIndex(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
@ -202,31 +291,13 @@ class TestPackagesFileFiltering(unittest.TestCase):
self.mox.UnsetStubs()
self.mox.VerifyAll()
def testFilterAllPackages(self):
self.mox.StubOutWithMock(prebuilt, 'ShouldFilterPackage')
prebuilt.ShouldFilterPackage("public1").AndReturn(False)
prebuilt.ShouldFilterPackage("private").AndReturn(True)
prebuilt.ShouldFilterPackage("public2").AndReturn(False)
full_packages_file = [
"foo: bar\n", "\n",
"CPV: public1\n", "foo: bar1\n", "\n",
"CPV: private\n", "foo: bar2\n", "\n",
"CPV: public2\n", "foo: bar3\n", "\n",
]
private_packages_file = [
"foo: bar\n", "\n",
"CPV: public1\n", "foo: bar1\n", "\n",
"CPV: public2\n", "foo: bar3\n", "\n",
]
def testSimple(self):
pkgindex = SimplePackageIndex()
self.mox.StubOutWithMock(pkgindex, 'Write')
pkgindex.Write(mox.IgnoreArg())
self.mox.ReplayAll()
temp_packages_file = tempfile.NamedTemporaryFile()
temp_packages_file.write("".join(full_packages_file))
temp_packages_file.flush()
new_packages_file = prebuilt.FilterPackagesFile(temp_packages_file.name)
new_contents = open(new_packages_file.name).read()
self.assertEqual("".join(private_packages_file), new_contents)
self.assertEqual("".join(private_packages_file), new_packages_file.read())
new_packages_file.close()
f = pkgindex.WriteToNamedTemporaryFile()
self.assertEqual(f.read(), '')
if __name__ == '__main__':

View File

@ -68,6 +68,31 @@ function learn_board() {
info "Target reports board is ${FLAGS_board}"
}
function remote_reboot {
info "Rebooting."
remote_sh "touch /tmp/awaiting_reboot; reboot"
local output_file
output_file="${TMP}/output"
while true; do
REMOTE_OUT=""
# This may fail while the machine is down so generate output and a
# boolean result to distinguish between down/timeout and real failure
! remote_sh_allow_changed_host_key \
"echo 0; [ -e /tmp/awaiting_reboot ] && echo '1'; true"
echo "${REMOTE_OUT}" > "${output_file}"
if grep -q "0" "${output_file}"; then
if grep -q "1" "${output_file}"; then
info "Not yet rebooted"
else
info "Rebooted and responding"
break
fi
fi
sleep .5
done
}
function cleanup_remote_access() {
# Call this function from the exit trap of the main script.
# Iff we started ssh-agent, be nice and clean it up.

View File

@ -48,83 +48,94 @@ function cleanup() {
function read_test_type() {
local control_file=$1
# Assume a line starts with TEST_TYPE =
local type=$(egrep -m1 \
'^[[:space:]]*TEST_TYPE[[:space:]]*=' "${control_file}")
if [[ -z "${type}" ]]; then
local test_type=$(egrep -m1 \
'^[[:space:]]*TEST_TYPE[[:space:]]*=' "${control_file}")
if [[ -z "${test_type}" ]]; then
die "Unable to find TEST_TYPE line in ${control_file}"
fi
type=$(python -c "${type}; print TEST_TYPE.lower()")
if [[ "${type}" != "client" ]] && [[ "${type}" != "server" ]]; then
die "Unknown type of test (${type}) in ${control_file}"
test_type=$(python -c "${test_type}; print TEST_TYPE.lower()")
if [[ "${test_type}" != "client" ]] && [[ "${test_type}" != "server" ]]; then
die "Unknown type of test (${test_type}) in ${control_file}"
fi
echo ${type}
echo ${test_type}
}
function create_tmp() {
# Set global TMP for remote_access.sh's sake
# and if --results_dir_root is specified,
# set TMP and create dir appropriately
if [[ ${INSIDE_CHROOT} -eq 0 ]]; then
if [[ -n "${FLAGS_results_dir_root}" ]]; then
TMP=${FLAGS_chroot}${FLAGS_results_dir_root}
mkdir -p -m 777 ${TMP}
else
TMP=$(mktemp -d ${FLAGS_chroot}/tmp/run_remote_tests.XXXX)
fi
TMP_INSIDE_CHROOT=$(echo ${TMP#${FLAGS_chroot}})
if [[ -n "${FLAGS_results_dir_root}" ]]; then
TMP=${FLAGS_results_dir_root}
mkdir -p -m 777 ${TMP}
else
if [[ -n "${FLAGS_results_dir_root}" ]]; then
TMP=${FLAGS_results_dir_root}
mkdir -p -m 777 ${TMP}
else
TMP=$(mktemp -d /tmp/run_remote_tests.XXXX)
fi
TMP_INSIDE_CHROOT=${TMP}
TMP=$(mktemp -d /tmp/run_remote_tests.XXXX)
fi
}
function prepare_build_dir() {
local autotest_dir="$1"
INSIDE_BUILD_DIR="${TMP_INSIDE_CHROOT}/build"
BUILD_DIR="${TMP}/build"
info "Copying autotest tree into ${BUILD_DIR}."
sudo mkdir -p "${BUILD_DIR}"
sudo rsync -rl --chmod=ugo=rwx "${autotest_dir}"/ "${BUILD_DIR}"
function prepare_build_env() {
info "Pilfering toolchain shell environment from Portage."
local outside_ebuild_dir="${TMP}/chromeos-base/autotest-build"
local inside_ebuild_dir="${TMP_INSIDE_CHROOT}/chromeos-base/autotest-build"
mkdir -p "${outside_ebuild_dir}"
local ebuild_dir="${TMP}/chromeos-base/autotest-build"
mkdir -p "${ebuild_dir}"
local E_only="autotest-build-9999.ebuild"
cat > "${outside_ebuild_dir}/${E_only}" <<EOF
cat > "${ebuild_dir}/${E_only}" <<EOF
inherit toolchain-funcs
SLOT="0"
EOF
local E="chromeos-base/autotest-build/${E_only}"
${ENTER_CHROOT} "ebuild-${FLAGS_board}" "${inside_ebuild_dir}/${E_only}" \
"ebuild-${FLAGS_board}" --skip-manifest "${ebuild_dir}/${E_only}" \
clean unpack 2>&1 > /dev/null
local P_tmp="${FLAGS_chroot}/build/${FLAGS_board}/tmp/portage/"
local P_tmp="/build/${FLAGS_board}/tmp/portage/"
local E_dir="${E%%/*}/${E_only%.*}"
sudo cp "${P_tmp}/${E_dir}/temp/environment" "${BUILD_DIR}"
export BUILD_ENV="${P_tmp}/${E_dir}/temp/environment"
}
function autodetect_build() {
if [ ${FLAGS_use_emerged} -eq ${FLAGS_TRUE} ]; then
info \
"As requested, using emerged autotests already installed in your sysroot."
AUTOTEST_DIR="/build/${FLAGS_board}/usr/local/autotest"
FLAGS_build=${FLAGS_FALSE}
if [ ! -d "${AUTOTEST_DIR}" ]; then
die \
"Could not find pre-installed autotest, you need to emerge-${FLAGS_board} \
autotest autotest-tests (or use --build)."
fi
info \
"As requested, using emerged autotests already installed at ${AUTOTEST_DIR}."
return
fi
if ${ENTER_CHROOT} ./cros_workon --board=${FLAGS_board} list | \
grep -q autotest; then
info \
"Detected cros_workon autotests, building your sources instead of emerged \
autotest. To use installed autotest, pass --use_emerged."
if [ ${FLAGS_build} -eq ${FLAGS_FALSE} ] &&
$(dirname $0)/cros_workon --board=${FLAGS_board} list |
grep -q autotest; then
AUTOTEST_DIR="${SRC_ROOT}/third_party/autotest/files"
FLAGS_build=${FLAGS_TRUE}
else
if [ ! -d "${AUTOTEST_DIR}" ]; then
die \
"Detected cros_workon autotest but ${AUTOTEST_DIR} does not exist. Run \
repo sync autotest."
fi
info \
"Using emerged autotests already installed in your sysroot. To build \
autotests directly from your source directory instead, pass --build."
FLAGS_build=${FLAGS_FALSE}
"Detected cros_workon autotests. Building and running your autotests from \
${AUTOTEST_DIR}. To use emerged autotest, pass --use_emerged."
return
fi
# flag use_emerged should be false once the code reaches here.
if [ ${FLAGS_build} -eq ${FLAGS_TRUE} ]; then
AUTOTEST_DIR="${SRC_ROOT}/third_party/autotest/files"
if [ ! -d "${AUTOTEST_DIR}" ]; then
die \
"Build flag was turned on but ${AUTOTEST_DIR} is not found. Run cros_workon \
start autotest and repo sync to continue."
fi
info "Build and run autotests from ${AUTOTEST_DIR}."
else
AUTOTEST_DIR="/build/${FLAGS_board}/usr/local/autotest"
if [ ! -d "${AUTOTEST_DIR}" ]; then
die \
"Autotest was not emerged. Run emerge-${FLAGS_board} autotest \
autotest-tests to continue."
fi
info "Using emerged autotests already installed at ${AUTOTEST_DIR}."
fi
}
@ -160,24 +171,7 @@ function main() {
remote_access_init
learn_board
autotest_dir="${FLAGS_chroot}/build/${FLAGS_board}/usr/local/autotest"
ENTER_CHROOT=""
if [[ ${INSIDE_CHROOT} -eq 0 ]]; then
ENTER_CHROOT="./enter_chroot.sh --chroot ${FLAGS_chroot} --"
fi
if [ ${FLAGS_build} -eq ${FLAGS_FALSE} ]; then
autodetect_build
fi
if [ ${FLAGS_build} -eq ${FLAGS_TRUE} ]; then
autotest_dir="${SRC_ROOT}/third_party/autotest/files"
else
if [ ! -d "${autotest_dir}" ]; then
die "You need to emerge autotest-tests (or use --build)"
fi
fi
autodetect_build
local control_files_to_run=""
local chrome_autotests="${CHROME_ROOT}/src/chrome/test/chromeos/autotest/files"
@ -187,7 +181,8 @@ function main() {
if [ -n "${CHROME_ROOT}" ]; then
search_path="${search_path} ${chrome_autotests}/client/site_tests"
fi
pushd ${autotest_dir} > /dev/null
pushd ${AUTOTEST_DIR} > /dev/null
for test_request in $FLAGS_ARGV; do
test_request=$(remove_quotes "${test_request}")
! finds=$(find ${search_path} -maxdepth 2 -type f \( -name control.\* -or \
@ -208,7 +203,6 @@ function main() {
control_files_to_run="${control_files_to_run} '${finds}'"
done
done
popd > /dev/null
echo ""
@ -216,29 +210,29 @@ function main() {
die "Found no control files"
fi
[ ${FLAGS_build} -eq ${FLAGS_TRUE} ] && prepare_build_dir "${autotest_dir}"
[ ${FLAGS_build} -eq ${FLAGS_TRUE} ] && prepare_build_env
info "Running the following control files:"
for CONTROL_FILE in ${control_files_to_run}; do
info " * ${CONTROL_FILE}"
for control_file in ${control_files_to_run}; do
info " * ${control_file}"
done
for control_file in ${control_files_to_run}; do
# Assume a line starts with TEST_TYPE =
control_file=$(remove_quotes "${control_file}")
local type=$(read_test_type "${autotest_dir}/${control_file}")
local test_type=$(read_test_type "${AUTOTEST_DIR}/${control_file}")
# Check if the control file is an absolute path (i.e. chrome autotests case)
if [[ ${control_file:0:1} == "/" ]]; then
type=$(read_test_type "${control_file}")
test_type=$(read_test_type "${control_file}")
fi
local option
if [[ "${type}" == "client" ]]; then
if [[ "${test_type}" == "client" ]]; then
option="-c"
else
option="-s"
fi
echo ""
info "Running ${type} test ${control_file}"
info "Running ${test_type} test ${control_file}"
local control_file_name=$(basename "${control_file}")
local short_name=$(basename $(dirname "${control_file}"))
@ -255,7 +249,7 @@ function main() {
fi
local results_dir_name="${short_name}"
local results_dir="${TMP_INSIDE_CHROOT}/${results_dir_name}"
local results_dir="${TMP}/${results_dir_name}"
rm -rf "${results_dir}"
local verbose=""
if [[ ${FLAGS_verbose} -eq $FLAGS_TRUE ]]; then
@ -271,39 +265,24 @@ function main() {
info "Running chrome autotest ${control_file}"
fi
local autoserv_test_args="${FLAGS_args}"
if [ -n "${autoserv_test_args}" ]; then
autoserv_test_args="-a \"${autoserv_test_args}\""
fi
local autoserv_args="-m ${FLAGS_remote} --ssh-port ${FLAGS_ssh_port} \
${option} ${control_file} -r ${results_dir} ${verbose}"
if [ ${FLAGS_build} -eq ${FLAGS_FALSE} ]; then
cat > "${TMP}/run_test.sh" <<EOF
cd /build/${FLAGS_board}/usr/local/autotest
sudo chmod a+w ./server/{tests,site_tests}
echo ./server/autoserv ${autoserv_args} ${autoserv_test_args}
./server/autoserv ${autoserv_args} ${autoserv_test_args}
EOF
chmod a+rx "${TMP}/run_test.sh"
${ENTER_CHROOT} ${TMP_INSIDE_CHROOT}/run_test.sh >&2
if [ -n "${FLAGS_args}" ]; then
autoserv_args="${autoserv_args} -a \"${FLAGS_args}\""
fi
sudo chmod a+w ./server/{tests,site_tests}
echo ./server/autoserv ${autoserv_args}
if [ ${FLAGS_build} -eq ${FLAGS_TRUE} ]; then
# run autoserv in subshell
(. ${BUILD_ENV} && tc-export CC CXX PKG_CONFIG &&
./server/autoserv ${autoserv_args})
else
cp "${BUILD_DIR}/environment" "${TMP}/run_test.sh"
GRAPHICS_BACKEND=${GRAPHICS_BACKEND:-OPENGL}
cat >> "${TMP}/run_test.sh" <<EOF
export GCLIENT_ROOT=/home/${USER}/trunk
export GRAPHICS_BACKEND=${GRAPHICS_BACKEND}
export SSH_AUTH_SOCK=${SSH_AUTH_SOCK} TMPDIR=/tmp SSH_AGENT_PID=${SSH_AGENT_PID}
export SYSROOT=/build/${FLAGS_board}
tc-export CC CXX PKG_CONFIG
cd ${INSIDE_BUILD_DIR}
echo ./server/autoserv ${autoserv_args} ${autoserv_test_args}
./server/autoserv ${autoserv_args} ${autoserv_test_args}
EOF
sudo cp "${TMP}/run_test.sh" "${BUILD_DIR}"
sudo chmod a+rx "${BUILD_DIR}/run_test.sh"
${ENTER_CHROOT} sudo bash -c "${INSIDE_BUILD_DIR}/run_test.sh" >&2
./server/autoserv ${autoserv_args}
fi
done
popd > /dev/null
echo ""
info "Test results:"
@ -312,4 +291,5 @@ EOF
print_time_elapsed
}
restart_in_chroot_if_needed $*
main "$@"

86
update_kernel.sh Executable file
View File

@ -0,0 +1,86 @@
#!/bin/bash
# Copyright (c) 2009-2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to update the kernel on a live running ChromiumOS instance.
# Load common constants. This should be the first executable line.
# The path to common.sh should be relative to your script's location.
. "$(dirname $0)/common.sh"
. "$(dirname $0)/remote_access.sh"
DEFINE_string board "" "Override board reported by target"
DEFINE_string partition "" "Override kernel partition reported by target"
function cleanup {
cleanup_remote_access
rm -rf "${TMP}"
}
# Ask the target what the kernel partition is
function learn_partition() {
[ -n "${FLAGS_partition}" ] && return
remote_sh cat /proc/cmdline
if echo "${REMOTE_OUT}" | grep -q "/dev/sda3"; then
FLAGS_partition="/dev/sda2"
else
FLAGS_partition="/dev/sda4"
fi
if [ -z "${FLAGS_partition}" ]; then
error "Partition required"
exit 1
fi
info "Target reports kernel partition is ${FLAGS_partition}"
}
function main() {
assert_outside_chroot
cd $(dirname "$0")
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
set -e
trap cleanup EXIT
TMP=$(mktemp -d /tmp/image_to_live.XXXX)
remote_access_init
learn_board
remote_sh uname -r -v
old_kernel="${REMOTE_OUT}"
cmd="vbutil_kernel --pack new_kern.bin \
--keyblock /usr/share/vboot/devkeys/kernel.keyblock \
--signprivate /usr/share/vboot/devkeys/kernel_data_key.vbprivk \
--version 1 \
--config ../build/images/${FLAGS_board}/latest/config.txt \
--bootloader /lib64/bootstub/bootstub.efi \
--vmlinuz /build/${FLAGS_board}/boot/vmlinuz"
./enter_chroot.sh -- "${cmd}"
learn_partition
remote_cp_to new_kern.bin /tmp
remote_sh dd if=/tmp/new_kern.bin of="${FLAGS_partition}"
remote_reboot
remote_sh uname -r -v
info "old kernel: ${old_kernel}"
info "new kernel: ${REMOTE_OUT}"
}
main $@