diff --git a/autotest_run.sh b/autotest_run.sh
deleted file mode 100755
index 17774074e2..0000000000
--- a/autotest_run.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# This script is intended as a wrapper to execute autotest tests for a given
-# board.
-
-# Load common constants. This should be the first executable line.
-# The path to common.sh should be relative to your script's location.
-. "$(dirname "$0")/common.sh"
-
-# Script must be run inside the chroot
-restart_in_chroot_if_needed $*
-get_default_board
-
-DEFINE_string board "${DEFAULT_BOARD}" \
-  "The board to run tests for."
-
-FLAGS_HELP="usage: $0 "
-FLAGS "$@" || exit 1
-eval set -- "${FLAGS_ARGV}"
-
-# Define a directory which will not be cleaned by portage automatically. So we
-# could achieve incremental build between two autoserv runs.
-BUILD_RUNTIME="/build/${FLAGS_board}/usr/local/autotest/"
-
-# Hack: set the CHROMEOS_ROOT variable by hand here
-CHROMEOS_ROOT=/home/${USER}/trunk/
-
-# Ensure the configures run by autotest pick up the right config.site
-CONFIG_SITE=/usr/share/config.site
-
-[ -z "${FLAGS_board}" ] && \
-  die "You must specify --board="
-
-function setup_ssh() {
-  eval $(ssh-agent) > /dev/null
-  # TODO(jrbarnette): This is a temporary hack, slated for removal
-  # before it was ever created. It's a bug, and you should fix it
-  # right away!
-  chmod 400 \
-    ${CHROMEOS_ROOT}/src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa
-  ssh-add \
-    ${CHROMEOS_ROOT}/src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa
-}
-
-function teardown_ssh() {
-  ssh-agent -k > /dev/null
-}
-
-src_test() {
-  # TODO: These places currently need to be writeable but shouldn't be
-  sudo chmod a+w ${BUILD_RUNTIME}/server/{tests,site_tests}
-
-  setup_ssh
-  cd "${BUILD_RUNTIME}"
-
-  local args=()
-  if [[ -n ${AUTOSERV_TEST_ARGS} ]]; then
-    args=("-a" "${AUTOSERV_TEST_ARGS}")
-  fi
-
-  local timestamp=$(date +%Y-%m-%d-%H.%M.%S)
-
-  # Do not use sudo, it'll unset all your environment
-  LOGNAME=${USER} ./server/autoserv -r /tmp/results.${timestamp} \
-    ${AUTOSERV_ARGS} "${args[@]}"
-
-  teardown_ssh
-}
-
-src_test
diff --git a/bin/cbuildbot.py b/bin/cbuildbot.py
index e723cd6439..f6258d12fc 100755
--- a/bin/cbuildbot.py
+++ b/bin/cbuildbot.py
@@ -22,6 +22,7 @@ from cros_build_lib import (Die, Info, ReinterpretPathForChroot, RunCommand,
                             Warning)
 
 _DEFAULT_RETRIES = 3
+_PACKAGE_FILE = '%(buildroot)s/src/scripts/cbuildbot_package.list'
 ARCHIVE_BASE = '/var/www/archive'
 ARCHIVE_COUNT = 10
 
@@ -44,27 +45,21 @@ def MakeDir(path, parents=False):
     raise
 
 
-def RepoSync(buildroot, rw_checkout=False, retries=_DEFAULT_RETRIES):
+def RepoSync(buildroot, retries=_DEFAULT_RETRIES):
   """Uses repo to checkout the source code.
 
   Keyword arguments:
-  rw_checkout -- Reconfigure repo after sync'ing to read-write.
   retries -- Number of retries to try before failing on the sync.
-
   """
   while retries > 0:
     try:
       # The --trace option ensures that repo shows the output from git. This
      # is needed so that the buildbot can kill us if git is not making
      # progress.
+      RunCommand(['repo', 'forall', '-c', 'git', 'config',
+                  'url.ssh://git@gitrw.chromium.org:9222.insteadof',
+                  'http://git.chromium.org/git'], cwd=buildroot)
       RunCommand(['repo', '--trace', 'sync'], cwd=buildroot)
-      if rw_checkout:
-        # Always re-run in case of new git repos or repo sync
-        # failed in a previous run because of a forced Stop Build.
-        RunCommand(['repo', 'forall', '-c', 'git', 'config',
-                    'url.ssh://git@gitrw.chromium.org:9222.pushinsteadof',
-                    'http://git.chromium.org/git'], cwd=buildroot)
-
       retries = 0
     except:
       retries -= 1
@@ -202,10 +197,26 @@ def _UprevFromRevisionList(buildroot, tracking_branch, revision_list, board,
                 '--tracking_branch=%s' % tracking_branch,
                 '--overlays=%s' % ':'.join(chroot_overlays),
                 '--packages=%s' % ':'.join(packages),
+                '--drop_file=%s' % ReinterpretPathForChroot(_PACKAGE_FILE %
+                    {'buildroot': buildroot}),
                 'commit'],
                cwd=cwd, enter_chroot=True)
 
 
+def _MarkChromeAsStable(buildroot, tracking_branch, chrome_rev):
+  """Returns the portage atom for the revved chrome ebuild - see man emerge."""
+  cwd = os.path.join(buildroot, 'src', 'scripts')
+  portage_atom_string = RunCommand(['bin/cros_mark_chrome_as_stable',
+                                    '--tracking_branch=%s' % tracking_branch,
+                                    chrome_rev], cwd=cwd, redirect_stdout=True,
+                                   enter_chroot=True).rstrip()
+  if not portage_atom_string:
+    Info('Found nothing to rev.')
+    return None
+  else:
+    return portage_atom_string.split('=')[1]
+
+
 def _UprevAllPackages(buildroot, tracking_branch, board, overlays):
   """Uprevs all packages that have been updated since last uprev."""
   cwd = os.path.join(buildroot, 'src', 'scripts')
@@ -213,7 +224,10 @@ def _UprevAllPackages(buildroot, tracking_branch, board, overlays):
   RunCommand(['./cros_mark_as_stable', '--all',
               '--board=%s' % board,
               '--overlays=%s' % ':'.join(chroot_overlays),
-              '--tracking_branch=%s' % tracking_branch, 'commit'],
+              '--tracking_branch=%s' % tracking_branch,
+              '--drop_file=%s' % ReinterpretPathForChroot(_PACKAGE_FILE %
+                  {'buildroot': buildroot}),
+              'commit'],
              cwd=cwd, enter_chroot=True)
 
 
@@ -267,7 +281,7 @@ def _PreFlightRinse(buildroot, board, tracking_branch, overlays):
   RunCommand(['sudo', 'killall', 'kvm'], error_ok=True)
 
 
-def _FullCheckout(buildroot, tracking_branch, rw_checkout=True,
+def _FullCheckout(buildroot, tracking_branch,
                   retries=_DEFAULT_RETRIES,
                   url='http://git.chromium.org/git/manifest'):
   """Performs a full checkout and clobbers any previous checkouts."""
@@ -277,13 +291,12 @@ def _FullCheckout(buildroot, tracking_branch, rw_checkout=True,
   RunCommand(['repo', 'init', '-u', url, '-b',
               '%s' % branch[-1]], cwd=buildroot, input='\n\ny\n')
-  RepoSync(buildroot, rw_checkout, retries)
+  RepoSync(buildroot, retries)
 
 
-def _IncrementalCheckout(buildroot, rw_checkout=True,
-                         retries=_DEFAULT_RETRIES):
+def _IncrementalCheckout(buildroot, retries=_DEFAULT_RETRIES):
   """Performs a checkout without clobbering previous checkout."""
-  RepoSync(buildroot, rw_checkout, retries)
+  RepoSync(buildroot, retries)
 
 
 def _MakeChroot(buildroot):
@@ -305,6 +318,13 @@ def _Build(buildroot):
   RunCommand(['./build_packages'], cwd=cwd, enter_chroot=True)
 
 
+def _BuildChrome(buildroot, board, chrome_atom_to_build):
+  """Wrapper for emerge call to build Chrome."""
+  cwd = os.path.join(buildroot, 'src', 'scripts')
+  RunCommand(['emerge-%s' % board, '=%s' % chrome_atom_to_build],
+             cwd=cwd, enter_chroot=True)
+
+
 def _EnableLocalAccount(buildroot):
   cwd = os.path.join(buildroot, 'src', 'scripts')
   # Set local account for test images.
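
Note on the RepoSync change above: the retry discipline is the usual count-down-and-swallow loop, with the insteadof rewrite now run unconditionally before every sync. A minimal standalone sketch of the same pattern, assuming a plain subprocess call in place of cros_build_lib.RunCommand:

    import subprocess

    def run_with_retries(cmd, cwd, retries=3):
        # Mirror RepoSync: stop looping on the first success; on failure,
        # burn one retry and go around again. Like the original, the final
        # failure is swallowed rather than re-raised.
        while retries > 0:
            try:
                subprocess.check_call(cmd, cwd=cwd)
                retries = 0
            except subprocess.CalledProcessError:
                retries -= 1

    run_with_retries(['repo', '--trace', 'sync'], cwd='/path/to/buildroot')
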
@@ -333,7 +353,10 @@ def _BuildVMImageForTesting(buildroot):
 
 def _RunUnitTests(buildroot):
   cwd = os.path.join(buildroot, 'src', 'scripts')
-  RunCommand(['./cros_run_unit_tests'], cwd=cwd, enter_chroot=True)
+  RunCommand(['./cros_run_unit_tests',
+              '--package_file=%s' % ReinterpretPathForChroot(_PACKAGE_FILE %
+                  {'buildroot': buildroot}),
+             ], cwd=cwd, enter_chroot=True)
 
 
 def _RunSmokeSuite(buildroot, results_dir):
@@ -386,59 +409,56 @@ def _UprevPackages(buildroot, tracking_branch, revisionfile, board, overlays):
     _UprevAllPackages(buildroot, tracking_branch, board, overlays)
 
 
-def _UprevPush(buildroot, tracking_branch, board, overlays):
+def _UprevPush(buildroot, tracking_branch, board, overlays, dryrun):
   """Pushes uprev changes to the main line."""
   cwd = os.path.join(buildroot, 'src', 'scripts')
-  RunCommand(['./cros_mark_as_stable', '--srcroot=..',
-              '--board=%s' % board,
-              '--overlays=%s' % ':'.join(overlays),
-              '--tracking_branch=%s' % tracking_branch,
-              '--push_options=--bypass-hooks -f', 'push'],
-             cwd=cwd)
+  cmd = ['./cros_mark_as_stable',
+         '--srcroot=%s' % os.path.join(buildroot, 'src'),
+         '--board=%s' % board,
+         '--overlays=%s' % ':'.join(overlays),
+         '--tracking_branch=%s' % tracking_branch
+        ]
+  if dryrun:
+    cmd.append('--dryrun')
+
+  cmd.append('push')
+  RunCommand(cmd, cwd=cwd)
 
 
-def _ArchiveTestResults(buildroot, board, archive_dir, test_results_dir):
-  """Archives the test results into the www dir for later use.
+def _ArchiveTestResults(buildroot, board, test_results_dir,
+                        gsutil, archive_dir, acl):
+  """Archives the test results into Google Storage.
 
-  Takes the results from the test_results_dir and dumps them into the archive
-  dir specified. This also archives the last qemu image.
+  Takes the results from the test_results_dir and the last qemu image and
+  uploads them to Google Storage.
 
-  board: Board to find the qemu image.
-  archive_dir: Path from ARCHIVE_BASE to store image.
-  test_results_dir: Path from buildroot/chroot to find test results. This must
-    a subdir of /tmp.
+  Arguments:
+    buildroot: Root directory where build occurs.
+    board: Board to find the qemu image.
+    test_results_dir: Path from buildroot/chroot to find test results.
+      This must be a subdir of /tmp.
+    gsutil: Location of gsutil.
+    archive_dir: Google Storage path to store the archive.
+    acl: ACL to set on archive in Google Storage.
   """
+  num_gsutil_retries = 5
   test_results_dir = test_results_dir.lstrip('/')
-  if not os.path.exists(ARCHIVE_BASE):
-    os.makedirs(ARCHIVE_BASE)
-  else:
-    dir_entries = os.listdir(ARCHIVE_BASE)
-    if len(dir_entries) >= ARCHIVE_COUNT:
-      oldest_dirs = heapq.nsmallest((len(dir_entries) - ARCHIVE_COUNT) + 1,
-          [os.path.join(ARCHIVE_BASE, filename) for filename in dir_entries],
-          key=lambda fn: os.stat(fn).st_mtime)
-      Info('Removing archive dirs %s' % oldest_dirs)
-      for oldest_dir in oldest_dirs:
-        shutil.rmtree(os.path.join(ARCHIVE_BASE, oldest_dir))
-
-  archive_target = os.path.join(ARCHIVE_BASE, str(archive_dir))
-  if os.path.exists(archive_target):
-    shutil.rmtree(archive_target)
-
   results_path = os.path.join(buildroot, 'chroot', test_results_dir)
   RunCommand(['sudo', 'chmod', '-R', '+r', results_path])
   try:
-    shutil.copytree(results_path, archive_target)
-  except:
-    Warning('Some files could not be copied')
-
-  image_name = 'chromiumos_qemu_image.bin'
-  image_path = os.path.join(buildroot, 'src', 'build', 'images', board,
-                            'latest', image_name)
-  RunCommand(['gzip', '-f', '--fast', image_path])
-  shutil.copyfile(image_path + '.gz', os.path.join(archive_target,
-                                                   image_name + '.gz'))
+    # gsutil has the ability to resume an upload when the command is retried.
+    RunCommand([gsutil, 'cp', '-R', results_path, archive_dir],
+               num_retries=num_gsutil_retries)
+    RunCommand([gsutil, 'setacl', acl, archive_dir])
+    image_name = 'chromiumos_qemu_image.bin'
+    image_path = os.path.join(buildroot, 'src', 'build', 'images', board,
+                              'latest', image_name)
+    RunCommand(['gzip', '-f', '--fast', image_path])
+    RunCommand([gsutil, 'cp', image_path + '.gz', archive_dir],
+               num_retries=num_gsutil_retries)
+  except Exception, e:
+    Warning('Could not archive test results (error=%s)' % str(e))
 
 
 def _GetConfig(config_name):
@@ -495,6 +515,10 @@ def main():
                     help='root directory where build occurs', default=".")
   parser.add_option('-n', '--buildnumber',
                     help='build number', type='int', default=0)
+  parser.add_option('--chrome_rev', default=None, type='string',
+                    dest='chrome_rev',
+                    help=('Chrome_rev of type [tot|latest_release|'
+                          'sticky_release]'))
   parser.add_option('-f', '--revisionfile',
                     help='file where new revisions are stored')
   parser.add_option('--clobber', action='store_true', dest='clobber',
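
A note on the num_retries plumbing in _ArchiveTestResults above: gsutil cp resumes a partially transferred object when re-invoked, so simply retrying the same command makes the archive step robust. A sketch of what such a retry wrapper might look like (hypothetical helper; the real cros_build_lib.RunCommand signature may differ):

    import subprocess
    import time

    def run_command(cmd, num_retries=0, delay_secs=1):
        # Re-invoke the command up to num_retries extra times. For
        # 'gsutil cp' each re-run picks up where the last upload stopped.
        for attempt in range(num_retries + 1):
            try:
                return subprocess.check_call(cmd)
            except subprocess.CalledProcessError:
                if attempt == num_retries:
                    raise
                time.sleep(delay_secs)

    run_command(['gsutil', 'cp', '-R', '/tmp/results', 'gs://bucket/archive'],
                num_retries=5)
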
@@ -503,17 +527,29 @@ def main():
   parser.add_option('--debug', action='store_true', dest='debug',
                     default=False,
                     help='Override some options to run as a developer.')
+  parser.add_option('--nosync', action='store_false', dest='sync',
+                    default=True,
+                    help="Don't sync before building.")
+  parser.add_option('--notests', action='store_false', dest='tests',
+                    default=True,
+                    help='Override values from buildconfig and run no tests.')
   parser.add_option('-t', '--tracking-branch', dest='tracking_branch',
                     default='cros/master', help='Run the buildbot on a branch')
   parser.add_option('-u', '--url', dest='url',
                     default='http://git.chromium.org/git/manifest',
                     help='Run the buildbot on internal manifest')
+  parser.add_option('-g', '--gsutil', default='', help='Location of gsutil')
+  parser.add_option('-c', '--gsutil_archive', default='',
+                    help='Datastore archive location')
+  parser.add_option('-a', '--acl', default='private',
+                    help='ACL to set on GSD archives')
 
   (options, args) = parser.parse_args()
 
   buildroot = os.path.abspath(options.buildroot)
   revisionfile = options.revisionfile
   tracking_branch = options.tracking_branch
+  chrome_atom_to_build = None
 
   if len(args) >= 1:
     buildconfig = _GetConfig(args[-1])
@@ -527,10 +563,11 @@ def main():
   try:
     _PreFlightRinse(buildroot, buildconfig['board'], tracking_branch,
                     overlays)
-    if options.clobber or not os.path.isdir(buildroot):
-      _FullCheckout(buildroot, tracking_branch, url=options.url)
-    else:
-      _IncrementalCheckout(buildroot)
+    if options.sync:
+      if options.clobber or not os.path.isdir(buildroot):
+        _FullCheckout(buildroot, tracking_branch, url=options.url)
+      else:
+        _IncrementalCheckout(buildroot)
 
     # Check that all overlays can be found.
     for path in overlays:
@@ -546,42 +583,56 @@ def main():
     if not os.path.isdir(boardpath):
       _SetupBoard(buildroot, board=buildconfig['board'])
 
-    if buildconfig['uprev']:
+    # Perform uprev. If chrome_rev is set, rev Chrome ebuilds.
+    if options.chrome_rev:
+      chrome_atom_to_build = _MarkChromeAsStable(buildroot, tracking_branch,
+                                                 options.chrome_rev)
+    elif buildconfig['uprev']:
       _UprevPackages(buildroot, tracking_branch, revisionfile,
                      buildconfig['board'], overlays)
 
     _EnableLocalAccount(buildroot)
-    _Build(buildroot)
-    if buildconfig['unittests']:
+    # Doesn't rebuild without acquiring more source.
+    if options.sync:
+      _Build(buildroot)
+
+    if chrome_atom_to_build:
+      _BuildChrome(buildroot, buildconfig['board'], chrome_atom_to_build)
+
+    if buildconfig['unittests'] and options.tests:
       _RunUnitTests(buildroot)
 
     _BuildImage(buildroot)
 
-    if buildconfig['smoke_bvt']:
+    if buildconfig['smoke_bvt'] and options.tests:
       _BuildVMImageForTesting(buildroot)
       test_results_dir = '/tmp/run_remote_tests.%s' % options.buildnumber
       try:
         _RunSmokeSuite(buildroot, test_results_dir)
       finally:
-        _ArchiveTestResults(buildroot, buildconfig['board'],
-                            archive_dir=options.buildnumber,
-                            test_results_dir=test_results_dir)
+        if not options.debug:
+          archive_full_path = os.path.join(options.gsutil_archive,
+                                           str(options.buildnumber))
+          _ArchiveTestResults(buildroot, buildconfig['board'],
+                              test_results_dir=test_results_dir,
+                              gsutil=options.gsutil,
+                              archive_dir=archive_full_path,
+                              acl=options.acl)
 
     if buildconfig['uprev']:
       # Don't push changes for developers.
-      if not options.debug:
-        if buildconfig['master']:
-          # Master bot needs to check if the other slaves completed.
-          if cbuildbot_comm.HaveSlavesCompleted(config):
-            _UprevPush(buildroot, tracking_branch, buildconfig['board'],
-                       overlays)
-          else:
-            Die('CBUILDBOT - One of the slaves has failed!!!')
-
+      if buildconfig['master']:
+        # Master bot needs to check if the other slaves completed.
+        if cbuildbot_comm.HaveSlavesCompleted(config):
+          _UprevPush(buildroot, tracking_branch, buildconfig['board'],
+                     overlays, options.debug)
         else:
-          # Publish my status to the master if its expecting it.
-          if buildconfig['important']:
-            cbuildbot_comm.PublishStatus(cbuildbot_comm.STATUS_BUILD_COMPLETE)
+          Die('CBUILDBOT - One of the slaves has failed!!!')
+
+      else:
+        # Publish my status to the master if it's expecting it.
+        if buildconfig['important'] and not options.debug:
+          cbuildbot_comm.PublishStatus(cbuildbot_comm.STATUS_BUILD_COMPLETE)
 
   except:
     # Send failure to master bot.
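
For readers unfamiliar with the _PACKAGE_FILE pattern used throughout cbuildbot.py: it is ordinary %-interpolation with a named key, deferred until a buildroot is known, after which ReinterpretPathForChroot maps the host path to its chroot-visible equivalent. A toy illustration of the interpolation half (the path value is made up):

    _PACKAGE_FILE = '%(buildroot)s/src/scripts/cbuildbot_package.list'

    # Deferred substitution: the template is a module-level constant; the
    # buildroot only exists once main() has parsed its options.
    print(_PACKAGE_FILE % {'buildroot': '/usr/local/buildroot'})
    # -> /usr/local/buildroot/src/scripts/cbuildbot_package.list
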
diff --git a/bin/cbuildbot_unittest.py b/bin/cbuildbot_unittest.py
index 979cda55d4..ba9b545b0d 100755
--- a/bin/cbuildbot_unittest.py
+++ b/bin/cbuildbot_unittest.py
@@ -112,52 +112,32 @@ class CBuildBotTest(mox.MoxTestBase):
 #    self.mox.VerifyAll()
 
   def testArchiveTestResults(self):
-    """Test if we can archive the latest results dir as well as clean up."""
-    self.mox.StubOutWithMock(os.path, 'exists')
-    self.mox.StubOutWithMock(os, 'listdir')
-    self.mox.StubOutWithMock(os, 'stat')
-    self.mox.StubOutWithMock(shutil, 'rmtree')
-    self.mox.StubOutWithMock(shutil, 'copytree')
-    self.mox.StubOutWithMock(shutil, 'copyfile')
-
-    # Create mock stats so that file2 is older than file1.
-    dir_listing = ['file1', 'file2']
-    stat1 = self.mox.CreateMock(posix.stat_result)
-    stat2 = self.mox.CreateMock(posix.stat_result)
-    stat1.st_mtime = 99999
-    stat2.st_mtime = 10000
-
+    """Test if we can archive the latest results dir to Google Storage."""
     # Set vars for call.
     buildroot = '/fake_dir'
-    test_results_dir = 'fake_results_dir'
-    archive_dir = 1234
     board = 'fake-board'
-
-    # Expected calls.
-    os.path.exists(cbuildbot.ARCHIVE_BASE).AndReturn(True)
-    os.listdir(os.path.join(cbuildbot.ARCHIVE_BASE)).AndReturn(dir_listing)
-    os.stat(os.path.join(cbuildbot.ARCHIVE_BASE, 'file1')).AndReturn(stat1)
-    os.stat(os.path.join(cbuildbot.ARCHIVE_BASE, 'file2')).AndReturn(stat2)
-    # Should remove the oldest path.
-    shutil.rmtree(os.path.join(cbuildbot.ARCHIVE_BASE, 'file2'))
+    test_results_dir = 'fake_results_dir'
+    gsutil_path = '/fake/gsutil/path'
+    archive_dir = 1234
+    acl = 'fake_acl'
+    num_retries = 5
 
     # Convenience variables to make archive easier to understand.
     path_to_results = os.path.join(buildroot, 'chroot', test_results_dir)
-    path_to_archive_dir = os.path.join(cbuildbot.ARCHIVE_BASE, str(archive_dir))
     path_to_image = os.path.join(buildroot, 'src', 'build', 'images', board,
                                  'latest', 'chromiumos_qemu_image.bin')
 
-    # Archive logic
-    os.path.exists(path_to_archive_dir).AndReturn(False)
     cbuildbot.RunCommand(['sudo', 'chmod', '-R', '+r', path_to_results])
-    shutil.copytree(path_to_results, path_to_archive_dir)
+    cbuildbot.RunCommand([gsutil_path, 'cp', '-R', path_to_results,
+                          archive_dir], num_retries=num_retries)
+    cbuildbot.RunCommand([gsutil_path, 'setacl', acl, archive_dir])
     cbuildbot.RunCommand(['gzip', '-f', '--fast', path_to_image])
-    shutil.copyfile(path_to_image + '.gz', os.path.join(
-        path_to_archive_dir, 'chromiumos_qemu_image.bin.gz'))
+    cbuildbot.RunCommand([gsutil_path, 'cp', path_to_image + '.gz',
+                          archive_dir], num_retries=num_retries)
 
     self.mox.ReplayAll()
-    cbuildbot.ARCHIVE_COUNT = 2  # Set equal to list size so we force clean up.
-    cbuildbot._ArchiveTestResults(buildroot, board, archive_dir,
-                                  test_results_dir)
+    cbuildbot._ArchiveTestResults(buildroot, board, test_results_dir,
+                                  gsutil_path, archive_dir, acl)
     self.mox.VerifyAll()
 
   # TODO(sosa): Remove once we un-comment above.
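
The rewritten test above is easier to follow with mox's record/replay/verify cycle in mind: every call made before ReplayAll() records an expectation, and VerifyAll() fails the test if the code under test did not reproduce them exactly. A minimal self-contained version of the cycle, with a trivial collaborator instead of cbuildbot:

    import mox

    class Uploader(object):
        def upload(self, path):
            raise NotImplementedError

    m = mox.Mox()
    uploader = m.CreateMock(Uploader)
    uploader.upload('/tmp/results')   # record the expected call
    m.ReplayAll()                     # switch from record to replay mode
    uploader.upload('/tmp/results')   # the code under test would do this
    m.VerifyAll()                     # raises if expectations were unmet
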
diff --git a/bin/cros_au_test_harness.py b/bin/cros_au_test_harness.py
index 7342310ce7..bedd16e094 100755
--- a/bin/cros_au_test_harness.py
+++ b/bin/cros_au_test_harness.py
@@ -274,15 +274,20 @@ class VirtualAUTest(unittest.TestCase, AUTest):
     """Runs vm smoke suite to verify image."""
     # image_to_live already verifies lsb-release matching. This is just
     # for additional steps.
-    output = RunCommand(['%s/cros_run_vm_test' % self.crosutilsbin,
-                         '--image_path=%s' % self.vm_image_path,
-                         '--snapshot',
-                         '--persist',
-                         vm_graphics_flag,
-                         '--kvm_pid=%s' % _KVM_PID_FILE,
-                         '--test_case=%s' % _VERIFY_SUITE,
-                        ], error_ok=True, enter_chroot=False,
-                        redirect_stdout=True)
+
+    commandWithArgs = ['%s/cros_run_vm_test' % self.crosutilsbin,
+                       '--image_path=%s' % self.vm_image_path,
+                       '--snapshot',
+                       '--persist',
+                       '--kvm_pid=%s' % _KVM_PID_FILE,
+                       _VERIFY_SUITE,
+                      ]
+
+    if vm_graphics_flag:
+      commandWithArgs.append(vm_graphics_flag)
+
+    output = RunCommand(commandWithArgs, error_ok=True, enter_chroot=False,
+                        redirect_stdout=True)
     return self.CommonVerifyImage(self, output, percent_required_to_pass)
 
diff --git a/bin/cros_image_to_target.py b/bin/cros_image_to_target.py
index dc6a0f1be5..85475b0458 100755
--- a/bin/cros_image_to_target.py
+++ b/bin/cros_image_to_target.py
@@ -30,10 +30,11 @@ from xml.dom import minidom
 
 # This is the default filename within the image directory to load updates from
 DEFAULT_IMAGE_NAME = 'chromiumos_image.bin'
+DEFAULT_IMAGE_NAME_TEST = 'chromiumos_test_image.bin'
 
 # The filenames we provide to clients to pull updates
 UPDATE_FILENAME = 'update.gz'
-STATEFUL_FILENAME = 'stateful.image.gz'
+STATEFUL_FILENAME = 'stateful.tgz'
 
 # How long do we wait for the server to start before launching client
 SERVER_STARTUP_WAIT = 1
@@ -46,8 +47,12 @@ class Command(object):
     self.env = env
 
   def RunPipe(self, pipeline, infile=None, outfile=None,
-              capture=False, oneline=False):
-    """Perform a command pipeline, with optional input/output filenames."""
+              capture=False, oneline=False, hide_stderr=False):
+    """
+    Perform a command pipeline, with optional input/output filenames.
+
+    hide_stderr     Don't allow output of stderr (default False)
+    """
 
     last_pipe = None
     while pipeline:
@@ -61,8 +66,10 @@ class Command(object):
         kwargs['stdout'] = subprocess.PIPE
       elif outfile:
         kwargs['stdout'] = open(outfile, 'wb')
+      if hide_stderr:
+        kwargs['stderr'] = open('/dev/null', 'wb')
 
-      self.env.Info('Running: %s' % ' '.join(cmd))
+      self.env.Debug('Running: %s' % ' '.join(cmd))
       last_pipe = subprocess.Popen(cmd, **kwargs)
 
       if capture:
@@ -139,7 +146,11 @@ class CrosEnv(object):
   REBOOT_START_WAIT = 5
   REBOOT_WAIT_TIME = 60
 
-  def __init__(self, verbose=False):
+  SILENT = 0
+  INFO = 1
+  DEBUG = 2
+
+  def __init__(self, verbose=SILENT):
     self.cros_root = os.path.dirname(os.path.abspath(sys.argv[0]))
     parent = os.path.dirname(self.cros_root)
     if os.path.exists(os.path.join(parent, 'chromeos-common.sh')):
@@ -147,6 +158,13 @@ class CrosEnv(object):
     self.cmd = Command(self)
     self.verbose = verbose
 
+    # do we have the pv progress tool? (sudo apt-get install pv)
+    self.have_pv = True
+    try:
+      self.cmd.Output('pv', '--help')
+    except OSError:
+      self.have_pv = False
+
   def Error(self, msg):
     print >> sys.stderr, 'ERROR: %s' % msg
 
@@ -156,9 +174,13 @@ class CrosEnv(object):
     sys.exit(1)
 
   def Info(self, msg):
-    if self.verbose:
+    if self.verbose >= CrosEnv.INFO:
       print 'INFO: %s' % msg
 
+  def Debug(self, msg):
+    if self.verbose >= CrosEnv.DEBUG:
+      print 'DEBUG: %s' % msg
+
   def CrosUtilsPath(self, filename):
     return os.path.join(self.cros_root, filename)
 
@@ -192,23 +214,16 @@ class CrosEnv(object):
 
     return True
 
-  def BuildStateful(self, src, dst):
+  def BuildStateful(self, src, dst_dir, dst_file):
     """Create a stateful partition update image."""
 
-    if self.GetCached(src, dst):
-      self.Info('Using cached stateful %s' % dst)
+    if self.GetCached(src, dst_file):
+      self.Info('Using cached stateful %s' % dst_file)
       return True
 
-    cgpt = self.ChrootPath('/usr/bin/cgpt')
-    offset = self.cmd.OutputOneLine(cgpt, 'show', '-b', '-i', '1', src)
-    size = self.cmd.OutputOneLine(cgpt, 'show', '-s', '-i', '1', src)
-    if None in (size, offset):
-      self.Error('Unable to use cgpt to get image geometry')
-      return False
-
-    return self.cmd.RunPipe([['dd', 'if=%s' % src, 'bs=512',
-                              'skip=%s' % offset, 'count=%s' % size],
-                             ['gzip', '-c']], outfile=dst)
+    return self.cmd.Run(self.CrosUtilsPath(
+        'cros_generate_stateful_update_payload'),
+        '--image=%s' % src, '--output=%s' % dst_dir)
 
   def GetSize(self, filename):
     return os.path.getsize(filename)
@@ -262,10 +277,12 @@ class CrosEnv(object):
     UpdateHandler.SetupUrl('/update', PingUpdateResponse())
     UpdateHandler.SetupUrl('/%s' % UPDATE_FILENAME,
                            FileUpdateResponse(update_file,
-                                              verbose=self.verbose))
+                                              verbose=self.verbose,
+                                              have_pv=self.have_pv))
     UpdateHandler.SetupUrl('/%s' % STATEFUL_FILENAME,
                            FileUpdateResponse(stateful_file,
-                                              verbose=self.verbose))
+                                              verbose=self.verbose,
+                                              have_pv=self.have_pv))
 
     self.http_server = BaseHTTPServer.HTTPServer(('', port), UpdateHandler)
 
@@ -304,6 +321,7 @@ class CrosEnv(object):
 
   def StartClient(self, port):
     """Ask the client machine to update from our server."""
+    self.Info("Starting client...")
    status = self.GetUpdateStatus()
     if status != 'UPDATE_STATUS_IDLE':
       self.Error('Client update status is not IDLE: %s' % status)
@@ -314,6 +332,8 @@ class CrosEnv(object):
     fd, update_log = tempfile.mkstemp(prefix='image-to-target-')
     self.Info('Starting update on client. Client output stored to %s' %
               update_log)
+
+    # this will make the client read the files we have set up
     self.ssh_cmd.Run('/usr/bin/update_engine_client', '--update',
                      '--omaha_url', update_url, remote_tunnel=(port, port),
                      outfile=update_log)
@@ -322,6 +342,7 @@ class CrosEnv(object):
       self.Error('Client update failed')
       return False
 
+    self.Info('Update complete - running update script on client')
     self.ssh_cmd.Copy(self.CrosUtilsPath('../platform/dev/stateful_update'),
                       '/tmp')
     if not self.ssh_cmd.Run('/tmp/stateful_update', url_base,
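
The have_pv probe added above is a common feature-detection idiom: exec the optional tool once and treat OSError (binary not on PATH) as "not installed". The same idiom in isolation, assuming plain subprocess in place of the Command helper:

    import os
    import subprocess

    def have_tool(*argv):
        # A missing binary raises OSError before the child even starts;
        # any exit status at all means the tool exists.
        try:
            with open(os.devnull, 'w') as devnull:
                subprocess.call(argv, stdout=devnull, stderr=devnull)
            return True
        except OSError:
            return False

    HAVE_PV = have_tool('pv', '--help')
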
@@ -334,7 +355,7 @@ class CrosEnv(object):
       self.Error('Client may not have successfully rebooted...')
       return False
 
-    print 'Client update completed successfully!'
+    self.Info('Client update completed successfully!')
     return True
 
 
@@ -342,7 +363,7 @@ class UpdateResponse(object):
   """Default response is the 404 error response."""
 
   def Reply(self, handler, send_content=True, post_data=None):
-    handler.send_Error(404, 'File not found')
+    handler.send_error(404, 'File not found')
     return None
 
 
@@ -350,11 +371,12 @@ class FileUpdateResponse(UpdateResponse):
   """Respond by sending the contents of a file."""
 
   def __init__(self, filename, content_type='application/octet-stream',
-               verbose=False, blocksize=16*1024):
+               verbose=False, blocksize=16*1024, have_pv=False):
     self.filename = filename
     self.content_type = content_type
     self.verbose = verbose
     self.blocksize = blocksize
+    self.have_pv = have_pv
 
   def Reply(self, handler, send_content=True, post_data=None):
     """Return file contents to the client. Optionally display progress."""
@@ -373,14 +395,11 @@ class FileUpdateResponse(UpdateResponse):
                        handler.date_time_string(filestat.st_mtime))
     handler.end_headers()
 
-    if not send_content:
-      return
-
-    if filesize <= self.blocksize:
-      handler.wfile.write(f.read())
-    else:
+    if send_content:
       sent_size = 0
       sent_percentage = None
+
+      # TODO(sjg): this should use pv also
       while True:
         buf = f.read(self.blocksize)
         if not buf:
@@ -556,7 +575,6 @@ def main(argv):
   parser.add_option('--from', dest='src', default=None,
                     help='Source image to install')
   parser.add_option('--image-name', dest='image_name',
-                    default=DEFAULT_IMAGE_NAME,
                     help='Filename within image directory to load')
   parser.add_option('--port', dest='port', default=8081, type='int',
                     help='TCP port to serve from and tunnel through')
@@ -565,11 +583,23 @@ def main(argv):
   parser.add_option('--server-only', dest='server_only', default=False,
                     action='store_true', help='Do not start client')
   parser.add_option('--verbose', dest='verbose', default=False,
+                    action='store_true', help='Display progress')
+  parser.add_option('--debug', dest='debug', default=False,
                     action='store_true', help='Display running commands')
+  parser.add_option('--test', dest='test', default=False,
+                    action='store_true', help='Select test image')
 
   (options, args) = parser.parse_args(argv)
 
-  cros_env = CrosEnv(verbose=options.verbose)
+  # we can build the test image if it doesn't exist, so remember if we want to
+  build_test_image = False
+
+  verbosity = CrosEnv.SILENT
+  if options.verbose:
+    verbosity = CrosEnv.INFO
+  if options.debug:
+    verbosity = CrosEnv.DEBUG
+  cros_env = CrosEnv(verbose=verbosity)
 
   if not options.board:
     options.board = cros_env.GetDefaultBoard()
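
Context for the FileUpdateResponse change above: the response now always streams in blocksize chunks so it can report whole-percent progress, instead of special-casing small files. The core of that loop, reduced to a file-to-file copy with print standing in for the real progress reporting:

    def stream(src, dst, total_size, blocksize=16 * 1024):
        sent_size = 0
        sent_percentage = None
        while True:
            buf = src.read(blocksize)
            if not buf:
                break
            dst.write(buf)
            sent_size += len(buf)
            percentage = sent_size * 100 // total_size
            if percentage != sent_percentage:   # report whole-percent steps
                sent_percentage = percentage
                print('sent %d%%' % sent_percentage)
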
@@ -584,17 +614,47 @@
   if not os.path.exists(options.src):
     parser.error('Path %s does not exist' % options.src)
 
+  if not options.image_name:
+    # auto-select the correct image
+    if options.test:
+      options.image_name = DEFAULT_IMAGE_NAME_TEST
+
+      # we will build the test image if not found
+      build_test_image = True
+    else:
+      options.image_name = DEFAULT_IMAGE_NAME
+
   if os.path.isdir(options.src):
     image_directory = options.src
     image_file = os.path.join(options.src, options.image_name)
 
     if not os.path.exists(image_file):
+      if build_test_image:
+        # we want a test image but it doesn't exist
+        # try to build it if we can
+        cros_env.Info('Creating test image')
+        test_output = cros_env.cmd.Output(
+            cros_env.CrosUtilsPath('enter_chroot.sh'),
+            '--', './mod_image_for_test.sh',
+            '--board=%s' % options.board, '-y')
+        if not os.path.exists(image_file):
+          print test_output
+          cros_env.Fatal('Failed to create test image - please run '
+                         './mod_image_for_test.sh manually inside the chroot')
       parser.error('Image file %s does not exist' % image_file)
   else:
     image_file = options.src
     image_directory = os.path.dirname(options.src)
 
+  update_file = os.path.join(image_directory, UPDATE_FILENAME)
+  stateful_file = os.path.join(image_directory, STATEFUL_FILENAME)
+
+  cros_env.Debug("Image file %s" % image_file)
+  cros_env.Debug("Update file %s" % update_file)
+  cros_env.Debug("Stateful file %s" % stateful_file)
+
   if options.remote:
+    cros_env.Info('Contacting client %s' % options.remote)
     cros_env.SetRemote(options.remote)
     rel = cros_env.GetRemoteRelease()
     if not rel:
@@ -610,11 +670,8 @@
     parser.error('Either --server-only must be specified or '
                  '--remote= needs to be given')
 
-  update_file = os.path.join(image_directory, UPDATE_FILENAME)
-  stateful_file = os.path.join(image_directory, STATEFUL_FILENAME)
-
   if (not cros_env.GenerateUpdatePayload(image_file, update_file) or
-      not cros_env.BuildStateful(image_file, stateful_file)):
+      not cros_env.BuildStateful(image_file, image_directory, stateful_file)):
     cros_env.Fatal()
 
   cros_env.CreateServer(options.port, update_file, stateful_file)
diff --git a/bin/cros_make_image_bootable b/bin/cros_make_image_bootable
index 3517c85ec8..d5644916b8 100755
--- a/bin/cros_make_image_bootable
+++ b/bin/cros_make_image_bootable
@@ -250,6 +250,24 @@ make_image_bootable() {
     -s "${FLAGS_statefulfs_mountpoint}"
 }
 
+verify_image_rootfs() {
+  local image=$1
+  local rootfs_offset="$(partoffset ${image} 3)"
+  local rootfs_size="$(partsize ${image} 3)"
+
+  local rootfs_tmp_file=$(mktemp)
+  trap "rm ${rootfs_tmp_file}" EXIT
+  sudo dd if="${image}" of="${rootfs_tmp_file}" bs=512 skip="${rootfs_offset}"
+
+  # This flips the read-only compatibility flag, so that
+  # e2fsck does not complain about unknown file system capabilities.
+  enable_rw_mount "${rootfs_tmp_file}"
+  info "Running e2fsck to check root file system for errors"
+  sudo e2fsck -fn "${rootfs_tmp_file}" ||
+    die "Root file system has errors, please ensure boot.desc and/or \
+command line parameters are correct"
+}
+
 # Use default of current image location if the output dir doesn't exist.
 if [ ! -d ${FLAGS_output_dir} ]; then
   warn "Output dir not found, using ${IMAGE_DIR}."
@@ -265,7 +283,8 @@ mkdir -p ${FLAGS_rootfs_mountpoint}
 mkdir -p ${FLAGS_statefulfs_mountpoint}
 mkdir -p ${FLAGS_espfs_mountpoint}
 
-make_image_bootable ${IMAGE}
+make_image_bootable "${IMAGE}"
+verify_image_rootfs "${IMAGE}"
 
 if [ ${FLAGS_cleanup_dirs} -eq ${FLAGS_TRUE} ]; then
   rmdir ${FLAGS_rootfs_mountpoint}
diff --git a/bin/cros_mark_chrome_as_stable b/bin/cros_mark_chrome_as_stable
new file mode 120000
index 0000000000..e429dbe05e
--- /dev/null
+++ b/bin/cros_mark_chrome_as_stable
@@ -0,0 +1 @@
+cros_mark_chrome_as_stable.py
\ No newline at end of file
diff --git a/bin/cros_mark_chrome_as_stable.py b/bin/cros_mark_chrome_as_stable.py
new file mode 100755
index 0000000000..7545c641a7
--- /dev/null
+++ b/bin/cros_mark_chrome_as_stable.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+
+# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module uprevs Chrome for cbuildbot.
+
+After calling, it prints out CHROME_VERSION_ATOM=(version atom string). A
+caller could then use this atom with emerge to build the newly uprevved version
+of Chrome e.g.
+
+./cros_mark_chrome_as_stable tot
+Returns chrome-base/chromeos-chrome-8.0.552.0_alpha_r1
+
+emerge-x86-generic =chrome-base/chromeos-chrome-8.0.552.0_alpha_r1
+"""
+
+import optparse
+import os
+import re
+import sys
+import urllib
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+import cros_mark_as_stable
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
+from cros_build_lib import RunCommand, Info, Warning
+
+BASE_CHROME_SVN_URL = 'http://src.chromium.org/svn'
+
+# Command for which chrome ebuild to uprev.
+TIP_OF_TRUNK, LATEST_RELEASE, STICKY = 'tot', 'latest_release', 'sticky_release'
+CHROME_REV = [TIP_OF_TRUNK, LATEST_RELEASE, STICKY]
+
+# Helper regex's for finding ebuilds.
+_CHROME_VERSION_REGEX = '\d+\.\d+\.\d+\.\d+'
+_NON_STICKY_REGEX = '%s[(_rc.*)|(_alpha.*)]+' % _CHROME_VERSION_REGEX
+
+# Dir where all the action happens.
+_CHROME_OVERLAY_DIR = ('%(srcroot)s/third_party/chromiumos-overlay'
+                       '/chromeos-base/chromeos-chrome')
+
+_GIT_COMMIT_MESSAGE = ('Marking %(chrome_rev)s for chrome ebuild with version '
+                       '%(chrome_version)s as stable.')
+
+
+def _GetSvnUrl():
+  """Returns the path to the svn url for the given chrome branch."""
+  return os.path.join(BASE_CHROME_SVN_URL, 'trunk')
+
+
+def _GetTipOfTrunkSvnRevision():
+  """Returns the current svn revision for the chrome tree."""
+  svn_url = _GetSvnUrl()
+  svn_info = RunCommand(['svn', 'info', svn_url], redirect_stdout=True)
+
+  revision_re = re.compile('^Revision:\s+(\d+).*')
+  for line in svn_info.splitlines():
+    match = revision_re.search(line)
+    if match:
+      return match.group(1)
+
+  raise Exception('Could not find revision information from %s' % svn_url)
+
+
+def _GetTipOfTrunkVersion():
+  """Returns the current Chrome version."""
+  svn_url = _GetSvnUrl()
+  chrome_version_file = urllib.urlopen(os.path.join(svn_url, 'src', 'chrome',
+                                                    'VERSION'))
+  chrome_version_info = chrome_version_file.read()
+  chrome_version_file.close()
+
+  # Sanity check.
+  if '404 Not Found' in chrome_version_info:
+    raise Exception('Url %s does not have version file.' % svn_url)
+
+  chrome_version_array = []
+
+  for line in chrome_version_info.splitlines():
+    chrome_version_array.append(line.rpartition('=')[2])
+
+  return '.'.join(chrome_version_array)
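
_GetTipOfTrunkVersion above leans on str.rpartition to keep only what follows the last '=' on each KEY=value line of Chrome's VERSION file. On the sample input used by this patch's own unit test:

    chrome_version_info = 'A=8\nB=0\nC=256\nD=0'

    # rpartition('=')[2] is the text after the last '=', i.e. the value.
    parts = [line.rpartition('=')[2]
             for line in chrome_version_info.splitlines()]
    print('.'.join(parts))  # -> 8.0.256.0
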
+ """ + buildspec_url = 'http://src.chromium.org/svn/releases' + svn_ls = RunCommand(['svn', 'ls', buildspec_url], redirect_stdout=True) + sorted_ls = RunCommand(['sort', '--version-sort'], input=svn_ls, + redirect_stdout=True) + if branch: + chrome_version_re = re.compile('^%s\.\d+.*' % branch) + else: + chrome_version_re = re.compile('^[0-9]\..*') + for chrome_version in sorted_ls.splitlines(): + if chrome_version_re.match(chrome_version): + current_version = chrome_version + + return current_version.rstrip('/') + + +def _GetStickyVersion(stable_ebuilds): + """Discovers the sticky version from the current stable_ebuilds.""" + sticky_ebuilds = [] + non_sticky_re = re.compile(_NON_STICKY_REGEX) + for ebuild in stable_ebuilds: + if not non_sticky_re.match(ebuild.version): + sticky_ebuilds.append(ebuild) + + if not sticky_ebuilds: + raise Exception('No sticky ebuilds found') + elif len(sticky_ebuilds) > 1: + Warning('More than one sticky ebuild found') + + return cros_mark_as_stable.BestEBuild(sticky_ebuilds).chrome_version + + +class ChromeEBuild(cros_mark_as_stable.EBuild): + """Thin sub-class of EBuild that adds a chrome_version field.""" + chrome_version_re = re.compile('.*chromeos-chrome-(%s|9999).*' % ( + _CHROME_VERSION_REGEX)) + chrome_version = '' + + def __init__(self, path): + cros_mark_as_stable.EBuild.__init__(self, path) + re_match = self.chrome_version_re.match(self.ebuild_path_no_revision) + if re_match: + self.chrome_version = re_match.group(1) + + def __cmp__(self, other): + """Use ebuild paths for comparison.""" + if self.ebuild_path == other.ebuild_path: + return 0 + elif self.ebuild_path > other.ebuild_path: + return 1 + else: + return (-1) + + +def FindChromeCandidates(overlay_dir): + """Return a tuple of chrome's unstable ebuild and stable ebuilds. + + Args: + overlay_dir: The path to chrome's portage overlay dir. + Returns: + Tuple [unstable_ebuild, stable_ebuilds]. + Raises: + Exception: if no unstable ebuild exists for Chrome. + """ + stable_ebuilds = [] + unstable_ebuilds = [] + for path in [ + os.path.join(overlay_dir, entry) for entry in os.listdir(overlay_dir)]: + if path.endswith('.ebuild'): + ebuild = ChromeEBuild(path) + if not ebuild.chrome_version: + Warning('Poorly formatted ebuild found at %s' % path) + else: + if not ebuild.is_stable: + unstable_ebuilds.append(ebuild) + else: + stable_ebuilds.append(ebuild) + + # Apply some sanity checks. + if not unstable_ebuilds: + raise Exception('Missing 9999 ebuild for %s' % overlay_dir) + if not stable_ebuilds: + Warning('Missing stable ebuild for %s' % overlay_dir) + + return cros_mark_as_stable.BestEBuild(unstable_ebuilds), stable_ebuilds + + +def FindChromeUprevCandidate(stable_ebuilds, chrome_rev, sticky_branch): + """Finds the Chrome uprev candidate for the given chrome_rev. + + Using the pre-flight logic, this means the stable ebuild you are uprevving + from. The difference here is that the version could be different and in + that case we want to find it to delete it. + + Args: + stable_ebuilds: A list of stable ebuilds. + chrome_rev: The chrome_rev designating which candidate to find. + sticky_branch: The the branch that is currently sticky with Major/Minor + components. For example: 9.0.553 + Returns: + Returns the EBuild, otherwise None if none found. 
+ """ + candidates = [] + if chrome_rev == TIP_OF_TRUNK: + chrome_branch_re = re.compile('%s.*_alpha.*' % _CHROME_VERSION_REGEX) + for ebuild in stable_ebuilds: + if chrome_branch_re.search(ebuild.version): + candidates.append(ebuild) + + elif chrome_rev == STICKY: + chrome_branch_re = re.compile('%s\.\d+.*_rc.*' % sticky_branch) + for ebuild in stable_ebuilds: + if chrome_branch_re.search(ebuild.version): + candidates.append(ebuild) + + else: + chrome_branch_re = re.compile('%s.*_rc.*' % _CHROME_VERSION_REGEX) + for ebuild in stable_ebuilds: + if chrome_branch_re.search(ebuild.version) and ( + not ebuild.chrome_version.startswith(sticky_branch)): + candidates.append(ebuild) + + if candidates: + return cros_mark_as_stable.BestEBuild(candidates) + else: + return None + + +def MarkChromeEBuildAsStable(stable_candidate, unstable_ebuild, chrome_rev, + chrome_version, commit, overlay_dir): + """Uprevs the chrome ebuild specified by chrome_rev. + + This is the main function that uprevs the chrome_rev from a stable candidate + to its new version. + + Args: + stable_candidate: ebuild that corresponds to the stable ebuild we are + revving from. If None, builds the a new ebuild given the version + and logic for chrome_rev type with revision set to 1. + unstable_ebuild: ebuild corresponding to the unstable ebuild for chrome. + chrome_rev: one of CHROME_REV + TIP_OF_TRUNK - Requires commit value. Revs the ebuild for the TOT + version and uses the portage suffix of _alpha. + LATEST_RELEASE - This uses the portage suffix of _rc as they are release + candidates for the next sticky version. + STICKY - Revs the sticky version. + chrome_version: The \d.\d.\d.\d version of Chrome. + commit: Used with TIP_OF_TRUNK. The svn revision of chrome. + overlay_dir: Path to the chromeos-chrome package dir. + Returns: + Full portage version atom (including rc's, etc) that was revved. + """ + base_path = os.path.join(overlay_dir, 'chromeos-chrome-%s' % chrome_version) + # Case where we have the last stable candidate with same version just rev. + if stable_candidate and stable_candidate.chrome_version == chrome_version: + new_ebuild_path = '%s-r%d.ebuild' % ( + stable_candidate.ebuild_path_no_revision, + stable_candidate.current_revision + 1) + else: + if chrome_rev == TIP_OF_TRUNK: + portage_suffix = '_alpha' + else: + portage_suffix = '_rc' + + new_ebuild_path = base_path + ('%s-r1.ebuild' % portage_suffix) + + cros_mark_as_stable.EBuildStableMarker.MarkAsStable( + unstable_ebuild.ebuild_path, new_ebuild_path, 'CROS_SVN_COMMIT', commit) + RunCommand(['git', 'add', new_ebuild_path]) + if stable_candidate: + RunCommand(['git', 'rm', stable_candidate.ebuild_path]) + + cros_mark_as_stable.EBuildStableMarker.CommitChange( + _GIT_COMMIT_MESSAGE % {'chrome_rev': chrome_rev, + 'chrome_version': chrome_version}) + + new_ebuild = ChromeEBuild(new_ebuild_path) + return '%s-%s' % (new_ebuild.package, new_ebuild.version) + + +def main(): + usage = '%s OPTIONS [%s]' % (__file__, '|'.join(CHROME_REV)) + parser = optparse.OptionParser(usage) + parser.add_option('-s', '--srcroot', default=os.path.join(os.environ['HOME'], + 'trunk', 'src'), + help='Path to the src directory') + parser.add_option('-t', '--tracking_branch', default='cros/master', + help='Branch we are tracking changes against') + (options, args) = parser.parse_args() + + if len(args) != 1 or args[0] not in CHROME_REV: + parser.error('Commit requires arg set to one of %s.' 
+
+
+def main():
+  usage = '%s OPTIONS [%s]' % (__file__, '|'.join(CHROME_REV))
+  parser = optparse.OptionParser(usage)
+  parser.add_option('-s', '--srcroot', default=os.path.join(os.environ['HOME'],
+                                                            'trunk', 'src'),
+                    help='Path to the src directory')
+  parser.add_option('-t', '--tracking_branch', default='cros/master',
+                    help='Branch we are tracking changes against')
+  (options, args) = parser.parse_args()
+
+  if len(args) != 1 or args[0] not in CHROME_REV:
+    parser.error('Commit requires arg set to one of %s.'
+                 % CHROME_REV)
+
+  overlay_dir = os.path.abspath(_CHROME_OVERLAY_DIR %
+                                {'srcroot': options.srcroot})
+  chrome_rev = args[0]
+  version_to_uprev = None
+  commit_to_use = None
+
+  (unstable_ebuild, stable_ebuilds) = FindChromeCandidates(overlay_dir)
+  sticky_version = _GetStickyVersion(stable_ebuilds)
+  sticky_branch = sticky_version.rpartition('.')[0]
+
+
+  if chrome_rev == TIP_OF_TRUNK:
+    version_to_uprev = _GetTipOfTrunkVersion()
+    commit_to_use = _GetTipOfTrunkSvnRevision()
+  elif chrome_rev == LATEST_RELEASE:
+    version_to_uprev = _GetLatestRelease()
+  else:
+    version_to_uprev = _GetLatestRelease(sticky_branch)
+
+  stable_candidate = FindChromeUprevCandidate(stable_ebuilds, chrome_rev,
+                                              sticky_branch)
+
+  # There are some cases we don't need to do anything. Check for them.
+  if stable_candidate and (version_to_uprev == stable_candidate.chrome_version
+                           and not commit_to_use):
+    Info('Found nothing to do for chrome_rev %s with version %s.' % (
+        chrome_rev, version_to_uprev))
+  else:
+    os.chdir(overlay_dir)
+    work_branch = cros_mark_as_stable.GitBranch(
+        cros_mark_as_stable.STABLE_BRANCH_NAME, options.tracking_branch)
+    work_branch.CreateBranch()
+    try:
+      chrome_version_atom = MarkChromeEBuildAsStable(
+          stable_candidate, unstable_ebuild, chrome_rev, version_to_uprev,
+          commit_to_use, overlay_dir)
+      # Explicit print to communicate to caller.
+      print 'CHROME_VERSION_ATOM=%s' % chrome_version_atom
+    except:
+      work_branch.Delete()
+      raise
+
+
+if __name__ == '__main__':
+  main()
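
The explicit CHROME_VERSION_ATOM print is the whole interface between this script and cbuildbot's _MarkChromeAsStable, which splits the line on '='. From the caller's side the handshake looks roughly like this (the output value is illustrative):

    output = 'CHROME_VERSION_ATOM=chrome-base/chromeos-chrome-8.0.552.0_alpha_r1'

    # cbuildbot takes everything after the '=' as the portage atom and
    # later runs: emerge-<board> =<atom>
    atom = output.rstrip().split('=')[1]
    print(atom)  # -> chrome-base/chromeos-chrome-8.0.552.0_alpha_r1
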
+ +"""Unit tests for cros_mark_chrome_as_stable.py.""" + +import cros_mark_chrome_as_stable +import mox +import os +import shutil +import sys +import tempfile +import unittest +import urllib + +sys.path.append(os.path.join(os.path.dirname(__file__), '..')) +import cros_mark_as_stable + +unstable_data = 'KEYWORDS=~x86 ~arm' +stable_data = 'KEYWORDS=x86 arm' + +def _TouchAndWrite(path, data=None): + """Writes data (if it exists) to the file specified by the path.""" + fh = open(path, 'w') + if data: + fh.write(data) + + fh.close() + + +class CrosMarkChromeAsStable(mox.MoxTestBase): + + def setUp(self): + """Setup vars and create mock dir.""" + mox.MoxTestBase.setUp(self) + self.tmp_overlay = tempfile.mkdtemp(prefix='chromiumos-overlay') + self.mock_chrome_dir = os.path.join(self.tmp_overlay, 'chromeos-base', + 'chromeos-chrome') + os.makedirs(self.mock_chrome_dir) + + self.unstable = os.path.join(self.mock_chrome_dir, + 'chromeos-chrome-9999.ebuild') + self.sticky_branch = '8.0.224' + self.sticky_version = '%s.503' % self.sticky_branch + self.sticky = os.path.join(self.mock_chrome_dir, + 'chromeos-chrome-%s.ebuild' % + self.sticky_version) + self.sticky_rc_version = '%s.504' % self.sticky_branch + self.sticky_rc = os.path.join(self.mock_chrome_dir, + 'chromeos-chrome-%s_rc-r1.ebuild' % + self.sticky_rc_version) + self.latest_stable_version = '8.0.300.1' + self.latest_stable = os.path.join(self.mock_chrome_dir, + 'chromeos-chrome-%s_rc-r2.ebuild' % + self.latest_stable_version) + self.tot_stable_version = '9.0.305.0' + self.tot_stable = os.path.join(self.mock_chrome_dir, + 'chromeos-chrome-%s_alpha-r1.ebuild' % + self.tot_stable_version) + + self.sticky_new_rc_version = '%s.520' % self.sticky_branch + self.sticky_new_rc = os.path.join(self.mock_chrome_dir, + 'chromeos-chrome-%s_rc-r1.ebuild' % + self.sticky_new_rc_version) + self.latest_new_version = '9.0.305.1' + self.latest_new = os.path.join(self.mock_chrome_dir, + 'chromeos-chrome-%s_rc-r1.ebuild' % + self.latest_new_version) + self.tot_new_version = '9.0.306.0' + self.tot_new = os.path.join(self.mock_chrome_dir, + 'chromeos-chrome-%s_alpha-r1.ebuild' % + self.tot_new_version) + + _TouchAndWrite(self.unstable, unstable_data) + _TouchAndWrite(self.sticky, stable_data) + _TouchAndWrite(self.sticky_rc, stable_data) + _TouchAndWrite(self.latest_stable, stable_data) + _TouchAndWrite(self.tot_stable, stable_data) + + def tearDown(self): + """Cleans up mock dir.""" + shutil.rmtree(self.tmp_overlay) + + def testFindChromeCandidates(self): + """Test creation of stable ebuilds from mock dir.""" + unstable, stable_ebuilds = cros_mark_chrome_as_stable.FindChromeCandidates( + self.mock_chrome_dir) + + self.assertEqual(unstable.ebuild_path, self.unstable) + self.assertEqual(len(stable_ebuilds), 4) + self.assertTrue(cros_mark_chrome_as_stable.ChromeEBuild(self.sticky) in + stable_ebuilds) + self.assertTrue(cros_mark_chrome_as_stable.ChromeEBuild(self.sticky_rc) in + stable_ebuilds) + self.assertTrue(cros_mark_chrome_as_stable.ChromeEBuild(self.latest_stable) + in stable_ebuilds) + self.assertTrue(cros_mark_chrome_as_stable.ChromeEBuild(self.tot_stable) in + stable_ebuilds) + + def _GetStableEBuilds(self): + """Common helper to create a list of stable ebuilds.""" + return [ + cros_mark_chrome_as_stable.ChromeEBuild(self.sticky), + cros_mark_chrome_as_stable.ChromeEBuild(self.sticky_rc), + cros_mark_chrome_as_stable.ChromeEBuild(self.latest_stable), + cros_mark_chrome_as_stable.ChromeEBuild(self.tot_stable), + ] + + def 
+  def testTOTFindChromeUprevCandidate(self):
+    """Tests if we can find tot uprev candidate from our mock dir data."""
+    stable_ebuilds = self._GetStableEBuilds()
+
+    candidate = cros_mark_chrome_as_stable.FindChromeUprevCandidate(
+        stable_ebuilds, cros_mark_chrome_as_stable.TIP_OF_TRUNK,
+        self.sticky_branch)
+
+    self.assertEqual(candidate.ebuild_path, self.tot_stable)
+
+  def testLatestFindChromeUprevCandidate(self):
+    """Tests if we can find latest uprev candidate from our mock dir data."""
+    stable_ebuilds = self._GetStableEBuilds()
+
+    candidate = cros_mark_chrome_as_stable.FindChromeUprevCandidate(
+        stable_ebuilds, cros_mark_chrome_as_stable.LATEST_RELEASE,
+        self.sticky_branch)
+
+    self.assertEqual(candidate.ebuild_path, self.latest_stable)
+
+  def testStickyFindChromeUprevCandidate(self):
+    """Tests if we can find sticky uprev candidate from our mock dir data."""
+    stable_ebuilds = self._GetStableEBuilds()
+
+    candidate = cros_mark_chrome_as_stable.FindChromeUprevCandidate(
+        stable_ebuilds, cros_mark_chrome_as_stable.STICKY,
+        self.sticky_branch)
+
+    self.assertEqual(candidate.ebuild_path, self.sticky_rc)
+
+  def testGetTipOfTrunkSvnRevision(self):
+    """Tests if we can get the latest svn revision from TOT."""
+    self.mox.StubOutWithMock(cros_mark_chrome_as_stable, 'RunCommand')
+    cros_mark_chrome_as_stable.RunCommand(
+        ['svn', 'info', cros_mark_chrome_as_stable._GetSvnUrl()],
+        redirect_stdout=True).AndReturn(
+            'Some Junk 2134\nRevision: 12345\nOtherInfo: test_data')
+    self.mox.ReplayAll()
+    revision = cros_mark_chrome_as_stable._GetTipOfTrunkSvnRevision()
+    self.mox.VerifyAll()
+    self.assertEquals(revision, '12345')
+
+  def testGetTipOfTrunkVersion(self):
+    """Tests if we get the latest version from TOT."""
+    self.mox.StubOutWithMock(urllib, 'urlopen')
+    mock_file = self.mox.CreateMock(file)
+    urllib.urlopen(os.path.join(cros_mark_chrome_as_stable._GetSvnUrl(), 'src',
+                                'chrome', 'VERSION')).AndReturn(mock_file)
+    mock_file.read().AndReturn('A=8\nB=0\nC=256\nD=0')
+    mock_file.close()
+
+    self.mox.ReplayAll()
+    version = cros_mark_chrome_as_stable._GetTipOfTrunkVersion()
+    self.mox.VerifyAll()
+    self.assertEquals(version, '8.0.256.0')
+
+  def testGetLatestRelease(self):
+    """Tests if we can find the latest release from our mock url data."""
+    test_data = '\n'.join(['7.0.224.1/',
+                           '7.0.224.2/',
+                           '8.0.365.5/',
+                           'LATEST.txt'])
+    self.mox.StubOutWithMock(cros_mark_chrome_as_stable, 'RunCommand')
+    cros_mark_chrome_as_stable.RunCommand(
+        ['svn', 'ls', 'http://src.chromium.org/svn/releases'],
+        redirect_stdout=True).AndReturn('some_data')
+    cros_mark_chrome_as_stable.RunCommand(
+        ['sort', '--version-sort'], input='some_data',
+        redirect_stdout=True).AndReturn(test_data)
+    self.mox.ReplayAll()
+    release = cros_mark_chrome_as_stable._GetLatestRelease()
+    self.mox.VerifyAll()
+    self.assertEqual('8.0.365.5', release)
+
+  def testGetLatestStickyRelease(self):
+    """Tests if we can find the latest sticky release from our mock url data."""
+    test_data = '\n'.join(['7.0.222.1/',
+                           '8.0.224.2/',
+                           '8.0.365.5/',
+                           'LATEST.txt'])
+    self.mox.StubOutWithMock(cros_mark_chrome_as_stable, 'RunCommand')
+    cros_mark_chrome_as_stable.RunCommand(
+        ['svn', 'ls', 'http://src.chromium.org/svn/releases'],
+        redirect_stdout=True).AndReturn('some_data')
+    cros_mark_chrome_as_stable.RunCommand(
+        ['sort', '--version-sort'], input='some_data',
+        redirect_stdout=True).AndReturn(test_data)
+    self.mox.ReplayAll()
+    release = cros_mark_chrome_as_stable._GetLatestRelease(self.sticky_branch)
+    self.mox.VerifyAll()
+    self.assertEqual('8.0.224.2', release)
+
+  def testStickyVersion(self):
+    """Tests if we can find the sticky version from our mock directories."""
+    stable_ebuilds = self._GetStableEBuilds()
+    sticky_version = cros_mark_chrome_as_stable._GetStickyVersion(
+        stable_ebuilds)
+    self.assertEqual(sticky_version, self.sticky_version)
+
+  def testChromeEBuildInit(self):
+    """Tests if the chrome_version is set correctly in a ChromeEBuild."""
+    ebuild = cros_mark_chrome_as_stable.ChromeEBuild(self.sticky)
+    self.assertEqual(ebuild.chrome_version, self.sticky_version)
+
+  def _CommonMarkAsStableTest(self, chrome_rev, new_version, old_ebuild_path,
+                              new_ebuild_path, commit_string_indicator):
+    """Common function used for test functions for MarkChromeEBuildAsStable.
+
+    This function stubs out other calls, and runs MarkChromeEBuildAsStable
+    with the specified args.
+
+    Args:
+      chrome_rev: standard chrome_rev argument
+      new_version: version we are revving up to
+      old_ebuild_path: path to the stable ebuild
+      new_ebuild_path: path to the to be created path
+      commit_string_indicator: a string that the commit message must contain
+    """
+    self.mox.StubOutWithMock(cros_mark_chrome_as_stable, 'RunCommand')
+    self.mox.StubOutWithMock(cros_mark_as_stable.EBuildStableMarker,
+                             'CommitChange')
+    stable_candidate = cros_mark_chrome_as_stable.ChromeEBuild(old_ebuild_path)
+    unstable_ebuild = cros_mark_chrome_as_stable.ChromeEBuild(self.unstable)
+    chrome_version = new_version
+    commit = None
+    overlay_dir = self.mock_chrome_dir
+
+    cros_mark_chrome_as_stable.RunCommand(['git', 'add', new_ebuild_path])
+    cros_mark_chrome_as_stable.RunCommand(['git', 'rm', old_ebuild_path])
+    cros_mark_as_stable.EBuildStableMarker.CommitChange(
+        mox.StrContains(commit_string_indicator))
+
+    self.mox.ReplayAll()
+    cros_mark_chrome_as_stable.MarkChromeEBuildAsStable(
+        stable_candidate, unstable_ebuild, chrome_rev, chrome_version, commit,
+        overlay_dir)
+    self.mox.VerifyAll()
+
+  def testStickyMarkAsStable(self):
+    """Tests to see if we can mark chrome as stable for a new sticky release."""
+    self._CommonMarkAsStableTest(cros_mark_chrome_as_stable.STICKY,
+                                 self.sticky_new_rc_version, self.sticky_rc,
+                                 self.sticky_new_rc, 'sticky_release')
+
+  def testLatestMarkAsStable(self):
+    """Tests to see if we can mark chrome for a latest release."""
+    self._CommonMarkAsStableTest(cros_mark_chrome_as_stable.LATEST_RELEASE,
+                                 self.latest_new_version, self.latest_stable,
+                                 self.latest_new, 'latest_release')
+
+  def testTotMarkAsStable(self):
+    """Tests to see if we can mark chrome for tot."""
+    self._CommonMarkAsStableTest(cros_mark_chrome_as_stable.TIP_OF_TRUNK,
+                                 self.tot_new_version, self.tot_stable,
+                                 self.tot_new, 'tot')
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/bin/cros_repo_sync_all.py b/bin/cros_repo_sync_all.py
index 6203b5a60d..642ef06fb4 100755
--- a/bin/cros_repo_sync_all.py
+++ b/bin/cros_repo_sync_all.py
@@ -26,10 +26,10 @@ def main():
   if options.buildroot:
     if options.clobber:
       cbuildbot._FullCheckout(options.buildroot, options.tracking_branch,
-                              rw_checkout=False, retries=_NUMBER_OF_RETRIES)
+                              retries=_NUMBER_OF_RETRIES)
     else:
-      cbuildbot._IncrementalCheckout(options.buildroot, rw_checkout=False,
-                                     retries=_NUMBER_OF_RETRIES)
+      cbuildbot._IncrementalCheckout(options.buildroot,
+                                     retries=_NUMBER_OF_RETRIES)
   else:
     print >> sys.stderr, 'ERROR: Must set buildroot'
     sys.exit(1)
diff --git a/bin/cros_workon_make b/bin/cros_workon_make
new file mode 100755
index 0000000000..95cc709a5d
--- /dev/null
+++ b/bin/cros_workon_make
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Simple wrapper script to build a cros_workon package incrementally.
+# You must already be cros_workon'ing the package in question.
+
+. "$(dirname $0)/../common.sh"
+
+# Script must be run inside the chroot.
+assert_inside_chroot
+
+get_default_board
+
+DEFINE_string board "${DEFAULT_BOARD}" \
+  "Board for which to build the package."
+DEFINE_boolean test "${FLAGS_FALSE}" \
+  "Compile and run tests as well."
+DEFINE_boolean reconf "${FLAGS_FALSE}" \
+  "Re-run configure and prepare steps."
+DEFINE_boolean install "${FLAGS_FALSE}" \
+  "Incrementally build and install your package."
+DEFINE_boolean scrub "${FLAGS_FALSE}" \
+  "Blow away all in-tree files not managed by git."
+
+set -e
+# Parse command line.
+FLAGS "$@" || exit 1
+eval set -- "${FLAGS_ARGV}"
+
+if [ $# -lt 1 ]; then
+  echo "Usage: ${0} [OPTIONS] "
+  exit 1
+fi
+
+if [ -n "${FLAGS_board}" ]; then
+  BOARD_DIR=/build/"${FLAGS_board}"
+  EBUILDCMD=ebuild-"${FLAGS_board}"
+  EMERGECMD=emerge-"${FLAGS_board}"
+  EQUERYCMD=equery-"${FLAGS_board}"
+  BOARD_STR="${FLAGS_board}"
+  BOARD_KEYWORD="$(portageq-${FLAGS_board} envvar ARCH)"
+fi
+
+unstable_suffix="9999"
+workon_name="${1}-${unstable_suffix}"
+pkgfile=
+workpath=
+
+if ! pkgfile=$("${EQUERYCMD}" which "${workon_name}" 2> /dev/null); then
+  if ACCEPT_KEYWORDS="~${BOARD_KEYWORD}" "${EQUERYCMD}" which "${workon_name}" \
+      > /dev/null 2>&1; then
+    die "run './cros_workon --board ${BOARD_STR} start ${1}' first!" 1>&2
+  fi
+  die "error looking up package $1"
+fi
+
+if [ "${FLAGS_scrub}" = "${FLAGS_TRUE}" ]; then
+  eval $(${EBUILDCMD} $(${EQUERYCMD} which ${workon_name}) info)
+  srcdir=$(readlink -m ${CROS_WORKON_SRCDIR})
+  trunkdir=$(readlink -m ${CHROOT_TRUNK_DIR})
+  project_path=${srcdir#${trunkdir}/}
+  if ! (cd "${GCLIENT_ROOT}/${project_path}" && git clean -xf); then
+    die "Could not scrub source directory"
+  fi
+  exit 0
+fi
+
+workpath=$(\
+    echo "${pkgfile}" | \
+    awk -F '/' '{ print $(NF-2) "/" $(NF-1) }')-"${unstable_suffix}"
+
+emerge_features=
+to_do="compile"
+if [ "${FLAGS_test}" = "${FLAGS_TRUE}" ]; then
+  to_do="test"
+  emerge_features="test"
+fi
+if [ "${FLAGS_install}" = "${FLAGS_TRUE}" ]; then
+  FEATURES="${emerge_features}" "${EMERGECMD}" "${1}"
+  exit $?
+fi
+
+clean=
+if [ "${FLAGS_reconf}" = "${FLAGS_TRUE}" ]; then
+  clean="clean"
+else
+  rm -f "/build/${BOARD_STR}/tmp/portage/${workpath}/.compiled"
+fi
+SANDBOX_WRITE=~/trunk CROS_WORKON_INPLACE=1 \
+  "${EBUILDCMD}" "${pkgfile}" ${clean} "${to_do}"
diff --git a/bootperf-bin/bootperf b/bootperf-bin/bootperf
index 4aea1a8971..5c0ce6613b 100755
--- a/bootperf-bin/bootperf
+++ b/bootperf-bin/bootperf
@@ -21,9 +21,10 @@ DEFINE_boolean keep_logs "$FLAGS_FALSE" "keep autotest results" k
 RUN_TEST="$SCRIPT_DIR/run_remote_tests.sh"
 TEST=server/site_tests/platform_BootPerfServer/control
 TMP_RESULTS="/tmp/bootperf.$(date '+%Y%j%H%M').$$"
-RESULTS_KEYVAL=platform_BootPerfServer/platform_BootPerfServer/results/keyval
+RESULTS_DIR=platform_BootPerfServer/platform_BootPerfServer/results
+RESULTS_KEYVAL=$RESULTS_DIR/keyval
 RESULTS_SUMMARY_FILES=(
-  $RESULTS_KEYVAL
+  $RESULTS_DIR
   platform_BootPerfServer/keyval
   platform_BootPerfServer/platform_BootPerfServer/keyval
   platform_BootPerfServer/platform_BootPerfServer/platform_BootPerf/keyval
@@ -135,13 +136,19 @@ run_boot_test() {
     mkdir $iter_rundir
 
     echo "run $iter start at $(date)"
-    $RUN_TEST --results_dir_root="$TMP_RESULTS" \
+
+    # BEWARE: The --use_emerged option means that you must manually
+    # emerge chromeos-base/autotest-tests if a) you are working on
+    # the package, and b) you also want to use this script to test your
+    # changes to the package. (The option is here because IMO the
+    # alternative is a bigger nuisance.)
+    $RUN_TEST --use_emerged --results_dir_root="$TMP_RESULTS" \
       --remote="$remote" $TEST >$logfile 2>&1
     if [ ! -e "$TMP_RESULTS/$RESULTS_KEYVAL" ]; then
       error "No results file; terminating test runs."
-      error "Check $logfile for output from the test run,"
+      error "Check $(pwd)/$logfile for output from the test run,"
       error "and see $TMP_RESULTS for full test logs and output."
-      break
+      return
     fi
     mkdir $summary_dir
     tar cf - -C $TMP_RESULTS "${RESULTS_SUMMARY_FILES[@]}" |
diff --git a/build_image b/build_image
index f727b7497f..bd49af5054 100755
--- a/build_image
+++ b/build_image
@@ -180,7 +180,7 @@ fi
 # We don't allow building from source with the image as a target,
 # and it's not possible to store prebuilts for the same package
 # with different use flags.
-USE="${EXTRA_USE}" emerge-${FLAGS_board} \
+USE="${EXTRA_USE} ${USE}" emerge-${FLAGS_board} \
   -uNDvg --binpkg-respect-use=y virtual/kernel
 
 # Use canonical path since some tools (e.g. mount) do not like symlinks.
diff --git a/chromeos_version.sh b/chromeos_version.sh
index 4499741bfe..b836d7e976 100755
--- a/chromeos_version.sh
+++ b/chromeos_version.sh
@@ -26,7 +26,7 @@ export CHROMEOS_VERSION_MINOR=9
 # Increment by 2 in trunk after making a release branch.
 # Does not reset on a major/minor change (always increases).
 # (Trunk is always odd; branches are always even).
-export CHROMEOS_VERSION_BRANCH=115
+export CHROMEOS_VERSION_BRANCH=129
 
 # Patch number.
 # Increment by 1 each release on a branch.
diff --git a/cros_generate_update_payload b/cros_generate_update_payload index 3b5b9f9d6d..384e27714f 100755 --- a/cros_generate_update_payload +++ b/cros_generate_update_payload @@ -26,7 +26,6 @@ STATE_LOOP_DEV="" # Pass an arg to not exit 1 at the end cleanup() { set +e - echo "Cleaning up" if [ -n "$SRC_MNT" ]; then sudo umount -d "$SRC_MNT" [ -d "$SRC_MNT" ] && rmdir "$SRC_MNT" @@ -74,13 +73,16 @@ extract_partition_to_temp_file() { else warn "partition offset or length not at 2MiB boundary" fi - dd if="$filename" of="$temp_file" bs=$bs count="$length" skip="$offset" + dd if="$filename" of="$temp_file" bs=$bs count="$length" \ + skip="$offset" 2>/dev/null } patch_kernel() { local IMAGE="$1" local KERN_FILE="$2" + echo "Patching kernel" $KERN_FILE + echo " into" $IMAGE STATE_LOOP_DEV=$(sudo losetup -f) [ -n "$STATE_LOOP_DEV" ] || die "no free loop device" local offset=$(partoffset "${IMAGE}" 1) @@ -88,7 +90,7 @@ patch_kernel() { sudo losetup -o "$offset" "$STATE_LOOP_DEV" "$IMAGE" STATE_MNT=$(mktemp -d /tmp/state.XXXXXX) sudo mount --read-only "$STATE_LOOP_DEV" "$STATE_MNT" - dd if="$STATE_MNT"/vmlinuz_hd.vblock of="$KERN_FILE" conv=notrunc + dd if="$STATE_MNT"/vmlinuz_hd.vblock of="$KERN_FILE" conv=notrunc 2>/dev/null sudo umount "$STATE_MNT" STATE_MNT="" sudo losetup -d "$STATE_LOOP_DEV" @@ -163,11 +165,6 @@ DELTA=$FLAGS_TRUE if [ -z "$FLAGS_src_image" ]; then DELTA=$FLAGS_FALSE - if [ "$FLAGS_old_style" = "$FLAGS_TRUE" ]; then - echo "Generating an old-style full update" - else - echo "Generating a new-style full update" - fi fi if [ "$DELTA" -eq "$FLAGS_TRUE" -o "$FLAGS_old_style" -eq "$FLAGS_FALSE" ]; then @@ -227,7 +224,7 @@ if [ "$DELTA" -eq "$FLAGS_TRUE" -o "$FLAGS_old_style" -eq "$FLAGS_FALSE" ]; then echo "Done generating new style full update." fi else - echo "Generating full update" + echo "Generating old-style full update" trap cleanup INT TERM EXIT DST_KERNEL=$(extract_partition_to_temp_file "$FLAGS_image" 2) diff --git a/cros_mark_as_stable.py b/cros_mark_as_stable.py index b2f2cd4067..7e170d2952 100755 --- a/cros_mark_as_stable.py +++ b/cros_mark_as_stable.py @@ -18,36 +18,36 @@ import sys sys.path.append(os.path.join(os.path.dirname(__file__), 'lib')) from cros_build_lib import Info, RunCommand, Warning, Die - +gflags.DEFINE_boolean('all', False, + 'Mark all packages as stable.') gflags.DEFINE_string('board', '', 'Board for which the package belongs.', short_name='b') +gflags.DEFINE_string('drop_file', None, + 'File to list packages that were revved.') +gflags.DEFINE_boolean('dryrun', False, + 'Passes dry-run to git push if pushing a change.') gflags.DEFINE_string('overlays', '', 'Colon-separated list of overlays to modify.', short_name='o') gflags.DEFINE_string('packages', '', 'Colon-separated list of packages to mark as stable.', short_name='p') -gflags.DEFINE_string('push_options', '', - 'Options to use with git-cl push using push command.') gflags.DEFINE_string('srcroot', '%s/trunk/src' % os.environ['HOME'], 'Path to root src directory.', short_name='r') gflags.DEFINE_string('tracking_branch', 'cros/master', 'Used with commit to specify branch to track against.', short_name='t') -gflags.DEFINE_boolean('all', False, - 'Mark all packages as stable.') gflags.DEFINE_boolean('verbose', False, 'Prints out verbose information about what is going on.', short_name='v') # Takes two strings, package_name and commit_id. -_GIT_COMMIT_MESSAGE = \ - 'Marking 9999 ebuild for %s with commit %s as stable.' +_GIT_COMMIT_MESSAGE = 'Marking 9999 ebuild for %s with commit %s as stable.' 
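(A note on how the new --drop_file flag composes with the rest of this change: on 'commit', cros_mark_as_stable records the packages it revved to the drop file, and the --package_file flag added to cros_run_unit_tests later in this series can consume that list. A minimal sketch, with a hypothetical board name and path, run from src/scripts inside the chroot:

  ./cros_mark_as_stable --all --board=x86-generic \
      --drop_file=/tmp/revved_packages.list commit
  ./cros_run_unit_tests --board=x86-generic \
      --package_file=/tmp/revved_packages.list)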
# Dictionary of valid commands with usage information. -_COMMAND_DICTIONARY = { +COMMAND_DICTIONARY = { 'clean': 'Cleans up previous calls to either commit or push', 'commit': @@ -57,7 +57,17 @@ _COMMAND_DICTIONARY = { } # Name used for stabilizing branch. -_STABLE_BRANCH_NAME = 'stabilizing_branch' +STABLE_BRANCH_NAME = 'stabilizing_branch' + + +def BestEBuild(ebuilds): + """Returns the newest EBuild from a list of EBuild objects.""" + from portage.versions import vercmp + winner = ebuilds[0] + for ebuild in ebuilds[1:]: + if vercmp(winner.version, ebuild.version) < 0: + winner = ebuild + return winner # ======================= Global Helper Functions ======================== @@ -83,16 +93,6 @@ def _CleanStalePackages(board, package_array): RunCommand(['sudo', 'eclean', '-d', 'packages'], redirect_stderr=True) -def _BestEBuild(ebuilds): - """Returns the newest EBuild from a list of EBuild objects.""" - from portage.versions import vercmp - winner = ebuilds[0] - for ebuild in ebuilds[1:]: - if vercmp(winner.version, ebuild.version) < 0: - winner = ebuild - return winner - - def _FindUprevCandidates(files): """Return a list of uprev candidates from specified list of files. @@ -108,7 +108,7 @@ def _FindUprevCandidates(files): unstable_ebuilds = [] for path in files: if path.endswith('.ebuild') and not os.path.islink(path): - ebuild = _EBuild(path) + ebuild = EBuild(path) if ebuild.is_workon: workon_dir = True if ebuild.is_stable: @@ -121,7 +121,7 @@ def _FindUprevCandidates(files): if len(unstable_ebuilds) > 1: Die('Found multiple unstable ebuilds in %s' % os.path.dirname(path)) if len(stable_ebuilds) > 1: - stable_ebuilds = [_BestEBuild(stable_ebuilds)] + stable_ebuilds = [BestEBuild(stable_ebuilds)] # Print a warning if multiple stable ebuilds are found in the same # directory. Storing multiple stable ebuilds is error-prone because @@ -166,15 +166,15 @@ def _BuildEBuildDictionary(overlays, all, packages): overlays[overlay].append(ebuild) -def _CheckOnStabilizingBranch(): +def _CheckOnStabilizingBranch(stable_branch): """Returns true if the git branch is on the stabilizing branch.""" current_branch = _SimpleRunCommand('git branch | grep \*').split()[1] - return current_branch == _STABLE_BRANCH_NAME + return current_branch == stable_branch def _CheckSaneArguments(package_list, command): """Checks to make sure the flags are sane. Dies if arguments are not sane.""" - if not command in _COMMAND_DICTIONARY.keys(): + if not command in COMMAND_DICTIONARY.keys(): _PrintUsageAndDie('%s is not a valid command' % command) if not gflags.FLAGS.packages and command == 'commit' and not gflags.FLAGS.all: _PrintUsageAndDie('Please specify at least one package') @@ -185,19 +185,13 @@ def _CheckSaneArguments(package_list, command): gflags.FLAGS.srcroot = os.path.abspath(gflags.FLAGS.srcroot) -def _Clean(): - """Cleans up uncommitted changes on either stabilizing branch or master.""" - _SimpleRunCommand('git reset HEAD --hard') - _SimpleRunCommand('git checkout %s' % gflags.FLAGS.tracking_branch) - - def _PrintUsageAndDie(error_message=''): """Prints optional error_message the usage and returns an error exit code.""" command_usage = 'Commands: \n' # Add keys and usage information from dictionary. 
- commands = sorted(_COMMAND_DICTIONARY.keys()) + commands = sorted(COMMAND_DICTIONARY.keys()) for command in commands: - command_usage += ' %s: %s\n' % (command, _COMMAND_DICTIONARY[command]) + command_usage += ' %s: %s\n' % (command, COMMAND_DICTIONARY[command]) commands_str = '|'.join(commands) Warning('Usage: %s FLAGS [%s]\n\n%s\nFlags:%s' % (sys.argv[0], commands_str, command_usage, gflags.FLAGS)) @@ -206,40 +200,6 @@ def _PrintUsageAndDie(error_message=''): else: sys.exit(1) -def _PushChange(): - """Pushes changes to the git repository. - - Pushes locals commits from calls to CommitChange to the remote git - repository specified by os.pwd. - - Raises: - OSError: Error occurred while pushing. - """ - - # TODO(sosa) - Add logic for buildbot to check whether other slaves have - # completed and push this change only if they have. - - # Sanity check to make sure we're on a stabilizing branch before pushing. - if not _CheckOnStabilizingBranch(): - Info('Not on branch %s so no work found to push. Exiting' % \ - _STABLE_BRANCH_NAME) - return - - description = _SimpleRunCommand('git log --format=format:%s%n%n%b ' + - gflags.FLAGS.tracking_branch + '..') - description = 'Marking set of ebuilds as stable\n\n%s' % description - merge_branch_name = 'merge_branch' - _SimpleRunCommand('git remote update') - merge_branch = _GitBranch(merge_branch_name) - merge_branch.CreateBranch() - if not merge_branch.Exists(): - Die('Unable to create merge branch.') - _SimpleRunCommand('git merge --squash %s' % _STABLE_BRANCH_NAME) - _SimpleRunCommand('git commit -m "%s"' % description) - # Ugh. There has got to be an easier way to push to a tracking branch - _SimpleRunCommand('git config push.default tracking') - _SimpleRunCommand('git push') - def _SimpleRunCommand(command): """Runs a shell command and returns stdout back to caller.""" @@ -248,19 +208,79 @@ def _SimpleRunCommand(command): stdout = proc_handle.communicate()[0] retcode = proc_handle.wait() if retcode != 0: - raise subprocess.CalledProcessError(retcode, command, output=stdout) + _Print(stdout) + raise subprocess.CalledProcessError(retcode, command) return stdout # ======================= End Global Helper Functions ======================== -class _GitBranch(object): +def Clean(tracking_branch): + """Cleans up uncommitted changes. + + Args: + tracking_branch: The tracking branch we want to return to after the call. + """ + _SimpleRunCommand('git reset HEAD --hard') + _SimpleRunCommand('git checkout %s' % tracking_branch) + + +def PushChange(stable_branch, tracking_branch): + """Pushes commits in the stable_branch to the remote git repository. + + Pushes local commits from calls to CommitChange to the remote git + repository specified by the current working directory. + + Args: + stable_branch: The local branch with commits we want to push. + tracking_branch: The tracking branch of the local branch. + Raises: + OSError: Error occurred while pushing. + """ + num_retries = 5 + + # Sanity check to make sure we're on a stabilizing branch before pushing. + if not _CheckOnStabilizingBranch(stable_branch): + Info('Not on branch %s so no work found to push.
Exiting' % stable_branch) + return + + description = _SimpleRunCommand('git log --format=format:%s%n%n%b ' + + tracking_branch + '..') + description = 'Marking set of ebuilds as stable\n\n%s' % description + Info('Using description %s' % description) + merge_branch_name = 'merge_branch' + for push_try in range(num_retries + 1): + try: + _SimpleRunCommand('git remote update') + merge_branch = GitBranch(merge_branch_name, tracking_branch) + merge_branch.CreateBranch() + if not merge_branch.Exists(): + Die('Unable to create merge branch.') + _SimpleRunCommand('git merge --squash %s' % stable_branch) + _SimpleRunCommand('git commit -m "%s"' % description) + _SimpleRunCommand('git config push.default tracking') + if gflags.FLAGS.dryrun: + _SimpleRunCommand('git push --dry-run') + else: + _SimpleRunCommand('git push') + + break + except: + if push_try < num_retries: + Warning('Failed to push change, performing retry (%s/%s)' % ( + push_try + 1, num_retries)) + else: + raise + + +class GitBranch(object): """Wrapper class for a git branch.""" - def __init__(self, branch_name): + def __init__(self, branch_name, tracking_branch): """Sets up variables but does not create the branch.""" self.branch_name = branch_name + self.tracking_branch = tracking_branch def CreateBranch(self): """Creates a new git branch or replaces an existing one.""" @@ -271,7 +291,7 @@ class _GitBranch(object): def _Checkout(self, target, create=True): """Function used internally to create and move between branches.""" if create: - git_cmd = 'git checkout -b %s %s' % (target, gflags.FLAGS.tracking_branch) + git_cmd = 'git checkout -b %s %s' % (target, self.tracking_branch) else: git_cmd = 'git checkout %s' % target _SimpleRunCommand(git_cmd) @@ -287,30 +307,30 @@ class _GitBranch(object): Returns True on success. """ - self._Checkout(gflags.FLAGS.tracking_branch, create=False) + self._Checkout(self.tracking_branch, create=False) delete_cmd = 'git branch -D %s' % self.branch_name _SimpleRunCommand(delete_cmd) -class _EBuild(object): - """Wrapper class for an ebuild.""" +class EBuild(object): + """Wrapper class for information about an ebuild.""" def __init__(self, path): - """Initializes all data about an ebuild. - - Uses equery to find the ebuild path and sets data about an ebuild for - easy reference. - """ + """Sets up data about an ebuild from its path.""" from portage.versions import pkgsplit - self.ebuild_path = path - (self.ebuild_path_no_revision, - self.ebuild_path_no_version, - self.current_revision) = self._ParseEBuildPath(self.ebuild_path) - _, self.category, pkgpath, filename = path.rsplit('/', 3) - filename_no_suffix = os.path.join(filename.replace('.ebuild', '')) - self.pkgname, version_no_rev, rev = pkgsplit(filename_no_suffix) + unused_path, self.category, self.pkgname, filename = path.rsplit('/', 3) + unused_pkgname, version_no_rev, rev = pkgsplit( + filename.replace('.ebuild', '')) + + self.ebuild_path_no_version = os.path.join( + os.path.dirname(path), self.pkgname) + self.ebuild_path_no_revision = '%s-%s' % (self.ebuild_path_no_version, + version_no_rev) + self.current_revision = int(rev.replace('r', '')) self.version = '%s-%s' % (version_no_rev, rev) self.package = '%s/%s' % (self.category, self.pkgname) + self.ebuild_path = path + self.is_workon = False self.is_stable = False @@ -324,7 +344,6 @@ class _EBuild(object): def GetCommitId(self): """Get the commit id for this ebuild.""" - # Grab and evaluate CROS_WORKON variables from this ebuild. 
unstable_ebuild = '%s-9999.ebuild' % self.ebuild_path_no_version cmd = ('export CROS_WORKON_LOCALNAME="%s" CROS_WORKON_PROJECT="%s"; ' @@ -367,39 +386,53 @@ class _EBuild(object): Die('Missing commit id for %s' % self.ebuild_path) return output.rstrip() - @classmethod - def _ParseEBuildPath(cls, ebuild_path): - """Static method that parses the path of an ebuild - - Returns a tuple containing the (ebuild path without the revision - string, without the version string, and the current revision number for - the ebuild). - """ - # Get the ebuild name without the revision string. - (ebuild_no_rev, _, rev_string) = ebuild_path.rpartition('-') - - # Verify the revision string starts with the revision character. - if rev_string.startswith('r'): - # Get the ebuild name without the revision and version strings. - ebuild_no_version = ebuild_no_rev.rpartition('-')[0] - rev_string = rev_string[1:].rpartition('.ebuild')[0] - else: - # Has no revision so we stripped the version number instead. - ebuild_no_version = ebuild_no_rev - ebuild_no_rev = ebuild_path.rpartition('9999.ebuild')[0] + '0.0.1' - rev_string = '0' - revision = int(rev_string) - return (ebuild_no_rev, ebuild_no_version, revision) - class EBuildStableMarker(object): """Class that revs the ebuild and commits locally or pushes the change.""" def __init__(self, ebuild): + assert ebuild self._ebuild = ebuild - def RevEBuild(self, commit_id='', redirect_file=None): - """Revs an ebuild given the git commit id. + @classmethod + def MarkAsStable(cls, unstable_ebuild_path, new_stable_ebuild_path, + commit_keyword, commit_value, redirect_file=None): + """Static function that creates a revved stable ebuild. + + This function assumes you have already figured out the name of the new + stable ebuild path and then creates that file from the given unstable + ebuild and marks it as stable. If the commit_value is set, it also + sets the commit_keyword=commit_value pair in the ebuild. + + Args: + unstable_ebuild_path: The path to the unstable ebuild. + new_stable_ebuild_path: The path you want to use for the new stable + ebuild. + commit_keyword: Optional keyword to set in the ebuild to mark it as + stable. + commit_value: Value to set the above keyword to. + redirect_file: Optionally redirect output of new ebuild somewhere else. + """ + shutil.copyfile(unstable_ebuild_path, new_stable_ebuild_path) + for line in fileinput.input(new_stable_ebuild_path, inplace=1): + # Has to be done here to get changes to sys.stdout from fileinput.input. + if not redirect_file: + redirect_file = sys.stdout + if line.startswith('KEYWORDS'): + # Actually mark this file as stable by removing ~'s. + redirect_file.write(line.replace('~', '')) + elif line.startswith('EAPI'): + # Always add new commit_id after EAPI definition. + redirect_file.write(line) + if commit_keyword and commit_value: + redirect_file.write('%s="%s"\n' % (commit_keyword, commit_value)) + elif not line.startswith(commit_keyword): + # Skip old commit_keyword definition. + redirect_file.write(line) + fileinput.close() + + def RevWorkOnEBuild(self, commit_id, redirect_file=None): + """Revs a workon ebuild given the git commit hash. By default this class overwrites a new ebuild given the normal ebuild rev'ing logic. However, a user can specify a redirect_file @@ -418,44 +451,34 @@ class EBuildStableMarker(object): Returns: True if the revved package is different than the old ebuild. - # TODO(sosa): Change to a check.
- if not self._ebuild: - Die('Invalid ebuild given to EBuildStableMarker') + if self._ebuild.is_stable: + new_stable_ebuild_path = '%s-r%d.ebuild' % ( + self._ebuild.ebuild_path_no_revision, + self._ebuild.current_revision + 1) + else: + # If given unstable ebuild, use 0.0.1 rather than 9999. + new_stable_ebuild_path = '%s-0.0.1-r%d.ebuild' % ( + self._ebuild.ebuild_path_no_version, + self._ebuild.current_revision + 1) - new_ebuild_path = '%s-r%d.ebuild' % (self._ebuild.ebuild_path_no_revision, - self._ebuild.current_revision + 1) + _Print('Creating new stable ebuild %s' % new_stable_ebuild_path) + unstable_ebuild_path = ('%s-9999.ebuild' % + self._ebuild.ebuild_path_no_version) + if not os.path.exists(unstable_ebuild_path): + Die('Missing unstable ebuild: %s' % unstable_ebuild_path) - _Print('Creating new stable ebuild %s' % new_ebuild_path) - workon_ebuild = '%s-9999.ebuild' % self._ebuild.ebuild_path_no_version - if not os.path.exists(workon_ebuild): - Die('Missing 9999 ebuild: %s' % workon_ebuild) - shutil.copyfile(workon_ebuild, new_ebuild_path) - - for line in fileinput.input(new_ebuild_path, inplace=1): - # Has to be done here to get changes to sys.stdout from fileinput.input. - if not redirect_file: - redirect_file = sys.stdout - if line.startswith('KEYWORDS'): - # Actually mark this file as stable by removing ~'s. - redirect_file.write(line.replace('~', '')) - elif line.startswith('EAPI'): - # Always add new commit_id after EAPI definition. - redirect_file.write(line) - redirect_file.write('CROS_WORKON_COMMIT="%s"\n' % commit_id) - elif not line.startswith('CROS_WORKON_COMMIT'): - # Skip old CROS_WORKON_COMMIT definition. - redirect_file.write(line) - fileinput.close() + self.MarkAsStable(unstable_ebuild_path, new_stable_ebuild_path, + 'CROS_WORKON_COMMIT', commit_id, redirect_file) old_ebuild_path = self._ebuild.ebuild_path - diff_cmd = ['diff', '-Bu', old_ebuild_path, new_ebuild_path] + diff_cmd = ['diff', '-Bu', old_ebuild_path, new_stable_ebuild_path] if 0 == RunCommand(diff_cmd, exit_code=True, redirect_stdout=True, redirect_stderr=True, print_cmd=gflags.FLAGS.verbose): - os.unlink(new_ebuild_path) + os.unlink(new_stable_ebuild_path) return False else: _Print('Adding new stable ebuild to git') - _SimpleRunCommand('git add %s' % new_ebuild_path) + _SimpleRunCommand('git add %s' % new_stable_ebuild_path) if self._ebuild.is_stable: _Print('Removing old ebuild from git') @@ -463,11 +486,9 @@ class EBuildStableMarker(object): return True - def CommitChange(self, message): - """Commits current changes in git locally. - - This method will take any changes from invocations to RevEBuild - and commits them locally in the git repository that contains os.pwd. + @classmethod + def CommitChange(cls, message): + """Commits current changes in git locally with given commit message. Args: message: the commit string to write when committing to git. @@ -475,8 +496,7 @@ class EBuildStableMarker(object): Raises: OSError: Error occurred while committing. 
""" - _Print('Committing changes for %s with commit message %s' % \ - (self._ebuild.package, message)) + Info('Committing changes with commit message: %s' % message) git_commit_cmd = 'git commit -am "%s"' % message _SimpleRunCommand(git_commit_cmd) @@ -520,11 +540,11 @@ def main(argv): os.chdir(overlay) if command == 'clean': - _Clean() + Clean(gflags.FLAGS.tracking_branch) elif command == 'push': - _PushChange() + PushChange(STABLE_BRANCH_NAME, gflags.FLAGS.tracking_branch) elif command == 'commit' and ebuilds: - work_branch = _GitBranch(_STABLE_BRANCH_NAME) + work_branch = GitBranch(STABLE_BRANCH_NAME, gflags.FLAGS.tracking_branch) work_branch.CreateBranch() if not work_branch.Exists(): Die('Unable to create stabilizing branch in %s' % overlay) @@ -536,7 +556,7 @@ def main(argv): _Print('Working on %s' % ebuild.package) worker = EBuildStableMarker(ebuild) commit_id = ebuild.GetCommitId() - if worker.RevEBuild(commit_id): + if worker.RevWorkOnEBuild(commit_id): message = _GIT_COMMIT_MESSAGE % (ebuild.package, commit_id) worker.CommitChange(message) revved_packages.append(ebuild.package) @@ -549,6 +569,10 @@ def main(argv): if revved_packages: _CleanStalePackages(gflags.FLAGS.board, revved_packages) + if gflags.FLAGS.drop_file: + fh = open(gflags.FLAGS.drop_file, 'w') + fh.write(' '.join(revved_packages)) + fh.close() else: work_branch.Delete() diff --git a/cros_mark_as_stable_unittest.py b/cros_mark_as_stable_unittest.py index e7fd3317b2..4f9763e7f9 100755 --- a/cros_mark_as_stable_unittest.py +++ b/cros_mark_as_stable_unittest.py @@ -6,16 +6,45 @@ """Unit tests for cros_mark_as_stable.py.""" - +import fileinput import mox import os import sys import unittest -# Required to include '.' in the python path. -sys.path.append(os.path.dirname(__file__)) import cros_mark_as_stable +class NonClassTests(mox.MoxTestBase): + def setUp(self): + mox.MoxTestBase.setUp(self) + self.mox.StubOutWithMock(cros_mark_as_stable, '_SimpleRunCommand') + self._branch = 'test_branch' + self._tracking_branch = 'cros/test' + + def testPushChange(self): + git_log = 'Marking test_one as stable\nMarking test_two as stable\n' + fake_description = 'Marking set of ebuilds as stable\n\n%s' % git_log + self.mox.StubOutWithMock(cros_mark_as_stable, '_CheckOnStabilizingBranch') + self.mox.StubOutWithMock(cros_mark_as_stable.GitBranch, 'CreateBranch') + self.mox.StubOutWithMock(cros_mark_as_stable.GitBranch, 'Exists') + + cros_mark_as_stable._CheckOnStabilizingBranch(self._branch).AndReturn(True) + cros_mark_as_stable.GitBranch.CreateBranch() + cros_mark_as_stable.GitBranch.Exists().AndReturn(True) + cros_mark_as_stable._SimpleRunCommand('git log --format=format:%s%n%n%b ' + + self._tracking_branch + '..').AndReturn(git_log) + cros_mark_as_stable._SimpleRunCommand('git remote update') + cros_mark_as_stable._SimpleRunCommand('git merge --squash %s' % + self._branch) + cros_mark_as_stable._SimpleRunCommand('git commit -m "%s"' % + fake_description) + cros_mark_as_stable._SimpleRunCommand('git config push.default tracking') + cros_mark_as_stable._SimpleRunCommand('git push') + self.mox.ReplayAll() + cros_mark_as_stable.PushChange(self._branch, self._tracking_branch) + self.mox.VerifyAll() + + class GitBranchTest(mox.MoxTestBase): def setUp(self): @@ -23,10 +52,11 @@ class GitBranchTest(mox.MoxTestBase): # Always stub RunCommmand out as we use it in every method. 
self.mox.StubOutWithMock(cros_mark_as_stable, '_SimpleRunCommand') self._branch = 'test_branch' + self._tracking_branch = 'cros/test' def testCreateBranchNoPrevious(self): # Test init with no previous branch existing. - branch = cros_mark_as_stable._GitBranch(self._branch) + branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch) self.mox.StubOutWithMock(branch, 'Exists') self.mox.StubOutWithMock(branch, '_Checkout') branch.Exists().AndReturn(False) @@ -37,7 +67,7 @@ class GitBranchTest(mox.MoxTestBase): def testCreateBranchWithPrevious(self): # Test init with previous branch existing. - branch = cros_mark_as_stable._GitBranch(self._branch) + branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch) self.mox.StubOutWithMock(branch, 'Exists') self.mox.StubOutWithMock(branch, 'Delete') self.mox.StubOutWithMock(branch, '_Checkout') @@ -51,35 +81,36 @@ class GitBranchTest(mox.MoxTestBase): def testCheckoutCreate(self): # Test init with no previous branch existing. cros_mark_as_stable._SimpleRunCommand( - 'git checkout -b %s cros/master' % self._branch) + 'git checkout -b %s %s' % (self._branch, self._tracking_branch)) self.mox.ReplayAll() - branch = cros_mark_as_stable._GitBranch(self._branch) + branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch) branch._Checkout(self._branch) self.mox.VerifyAll() def testCheckoutNoCreate(self): # Test init with previous branch existing. - cros_mark_as_stable._SimpleRunCommand('git checkout cros/master') + cros_mark_as_stable._SimpleRunCommand('git checkout %s' % ( + self._tracking_branch)) self.mox.ReplayAll() - branch = cros_mark_as_stable._GitBranch(self._branch) - branch._Checkout('cros/master', False) + branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch) + branch._Checkout(self._tracking_branch, False) self.mox.VerifyAll() def testDelete(self): - branch = cros_mark_as_stable._GitBranch(self._branch) + branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch) self.mox.StubOutWithMock(branch, '_Checkout') - branch._Checkout('cros/master', create=False) + branch._Checkout(self._tracking_branch, create=False) cros_mark_as_stable._SimpleRunCommand('git branch -D ' + self._branch) self.mox.ReplayAll() branch.Delete() self.mox.VerifyAll() def testExists(self): - branch = cros_mark_as_stable._GitBranch(self._branch) + branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch) # Test if branch exists that is created cros_mark_as_stable._SimpleRunCommand('git branch').AndReturn( - '%s %s' % (self._branch, 'cros/master')) + '%s %s' % (self._branch, self._tracking_branch)) self.mox.ReplayAll() self.assertTrue(branch.Exists()) self.mox.VerifyAll() @@ -90,45 +121,34 @@ class EBuildTest(mox.MoxTestBase): def setUp(self): mox.MoxTestBase.setUp(self) - def testInit(self): - self.mox.StubOutWithMock(cros_mark_as_stable._EBuild, '_ParseEBuildPath') - - ebuild_path = '/overlay/cat/test_package/test_package-0.0.1-r1.ebuild' - cros_mark_as_stable._EBuild._ParseEBuildPath( - ebuild_path).AndReturn(['/overlay/cat/test_package-0.0.1', - '/overlay/cat/test_package', - 1]) - self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input') - mock_file = ['EAPI=2', 'CROS_WORKON_COMMIT=old_id', - 'KEYWORDS=\"~x86 ~arm\"', 'src_unpack(){}'] - cros_mark_as_stable.fileinput.input(ebuild_path).AndReturn(mock_file) - - self.mox.ReplayAll() - ebuild = cros_mark_as_stable._EBuild(ebuild_path) - self.mox.VerifyAll() - self.assertEquals(ebuild.package, 'cat/test_package') - 
self.assertEquals(ebuild.ebuild_path, ebuild_path) - self.assertEquals(ebuild.ebuild_path_no_revision, - '/overlay/cat/test_package-0.0.1') - self.assertEquals(ebuild.ebuild_path_no_version, - '/overlay/cat/test_package') - self.assertEquals(ebuild.current_revision, 1) - def testParseEBuildPath(self): # Test with ebuild with revision number. - no_rev, no_version, revision = cros_mark_as_stable._EBuild._ParseEBuildPath( - '/path/test_package-0.0.1-r1.ebuild') - self.assertEquals(no_rev, '/path/test_package-0.0.1') - self.assertEquals(no_version, '/path/test_package') - self.assertEquals(revision, 1) + fake_ebuild_path = '/path/to/test_package/test_package-0.0.1-r1.ebuild' + self.mox.StubOutWithMock(fileinput, 'input') + fileinput.input(fake_ebuild_path).AndReturn('') + self.mox.ReplayAll() + fake_ebuild = cros_mark_as_stable.EBuild(fake_ebuild_path) + self.mox.VerifyAll() + self.assertEquals(fake_ebuild.ebuild_path_no_revision, + '/path/to/test_package/test_package-0.0.1') + self.assertEquals(fake_ebuild.ebuild_path_no_version, + '/path/to/test_package/test_package') + self.assertEquals(fake_ebuild.current_revision, 1) def testParseEBuildPathNoRevisionNumber(self): # Test with ebuild without revision number. - no_rev, no_version, revision = cros_mark_as_stable._EBuild._ParseEBuildPath( - '/path/test_package-9999.ebuild') - self.assertEquals(no_rev, '/path/test_package-0.0.1') - self.assertEquals(no_version, '/path/test_package') - self.assertEquals(revision, 0) + fake_ebuild_path = '/path/to/test_package/test_package-9999.ebuild' + self.mox.StubOutWithMock(fileinput, 'input') + fileinput.input(fake_ebuild_path).AndReturn('') + self.mox.ReplayAll() + fake_ebuild = cros_mark_as_stable.EBuild(fake_ebuild_path) + self.mox.VerifyAll() + + self.assertEquals(fake_ebuild.ebuild_path_no_revision, + '/path/to/test_package/test_package-9999') + self.assertEquals(fake_ebuild.ebuild_path_no_version, + '/path/to/test_package/test_package') + self.assertEquals(fake_ebuild.current_revision, 0) class EBuildStableMarkerTest(mox.MoxTestBase): @@ -138,7 +158,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase): self.mox.StubOutWithMock(cros_mark_as_stable, '_SimpleRunCommand') self.mox.StubOutWithMock(cros_mark_as_stable, 'RunCommand') self.mox.StubOutWithMock(os, 'unlink') - self.m_ebuild = self.mox.CreateMock(cros_mark_as_stable._EBuild) + self.m_ebuild = self.mox.CreateMock(cros_mark_as_stable.EBuild) self.m_ebuild.is_stable = True self.m_ebuild.package = 'test_package' self.m_ebuild.current_revision = 1 @@ -147,7 +167,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase): self.m_ebuild.ebuild_path = '/path/test_package-0.0.1-r1.ebuild' self.revved_ebuild_path = '/path/test_package-0.0.1-r2.ebuild' - def testRevEBuild(self): + def testRevWorkOnEBuild(self): self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input') self.mox.StubOutWithMock(cros_mark_as_stable.os.path, 'exists') self.mox.StubOutWithMock(cros_mark_as_stable.shutil, 'copyfile') @@ -177,7 +197,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase): self.mox.ReplayAll() marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild) - marker.RevEBuild('my_id', redirect_file=m_file) + marker.RevWorkOnEBuild('my_id', redirect_file=m_file) self.mox.VerifyAll() def testRevUnchangedEBuild(self): @@ -209,7 +229,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase): self.mox.ReplayAll() marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild) - marker.RevEBuild('my_id', redirect_file=m_file) + marker.RevWorkOnEBuild('my_id', redirect_file=m_file) 
self.mox.VerifyAll() def testRevMissingEBuild(self): @@ -226,7 +246,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase): ebuild_9999 = self.m_ebuild.ebuild_path_no_version + '-9999.ebuild' cros_mark_as_stable.os.path.exists(ebuild_9999).AndReturn(False) - cros_mark_as_stable.Die("Missing 9999 ebuild: %s" % ebuild_9999) + cros_mark_as_stable.Die("Missing unstable ebuild: %s" % ebuild_9999) cros_mark_as_stable.shutil.copyfile(ebuild_9999, self.revved_ebuild_path) cros_mark_as_stable.fileinput.input(self.revved_ebuild_path, inplace=1).AndReturn(mock_file) @@ -244,7 +264,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase): self.mox.ReplayAll() marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild) - marker.RevEBuild('my_id', redirect_file=m_file) + marker.RevWorkOnEBuild('my_id', redirect_file=m_file) self.mox.VerifyAll() @@ -257,14 +277,6 @@ class EBuildStableMarkerTest(mox.MoxTestBase): marker.CommitChange(mock_message) self.mox.VerifyAll() - def testPushChange(self): - #cros_mark_as_stable._SimpleRunCommand('git push') - #self.mox.ReplayAll() - #marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild) - #marker.PushChange() - #self.mox.VerifyAll() - pass - class _Package(object): def __init__(self, package): diff --git a/cros_run_unit_tests b/cros_run_unit_tests index 169edde582..220ad97d23 100755 --- a/cros_run_unit_tests +++ b/cros_run_unit_tests @@ -19,6 +19,8 @@ DEFINE_string board "${DEFAULT_BOARD}" \ "Target board of which tests were built" DEFINE_string build_root "${DEFAULT_BUILD_ROOT}" \ "Root of build output" +DEFINE_string package_file "" \ + "File with space-separated list of packages to run unit tests" f DEFINE_string packages "" \ "Optional space-separated list of packages to run unit tests" p @@ -55,11 +57,19 @@ set -e [ -z "${FLAGS_board}" ] && die "--board required" -# If no packages are specified we run all unit tests for chromeos-base -# packages. -if [ -n "${FLAGS_packages}" ]; then - PACKAGE_LIST="${FLAGS_packages}" -else +# Create package list from package file and list of packages. +if [ -n "${FLAGS_package_file}" ]; then + if [ -f "${FLAGS_package_file}" ]; then + PACKAGE_LIST="$(cat ${FLAGS_package_file})" + else + warn "Missing package file." + fi +fi + +[ -n "${FLAGS_packages}" ] && PACKAGE_LIST="${PACKAGE_LIST} ${FLAGS_packages}" + +# If we didn't specify packages, find all packages. +if [ -z "${FLAGS_package_file}" -a -z "${FLAGS_packages}" ]; then PACKAGE_LIST=$( ./get_package_list chromeos --board="${FLAGS_board}" | egrep '^chromeos-base' ) fi @@ -71,7 +81,8 @@ for package in ${PACKAGE_LIST}; do warn "Skipping package ${package} since it is blacklisted." 
continue fi - EBUILD_PATH=$( equery-${FLAGS_board} which ${package} 2> /dev/null ) + EBUILD_PATH=$( equery-${FLAGS_board} which ${package} 2> /dev/null ) || \ + warn "${package} not found" if [ -n "${EBUILD_PATH}" ]; then if check_src_test "${EBUILD_PATH}"; then run_unit_test "${EBUILD_PATH}" || record_test_failure "${package}" diff --git a/enter_chroot.sh b/enter_chroot.sh index 3ca59bf9e7..606eaf51a6 100755 --- a/enter_chroot.sh +++ b/enter_chroot.sh @@ -278,6 +278,9 @@ setup_env # Use git:8 chars of sha1 REVISION=$(cd ${FLAGS_trunk}/src/scripts ; git rev-parse --short=8 HEAD) CHROOT_PASSTHRU="CHROMEOS_REVISION=$REVISION BUILDBOT_BUILD=$FLAGS_build_number CHROMEOS_OFFICIAL=$CHROMEOS_OFFICIAL" +CHROOT_PASSTHRU="${CHROOT_PASSTHRU} \ +CHROMEOS_RELEASE_APPID=${CHROMEOS_RELEASE_APPID:-"{DEV-BUILD}"}" + if [ -d "$HOME/.subversion" ]; then # Bind mounting .subversion into chroot info "mounting ~/.subversion into chroot" diff --git a/image_to_live.sh b/image_to_live.sh index 88d75b9659..58b83e9138 100755 --- a/image_to_live.sh +++ b/image_to_live.sh @@ -36,6 +36,8 @@ DEFINE_integer devserver_port 8080 \ DEFINE_boolean for_vm ${FLAGS_FALSE} "Image is for a vm." DEFINE_string image "" \ "Update with this image path that is in this source checkout." i +DEFINE_string payload "" \ + "Update with this update payload, ignoring specified images." DEFINE_string src_image "" \ "Create a delta update by passing in the image on the remote machine." DEFINE_boolean update_stateful ${FLAGS_TRUE} \ @@ -132,6 +134,11 @@ function start_dev_server { --image $(reinterpret_path_for_chroot ${IMAGE_PATH})" fi + if [ -n "${FLAGS_payload}" ]; then + devserver_flags="${devserver_flags} \ + --payload $(reinterpret_path_for_chroot ${FLAGS_payload})" + fi + [ ${FLAGS_for_vm} -eq ${FLAGS_TRUE} ] && \ devserver_flags="${devserver_flags} --for_vm" diff --git a/lib/cros_build_lib.py b/lib/cros_build_lib.py index 4888a4eae6..0b728efd38 100644 --- a/lib/cros_build_lib.py +++ b/lib/cros_build_lib.py @@ -13,6 +13,11 @@ _STDOUT_IS_TTY = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() # TODO(sosa): Move logging to logging module. +class RunCommandException(Exception): + """Raised when there is an error in RunCommand.""" + pass + + def GetCallerName(): """Returns the name of the calling module with __main__.""" top_frame = inspect.stack()[-1][0] @@ -21,24 +26,30 @@ def RunCommand(cmd, print_cmd=True, error_ok=False, error_message=None, exit_code=False, redirect_stdout=False, redirect_stderr=False, - cwd=None, input=None, enter_chroot=False): + cwd=None, input=None, enter_chroot=False, num_retries=0): """Runs a shell command. - Keyword arguments: - cmd - cmd to run. Should be input to subprocess.POpen. If a string, + Arguments: + cmd: cmd to run. Should be input to subprocess.POpen. If a string, converted to an array using split(). - print_cmd -- prints the command before running it. - error_ok -- does not raise an exception on error. - error_message -- prints out this message when an error occurrs. - exit_code -- returns the return code of the shell command. - redirect_stdout -- returns the stdout. - redirect_stderr -- holds stderr output until input is communicated. - cwd -- the working directory to run this cmd. - input -- input to pipe into this command through stdin. - enter_chroot -- this command should be run from within the chroot. If set, + print_cmd: prints the command before running it. + error_ok: does not raise an exception on error. + error_message: prints out this message when an error occurs.
+ exit_code: returns the return code of the shell command. + redirect_stdout: returns the stdout. + redirect_stderr: holds stderr output until input is communicated. + cwd: the working directory to run this cmd. + input: input to pipe into this command through stdin. + enter_chroot: this command should be run from within the chroot. If set, cwd must point to the scripts directory. + num_retries: the number of retries to perform before dying + + Returns: + If exit_code is True, returns the return code of the shell command. + Else returns the output of the shell command. + Raises: - Exception: Raises generic exception on error with optional error_message. + Exception: Raises RunCommandException on error with optional error_message. """ # Set default for variables. stdout = None @@ -57,21 +68,27 @@ def RunCommand(cmd, print_cmd=True, error_ok=False, error_message=None, Info('PROGRAM(%s) -> RunCommand: %r in dir %s' % (GetCallerName(), cmd, cwd)) - try: - proc = subprocess.Popen(cmd, cwd=cwd, stdin=stdin, - stdout=stdout, stderr=stderr) - (output, error) = proc.communicate(input) - if exit_code: - return proc.returncode + for retry_count in range(num_retries + 1): + try: + proc = subprocess.Popen(cmd, cwd=cwd, stdin=stdin, + stdout=stdout, stderr=stderr) + (output, error) = proc.communicate(input) + if exit_code and retry_count == num_retries: + return proc.returncode - if not error_ok and proc.returncode: - raise Exception('Command "%r" failed.\n' % (cmd) + - (error_message or error or output or '')) - except Exception, e: - if not error_ok: - raise - else: - Warning(str(e)) + if proc.returncode == 0: + break + + raise RunCommandException('Command "%r" failed.\n' % (cmd) + + (error_message or error or output or '')) + except Exception, e: + if not error_ok and retry_count == num_retries: + raise RunCommandException(e) + else: + Warning(str(e)) + if print_cmd: + Info('PROGRAM(%s) -> RunCommand: retrying %r in dir %s' % + (GetCallerName(), cmd, cwd)) return output diff --git a/lib/cros_image_common.sh b/lib/cros_image_common.sh index 0ad9d0bbcd..86bba75504 100644 --- a/lib/cros_image_common.sh +++ b/lib/cros_image_common.sh @@ -8,19 +8,20 @@ # especially for being redistributed into platforms without complete Chromium OS # developing environment. -# Check if given command is available in current system -has_command() { +# Checks if given command is available in current system +image_has_command() { type "$1" >/dev/null 2>&1 } -err_die() { +# Prints error message and exit as 1 (error) +image_die() { echo "ERROR: $@" >&2 exit 1 } -# Finds the best gzip compressor and invoke it. -gzip_compress() { - if has_command pigz; then +# Finds the best gzip compressor and invoke it +image_gzip_compress() { + if image_has_command pigz; then # echo " ** Using parallel gzip **" >&2 # Tested with -b 32, 64, 128(default), 256, 1024, 16384, and -b 32 (max # window size of Deflate) seems to be the best in output size. 
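As a sketch of how the renamed image_* helpers in this file are meant to be consumed together (the image filename is a hypothetical example; partition 3 is the rootfs in the standard GPT layout):

  . lib/cros_image_common.sh
  if image_has_part_tools; then
    offset=$(image_part_offset chromiumos_image.bin 3)
    sectors=$(image_part_size chromiumos_image.bin 3)
    # Stream the rootfs partition out of the image and compress it.
    image_dump_partial_file chromiumos_image.bin "$offset" "$sectors" |
      image_gzip_compress -c -9 >rootfs.gz
  fi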
@@ -30,43 +31,58 @@ gzip_compress() { fi } +# Finds the best bzip2 compressor and invoke it +image_bzip2_compress() { + if image_has_command pbzip2; then + pbzip2 "$@" + else + bzip2 "$@" + fi +} + # Finds if current system has tools for part_* commands -has_part_tools() { - has_command cgpt || has_command parted +image_has_part_tools() { + image_has_command cgpt || image_has_command parted } # Finds the best partition tool and print partition offset -part_offset() { +image_part_offset() { local file="$1" local partno="$2" + local unpack_file="$(dirname "$file")/unpack_partitions.sh" - if has_command cgpt; then + # TODO parted is available on most Linux so we may deprecate other code path + if image_has_command cgpt; then cgpt show -b -i "$partno" "$file" - elif has_command parted; then - parted -m "$file" unit s print | - grep "^$partno:" | cut -d ':' -f 2 | sed 's/s$//' + elif image_has_command parted; then + parted -m "$file" unit s print | awk -F ':' "/^$partno:/ { print int(\$2) }" + elif [ -f "$unpack_file" ]; then + awk "/ $partno *Label:/ { print \$2 }" "$unpack_file" else exit 1 fi } # Finds the best partition tool and print partition size -part_size() { +image_part_size() { local file="$1" local partno="$2" + local unpack_file="$(dirname "$file")/unpack_partitions.sh" - if has_command cgpt; then + # TODO parted is available on most Linux so we may deprecate other code path + if image_has_command cgpt; then cgpt show -s -i "$partno" "$file" - elif has_command parted; then - parted -m "$file" unit s print | - grep "^$partno:" | cut -d ':' -f 4 | sed 's/s$//' + elif image_has_command parted; then + parted -m "$file" unit s print | awk -F ':' "/^$partno:/ { print int(\$4) }" + elif [ -s "$unpack_file" ]; then + awk "/ $partno *Label:/ { print \$3 }" "$unpack_file" else exit 1 fi } # Dumps a file by given offset and size (in sectors) -dump_partial_file() { +image_dump_partial_file() { local file="$1" local offset="$2" local sectors="$3" @@ -82,10 +98,10 @@ dump_partial_file() { bs=$((bs * buffer_ratio)) fi - if has_command pv; then + if image_has_command pv; then dd if="$file" bs=$bs skip="$offset" count="$sectors" \ oflag=sync status=noxfer 2>/dev/null | - pv -ptreb -B 4m -s $((sectors * $bs)) + pv -ptreb -B $bs -s $((sectors * bs)) else dd if="$file" bs=$bs skip="$offset" count="$sectors" \ oflag=sync status=noxfer 2>/dev/null @@ -93,14 +109,62 @@ dump_partial_file() { } # Dumps a specific partition from given image file -dump_partition() { +image_dump_partition() { local file="$1" local part_num="$2" - local offset="$(part_offset "$file" "$part_num")" || - err_die "failed to dump partition #$part_num from: $file" - local size="$(part_size "$file" "$part_num")" || - err_die "failed to dump partition #$part_num from: $file" + local offset="$(image_part_offset "$file" "$part_num")" || + image_die "failed to find partition #$part_num from: $file" + local size="$(image_part_size "$file" "$part_num")" || + image_die "failed to find partition #$part_num from: $file" - dump_partial_file "$file" "$offset" "$size" + image_dump_partial_file "$file" "$offset" "$size" } +# Maps a specific partition from given image file to a loop device +image_map_partition() { + local file="$1" + local part_num="$2" + local offset="$(image_part_offset "$file" "$part_num")" || + image_die "failed to find partition #$part_num from: $file" + local size="$(image_part_size "$file" "$part_num")" || + image_die "failed to find partition #$part_num from: $file" + + losetup --offset $((offset * 512)) 
--sizelimit=$((size * 512)) \ + -f --show "$file" +} + +# Unmaps a loop device created by image_map_partition +image_unmap_partition() { + local map_point="$1" + + losetup -d "$map_point" +} + +# Mounts a specific partition inside a given image file +image_mount_partition() { + local file="$1" + local part_num="$2" + local mount_point="$3" + local mount_opt="$4" + local offset="$(image_part_offset "$file" "$part_num")" || + image_die "failed to find partition #$part_num from: $file" + local size="$(image_part_size "$file" "$part_num")" || + image_die "failed to find partition #$part_num from: $file" + + if [ -z "$mount_opt" ]; then + # by default, mount as read-only. + mount_opt=",ro" + fi + + mount \ + -o "loop,offset=$((offset * 512)),sizelimit=$((size * 512)),$mount_opt" \ + "$file" \ + "$mount_point" +} + +# Unmounts a partition mount point mounted by image_mount_partition +image_umount_partition() { + local mount_point="$1" + + umount -d "$mount_point" +} diff --git a/make_factory_package.sh b/make_factory_package.sh index 57c23a159c..0eb200e52a 100755 --- a/make_factory_package.sh +++ b/make_factory_package.sh @@ -39,32 +39,33 @@ DEFINE_string subfolder "" \ FLAGS "$@" || exit 1 eval set -- "${FLAGS_ARGV}" -if [ ! -f "${FLAGS_release}" ] ; then +if [ ! -f "${FLAGS_release}" ]; then echo "Cannot find image file ${FLAGS_release}" exit 1 fi -if [ ! -f "${FLAGS_factory}" ] ; then +if [ ! -f "${FLAGS_factory}" ]; then echo "Cannot find image file ${FLAGS_factory}" exit 1 fi -if [ ! -z "${FLAGS_firmware_updater}" ] && \ - [ ! -f "${FLAGS_firmware_updater}" ] ; then +if [ -n "${FLAGS_firmware_updater}" ] && + [ ! -f "${FLAGS_firmware_updater}" ]; then echo "Cannot find firmware file ${FLAGS_firmware_updater}" exit 1 fi # Convert args to paths. Need eval to un-quote the string so that shell # chars like ~ are processed; just doing FOO=`readlink -f ${FOO}` won't work. -OMAHA_DIR=${SRC_ROOT}/platform/dev -OMAHA_DATA_DIR=${OMAHA_DIR}/static/ +OMAHA_DIR="${SRC_ROOT}/platform/dev" +OMAHA_CONF="${OMAHA_DIR}/miniomaha.conf" +OMAHA_DATA_DIR="${OMAHA_DIR}/static/" # Note: The subfolder flag can only append configs. That means you will need # to have unique board IDs for every time you run. If you delete miniomaha.conf # you can still use this flag and it will start fresh. -if [ -n "${FLAGS_subfolder}" ] ; then - OMAHA_DATA_DIR=${OMAHA_DIR}/static/${FLAGS_subfolder}/ +if [ -n "${FLAGS_subfolder}" ]; then + OMAHA_DATA_DIR="${OMAHA_DIR}/static/${FLAGS_subfolder}/" fi if [ ${INSIDE_CHROOT} -eq 0 ]; then @@ -74,20 +75,20 @@ if [ ${INSIDE_CHROOT} -eq 0 ]; then fi # Use this image as the source image to copy -RELEASE_DIR=`dirname ${FLAGS_release}` -FACTORY_DIR=`dirname ${FLAGS_factory}` -RELEASE_IMAGE=`basename ${FLAGS_release}` -FACTORY_IMAGE=`basename ${FLAGS_factory}` +RELEASE_DIR="$(dirname "${FLAGS_release}")" +FACTORY_DIR="$(dirname "${FLAGS_factory}")" +RELEASE_IMAGE="$(basename "${FLAGS_release}")" +FACTORY_IMAGE="$(basename "${FLAGS_factory}")" prepare_omaha() { - sudo rm -rf ${OMAHA_DATA_DIR}/rootfs-test.gz - sudo rm -rf ${OMAHA_DATA_DIR}/rootfs-release.gz - rm -rf ${OMAHA_DATA_DIR}/efi.gz - rm -rf ${OMAHA_DATA_DIR}/oem.gz - rm -rf ${OMAHA_DATA_DIR}/state.gz - if [ ! -f "${OMAHA_DATA_DIR}" ] ; then - mkdir -p ${OMAHA_DATA_DIR} + sudo rm -rf "${OMAHA_DATA_DIR}/rootfs-test.gz" + sudo rm -rf "${OMAHA_DATA_DIR}/rootfs-release.gz" + rm -rf "${OMAHA_DATA_DIR}/efi.gz" + rm -rf "${OMAHA_DATA_DIR}/oem.gz" + rm -rf "${OMAHA_DATA_DIR}/state.gz" + if [ !
-d "${OMAHA_DATA_DIR}" ]; then + mkdir -p "${OMAHA_DATA_DIR}" fi } @@ -102,12 +103,12 @@ prepare_dir() { compress_and_hash_memento_image() { local input_file="$1" - if has_part_tools; then - sudo "${SCRIPTS_DIR}/mk_memento_images.sh" "$input_file" 2 3 | + if [ -n "${IMAGE_IS_UNPACKED}" ]; then + sudo "${SCRIPTS_DIR}/mk_memento_images.sh" part_2 part_3 | grep hash | awk '{print $4}' else - sudo "${SCRIPTS_DIR}/mk_memento_images.sh" part_2 part_3 | + sudo "${SCRIPTS_DIR}/mk_memento_images.sh" "$input_file" 2 3 | grep hash | awk '{print $4}' fi @@ -119,12 +120,12 @@ compress_and_hash_file() { if [ -z "$input_file" ]; then # Runs as a pipe processor - gzip_compress -c -9 | + image_gzip_compress -c -9 | tee "$output_file" | openssl sha1 -binary | openssl base64 else - gzip_compress -c -9 "$input_file" | + image_gzip_compress -c -9 "$input_file" | tee "$output_file" | openssl sha1 -binary | openssl base64 @@ -136,30 +137,38 @@ compress_and_hash_partition() { local part_num="$2" local output_file="$3" - if has_part_tools; then - dump_partition "$input_file" "$part_num" | - compress_and_hash_file "" "$output_file" - else + if [ -n "${IMAGE_IS_UNPACKED}" ]; then compress_and_hash_file "part_$part_num" "$output_file" + else + image_dump_partition "$input_file" "$part_num" | + compress_and_hash_file "" "$output_file" fi } # Clean up stale config and data files. prepare_omaha +# Decide if we should unpack partition +if image_has_part_tools; then + IMAGE_IS_UNPACKED= +else + #TODO(hungte) Currently we run unpack_partitions.sh if part_tools are not + # found. If the format of unpack_partitions.sh is reliable, we can prevent + # creating temporary files. See image_part_offset for more information. + echo "WARNING: cannot find partition tools. Using unpack_partitions.sh." >&2 + IMAGE_IS_UNPACKED=1 +fi + # Get the release image. -pushd ${RELEASE_DIR} > /dev/null +pushd "${RELEASE_DIR}" >/dev/null echo "Generating omaha release image from ${FLAGS_release}" echo "Generating omaha factory image from ${FLAGS_factory}" echo "Output omaha image to ${OMAHA_DATA_DIR}" -echo "Output omaha config to ${OMAHA_DIR}/miniomaha.conf" +echo "Output omaha config to ${OMAHA_CONF}" prepare_dir -if ! has_part_tools; then - #TODO(hungte) we can still avoid running unpack_partitions.sh - # by $(cat unpack_partitions.sh | grep Label | sed "s/#//" | grep ${name}" | - # awk '{ print $1}') to fetch offset/size. +if [ -n "${IMAGE_IS_UNPACKED}" ]; then echo "Unpacking image ${RELEASE_IMAGE} ..." >&2 sudo ./unpack_partitions.sh "${RELEASE_IMAGE}" 2>/dev/null fi @@ -167,24 +176,24 @@ fi release_hash="$(compress_and_hash_memento_image "${RELEASE_IMAGE}")" sudo chmod a+rw update.gz mv update.gz rootfs-release.gz -mv rootfs-release.gz ${OMAHA_DATA_DIR} +mv rootfs-release.gz "${OMAHA_DATA_DIR}" echo "release: ${release_hash}" oem_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 8 "oem.gz")" -mv oem.gz ${OMAHA_DATA_DIR} +mv oem.gz "${OMAHA_DATA_DIR}" echo "oem: ${oem_hash}" efi_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 12 "efi.gz")" -mv efi.gz ${OMAHA_DATA_DIR} +mv efi.gz "${OMAHA_DATA_DIR}" echo "efi: ${efi_hash}" -popd > /dev/null +popd >/dev/null # Go to retrieve the factory test image. -pushd ${FACTORY_DIR} > /dev/null +pushd "${FACTORY_DIR}" >/dev/null prepare_dir -if ! has_part_tools; then +if [ -n "${IMAGE_IS_UNPACKED}" ]; then echo "Unpacking image ${FACTORY_IMAGE} ..." 
>&2 sudo ./unpack_partitions.sh "${FACTORY_IMAGE}" 2>/dev/null fi @@ -192,16 +201,16 @@ fi test_hash="$(compress_and_hash_memento_image "${FACTORY_IMAGE}")" sudo chmod a+rw update.gz mv update.gz rootfs-test.gz -mv rootfs-test.gz ${OMAHA_DATA_DIR} +mv rootfs-test.gz "${OMAHA_DATA_DIR}" echo "test: ${test_hash}" state_hash="$(compress_and_hash_partition "${FACTORY_IMAGE}" 1 "state.gz")" -mv state.gz ${OMAHA_DATA_DIR} +mv state.gz "${OMAHA_DATA_DIR}" echo "state: ${state_hash}" -popd > /dev/null +popd >/dev/null -if [ ! -z ${FLAGS_firmware_updater} ] ; then +if [ -n "${FLAGS_firmware_updater}" ]; then SHELLBALL="${FLAGS_firmware_updater}" if [ ! -f "$SHELLBALL" ]; then echo "Failed to find firmware updater: $SHELLBALL." @@ -209,70 +218,70 @@ if [ ! -z ${FLAGS_firmware_updater} ] ; then fi firmware_hash="$(compress_and_hash_file "$SHELLBALL" "firmware.gz")" - mv firmware.gz ${OMAHA_DATA_DIR} + mv firmware.gz "${OMAHA_DATA_DIR}" echo "firmware: ${firmware_hash}" fi # If the file does exist and we are using the subfolder flag we are going to # append another config. -if [ -n "${FLAGS_subfolder}" ] && \ - [ -f "${OMAHA_DIR}"/miniomaha.conf"" ] ; then +if [ -n "${FLAGS_subfolder}" ] && + [ -f "${OMAHA_CONF}" ]; then # Remove the ']' from the last line of the file so we can add another config. - while [ -s "${OMAHA_DIR}/miniomaha.conf" ]; do + while [ -s "${OMAHA_CONF}" ]; do # If the last line is null - if [ -z "$(tail -1 "${OMAHA_DIR}/miniomaha.conf")" ]; then - sed -i '$d' "${OMAHA_DIR}/miniomaha.conf" - elif [ "$(tail -1 "${OMAHA_DIR}/miniomaha.conf")" != ']' ]; then - sed -i '$d' "${OMAHA_DIR}/miniomaha.conf" + if [ -z "$(tail -1 "${OMAHA_CONF}")" ]; then + sed -i '$d' "${OMAHA_CONF}" + elif [ "$(tail -1 "${OMAHA_CONF}")" != ']' ]; then + sed -i '$d' "${OMAHA_CONF}" else break fi done # Remove the last ] - if [ "$(tail -1 "${OMAHA_DIR}/miniomaha.conf")" = ']' ]; then - sed -i '$d' "${OMAHA_DIR}/miniomaha.conf" + if [ "$(tail -1 "${OMAHA_CONF}")" = ']' ]; then + sed -i '$d' "${OMAHA_CONF}" fi # If the file is empty, create it from scratch - if [ ! -s "${OMAHA_DIR}/miniomaha.conf" ]; then - echo "config = [" > "${OMAHA_DIR}/miniomaha.conf" + if [ ! -s "${OMAHA_CONF}" ]; then + echo "config = [" >"${OMAHA_CONF}" fi else - echo "config = [" > "${OMAHA_DIR}/miniomaha.conf" + echo "config = [" >"${OMAHA_CONF}" fi -if [ -n "${FLAGS_subfolder}" ] ; then +if [ -n "${FLAGS_subfolder}" ]; then subfolder="${FLAGS_subfolder}/" fi echo -n "{ 'qual_ids': set([\"${FLAGS_board}\"]), - 'factory_image': '"${subfolder}"rootfs-test.gz', + 'factory_image': '${subfolder}rootfs-test.gz', 'factory_checksum': '${test_hash}', - 'release_image': '"${subfolder}"rootfs-release.gz', + 'release_image': '${subfolder}rootfs-release.gz', 'release_checksum': '${release_hash}', - 'oempartitionimg_image': '"${subfolder}"oem.gz', + 'oempartitionimg_image': '${subfolder}oem.gz', 'oempartitionimg_checksum': '${oem_hash}', - 'efipartitionimg_image': '"${subfolder}"efi.gz', + 'efipartitionimg_image': '${subfolder}efi.gz', 'efipartitionimg_checksum': '${efi_hash}', - 'stateimg_image': '"${subfolder}"state.gz', - 'stateimg_checksum': '${state_hash}'," >> ${OMAHA_DIR}/miniomaha.conf + 'stateimg_image': '${subfolder}state.gz', + 'stateimg_checksum': '${state_hash}'," >>"${OMAHA_CONF}" -if [ ! 
-z "${FLAGS_firmware_updater}" ] ; then +if [ -n "${FLAGS_firmware_updater}" ]; then echo -n " - 'firmware_image': '"${subfolder}"firmware.gz', - 'firmware_checksum': '${firmware_hash}'," >> ${OMAHA_DIR}/miniomaha.conf + 'firmware_image': '${subfolder}firmware.gz', + 'firmware_checksum': '${firmware_hash}'," >>"${OMAHA_CONF}" fi echo -n " }, ] -" >> ${OMAHA_DIR}/miniomaha.conf +" >>"${OMAHA_CONF}" -echo "The miniomaha server lives in src/platform/dev" -echo "to validate the configutarion, run:" -echo " python2.6 devserver.py --factory_config miniomaha.conf \ ---validate_factory_config" -echo "To run the server:" -echo " python2.6 devserver.py --factory_config miniomaha.conf" +echo "The miniomaha server lives in src/platform/dev. +To validate the configuration, run: + python2.6 devserver.py --factory_config miniomaha.conf \ + --validate_factory_config +To run the server: + python2.6 devserver.py --factory_config miniomaha.conf" diff --git a/mk_memento_images.sh b/mk_memento_images.sh index 415768fb4b..7a10745427 100755 --- a/mk_memento_images.sh +++ b/mk_memento_images.sh @@ -28,13 +28,13 @@ if [ "$CROS_GENERATE_UPDATE_PAYLOAD_CALLED" != "1" ]; then echo " Please run that script with --help to see how to use it." fi -if ! has_command pigz; then +if ! image_has_command pigz; then (echo "WARNING:" echo " Your system does not have pigz (parallel gzip) installed." echo " COMPRESSING WILL BE VERY SLOW. It is recommended to install pigz" - if has_command apt-get; then + if image_has_command apt-get; then echo " by 'sudo apt-get install pigz'." - elif has_command emerge; then + elif image_has_command emerge; then echo " by 'sudo emerge pigz'." fi) >&2 fi @@ -58,14 +58,14 @@ else # chromiumos_img kern_part_no rootfs_part_no KPART="$1" ROOT_PART="$1" - KPART_OFFSET="$(part_offset "$KPART" "$2")" || - err_die "cannot retieve kernel partition offset" - KPART_SECTORS="$(part_size "$KPART" "$2")" || - err_die "cannot retieve kernel partition size" - ROOT_OFFSET="$(part_offset "$ROOT_PART" "$3")" || - err_die "cannot retieve root partition offset" - ROOT_SECTORS="$(part_size "$ROOT_PART" "$3")" || - err_die "cannot retieve root partition size" + KPART_OFFSET="$(image_part_offset "$KPART" "$2")" || + image_die "cannot retrieve kernel partition offset" + KPART_SECTORS="$(image_part_size "$KPART" "$2")" || + image_die "cannot retrieve kernel partition size" + ROOT_OFFSET="$(image_part_offset "$ROOT_PART" "$3")" || + image_die "cannot retrieve root partition offset" + ROOT_SECTORS="$(image_part_size "$ROOT_PART" "$3")" || + image_die "cannot retrieve root partition size" KPART_SIZE=$((KPART_SECTORS * 512)) fi @@ -91,10 +91,10 @@ KPART_SIZE_SIGNATURE="$(printf "%016x" "$KPART_SIZE" | CS_AND_RET_CODES="$( (echo -en "$KPART_SIZE_SIGNATURE" echo "Compressing kernel..." >&2 - dump_partial_file "$KPART" "$KPART_OFFSET" "$KPART_SECTORS" + image_dump_partial_file "$KPART" "$KPART_OFFSET" "$KPART_SECTORS" echo "Compressing rootfs..." >&2 - dump_partial_file "$ROOT_PART" "$ROOT_OFFSET" "$ROOT_SECTORS") | - gzip_compress -9 -c | + image_dump_partial_file "$ROOT_PART" "$ROOT_OFFSET" "$ROOT_SECTORS") | + image_gzip_compress -c -9 | tee "$FINAL_OUT_FILE" | openssl sha1 -binary | openssl base64 | diff --git a/mod_for_factory_scripts/200patchInitScript b/mod_for_factory_scripts/200patchInitScript index 7caa95acb4..747f10b215 100755 --- a/mod_for_factory_scripts/200patchInitScript +++ b/mod_for_factory_scripts/200patchInitScript @@ -6,36 +6,26 @@ echo "Applying patch to init scripts."
-touch ${ROOT_FS_DIR}/root/.factory_test -patch -d ${ROOT_FS_DIR} -Np1 < ${ROOT_FS_DIR}/etc/init/factory.conf <"${ROOT_FS_DIR}/etc/init/factory.conf" < ${ROOT_FS_DIR}/etc/init/factory.conf <> /var/log/factory.log +date >>/var/log/factory.log if [ ! -e factory_started ]; then touch factory_started cp -f site_tests/suite_Factory/control . - ./bin/autotest control >> /var/log/factory.log 2>&1 + ./bin/autotest control >>/var/log/factory.log 2>&1 else - ./tools/autotest >> /var/log/factory.log 2>&1 + ./tools/autotest >>/var/log/factory.log 2>&1 fi end script EOF -cat > ${ROOT_FS_DIR}/etc/init/factorylog.conf <"${ROOT_FS_DIR}/etc/init/factorylog.conf" < /dev/tty3 +tail -n 48 -F /var/log/factory.log >/dev/tty3 end script EOF -patch -d ${ROOT_FS_DIR} -Np1 < "${GLOBAL_CONFIG}" <"${GLOBAL_CONFIG}" < /dev/null +if [ -d "${TEST_DIR}" ]; then + pushd "${TEST_DIR}" >/dev/null -# Remove the DB directories belonging to other boards. -KEEPDB="data_${BOARD}" -ls -d data_* 2> /dev/null | grep -v ${KEEPDB} | xargs rm -fr + # Remove the DB directories belonging to other boards. + KEEPDB="data_${BOARD}" + ls -d data_* 2>/dev/null | grep -v "${KEEPDB}" | xargs rm -fr -# Ensure there is DB directory in x86-agz and x86-mario. -if [ ! -d ${KEEPDB} -a \ - \( "${BOARD}" = "x86-agz" -o "${BOARD}" = "x86-mario" \) ]; then - echo "No component DB directory found at: ${KEEPDB}" + # Ensure there is DB directory in x86-agz and x86-mario. + if [ ! -d "${KEEPDB}" ] && + [ "${BOARD}" = "x86-agz" -o "${BOARD}" = "x86-mario" ]; then + echo "No component DB directory found at: ${KEEPDB}" + fi + + popd >/dev/null fi - -popd 1> /dev/null diff --git a/mod_for_factory_scripts/600customizeRelease b/mod_for_factory_scripts/600customizeRelease new file mode 100755 index 0000000000..0c8dd21a76 --- /dev/null +++ b/mod_for_factory_scripts/600customizeRelease @@ -0,0 +1,24 @@ +#!/bin/bash + +# Copyright (c) 2010 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +TEST_DIR="${ROOT_FS_DIR}/usr/local/autotest/site_tests/suite_Factory" + +if [ -d "${TEST_DIR}" ]; then + pushd "${TEST_DIR}" >/dev/null + + # If there is a customize_$BOARD script for this board, let's run it. + # This allows adding settings for specific factories or systems. + CUSTOMIZE="customize_${BOARD}" + if [ -e "${CUSTOMIZE}" ]; then + echo "Running ${CUSTOMIZE}" + "./${CUSTOMIZE}" + fi + + # We don't need the customize script anymore. + rm -f customize_* + + popd >/dev/null +fi diff --git a/mod_for_factory_scripts/factory_setup.sh b/mod_for_factory_scripts/factory_setup.sh index 97959eea34..9d26bbb9cf 100755 --- a/mod_for_factory_scripts/factory_setup.sh +++ b/mod_for_factory_scripts/factory_setup.sh @@ -5,9 +5,11 @@ # found in the LICENSE file. echo "Modifying image for factory test..." +set -e -for SCRIPT in \ - ${GCLIENT_ROOT}/src/scripts/mod_for_factory_scripts/[0-9][0-9][0-9]*[!$~] +SCRIPT_BASE="${GCLIENT_ROOT}/src/scripts/mod_for_factory_scripts/" +for SCRIPT in "${SCRIPT_BASE}"[0-9][0-9][0-9]*[!$~] do - ${SCRIPT} + echo "Apply $(basename "${SCRIPT}")..." + bash -e "${SCRIPT}" done diff --git a/mod_image_for_recovery.sh b/mod_image_for_recovery.sh index 3e4a6f9780..e6cc8859a8 100755 --- a/mod_image_for_recovery.sh +++ b/mod_image_for_recovery.sh @@ -284,6 +284,10 @@ install_recovery_kernel() { count=$kern_a_size \ conv=notrunc + # Set the 'Success' flag to 1 (to prevent the firmware from updating + # the 'Tries' flag). 
+ sudo $GPT add -i 2 -S 1 "$RECOVERY_IMAGE" + # Repeat for the legacy bioses. # Replace vmlinuz.A with the recovery version local sysroot="${FLAGS_build_root}/${FLAGS_board}" diff --git a/mod_image_for_test.sh b/mod_image_for_test.sh index 32e19ca4a5..97aa5e0cc7 100755 --- a/mod_image_for_test.sh +++ b/mod_image_for_test.sh @@ -144,6 +144,7 @@ install_autotest() { --exclude=site_tests/platform_StackProtector \ --exclude=deps/chrome_test \ --exclude=site_tests/desktopui_BrowserTest \ + --exclude=site_tests/desktopui_PageCyclerTests \ --exclude=site_tests/desktopui_UITest \ --exclude=.svn \ ${AUTOTEST_SRC}/client/* "${stateful_root}/${autotest_client}" diff --git a/mount_gpt_image.sh b/mount_gpt_image.sh index 3156c1586e..1c95a91eb1 100755 --- a/mount_gpt_image.sh +++ b/mount_gpt_image.sh @@ -20,6 +20,8 @@ DEFINE_string board "$DEFAULT_BOARD" \ "The board for which the image was built." b DEFINE_boolean read_only $FLAGS_FALSE \ "Mount in read only mode -- skips stateful items." +DEFINE_boolean safe $FLAGS_FALSE \ + "Mount rootfs in read only mode." DEFINE_boolean unmount $FLAGS_FALSE \ "Unmount previously mounted dir." u DEFINE_string from "/dev/sdc" \ @@ -64,9 +66,12 @@ function unmount_image() { function get_usb_partitions() { local ro_flag="" + local safe_flag="" [ ${FLAGS_read_only} -eq ${FLAGS_TRUE} ] && ro_flag="-o ro" + [ ${FLAGS_read_only} -eq ${FLAGS_TRUE} -o \ + ${FLAGS_safe} -eq ${FLAGS_TRUE} ] && safe_flag="-o ro -t ext2" - sudo mount ${ro_flag} "${FLAGS_from}3" "${FLAGS_rootfs_mountpt}" + sudo mount ${safe_flag} "${FLAGS_from}3" "${FLAGS_rootfs_mountpt}" sudo mount ${ro_flag} "${FLAGS_from}1" "${FLAGS_stateful_mountpt}" if [[ -n "${FLAGS_esp_mountpt}" ]]; then sudo mount ${ro_flag} "${FLAGS_from}12" "${FLAGS_esp_mountpt}" @@ -79,8 +84,15 @@ function get_gpt_partitions() { # Mount the rootfs partition using a loopback device. local offset=$(partoffset "${FLAGS_from}/${filename}" 3) local ro_flag="" + local safe_flag="" + if [ ${FLAGS_read_only} -eq ${FLAGS_TRUE} ]; then ro_flag="-o ro" + fi + + if [ ${FLAGS_read_only} -eq ${FLAGS_TRUE} -o \ + ${FLAGS_safe} -eq ${FLAGS_TRUE} ]; then + safe_flag="-o ro -t ext2" else # Make sure any callers can actually mount and modify the fs # if desired. @@ -88,7 +100,7 @@ function get_gpt_partitions() { enable_rw_mount "${FLAGS_from}/${filename}" "$(( offset * 512 ))" fi - sudo mount ${ro_flag} -o loop,offset=$(( offset * 512 )) \ + sudo mount ${safe_flag} -o loop,offset=$(( offset * 512 )) \ "${FLAGS_from}/${filename}" "${FLAGS_rootfs_mountpt}" # Mount the stateful partition using a loopback device. diff --git a/run_remote_tests.sh b/run_remote_tests.sh index babbbdceb0..0c644d46f1 100755 --- a/run_remote_tests.sh +++ b/run_remote_tests.sh @@ -271,25 +271,24 @@ function main() { info "Running chrome autotest ${control_file}" fi - export AUTOSERV_TEST_ARGS="${FLAGS_args}" - export AUTOSERV_ARGS="-m ${FLAGS_remote} \ - --ssh-port ${FLAGS_ssh_port} \ + local autoserv_test_args="${FLAGS_args}" + if [ -n "${autoserv_test_args}" ]; then + autoserv_test_args="-a \"${autoserv_test_args}\"" + fi + local autoserv_args="-m ${FLAGS_remote} --ssh-port ${FLAGS_ssh_port} \ ${option} ${control_file} -r ${results_dir} ${verbose}" if [ ${FLAGS_build} -eq ${FLAGS_FALSE} ]; then cat > "${TMP}/run_test.sh" <&2 else cp "${BUILD_DIR}/environment" "${TMP}/run_test.sh" GRAPHICS_BACKEND=${GRAPHICS_BACKEND:-OPENGL} - if [ -n "${AUTOSERV_TEST_ARGS}" ]; then - AUTOSERV_TEST_ARGS="-a \"${AUTOSERV_TEST_ARGS}\"" - fi cat >> "${TMP}/run_test.sh" <