Mirror of https://github.com/flatcar/scripts.git (synced 2025-08-15 08:56:58 +02:00)

commit 4b6485133f
Merge branch 'master' of ssh://gitrw.chromium.org:9222/crosutils
(deleted file, 74 lines)
@@ -1,74 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# This script is intended as a wrapper to execute autotest tests for a given
-# board.
-
-# Load common constants. This should be the first executable line.
-# The path to common.sh should be relative to your script's location.
-. "$(dirname "$0")/common.sh"
-
-# Script must be run inside the chroot
-restart_in_chroot_if_needed $*
-get_default_board
-
-DEFINE_string board "${DEFAULT_BOARD}" \
-  "The board to run tests for."
-
-FLAGS_HELP="usage: $0 <flags>"
-FLAGS "$@" || exit 1
-eval set -- "${FLAGS_ARGV}"
-
-# Define a directory which will not be cleaned by portage automatically. So we
-# could achieve incremental build between two autoserv runs.
-BUILD_RUNTIME="/build/${FLAGS_board}/usr/local/autotest/"
-
-# Hack: set the CHROMEOS_ROOT variable by hand here
-CHROMEOS_ROOT=/home/${USER}/trunk/
-
-# Ensure the configures run by autotest pick up the right config.site
-CONFIG_SITE=/usr/share/config.site
-
-[ -z "${FLAGS_board}" ] && \
-  die "You must specify --board="
-
-function setup_ssh() {
-  eval $(ssh-agent) > /dev/null
-  # TODO(jrbarnette): This is a temporary hack, slated for removal
-  # before it was ever created.  It's a bug, and you should fix it
-  # right away!
-  chmod 400 \
-    ${CHROMEOS_ROOT}/src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa
-  ssh-add \
-    ${CHROMEOS_ROOT}/src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa
-}
-
-function teardown_ssh() {
-  ssh-agent -k > /dev/null
-}
-
-src_test() {
-  # TODO: These places currently need to be writeable but shouldn't be
-  sudo chmod a+w ${BUILD_RUNTIME}/server/{tests,site_tests}
-
-  setup_ssh
-  cd "${BUILD_RUNTIME}"
-
-  local args=()
-  if [[ -n ${AUTOSERV_TEST_ARGS} ]]; then
-    args=("-a" "${AUTOSERV_TEST_ARGS}")
-  fi
-
-  local timestamp=$(date +%Y-%m-%d-%H.%M.%S)
-
-  # Do not use sudo, it'll unset all your environment
-  LOGNAME=${USER} ./server/autoserv -r /tmp/results.${timestamp} \
-    ${AUTOSERV_ARGS} "${args[@]}"
-
-  teardown_ssh
-}
-
-src_test

bin/cbuildbot.py (167 lines changed)
@@ -22,6 +22,7 @@ from cros_build_lib import (Die, Info, ReinterpretPathForChroot, RunCommand,
                             Warning)

 _DEFAULT_RETRIES = 3
+_PACKAGE_FILE = '%(buildroot)s/src/scripts/cbuildbot_package.list'
 ARCHIVE_BASE = '/var/www/archive'
 ARCHIVE_COUNT = 10

@@ -44,27 +45,21 @@ def MakeDir(path, parents=False):
     raise


-def RepoSync(buildroot, rw_checkout=False, retries=_DEFAULT_RETRIES):
+def RepoSync(buildroot, retries=_DEFAULT_RETRIES):
   """Uses repo to checkout the source code.

   Keyword arguments:
-  rw_checkout -- Reconfigure repo after sync'ing to read-write.
   retries -- Number of retries to try before failing on the sync.

   """
   while retries > 0:
     try:
       # The --trace option ensures that repo shows the output from git. This
       # is needed so that the buildbot can kill us if git is not making
       # progress.
-      RunCommand(['repo', '--trace', 'sync'], cwd=buildroot)
-      if rw_checkout:
-        # Always re-run in case of new git repos or repo sync
-        # failed in a previous run because of a forced Stop Build.
-        RunCommand(['repo', 'forall', '-c', 'git', 'config',
-                    'url.ssh://git@gitrw.chromium.org:9222.pushinsteadof',
-                    'http://git.chromium.org/git'], cwd=buildroot)
+      RunCommand(['repo', 'forall', '-c', 'git', 'config',
+                  'url.ssh://git@gitrw.chromium.org:9222.insteadof',
+                  'http://git.chromium.org/git'], cwd=buildroot)
+      RunCommand(['repo', '--trace', 'sync'], cwd=buildroot)
       retries = 0
     except:
       retries -= 1
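Aside: the new RepoSync keeps the simple retry loop shown above. A minimal sketch of that pattern, using plain subprocess rather than the real cros_build_lib.RunCommand (the function name here is a hypothetical stand-in):

    import subprocess

    def sync_with_retries(buildroot, retries=3):
        # Mirrors the loop in RepoSync: retry the sync until it succeeds
        # or the retry budget is exhausted.
        while retries > 0:
            try:
                subprocess.check_call(['repo', '--trace', 'sync'],
                                      cwd=buildroot)
                retries = 0  # success: leave the loop
            except subprocess.CalledProcessError:
                retries -= 1
        # Note: like the original, exhausting all retries falls through
        # silently rather than raising.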
@@ -202,10 +197,26 @@ def _UprevFromRevisionList(buildroot, tracking_branch, revision_list, board,
               '--tracking_branch=%s' % tracking_branch,
               '--overlays=%s' % ':'.join(chroot_overlays),
               '--packages=%s' % ':'.join(packages),
+              '--drop_file=%s' % ReinterpretPathForChroot(_PACKAGE_FILE %
+                                                          {'buildroot': buildroot}),
               'commit'],
              cwd=cwd, enter_chroot=True)


+def _MarkChromeAsStable(buildroot, tracking_branch, chrome_rev):
+  """Returns the portage atom for the revved chrome ebuild - see man emerge."""
+  cwd = os.path.join(buildroot, 'src', 'scripts')
+  portage_atom_string = RunCommand(['bin/cros_mark_chrome_as_stable',
+                                    '--tracking_branch=%s' % tracking_branch,
+                                    chrome_rev], cwd=cwd, redirect_stdout=True,
+                                   enter_chroot=True).rstrip()
+  if not portage_atom_string:
+    Info('Found nothing to rev.')
+    return None
+  else:
+    return portage_atom_string.split('=')[1]
+
+
 def _UprevAllPackages(buildroot, tracking_branch, board, overlays):
   """Uprevs all packages that have been updated since last uprev."""
   cwd = os.path.join(buildroot, 'src', 'scripts')
@@ -213,7 +224,10 @@ def _UprevAllPackages(buildroot, tracking_branch, board, overlays):
   RunCommand(['./cros_mark_as_stable', '--all',
               '--board=%s' % board,
               '--overlays=%s' % ':'.join(chroot_overlays),
-              '--tracking_branch=%s' % tracking_branch, 'commit'],
+              '--tracking_branch=%s' % tracking_branch,
+              '--drop_file=%s' % ReinterpretPathForChroot(_PACKAGE_FILE %
+                                                          {'buildroot': buildroot}),
+              'commit'],
              cwd=cwd, enter_chroot=True)

@@ -267,7 +281,7 @@ def _PreFlightRinse(buildroot, board, tracking_branch, overlays):
   RunCommand(['sudo', 'killall', 'kvm'], error_ok=True)


-def _FullCheckout(buildroot, tracking_branch, rw_checkout=True,
+def _FullCheckout(buildroot, tracking_branch,
                   retries=_DEFAULT_RETRIES,
                   url='http://git.chromium.org/git/manifest'):
   """Performs a full checkout and clobbers any previous checkouts."""
@@ -277,13 +291,12 @@ def _FullCheckout(buildroot, tracking_branch, rw_checkout=True,
   RunCommand(['repo', 'init', '-u',
               url, '-b',
               '%s' % branch[-1]], cwd=buildroot, input='\n\ny\n')
-  RepoSync(buildroot, rw_checkout, retries)
+  RepoSync(buildroot, retries)


-def _IncrementalCheckout(buildroot, rw_checkout=True,
-                         retries=_DEFAULT_RETRIES):
+def _IncrementalCheckout(buildroot, retries=_DEFAULT_RETRIES):
   """Performs a checkout without clobbering previous checkout."""
-  RepoSync(buildroot, rw_checkout, retries)
+  RepoSync(buildroot, retries)


 def _MakeChroot(buildroot):
@@ -305,6 +318,13 @@ def _Build(buildroot):
   RunCommand(['./build_packages'], cwd=cwd, enter_chroot=True)


+def _BuildChrome(buildroot, board, chrome_atom_to_build):
+  """Wrapper for emerge call to build Chrome."""
+  cwd = os.path.join(buildroot, 'src', 'scripts')
+  RunCommand(['emerge-%s' % board, '=%s' % chrome_atom_to_build],
+             cwd=cwd, enter_chroot=True)
+
+
 def _EnableLocalAccount(buildroot):
   cwd = os.path.join(buildroot, 'src', 'scripts')
   # Set local account for test images.
@@ -333,7 +353,10 @@ def _BuildVMImageForTesting(buildroot):

 def _RunUnitTests(buildroot):
   cwd = os.path.join(buildroot, 'src', 'scripts')
-  RunCommand(['./cros_run_unit_tests'], cwd=cwd, enter_chroot=True)
+  RunCommand(['./cros_run_unit_tests',
+              '--package_file=%s' % ReinterpretPathForChroot(_PACKAGE_FILE %
+                                                             {'buildroot': buildroot}),
+             ], cwd=cwd, enter_chroot=True)


 def _RunSmokeSuite(buildroot, results_dir):
@@ -386,59 +409,56 @@ def _UprevPackages(buildroot, tracking_branch, revisionfile, board, overlays):
     _UprevAllPackages(buildroot, tracking_branch, board, overlays)


-def _UprevPush(buildroot, tracking_branch, board, overlays):
+def _UprevPush(buildroot, tracking_branch, board, overlays, dryrun):
   """Pushes uprev changes to the main line."""
   cwd = os.path.join(buildroot, 'src', 'scripts')
-  RunCommand(['./cros_mark_as_stable', '--srcroot=..',
-              '--board=%s' % board,
-              '--overlays=%s' % ':'.join(overlays),
-              '--tracking_branch=%s' % tracking_branch,
-              '--push_options=--bypass-hooks -f', 'push'],
-             cwd=cwd)
+  cmd = ['./cros_mark_as_stable',
+         '--srcroot=%s' % os.path.join(buildroot, 'src'),
+         '--board=%s' % board,
+         '--overlays=%s' % ':'.join(overlays),
+         '--tracking_branch=%s' % tracking_branch
+        ]
+  if dryrun:
+    cmd.append('--dryrun')
+
+  cmd.append('push')
+  RunCommand(cmd, cwd=cwd)


-def _ArchiveTestResults(buildroot, board, archive_dir, test_results_dir):
-  """Archives the test results into the www dir for later use.
+def _ArchiveTestResults(buildroot, board, test_results_dir,
+                        gsutil, archive_dir, acl):
+  """Archives the test results into Google Storage

-  Takes the results from the test_results_dir and dumps them into the archive
-  dir specified. This also archives the last qemu image.
+  Takes the results from the test_results_dir and the last qemu image and
+  uploads them to Google Storage.

+  Arguments:
+    buildroot: Root directory where build occurs
     board: Board to find the qemu image.
-    archive_dir: Path from ARCHIVE_BASE to store image.
-    test_results_dir: Path from buildroot/chroot to find test results. This must
-      a subdir of /tmp.
+    test_results_dir: Path from buildroot/chroot to find test results.
+      This must a subdir of /tmp.
+    gsutil: Location of gsutil
+    archive_dir: Google Storage path to store the archive
+    acl: ACL to set on archive in Google Storage
   """
+  num_gsutil_retries = 5
   test_results_dir = test_results_dir.lstrip('/')
-  if not os.path.exists(ARCHIVE_BASE):
-    os.makedirs(ARCHIVE_BASE)
-  else:
-    dir_entries = os.listdir(ARCHIVE_BASE)
-    if len(dir_entries) >= ARCHIVE_COUNT:
-      oldest_dirs = heapq.nsmallest((len(dir_entries) - ARCHIVE_COUNT) + 1,
-          [os.path.join(ARCHIVE_BASE, filename) for filename in dir_entries],
-          key=lambda fn: os.stat(fn).st_mtime)
-      Info('Removing archive dirs %s' % oldest_dirs)
-      for oldest_dir in oldest_dirs:
-        shutil.rmtree(os.path.join(ARCHIVE_BASE, oldest_dir))
-
-  archive_target = os.path.join(ARCHIVE_BASE, str(archive_dir))
-  if os.path.exists(archive_target):
-    shutil.rmtree(archive_target)
-
   results_path = os.path.join(buildroot, 'chroot', test_results_dir)
   RunCommand(['sudo', 'chmod', '-R', '+r', results_path])
   try:
-    shutil.copytree(results_path, archive_target)
-  except:
-    Warning('Some files could not be copied')
+    # gsutil has the ability to resume an upload when the command is retried
+    RunCommand([gsutil, 'cp', '-R', results_path, archive_dir],
+               num_retries=num_gsutil_retries)
+    RunCommand([gsutil, 'setacl', acl, archive_dir])

-  image_name = 'chromiumos_qemu_image.bin'
-  image_path = os.path.join(buildroot, 'src', 'build', 'images', board,
-                            'latest', image_name)
-  RunCommand(['gzip', '-f', '--fast', image_path])
-  shutil.copyfile(image_path + '.gz', os.path.join(archive_target,
-                                                   image_name + '.gz'))
+    image_name = 'chromiumos_qemu_image.bin'
+    image_path = os.path.join(buildroot, 'src', 'build', 'images', board,
+                              'latest', image_name)
+    RunCommand(['gzip', '-f', '--fast', image_path])
+    RunCommand([gsutil, 'cp', image_path + '.gz', archive_dir],
+               num_retries=num_gsutil_retries)
+  except Exception, e:
+    Warning('Could not archive test results (error=%s)' % str(e))


 def _GetConfig(config_name):
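Aside: _UprevPush now assembles its argument vector conditionally before a single RunCommand call. A minimal sketch of that pattern, grounded in the hunk above (the helper name is hypothetical):

    def build_push_command(srcroot, board, overlays, tracking_branch, dryrun):
        # Optional flags are appended before the positional 'push'
        # subcommand, matching the order cros_mark_as_stable expects.
        cmd = ['./cros_mark_as_stable',
               '--srcroot=%s' % srcroot,
               '--board=%s' % board,
               '--overlays=%s' % ':'.join(overlays),
               '--tracking_branch=%s' % tracking_branch]
        if dryrun:
            cmd.append('--dryrun')
        cmd.append('push')
        return cmd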
@@ -495,6 +515,10 @@ def main():
                     help='root directory where build occurs', default=".")
   parser.add_option('-n', '--buildnumber',
                     help='build number', type='int', default=0)
+  parser.add_option('--chrome_rev', default=None, type='string',
+                    dest='chrome_rev',
+                    help=('Chrome_rev of type [tot|latest_release|'
+                          'sticky_release]'))
   parser.add_option('-f', '--revisionfile',
                     help='file where new revisions are stored')
   parser.add_option('--clobber', action='store_true', dest='clobber',
@@ -503,17 +527,29 @@ def main():
   parser.add_option('--debug', action='store_true', dest='debug',
                     default=False,
                     help='Override some options to run as a developer.')
+  parser.add_option('--nosync', action='store_false', dest='sync',
+                    default=True,
+                    help="Don't sync before building.")
+  parser.add_option('--notests', action='store_false', dest='tests',
+                    default=True,
+                    help='Override values from buildconfig and run no tests.')
   parser.add_option('-t', '--tracking-branch', dest='tracking_branch',
                     default='cros/master', help='Run the buildbot on a branch')
   parser.add_option('-u', '--url', dest='url',
                     default='http://git.chromium.org/git/manifest',
                     help='Run the buildbot on internal manifest')
+  parser.add_option('-g', '--gsutil', default='', help='Location of gsutil')
+  parser.add_option('-c', '--gsutil_archive', default='',
+                    help='Datastore archive location')
+  parser.add_option('-a', '--acl', default='private',
+                    help='ACL to set on GSD archives')

   (options, args) = parser.parse_args()

   buildroot = os.path.abspath(options.buildroot)
   revisionfile = options.revisionfile
   tracking_branch = options.tracking_branch
+  chrome_atom_to_build = None

   if len(args) >= 1:
     buildconfig = _GetConfig(args[-1])
@@ -527,6 +563,7 @@ def main():

   try:
     _PreFlightRinse(buildroot, buildconfig['board'], tracking_branch, overlays)
+    if options.sync:
       if options.clobber or not os.path.isdir(buildroot):
         _FullCheckout(buildroot, tracking_branch, url=options.url)
       else:
@@ -546,41 +583,55 @@ def main():
       if not os.path.isdir(boardpath):
         _SetupBoard(buildroot, board=buildconfig['board'])

-    if buildconfig['uprev']:
+    # Perform uprev. If chrome_uprev is set, rev Chrome ebuilds.
+    if options.chrome_rev:
+      chrome_atom_to_build = _MarkChromeAsStable(buildroot, tracking_branch,
+                                                 options.chrome_rev)
+    elif buildconfig['uprev']:
       _UprevPackages(buildroot, tracking_branch, revisionfile,
                      buildconfig['board'], overlays)

     _EnableLocalAccount(buildroot)
-    _Build(buildroot)
-    if buildconfig['unittests']:
+    # Doesn't rebuild without acquiring more source.
+    if options.sync:
+      _Build(buildroot)
+
+    if chrome_atom_to_build:
+      _BuildChrome(buildroot, buildconfig['board'], chrome_atom_to_build)
+
+    if buildconfig['unittests'] and options.tests:
       _RunUnitTests(buildroot)

     _BuildImage(buildroot)

-    if buildconfig['smoke_bvt']:
+    if buildconfig['smoke_bvt'] and options.tests:
       _BuildVMImageForTesting(buildroot)
       test_results_dir = '/tmp/run_remote_tests.%s' % options.buildnumber
       try:
         _RunSmokeSuite(buildroot, test_results_dir)
       finally:
-        _ArchiveTestResults(buildroot, buildconfig['board'],
-                            archive_dir=options.buildnumber,
-                            test_results_dir=test_results_dir)
+        if not options.debug:
+          archive_full_path=os.path.join(options.gsutil_archive,
+                                         str(options.buildnumber))
+          _ArchiveTestResults(buildroot, buildconfig['board'],
+                              test_results_dir=test_results_dir,
+                              gsutil=options.gsutil,
+                              archive_dir=archive_full_path,
+                              acl=options.acl)

     if buildconfig['uprev']:
       # Don't push changes for developers.
-      if not options.debug:
       if buildconfig['master']:
         # Master bot needs to check if the other slaves completed.
         if cbuildbot_comm.HaveSlavesCompleted(config):
           _UprevPush(buildroot, tracking_branch, buildconfig['board'],
-                     overlays)
+                     overlays, options.debug)
         else:
           Die('CBUILDBOT - One of the slaves has failed!!!')

       else:
         # Publish my status to the master if its expecting it.
-        if buildconfig['important']:
+        if buildconfig['important'] and not options.debug:
           cbuildbot_comm.PublishStatus(cbuildbot_comm.STATUS_BUILD_COMPLETE)

   except:
@@ -112,52 +112,32 @@ class CBuildBotTest(mox.MoxTestBase):
   #   self.mox.VerifyAll()

   def testArchiveTestResults(self):
-    """Test if we can archive the latest results dir as well as clean up."""
-    self.mox.StubOutWithMock(os.path, 'exists')
-    self.mox.StubOutWithMock(os, 'listdir')
-    self.mox.StubOutWithMock(os, 'stat')
-    self.mox.StubOutWithMock(shutil, 'rmtree')
-    self.mox.StubOutWithMock(shutil, 'copytree')
-    self.mox.StubOutWithMock(shutil, 'copyfile')
-
-    # Create mock stats so that file2 is older than file1.
-    dir_listing = ['file1', 'file2']
-    stat1 = self.mox.CreateMock(posix.stat_result)
-    stat2 = self.mox.CreateMock(posix.stat_result)
-    stat1.st_mtime = 99999
-    stat2.st_mtime = 10000
+    """Test if we can archive the latest results dir to Google Storage."""

     # Set vars for call.
     buildroot = '/fake_dir'
-    test_results_dir = 'fake_results_dir'
-    archive_dir = 1234
     board = 'fake-board'
+    test_results_dir = 'fake_results_dir'
+    gsutil_path='/fake/gsutil/path'
+    archive_dir = 1234
+    acl = 'fake_acl'
+    num_retries = 5

-    # Expected calls.
-    os.path.exists(cbuildbot.ARCHIVE_BASE).AndReturn(True)
-    os.listdir(os.path.join(cbuildbot.ARCHIVE_BASE)).AndReturn(dir_listing)
-    os.stat(os.path.join(cbuildbot.ARCHIVE_BASE, 'file1')).AndReturn(stat1)
-    os.stat(os.path.join(cbuildbot.ARCHIVE_BASE, 'file2')).AndReturn(stat2)
-    # Should remove the oldest path.
-    shutil.rmtree(os.path.join(cbuildbot.ARCHIVE_BASE, 'file2'))

     # Convenience variables to make archive easier to understand.
     path_to_results = os.path.join(buildroot, 'chroot', test_results_dir)
-    path_to_archive_dir = os.path.join(cbuildbot.ARCHIVE_BASE, str(archive_dir))
     path_to_image = os.path.join(buildroot, 'src', 'build', 'images', board,
                                  'latest', 'chromiumos_qemu_image.bin')
-    # Archive logic
-    os.path.exists(path_to_archive_dir).AndReturn(False)
     cbuildbot.RunCommand(['sudo', 'chmod', '-R', '+r', path_to_results])
-    shutil.copytree(path_to_results, path_to_archive_dir)
+    cbuildbot.RunCommand([gsutil_path, 'cp', '-R', path_to_results,
+                          archive_dir], num_retries=num_retries)
+    cbuildbot.RunCommand([gsutil_path, 'setacl', acl, archive_dir])
     cbuildbot.RunCommand(['gzip', '-f', '--fast', path_to_image])
-    shutil.copyfile(path_to_image + '.gz', os.path.join(
-        path_to_archive_dir, 'chromiumos_qemu_image.bin.gz'))
+    cbuildbot.RunCommand([gsutil_path, 'cp', path_to_image + '.gz',
+                          archive_dir], num_retries=num_retries)

     self.mox.ReplayAll()
-    cbuildbot.ARCHIVE_COUNT = 2 # Set equal to list size so we force clean up.
-    cbuildbot._ArchiveTestResults(buildroot, board, archive_dir,
-                                  test_results_dir)
+    cbuildbot._ArchiveTestResults(buildroot, board, test_results_dir,
+                                  gsutil_path, archive_dir, acl)
     self.mox.VerifyAll()

   # TODO(sosa): Remove once we un-comment above.
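Aside: the rewritten test follows mox's record/replay/verify cycle: expected calls are recorded on mocks, ReplayAll switches to replay mode, and VerifyAll asserts every expectation was met. A minimal self-contained sketch, assuming the mox package is installed:

    import mox

    class ExampleTest(mox.MoxTestBase):
        def testRecordReplayVerify(self):
            worker = self.mox.CreateMockAnything()
            worker.do_work('x').AndReturn(42)  # record the expected call
            self.mox.ReplayAll()               # switch to replay mode
            self.assertEqual(worker.do_work('x'), 42)
            self.mox.VerifyAll()               # all expectations satisfied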
@@ -274,14 +274,19 @@ class VirtualAUTest(unittest.TestCase, AUTest):
     """Runs vm smoke suite to verify image."""
     # image_to_live already verifies lsb-release matching. This is just
     # for additional steps.
-    output = RunCommand(['%s/cros_run_vm_test' % self.crosutilsbin,
+    commandWithArgs = ['%s/cros_run_vm_test' % self.crosutilsbin,
                         '--image_path=%s' % self.vm_image_path,
                         '--snapshot',
                         '--persist',
-                        vm_graphics_flag,
                         '--kvm_pid=%s' % _KVM_PID_FILE,
-                        '--test_case=%s' % _VERIFY_SUITE,
-                        ], error_ok=True, enter_chroot=False,
+                        _VERIFY_SUITE,
+                       ]
+
+    if vm_graphics_flag:
+      commandWithArgs.append(vm_graphics_flag)
+
+    output = RunCommand(commandWithArgs, error_ok=True, enter_chroot=False,
                         redirect_stdout=True)
     return self.CommonVerifyImage(self, output, percent_required_to_pass)
@@ -30,10 +30,11 @@ from xml.dom import minidom

 # This is the default filename within the image directory to load updates from
 DEFAULT_IMAGE_NAME = 'chromiumos_image.bin'
+DEFAULT_IMAGE_NAME_TEST = 'chromiumos_test_image.bin'

 # The filenames we provide to clients to pull updates
 UPDATE_FILENAME = 'update.gz'
-STATEFUL_FILENAME = 'stateful.image.gz'
+STATEFUL_FILENAME = 'stateful.tgz'

 # How long do we wait for the server to start before launching client
 SERVER_STARTUP_WAIT = 1
@@ -46,8 +47,12 @@ class Command(object):
     self.env = env

   def RunPipe(self, pipeline, infile=None, outfile=None,
-              capture=False, oneline=False):
-    """Perform a command pipeline, with optional input/output filenames."""
+              capture=False, oneline=False, hide_stderr=False):
+    """
+    Perform a command pipeline, with optional input/output filenames.
+
+    hide_stderr     Don't allow output of stderr (default False)
+    """

     last_pipe = None
     while pipeline:
@@ -61,8 +66,10 @@ class Command(object):
         kwargs['stdout'] = subprocess.PIPE
       elif outfile:
         kwargs['stdout'] = open(outfile, 'wb')
+      if hide_stderr:
+        kwargs['stderr'] = open('/dev/null', 'wb')

-      self.env.Info('Running: %s' % ' '.join(cmd))
+      self.env.Debug('Running: %s' % ' '.join(cmd))
       last_pipe = subprocess.Popen(cmd, **kwargs)

       if capture:
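Aside: hide_stderr works by handing the child process a /dev/null file object for its stderr stream. A minimal sketch of the same idea with plain subprocess:

    import os
    import subprocess

    def run_quiet(cmd):
        # Suppress the child's stderr, as RunPipe does when hide_stderr=True.
        with open(os.devnull, 'wb') as devnull:
            return subprocess.call(cmd, stderr=devnull)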
@@ -139,7 +146,11 @@ class CrosEnv(object):
   REBOOT_START_WAIT = 5
   REBOOT_WAIT_TIME = 60

-  def __init__(self, verbose=False):
+  SILENT = 0
+  INFO = 1
+  DEBUG = 2
+
+  def __init__(self, verbose=SILENT):
     self.cros_root = os.path.dirname(os.path.abspath(sys.argv[0]))
     parent = os.path.dirname(self.cros_root)
     if os.path.exists(os.path.join(parent, 'chromeos-common.sh')):
@@ -147,6 +158,13 @@ class CrosEnv(object):
     self.cmd = Command(self)
     self.verbose = verbose

+    # do we have the pv progress tool? (sudo apt-get install pv)
+    self.have_pv = True
+    try:
+      self.cmd.Output('pv', '--help')
+    except OSError:
+      self.have_pv = False
+
   def Error(self, msg):
     print >> sys.stderr, 'ERROR: %s' % msg
@@ -156,9 +174,13 @@ class CrosEnv(object):
     sys.exit(1)

   def Info(self, msg):
-    if self.verbose:
+    if self.verbose >= CrosEnv.INFO:
       print 'INFO: %s' % msg

+  def Debug(self, msg):
+    if self.verbose >= CrosEnv.DEBUG:
+      print 'DEBUG: %s' % msg
+
   def CrosUtilsPath(self, filename):
     return os.path.join(self.cros_root, filename)
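Aside: the SILENT/INFO/DEBUG constants turn the old boolean flag into ordered levels: a message prints only when the configured verbosity is at least the message's level. A minimal sketch of the scheme:

    SILENT, INFO, DEBUG = 0, 1, 2

    class Logger(object):
        def __init__(self, verbose=SILENT):
            self.verbose = verbose

        def info(self, msg):
            if self.verbose >= INFO:
                print('INFO: %s' % msg)

        def debug(self, msg):
            if self.verbose >= DEBUG:
                print('DEBUG: %s' % msg)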
@@ -192,23 +214,16 @@ class CrosEnv(object):

     return True

-  def BuildStateful(self, src, dst):
+  def BuildStateful(self, src, dst_dir, dst_file):
     """Create a stateful partition update image."""

-    if self.GetCached(src, dst):
-      self.Info('Using cached stateful %s' % dst)
+    if self.GetCached(src, dst_file):
+      self.Info('Using cached stateful %s' % dst_file)
       return True

-    cgpt = self.ChrootPath('/usr/bin/cgpt')
-    offset = self.cmd.OutputOneLine(cgpt, 'show', '-b', '-i', '1', src)
-    size = self.cmd.OutputOneLine(cgpt, 'show', '-s', '-i', '1', src)
-    if None in (size, offset):
-      self.Error('Unable to use cgpt to get image geometry')
-      return False
-
-    return self.cmd.RunPipe([['dd', 'if=%s' % src, 'bs=512',
-                              'skip=%s' % offset, 'count=%s' % size],
-                             ['gzip', '-c']], outfile=dst)
+    return self.cmd.Run(self.CrosUtilsPath(
+        'cros_generate_stateful_update_payload'),
+        '--image=%s' % src, '--output=%s' % dst_dir)

   def GetSize(self, filename):
     return os.path.getsize(filename)
@@ -262,10 +277,12 @@ class CrosEnv(object):
     UpdateHandler.SetupUrl('/update', PingUpdateResponse())
     UpdateHandler.SetupUrl('/%s' % UPDATE_FILENAME,
                            FileUpdateResponse(update_file,
-                                              verbose=self.verbose))
+                                              verbose=self.verbose,
+                                              have_pv=self.have_pv))
     UpdateHandler.SetupUrl('/%s' % STATEFUL_FILENAME,
                            FileUpdateResponse(stateful_file,
-                                              verbose=self.verbose))
+                                              verbose=self.verbose,
+                                              have_pv=self.have_pv))

     self.http_server = BaseHTTPServer.HTTPServer(('', port), UpdateHandler)

@@ -304,6 +321,7 @@ class CrosEnv(object):
   def StartClient(self, port):
     """Ask the client machine to update from our server."""

+    self.Info("Starting client...")
     status = self.GetUpdateStatus()
     if status != 'UPDATE_STATUS_IDLE':
       self.Error('Client update status is not IDLE: %s' % status)
@@ -314,6 +332,8 @@ class CrosEnv(object):
     fd, update_log = tempfile.mkstemp(prefix='image-to-target-')
     self.Info('Starting update on client.  Client output stored to %s' %
               update_log)
+
+    # this will make the client read the files we have set up
     self.ssh_cmd.Run('/usr/bin/update_engine_client', '--update',
                      '--omaha_url', update_url, remote_tunnel=(port, port),
                      outfile=update_log)
@@ -322,6 +342,7 @@ class CrosEnv(object):
       self.Error('Client update failed')
       return False

+    self.Info('Update complete - running update script on client')
     self.ssh_cmd.Copy(self.CrosUtilsPath('../platform/dev/stateful_update'),
                       '/tmp')
     if not self.ssh_cmd.Run('/tmp/stateful_update', url_base,
@@ -334,7 +355,7 @@ class CrosEnv(object):
       self.Error('Client may not have successfully rebooted...')
       return False

-    print 'Client update completed successfully!'
+    self.Info('Client update completed successfully!')
     return True

@@ -342,7 +363,7 @@ class UpdateResponse(object):
   """Default response is the 404 error response."""

   def Reply(self, handler, send_content=True, post_data=None):
-    handler.send_Error(404, 'File not found')
+    handler.send_error(404, 'File not found')
     return None

@@ -350,11 +371,12 @@ class FileUpdateResponse(UpdateResponse):
   """Respond by sending the contents of a file."""

   def __init__(self, filename, content_type='application/octet-stream',
-               verbose=False, blocksize=16*1024):
+               verbose=False, blocksize=16*1024, have_pv=False):
     self.filename = filename
     self.content_type = content_type
     self.verbose = verbose
     self.blocksize = blocksize
+    self.have_pv = have_pv

   def Reply(self, handler, send_content=True, post_data=None):
     """Return file contents to the client.  Optionally display progress."""
@@ -373,14 +395,11 @@ class FileUpdateResponse(UpdateResponse):
                        handler.date_time_string(filestat.st_mtime))
     handler.end_headers()

-    if not send_content:
-      return
-
-    if filesize <= self.blocksize:
-      handler.wfile.write(f.read())
-    else:
+    if send_content:
       sent_size = 0
       sent_percentage = None
+
+      #TODO(sjg): this should use pv also
       while True:
         buf = f.read(self.blocksize)
         if not buf:
@@ -556,7 +575,6 @@ def main(argv):
   parser.add_option('--from', dest='src', default=None,
                     help='Source image to install')
   parser.add_option('--image-name', dest='image_name',
-                    default=DEFAULT_IMAGE_NAME,
                     help='Filename within image directory to load')
   parser.add_option('--port', dest='port', default=8081, type='int',
                     help='TCP port to serve from and tunnel through')
@@ -565,11 +583,23 @@ def main(argv):
   parser.add_option('--server-only', dest='server_only', default=False,
                     action='store_true', help='Do not start client')
   parser.add_option('--verbose', dest='verbose', default=False,
+                    action='store_true', help='Display progress')
+  parser.add_option('--debug', dest='debug', default=False,
                     action='store_true', help='Display running commands')
+  parser.add_option('--test', dest='test', default=False,
+                    action='store_true', help='Select test image')

   (options, args) = parser.parse_args(argv)

-  cros_env = CrosEnv(verbose=options.verbose)
+  # we can build the test image if it doesn't exist, so remember if we want to
+  build_test_image = False
+
+  verbosity = CrosEnv.SILENT
+  if options.verbose:
+    verbosity = CrosEnv.INFO
+  if options.debug:
+    verbosity = CrosEnv.DEBUG
+  cros_env = CrosEnv(verbose=verbosity)

   if not options.board:
     options.board = cros_env.GetDefaultBoard()
@@ -584,17 +614,47 @@ def main(argv):
   if not os.path.exists(options.src):
     parser.error('Path %s does not exist' % options.src)

+  if not options.image_name:
+    # auto-select the correct image
+    if options.test:
+      options.image_name = DEFAULT_IMAGE_NAME_TEST
+
+      # we will build the test image if not found
+      build_test_image = True
+    else:
+      options.image_name = DEFAULT_IMAGE_NAME
+
   if os.path.isdir(options.src):
     image_directory = options.src
     image_file = os.path.join(options.src, options.image_name)

     if not os.path.exists(image_file):
+      if build_test_image:
+        # we want a test image but it doesn't exist
+        # try to build it if we can
+        cros_env.Info('Creating test image')
+        test_output = cros_env.cmd.Output(
+            cros_env.CrosUtilsPath('enter_chroot.sh'),
+            '--', './mod_image_for_test.sh',
+            '--board=%s' % options.board, '-y')
+        if not os.path.exists(image_file):
+          print test_output
+          cros_env.Fatal('Failed to create test image - please run '
+                         './mod_image_for_test.sh manually inside the chroot')
       parser.error('Image file %s does not exist' % image_file)
   else:
     image_file = options.src
     image_directory = os.path.dirname(options.src)

+  update_file = os.path.join(image_directory, UPDATE_FILENAME)
+  stateful_file = os.path.join(image_directory, STATEFUL_FILENAME)
+
+  cros_env.Debug("Image file %s" % image_file)
+  cros_env.Debug("Update file %s" % update_file)
+  cros_env.Debug("Stateful file %s" % stateful_file)
+
   if options.remote:
+    cros_env.Info('Contacting client %s' % options.remote)
     cros_env.SetRemote(options.remote)
     rel = cros_env.GetRemoteRelease()
     if not rel:
@@ -610,11 +670,8 @@ def main(argv):
     parser.error('Either --server-only must be specified or '
                  '--remote=<client> needs to be given')

-  update_file = os.path.join(image_directory, UPDATE_FILENAME)
-  stateful_file = os.path.join(image_directory, STATEFUL_FILENAME)
-
   if (not cros_env.GenerateUpdatePayload(image_file, update_file) or
-      not cros_env.BuildStateful(image_file, stateful_file)):
+      not cros_env.BuildStateful(image_file, image_directory, stateful_file)):
     cros_env.Fatal()

   cros_env.CreateServer(options.port, update_file, stateful_file)
@@ -250,6 +250,24 @@ make_image_bootable() {
     -s "${FLAGS_statefulfs_mountpoint}"
 }

+verify_image_rootfs() {
+  local image=$1
+  local rootfs_offset="$(partoffset ${image} 3)"
+  local rootfs_size="$(partsize ${image} 3)"
+
+  local rootfs_tmp_file=$(mktemp)
+  trap "rm ${rootfs_tmp_file}" EXIT
+  sudo dd if="${image}" of="${rootfs_tmp_file}" bs=512 skip="${rootfs_offset}"
+
+  # This flips the read-only compatibility flag, so that
+  # e2fsck does not complain about unknown file system capabilities.
+  enable_rw_mount "${rootfs_tmp_file}"
+  info "Running e2fsck to check root file system for errors"
+  sudo e2fsck -fn "${rootfs_tmp_file}" ||
+    die "Root file system has errors, please ensure boot.desc and/or \
+command line parameters are correct"
+}
+
 # Use default of current image location if the output dir doesn't exist.
 if [ ! -d ${FLAGS_output_dir} ]; then
   warn "Output dir not found, using ${IMAGE_DIR}."

@@ -265,7 +283,8 @@ mkdir -p ${FLAGS_rootfs_mountpoint}
 mkdir -p ${FLAGS_statefulfs_mountpoint}
 mkdir -p ${FLAGS_espfs_mountpoint}

-make_image_bootable ${IMAGE}
+make_image_bootable "${IMAGE}"
+verify_image_rootfs "${IMAGE}"

 if [ ${FLAGS_cleanup_dirs} -eq ${FLAGS_TRUE} ]; then
   rmdir ${FLAGS_rootfs_mountpoint}

bin/cros_mark_chrome_as_stable (new symbolic link, 1 line)
@@ -0,0 +1 @@
+cros_mark_chrome_as_stable.py

bin/cros_mark_chrome_as_stable.py (new executable file, 332 lines)
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+
+# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module uprevs Chrome for cbuildbot.
+
+After calling, it prints outs CHROME_VERSION_ATOM=(version atom string).  A
+caller could then use this atom with emerge to build the newly uprevved version
+of Chrome e.g.
+
+./cros_mark_chrome_as_stable tot
+Returns chrome-base/chromeos-chrome-8.0.552.0_alpha_r1
+
+emerge-x86-generic =chrome-base/chromeos-chrome-8.0.552.0_alpha_r1
+"""
+
+import optparse
+import os
+import re
+import sys
+import urllib
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+import cros_mark_as_stable
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
+from cros_build_lib import RunCommand, Info, Warning
+
+BASE_CHROME_SVN_URL = 'http://src.chromium.org/svn'
+
+# Command for which chrome ebuild to uprev.
+TIP_OF_TRUNK, LATEST_RELEASE, STICKY = 'tot', 'latest_release', 'sticky_release'
+CHROME_REV = [TIP_OF_TRUNK, LATEST_RELEASE, STICKY]
+
+# Helper regex's for finding ebuilds.
+_CHROME_VERSION_REGEX = '\d+\.\d+\.\d+\.\d+'
+_NON_STICKY_REGEX = '%s[(_rc.*)|(_alpha.*)]+' % _CHROME_VERSION_REGEX
+
+# Dir where all the action happens.
+_CHROME_OVERLAY_DIR = ('%(srcroot)s/third_party/chromiumos-overlay'
+                       '/chromeos-base/chromeos-chrome')
+
+_GIT_COMMIT_MESSAGE = ('Marking %(chrome_rev)s for chrome ebuild with version '
+                       '%(chrome_version)s as stable.')
+
+
+def _GetSvnUrl():
+  """Returns the path to the svn url for the given chrome branch."""
+  return os.path.join(BASE_CHROME_SVN_URL, 'trunk')
+
+
+def _GetTipOfTrunkSvnRevision():
+  """Returns the current svn revision for the chrome tree."""
+  svn_url = _GetSvnUrl()
+  svn_info = RunCommand(['svn', 'info', svn_url], redirect_stdout=True)
+
+  revision_re = re.compile('^Revision:\s+(\d+).*')
+  for line in svn_info.splitlines():
+    match = revision_re.search(line)
+    if match:
+      return match.group(1)
+
+  raise Exception('Could not find revision information from %s' % svn_url)
+
+
+def _GetTipOfTrunkVersion():
+  """Returns the current Chrome version."""
+  svn_url = _GetSvnUrl()
+  chrome_version_file = urllib.urlopen(os.path.join(svn_url, 'src', 'chrome',
+                                                    'VERSION'))
+  chrome_version_info = chrome_version_file.read()
+  chrome_version_file.close()
+
+  # Sanity check.
+  if '404 Not Found' in chrome_version_info:
+    raise Exception('Url %s does not have version file.' % svn_url)
+
+  chrome_version_array = []
+
+  for line in chrome_version_info.splitlines():
+    chrome_version_array.append(line.rpartition('=')[2])
+
+  return '.'.join(chrome_version_array)
+
+
+def _GetLatestRelease(branch=None):
+  """Gets the latest release version from the buildspec_url for the branch.
+
+  Args:
+    branch: If set, gets the latest release for branch, otherwise latest
+      release.
+  Returns:
+    Latest version string.
+  """
+  buildspec_url = 'http://src.chromium.org/svn/releases'
+  svn_ls = RunCommand(['svn', 'ls', buildspec_url], redirect_stdout=True)
+  sorted_ls = RunCommand(['sort', '--version-sort'], input=svn_ls,
+                         redirect_stdout=True)
+  if branch:
+    chrome_version_re = re.compile('^%s\.\d+.*' % branch)
+  else:
+    chrome_version_re = re.compile('^[0-9]\..*')
+  for chrome_version in sorted_ls.splitlines():
+    if chrome_version_re.match(chrome_version):
+      current_version = chrome_version
+
+  return current_version.rstrip('/')
+
+
+def _GetStickyVersion(stable_ebuilds):
+  """Discovers the sticky version from the current stable_ebuilds."""
+  sticky_ebuilds = []
+  non_sticky_re = re.compile(_NON_STICKY_REGEX)
+  for ebuild in stable_ebuilds:
+    if not non_sticky_re.match(ebuild.version):
+      sticky_ebuilds.append(ebuild)
+
+  if not sticky_ebuilds:
+    raise Exception('No sticky ebuilds found')
+  elif len(sticky_ebuilds) > 1:
+    Warning('More than one sticky ebuild found')
+
+  return cros_mark_as_stable.BestEBuild(sticky_ebuilds).chrome_version
+
+
+class ChromeEBuild(cros_mark_as_stable.EBuild):
+  """Thin sub-class of EBuild that adds a chrome_version field."""
+  chrome_version_re = re.compile('.*chromeos-chrome-(%s|9999).*' % (
+      _CHROME_VERSION_REGEX))
+  chrome_version = ''
+
+  def __init__(self, path):
+    cros_mark_as_stable.EBuild.__init__(self, path)
+    re_match = self.chrome_version_re.match(self.ebuild_path_no_revision)
+    if re_match:
+      self.chrome_version = re_match.group(1)
+
+  def __cmp__(self, other):
+    """Use ebuild paths for comparison."""
+    if self.ebuild_path == other.ebuild_path:
+      return 0
+    elif self.ebuild_path > other.ebuild_path:
+      return 1
+    else:
+      return (-1)
+
+
+def FindChromeCandidates(overlay_dir):
+  """Return a tuple of chrome's unstable ebuild and stable ebuilds.
+
+  Args:
+    overlay_dir: The path to chrome's portage overlay dir.
+  Returns:
+    Tuple [unstable_ebuild, stable_ebuilds].
+  Raises:
+    Exception: if no unstable ebuild exists for Chrome.
+  """
+  stable_ebuilds = []
+  unstable_ebuilds = []
+  for path in [
+      os.path.join(overlay_dir, entry) for entry in os.listdir(overlay_dir)]:
+    if path.endswith('.ebuild'):
+      ebuild = ChromeEBuild(path)
+      if not ebuild.chrome_version:
+        Warning('Poorly formatted ebuild found at %s' % path)
+      else:
+        if not ebuild.is_stable:
+          unstable_ebuilds.append(ebuild)
+        else:
+          stable_ebuilds.append(ebuild)
+
+  # Apply some sanity checks.
+  if not unstable_ebuilds:
+    raise Exception('Missing 9999 ebuild for %s' % overlay_dir)
+  if not stable_ebuilds:
+    Warning('Missing stable ebuild for %s' % overlay_dir)
+
+  return cros_mark_as_stable.BestEBuild(unstable_ebuilds), stable_ebuilds
+
+
+def FindChromeUprevCandidate(stable_ebuilds, chrome_rev, sticky_branch):
+  """Finds the Chrome uprev candidate for the given chrome_rev.
+
+  Using the pre-flight logic, this means the stable ebuild you are uprevving
+  from.  The difference here is that the version could be different and in
+  that case we want to find it to delete it.
+
+  Args:
+    stable_ebuilds: A list of stable ebuilds.
+    chrome_rev: The chrome_rev designating which candidate to find.
+    sticky_branch: The the branch that is currently sticky with Major/Minor
+      components.  For example: 9.0.553
+  Returns:
+    Returns the EBuild, otherwise None if none found.
+  """
+  candidates = []
+  if chrome_rev == TIP_OF_TRUNK:
+    chrome_branch_re = re.compile('%s.*_alpha.*' % _CHROME_VERSION_REGEX)
+    for ebuild in stable_ebuilds:
+      if chrome_branch_re.search(ebuild.version):
+        candidates.append(ebuild)
+
+  elif chrome_rev == STICKY:
+    chrome_branch_re = re.compile('%s\.\d+.*_rc.*' % sticky_branch)
+    for ebuild in stable_ebuilds:
+      if chrome_branch_re.search(ebuild.version):
+        candidates.append(ebuild)
+
+  else:
+    chrome_branch_re = re.compile('%s.*_rc.*' % _CHROME_VERSION_REGEX)
+    for ebuild in stable_ebuilds:
+      if chrome_branch_re.search(ebuild.version) and (
+          not ebuild.chrome_version.startswith(sticky_branch)):
+        candidates.append(ebuild)
+
+  if candidates:
+    return cros_mark_as_stable.BestEBuild(candidates)
+  else:
+    return None
+
+
+def MarkChromeEBuildAsStable(stable_candidate, unstable_ebuild, chrome_rev,
+                             chrome_version, commit, overlay_dir):
+  """Uprevs the chrome ebuild specified by chrome_rev.
+
+  This is the main function that uprevs the chrome_rev from a stable candidate
+  to its new version.
+
+  Args:
+    stable_candidate: ebuild that corresponds to the stable ebuild we are
+      revving from.  If None, builds the a new ebuild given the version
+      and logic for chrome_rev type with revision set to 1.
+    unstable_ebuild: ebuild corresponding to the unstable ebuild for chrome.
+    chrome_rev: one of CHROME_REV
+      TIP_OF_TRUNK - Requires commit value.  Revs the ebuild for the TOT
+        version and uses the portage suffix of _alpha.
+      LATEST_RELEASE - This uses the portage suffix of _rc as they are release
+        candidates for the next sticky version.
+      STICKY - Revs the sticky version.
+    chrome_version: The \d.\d.\d.\d version of Chrome.
+    commit: Used with TIP_OF_TRUNK.  The svn revision of chrome.
+    overlay_dir: Path to the chromeos-chrome package dir.
+  Returns:
+    Full portage version atom (including rc's, etc) that was revved.
+  """
+  base_path = os.path.join(overlay_dir, 'chromeos-chrome-%s' % chrome_version)
+  # Case where we have the last stable candidate with same version just rev.
+  if stable_candidate and stable_candidate.chrome_version == chrome_version:
+    new_ebuild_path = '%s-r%d.ebuild' % (
+        stable_candidate.ebuild_path_no_revision,
+        stable_candidate.current_revision + 1)
+  else:
+    if chrome_rev == TIP_OF_TRUNK:
+      portage_suffix = '_alpha'
+    else:
+      portage_suffix = '_rc'
+
+    new_ebuild_path = base_path + ('%s-r1.ebuild' % portage_suffix)
+
+  cros_mark_as_stable.EBuildStableMarker.MarkAsStable(
+      unstable_ebuild.ebuild_path, new_ebuild_path, 'CROS_SVN_COMMIT', commit)
+  RunCommand(['git', 'add', new_ebuild_path])
+  if stable_candidate:
+    RunCommand(['git', 'rm', stable_candidate.ebuild_path])
+
+  cros_mark_as_stable.EBuildStableMarker.CommitChange(
+      _GIT_COMMIT_MESSAGE % {'chrome_rev': chrome_rev,
+                             'chrome_version': chrome_version})
+
+  new_ebuild = ChromeEBuild(new_ebuild_path)
+  return '%s-%s' % (new_ebuild.package, new_ebuild.version)
+
+
+def main():
+  usage = '%s OPTIONS [%s]' % (__file__, '|'.join(CHROME_REV))
+  parser = optparse.OptionParser(usage)
+  parser.add_option('-s', '--srcroot', default=os.path.join(os.environ['HOME'],
+                                                            'trunk', 'src'),
+                    help='Path to the src directory')
+  parser.add_option('-t', '--tracking_branch', default='cros/master',
+                    help='Branch we are tracking changes against')
+  (options, args) = parser.parse_args()
+
+  if len(args) != 1 or args[0] not in CHROME_REV:
+    parser.error('Commit requires arg set to one of %s.' % CHROME_REV)
+
+  overlay_dir = os.path.abspath(_CHROME_OVERLAY_DIR %
+                                {'srcroot': options.srcroot})
+  chrome_rev = args[0]
+  version_to_uprev = None
+  commit_to_use = None
+
+  (unstable_ebuild, stable_ebuilds) = FindChromeCandidates(overlay_dir)
+  sticky_version = _GetStickyVersion(stable_ebuilds)
+  sticky_branch = sticky_version.rpartition('.')[0]
+
+  if chrome_rev == TIP_OF_TRUNK:
+    version_to_uprev = _GetTipOfTrunkVersion()
+    commit_to_use = _GetTipOfTrunkSvnRevision()
+  elif chrome_rev == LATEST_RELEASE:
+    version_to_uprev = _GetLatestRelease()
+  else:
+    version_to_uprev = _GetLatestRelease(sticky_branch)
+
+  stable_candidate = FindChromeUprevCandidate(stable_ebuilds, chrome_rev,
+                                              sticky_branch)
+  # There are some cases we don't need to do anything.  Check for them.
+  if stable_candidate and (version_to_uprev == stable_candidate.chrome_version
+                           and not commit_to_use):
+    Info('Found nothing to do for chrome_rev %s with version %s.' % (
+        chrome_rev, version_to_uprev))
+  else:
+    os.chdir(overlay_dir)
+    work_branch = cros_mark_as_stable.GitBranch(
+        cros_mark_as_stable.STABLE_BRANCH_NAME, options.tracking_branch)
+    work_branch.CreateBranch()
+    try:
+      chrome_version_atom = MarkChromeEBuildAsStable(
+          stable_candidate, unstable_ebuild, chrome_rev, version_to_uprev,
+          commit_to_use, overlay_dir)
+      # Explicit print to communicate to caller.
+      print 'CHROME_VERSION_ATOM=%s' % chrome_version_atom
+    except:
+      work_branch.Delete()
+      raise
+
+
+if __name__ == '__main__':
|
main()
|
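FindChromeUprevCandidate above selects purely on the portage version suffix: TOT ebuilds carry _alpha, release candidates carry _rc, and the sticky case additionally pins the Major.Minor.Branch prefix. A minimal sketch of that classification, assuming a stand-in value for _CHROME_VERSION_REGEX (the real constant is defined earlier in the file) and a hypothetical classify() helper:

    import re

    # Assumed stand-in for _CHROME_VERSION_REGEX: a dotted four-part version.
    CHROME_VERSION_RE = r'\d+\.\d+\.\d+\.\d+'

    def classify(version, sticky_branch):
      """Names the chrome_rev bucket a stable ebuild version falls into."""
      # Testing STICKY before LATEST_RELEASE reproduces the script's
      # "not ebuild.chrome_version.startswith(sticky_branch)" exclusion.
      if re.search(r'%s.*_alpha.*' % CHROME_VERSION_RE, version):
        return 'TIP_OF_TRUNK'    # e.g. 9.0.306.0_alpha-r1
      if re.search(r'%s\.\d+.*_rc.*' % sticky_branch, version):
        return 'STICKY'          # rc on the sticky branch
      if re.search(r'%s.*_rc.*' % CHROME_VERSION_RE, version):
        return 'LATEST_RELEASE'  # rc off the sticky branch
      return None

    assert classify('9.0.306.0_alpha-r1', '8.0.224') == 'TIP_OF_TRUNK'
    assert classify('8.0.224.520_rc-r1', '8.0.224') == 'STICKY'
    assert classify('9.0.305.1_rc-r1', '8.0.224') == 'LATEST_RELEASE'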
269  bin/cros_mark_chrome_as_stable_unittest.py  (Executable file)
@ -0,0 +1,269 @@
#!/usr/bin/python

# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unit tests for cros_mark_chrome_as_stable.py."""

import cros_mark_chrome_as_stable
import mox
import os
import shutil
import sys
import tempfile
import unittest
import urllib

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import cros_mark_as_stable

unstable_data = 'KEYWORDS=~x86 ~arm'
stable_data = 'KEYWORDS=x86 arm'


def _TouchAndWrite(path, data=None):
  """Writes data (if it exists) to the file specified by the path."""
  fh = open(path, 'w')
  if data:
    fh.write(data)

  fh.close()


class CrosMarkChromeAsStable(mox.MoxTestBase):

  def setUp(self):
    """Setup vars and create mock dir."""
    mox.MoxTestBase.setUp(self)
    self.tmp_overlay = tempfile.mkdtemp(prefix='chromiumos-overlay')
    self.mock_chrome_dir = os.path.join(self.tmp_overlay, 'chromeos-base',
                                        'chromeos-chrome')
    os.makedirs(self.mock_chrome_dir)

    self.unstable = os.path.join(self.mock_chrome_dir,
                                 'chromeos-chrome-9999.ebuild')
    self.sticky_branch = '8.0.224'
    self.sticky_version = '%s.503' % self.sticky_branch
    self.sticky = os.path.join(self.mock_chrome_dir,
                               'chromeos-chrome-%s.ebuild' %
                               self.sticky_version)
    self.sticky_rc_version = '%s.504' % self.sticky_branch
    self.sticky_rc = os.path.join(self.mock_chrome_dir,
                                  'chromeos-chrome-%s_rc-r1.ebuild' %
                                  self.sticky_rc_version)
    self.latest_stable_version = '8.0.300.1'
    self.latest_stable = os.path.join(self.mock_chrome_dir,
                                      'chromeos-chrome-%s_rc-r2.ebuild' %
                                      self.latest_stable_version)
    self.tot_stable_version = '9.0.305.0'
    self.tot_stable = os.path.join(self.mock_chrome_dir,
                                   'chromeos-chrome-%s_alpha-r1.ebuild' %
                                   self.tot_stable_version)

    self.sticky_new_rc_version = '%s.520' % self.sticky_branch
    self.sticky_new_rc = os.path.join(self.mock_chrome_dir,
                                      'chromeos-chrome-%s_rc-r1.ebuild' %
                                      self.sticky_new_rc_version)
    self.latest_new_version = '9.0.305.1'
    self.latest_new = os.path.join(self.mock_chrome_dir,
                                   'chromeos-chrome-%s_rc-r1.ebuild' %
                                   self.latest_new_version)
    self.tot_new_version = '9.0.306.0'
    self.tot_new = os.path.join(self.mock_chrome_dir,
                                'chromeos-chrome-%s_alpha-r1.ebuild' %
                                self.tot_new_version)

    _TouchAndWrite(self.unstable, unstable_data)
    _TouchAndWrite(self.sticky, stable_data)
    _TouchAndWrite(self.sticky_rc, stable_data)
    _TouchAndWrite(self.latest_stable, stable_data)
    _TouchAndWrite(self.tot_stable, stable_data)

  def tearDown(self):
    """Cleans up mock dir."""
    shutil.rmtree(self.tmp_overlay)

  def testFindChromeCandidates(self):
    """Test creation of stable ebuilds from mock dir."""
    unstable, stable_ebuilds = cros_mark_chrome_as_stable.FindChromeCandidates(
        self.mock_chrome_dir)

    self.assertEqual(unstable.ebuild_path, self.unstable)
    self.assertEqual(len(stable_ebuilds), 4)
    self.assertTrue(cros_mark_chrome_as_stable.ChromeEBuild(self.sticky) in
                    stable_ebuilds)
    self.assertTrue(cros_mark_chrome_as_stable.ChromeEBuild(self.sticky_rc) in
                    stable_ebuilds)
    self.assertTrue(cros_mark_chrome_as_stable.ChromeEBuild(self.latest_stable)
                    in stable_ebuilds)
    self.assertTrue(cros_mark_chrome_as_stable.ChromeEBuild(self.tot_stable) in
                    stable_ebuilds)

  def _GetStableEBuilds(self):
    """Common helper to create a list of stable ebuilds."""
    return [
        cros_mark_chrome_as_stable.ChromeEBuild(self.sticky),
        cros_mark_chrome_as_stable.ChromeEBuild(self.sticky_rc),
        cros_mark_chrome_as_stable.ChromeEBuild(self.latest_stable),
        cros_mark_chrome_as_stable.ChromeEBuild(self.tot_stable),
    ]

  def testTOTFindChromeUprevCandidate(self):
    """Tests if we can find tot uprev candidate from our mock dir data."""
    stable_ebuilds = self._GetStableEBuilds()

    candidate = cros_mark_chrome_as_stable.FindChromeUprevCandidate(
        stable_ebuilds, cros_mark_chrome_as_stable.TIP_OF_TRUNK,
        self.sticky_branch)

    self.assertEqual(candidate.ebuild_path, self.tot_stable)

  def testLatestFindChromeUprevCandidate(self):
    """Tests if we can find latest uprev candidate from our mock dir data."""
    stable_ebuilds = self._GetStableEBuilds()

    candidate = cros_mark_chrome_as_stable.FindChromeUprevCandidate(
        stable_ebuilds, cros_mark_chrome_as_stable.LATEST_RELEASE,
        self.sticky_branch)

    self.assertEqual(candidate.ebuild_path, self.latest_stable)

  def testStickyFindChromeUprevCandidate(self):
    """Tests if we can find sticky uprev candidate from our mock dir data."""
    stable_ebuilds = self._GetStableEBuilds()

    candidate = cros_mark_chrome_as_stable.FindChromeUprevCandidate(
        stable_ebuilds, cros_mark_chrome_as_stable.STICKY,
        self.sticky_branch)

    self.assertEqual(candidate.ebuild_path, self.sticky_rc)

  def testGetTipOfTrunkSvnRevision(self):
    """Tests if we can get the latest svn revision from TOT."""
    self.mox.StubOutWithMock(cros_mark_chrome_as_stable, 'RunCommand')
    cros_mark_chrome_as_stable.RunCommand(
        ['svn', 'info', cros_mark_chrome_as_stable._GetSvnUrl()],
        redirect_stdout=True).AndReturn(
            'Some Junk 2134\nRevision: 12345\nOtherInfo: test_data')
    self.mox.ReplayAll()
    revision = cros_mark_chrome_as_stable._GetTipOfTrunkSvnRevision()
    self.mox.VerifyAll()
    self.assertEquals(revision, '12345')

  def testGetTipOfTrunkVersion(self):
    """Tests if we get the latest version from TOT."""
    self.mox.StubOutWithMock(urllib, 'urlopen')
    mock_file = self.mox.CreateMock(file)
    urllib.urlopen(os.path.join(cros_mark_chrome_as_stable._GetSvnUrl(), 'src',
                                'chrome', 'VERSION')).AndReturn(mock_file)
    mock_file.read().AndReturn('A=8\nB=0\nC=256\nD=0')
    mock_file.close()

    self.mox.ReplayAll()
    version = cros_mark_chrome_as_stable._GetTipOfTrunkVersion()
    self.mox.VerifyAll()
    self.assertEquals(version, '8.0.256.0')

  def testGetLatestRelease(self):
    """Tests if we can find the latest release from our mock url data."""
    test_data = '\n'.join(['7.0.224.1/',
                           '7.0.224.2/',
                           '8.0.365.5/',
                           'LATEST.txt'])
    self.mox.StubOutWithMock(cros_mark_chrome_as_stable, 'RunCommand')
    cros_mark_chrome_as_stable.RunCommand(
        ['svn', 'ls', 'http://src.chromium.org/svn/releases'],
        redirect_stdout=True).AndReturn('some_data')
    cros_mark_chrome_as_stable.RunCommand(
        ['sort', '--version-sort'], input='some_data',
        redirect_stdout=True).AndReturn(test_data)
    self.mox.ReplayAll()
    release = cros_mark_chrome_as_stable._GetLatestRelease()
    self.mox.VerifyAll()
    self.assertEqual('8.0.365.5', release)

  def testGetLatestStickyRelease(self):
    """Tests if we can find the latest sticky release from our mock url data."""
    test_data = '\n'.join(['7.0.222.1/',
                           '8.0.224.2/',
                           '8.0.365.5/',
                           'LATEST.txt'])
    self.mox.StubOutWithMock(cros_mark_chrome_as_stable, 'RunCommand')
    cros_mark_chrome_as_stable.RunCommand(
        ['svn', 'ls', 'http://src.chromium.org/svn/releases'],
        redirect_stdout=True).AndReturn('some_data')
    cros_mark_chrome_as_stable.RunCommand(
        ['sort', '--version-sort'], input='some_data',
        redirect_stdout=True).AndReturn(test_data)
    self.mox.ReplayAll()
    release = cros_mark_chrome_as_stable._GetLatestRelease(self.sticky_branch)
    self.mox.VerifyAll()
    self.assertEqual('8.0.224.2', release)

  def testStickyVersion(self):
    """Tests if we can find the sticky version from our mock directories."""
    stable_ebuilds = self._GetStableEBuilds()
    sticky_version = cros_mark_chrome_as_stable._GetStickyVersion(
        stable_ebuilds)
    self.assertEqual(sticky_version, self.sticky_version)

  def testChromeEBuildInit(self):
    """Tests if the chrome_version is set correctly in a ChromeEBuild."""
    ebuild = cros_mark_chrome_as_stable.ChromeEBuild(self.sticky)
    self.assertEqual(ebuild.chrome_version, self.sticky_version)

  def _CommonMarkAsStableTest(self, chrome_rev, new_version, old_ebuild_path,
                              new_ebuild_path, commit_string_indicator):
    """Common function used for test functions for MarkChromeEBuildAsStable.

    This function stubs out other calls, and runs MarkChromeEBuildAsStable
    with the specified args.

    Args:
      chrome_rev: standard chrome_rev argument
      new_version: version we are revving up to
      old_ebuild_path: path to the stable ebuild
      new_ebuild_path: path to the to-be-created ebuild
      commit_string_indicator: a string that the commit message must contain
    """
    self.mox.StubOutWithMock(cros_mark_chrome_as_stable, 'RunCommand')
    self.mox.StubOutWithMock(cros_mark_as_stable.EBuildStableMarker,
                             'CommitChange')
    stable_candidate = cros_mark_chrome_as_stable.ChromeEBuild(old_ebuild_path)
    unstable_ebuild = cros_mark_chrome_as_stable.ChromeEBuild(self.unstable)
    chrome_version = new_version
    commit = None
    overlay_dir = self.mock_chrome_dir

    cros_mark_chrome_as_stable.RunCommand(['git', 'add', new_ebuild_path])
    cros_mark_chrome_as_stable.RunCommand(['git', 'rm', old_ebuild_path])
    cros_mark_as_stable.EBuildStableMarker.CommitChange(
        mox.StrContains(commit_string_indicator))

    self.mox.ReplayAll()
    cros_mark_chrome_as_stable.MarkChromeEBuildAsStable(
        stable_candidate, unstable_ebuild, chrome_rev, chrome_version, commit,
        overlay_dir)
    self.mox.VerifyAll()

  def testStickyMarkAsStable(self):
    """Tests to see if we can mark chrome as stable for a new sticky release."""
    self._CommonMarkAsStableTest(cros_mark_chrome_as_stable.STICKY,
                                 self.sticky_new_rc_version, self.sticky_rc,
                                 self.sticky_new_rc, 'sticky_release')

  def testLatestMarkAsStable(self):
    """Tests to see if we can mark chrome for a latest release."""
    self._CommonMarkAsStableTest(cros_mark_chrome_as_stable.LATEST_RELEASE,
                                 self.latest_new_version, self.latest_stable,
                                 self.latest_new, 'latest_release')

  def testTotMarkAsStable(self):
    """Tests to see if we can mark chrome for tot."""
    self._CommonMarkAsStableTest(cros_mark_chrome_as_stable.TIP_OF_TRUNK,
                                 self.tot_new_version, self.tot_stable,
                                 self.tot_new, 'tot')


if __name__ == '__main__':
  unittest.main()
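The tests above follow mox's record/replay/verify cycle: each expected call is recorded on a stub together with a canned return value, ReplayAll() flips the mocks into playback mode, and VerifyAll() fails the test unless the code under test made exactly the recorded calls. A stripped-down sketch of the same cycle, with a hypothetical fetch_revision() as the code under test:

    import mox

    def fetch_revision(runner):
      """Code under test: pulls the Revision field out of 'svn info' output."""
      for line in runner(['svn', 'info']).splitlines():
        if line.startswith('Revision: '):
          return line.split()[1]

    class FetchRevisionTest(mox.MoxTestBase):
      def testFetchRevision(self):
        runner = self.mox.CreateMockAnything()
        # Record phase: one expected call with a scripted return value.
        runner(['svn', 'info']).AndReturn('Path: .\nRevision: 12345\n')
        self.mox.ReplayAll()
        self.assertEqual(fetch_revision(runner), '12345')
        self.mox.VerifyAll()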
@ -26,9 +26,9 @@ def main():
   if options.buildroot:
     if options.clobber:
-      cbuildbot._FullCheckout(options.buildroot, options.tracking_branch,
-                              rw_checkout=False, retries=_NUMBER_OF_RETRIES)
+      cbuildbot._FullCheckout(options.buildroot, options.tracking_branch,
+                              retries=_NUMBER_OF_RETRIES)
     else:
-      cbuildbot._IncrementalCheckout(options.buildroot, rw_checkout=False,
-                                     retries=_NUMBER_OF_RETRIES)
+      cbuildbot._IncrementalCheckout(options.buildroot,
+                                     retries=_NUMBER_OF_RETRIES)
   else:
     print >> sys.stderr, 'ERROR: Must set buildroot'
93  bin/cros_workon_make  (Executable file)
@ -0,0 +1,93 @@
#!/bin/bash

# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Simple wrapper script to build a cros_workon package incrementally.
# You must already be cros_workon'ing the package in question.

. "$(dirname $0)/../common.sh"

# Script must be run inside the chroot.
assert_inside_chroot

get_default_board

DEFINE_string board "${DEFAULT_BOARD}" \
  "Board for which to build the package."
DEFINE_boolean test "${FLAGS_FALSE}" \
  "Compile and run tests as well."
DEFINE_boolean reconf "${FLAGS_FALSE}" \
  "Re-run configure and prepare steps."
DEFINE_boolean install "${FLAGS_FALSE}" \
  "Incrementally build and install your package."
DEFINE_boolean scrub "${FLAGS_FALSE}" \
  "Blow away all in-tree files not managed by git."

set -e
# Parse command line.
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"

if [ $# -lt 1 ]; then
  echo "Usage: ${0} [OPTIONS] <package (read: ebuild) basename>"
  exit 1
fi

if [ -n "${FLAGS_board}" ]; then
  BOARD_DIR=/build/"${FLAGS_board}"
  EBUILDCMD=ebuild-"${FLAGS_board}"
  EMERGECMD=emerge-"${FLAGS_board}"
  EQUERYCMD=equery-"${FLAGS_board}"
  BOARD_STR="${FLAGS_board}"
  BOARD_KEYWORD="$(portageq-${FLAGS_board} envvar ARCH)"
fi

unstable_suffix="9999"
workon_name="${1}-${unstable_suffix}"
pkgfile=
workpath=

if ! pkgfile=$("${EQUERYCMD}" which "${workon_name}" 2> /dev/null); then
  if ACCEPT_KEYWORDS="~${BOARD_KEYWORD}" "${EQUERYCMD}" which "${workon_name}" \
      > /dev/null 2>&1; then
    die "run './cros_workon --board ${BOARD_STR} start ${1}' first!" 1>&2
  fi
  die "error looking up package $1"
fi

if [ "${FLAGS_scrub}" = "${FLAGS_TRUE}" ]; then
  eval $(${EBUILDCMD} $(${EQUERYCMD} which ${workon_name}) info)
  srcdir=$(readlink -m ${CROS_WORKON_SRCDIR})
  trunkdir=$(readlink -m ${CHROOT_TRUNK_DIR})
  project_path=${srcdir#${trunkdir}/}
  if ! (cd "${GCLIENT_ROOT}/${project_path}" && git clean -xf); then
    die "Could not scrub source directory"
  fi
  exit 0
fi

workpath=$(\
    echo "${pkgfile}" | \
    awk -F '/' '{ print $(NF-2) "/" $(NF-1) }')-"${unstable_suffix}"

emerge_features=
to_do="compile"
if [ "${FLAGS_test}" = "${FLAGS_TRUE}" ]; then
  to_do="test"
  emerge_features="test"
fi
if [ "${FLAGS_install}" = "${FLAGS_TRUE}" ]; then
  FEATURES="${emerge_features}" "${EMERGECMD}" "${1}"
  exit $?
fi

clean=
if [ "${FLAGS_reconf}" = "${FLAGS_TRUE}" ]; then
  clean="clean"
else
  rm -f "/build/${BOARD_STR}/tmp/portage/${workpath}/.compiled"
fi
SANDBOX_WRITE=~/trunk CROS_WORKON_INPLACE=1 \
  "${EBUILDCMD}" "${pkgfile}" ${clean} "${to_do}"
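The workpath derivation above keeps only the category and package directories from the tail of the ebuild path that equery reports. In Python, the awk expression amounts to a slice of the path components (the sample path below is illustrative only):

    # awk -F '/' '{ print $(NF-2) "/" $(NF-1) }' keeps the last two directories.
    pkgfile = '/usr/portage/chromeos-base/platform2/platform2-9999.ebuild'
    workpath = '/'.join(pkgfile.split('/')[-3:-1]) + '-9999'
    assert workpath == 'chromeos-base/platform2-9999'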
@ -21,9 +21,10 @@ DEFINE_boolean keep_logs "$FLAGS_FALSE" "keep autotest results" k
 RUN_TEST="$SCRIPT_DIR/run_remote_tests.sh"
 TEST=server/site_tests/platform_BootPerfServer/control
 TMP_RESULTS="/tmp/bootperf.$(date '+%Y%j%H%M').$$"
-RESULTS_KEYVAL=platform_BootPerfServer/platform_BootPerfServer/results/keyval
+RESULTS_DIR=platform_BootPerfServer/platform_BootPerfServer/results
+RESULTS_KEYVAL=$RESULTS_DIR/keyval
 RESULTS_SUMMARY_FILES=(
-  $RESULTS_KEYVAL
+  $RESULTS_DIR
   platform_BootPerfServer/keyval
   platform_BootPerfServer/platform_BootPerfServer/keyval
   platform_BootPerfServer/platform_BootPerfServer/platform_BootPerf/keyval
@ -135,13 +136,19 @@ run_boot_test() {

     mkdir $iter_rundir
     echo "run $iter start at $(date)"
-    $RUN_TEST --results_dir_root="$TMP_RESULTS" \
+    # BEWARE: The --use_emerged option means that you must manually
+    # emerge chromeos-base/autotest-tests if a) you are working on
+    # the package, and b) you also want to use this script to test your
+    # changes to the package.  (The option is here because IMO the
+    # alternative is a bigger nuisance.)
+    $RUN_TEST --use_emerged --results_dir_root="$TMP_RESULTS" \
       --remote="$remote" $TEST >$logfile 2>&1
     if [ ! -e "$TMP_RESULTS/$RESULTS_KEYVAL" ]; then
       error "No results file; terminating test runs."
-      error "Check $logfile for output from the test run,"
+      error "Check $(pwd)/$logfile for output from the test run,"
       error "and see $TMP_RESULTS for full test logs and output."
-      break
+      return
     fi
     mkdir $summary_dir
     tar cf - -C $TMP_RESULTS "${RESULTS_SUMMARY_FILES[@]}" |
@ -180,7 +180,7 @@ fi
 # We don't allow building from source with the image as a target,
 # and it's not possible to store prebuilts for the same package
 # with different use flags.
-USE="${EXTRA_USE}" emerge-${FLAGS_board} \
+USE="${EXTRA_USE} ${USE}" emerge-${FLAGS_board} \
   -uNDvg --binpkg-respect-use=y virtual/kernel

 # Use canonical path since some tools (e.g. mount) do not like symlinks.
@ -26,7 +26,7 @@ export CHROMEOS_VERSION_MINOR=9
 # Increment by 2 in trunk after making a release branch.
 # Does not reset on a major/minor change (always increases).
 # (Trunk is always odd; branches are always even).
-export CHROMEOS_VERSION_BRANCH=115
+export CHROMEOS_VERSION_BRANCH=129

 # Patch number.
 # Increment by 1 each release on a branch.
@ -26,7 +26,6 @@ STATE_LOOP_DEV=""
 # Pass an arg to not exit 1 at the end
 cleanup() {
   set +e
-  echo "Cleaning up"
   if [ -n "$SRC_MNT" ]; then
     sudo umount -d "$SRC_MNT"
     [ -d "$SRC_MNT" ] && rmdir "$SRC_MNT"
@ -74,13 +73,16 @@ extract_partition_to_temp_file() {
   else
     warn "partition offset or length not at 2MiB boundary"
   fi
-  dd if="$filename" of="$temp_file" bs=$bs count="$length" skip="$offset"
+  dd if="$filename" of="$temp_file" bs=$bs count="$length" \
+      skip="$offset" 2>/dev/null
 }

 patch_kernel() {
   local IMAGE="$1"
   local KERN_FILE="$2"

+  echo "Patching kernel" $KERN_FILE
+  echo "  into" $IMAGE
   STATE_LOOP_DEV=$(sudo losetup -f)
   [ -n "$STATE_LOOP_DEV" ] || die "no free loop device"
   local offset=$(partoffset "${IMAGE}" 1)
@ -88,7 +90,7 @@ patch_kernel() {
   sudo losetup -o "$offset" "$STATE_LOOP_DEV" "$IMAGE"
   STATE_MNT=$(mktemp -d /tmp/state.XXXXXX)
   sudo mount --read-only "$STATE_LOOP_DEV" "$STATE_MNT"
-  dd if="$STATE_MNT"/vmlinuz_hd.vblock of="$KERN_FILE" conv=notrunc
+  dd if="$STATE_MNT"/vmlinuz_hd.vblock of="$KERN_FILE" conv=notrunc 2>/dev/null
   sudo umount "$STATE_MNT"
   STATE_MNT=""
   sudo losetup -d "$STATE_LOOP_DEV"
@ -163,11 +165,6 @@ DELTA=$FLAGS_TRUE

 if [ -z "$FLAGS_src_image" ]; then
   DELTA=$FLAGS_FALSE
-  if [ "$FLAGS_old_style" = "$FLAGS_TRUE" ]; then
-    echo "Generating an old-style full update"
-  else
-    echo "Generating a new-style full update"
-  fi
 fi

 if [ "$DELTA" -eq "$FLAGS_TRUE" -o "$FLAGS_old_style" -eq "$FLAGS_FALSE" ]; then
@ -227,7 +224,7 @@ if [ "$DELTA" -eq "$FLAGS_TRUE" -o "$FLAGS_old_style" -eq "$FLAGS_FALSE" ]; then
   echo "Done generating new style full update."
   fi
 else
-  echo "Generating full update"
+  echo "Generating old-style full update"

   trap cleanup INT TERM EXIT
   DST_KERNEL=$(extract_partition_to_temp_file "$FLAGS_image" 2)
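extract_partition_to_temp_file hands dd a block size together with count and skip measured in those blocks, which is why the script warns when a partition is not 2 MiB-aligned; the added 2>/dev/null only silences dd's transfer summary. A sketch of the arithmetic, assuming the 2 MiB block size implied by the boundary check (dd_args is a hypothetical helper):

    def dd_args(offset_bytes, length_bytes, bs=2 * 1024 * 1024):
      """Returns (bs, skip, count) suitable for dd, in whole blocks."""
      if offset_bytes % bs or length_bytes % bs:
        raise ValueError('partition offset or length not at 2MiB boundary')
      return bs, offset_bytes // bs, length_bytes // bs

    assert dd_args(4 * 1024 * 1024, 2 * 1024 * 1024) == (2097152, 2, 1)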
@ -18,36 +18,36 @@ import sys
 sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
 from cros_build_lib import Info, RunCommand, Warning, Die

+gflags.DEFINE_boolean('all', False,
+                      'Mark all packages as stable.')
 gflags.DEFINE_string('board', '',
                      'Board for which the package belongs.', short_name='b')
+gflags.DEFINE_string('drop_file', None,
+                     'File to list packages that were revved.')
+gflags.DEFINE_boolean('dryrun', False,
+                      'Passes dry-run to git push if pushing a change.')
 gflags.DEFINE_string('overlays', '',
                      'Colon-separated list of overlays to modify.',
                      short_name='o')
 gflags.DEFINE_string('packages', '',
                      'Colon-separated list of packages to mark as stable.',
                      short_name='p')
-gflags.DEFINE_string('push_options', '',
-                     'Options to use with git-cl push using push command.')
 gflags.DEFINE_string('srcroot', '%s/trunk/src' % os.environ['HOME'],
                      'Path to root src directory.',
                      short_name='r')
 gflags.DEFINE_string('tracking_branch', 'cros/master',
                      'Used with commit to specify branch to track against.',
                      short_name='t')
-gflags.DEFINE_boolean('all', False,
-                      'Mark all packages as stable.')
 gflags.DEFINE_boolean('verbose', False,
                       'Prints out verbose information about what is going on.',
                       short_name='v')


 # Takes two strings, package_name and commit_id.
-_GIT_COMMIT_MESSAGE = \
-    'Marking 9999 ebuild for %s with commit %s as stable.'
+_GIT_COMMIT_MESSAGE = 'Marking 9999 ebuild for %s with commit %s as stable.'

 # Dictionary of valid commands with usage information.
-_COMMAND_DICTIONARY = {
+COMMAND_DICTIONARY = {
   'clean':
     'Cleans up previous calls to either commit or push',
   'commit':
@ -57,7 +57,17 @@ _COMMAND_DICTIONARY = {
   }

 # Name used for stabilizing branch.
-_STABLE_BRANCH_NAME = 'stabilizing_branch'
+STABLE_BRANCH_NAME = 'stabilizing_branch'


+def BestEBuild(ebuilds):
+  """Returns the newest EBuild from a list of EBuild objects."""
+  from portage.versions import vercmp
+  winner = ebuilds[0]
+  for ebuild in ebuilds[1:]:
+    if vercmp(winner.version, ebuild.version) < 0:
+      winner = ebuild
+  return winner
+
+
 # ======================= Global Helper Functions ========================
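BestEBuild, now public because cros_mark_chrome_as_stable.py also calls it, delegates the ordering to portage's vercmp, which returns a negative, zero or positive number like a C comparator and understands portage revision and pre-release suffixes. A small sketch of the comparisons involved (requires a host where portage is importable; exact values assume stock portage behavior):

    from portage.versions import vercmp

    assert vercmp('1.0-r1', '1.0-r2') < 0            # r2 is newer
    assert vercmp('8.0.224.503', '8.0.224.504') < 0
    assert vercmp('9.0.305.0_rc', '9.0.305.0') < 0   # _rc precedes the release

    def best(versions):
      """Mirrors BestEBuild's loop on bare version strings."""
      winner = versions[0]
      for version in versions[1:]:
        if vercmp(winner, version) < 0:
          winner = version
      return winner

    assert best(['8.0.224.503', '9.0.305.0_rc', '8.0.224.504']) == '9.0.305.0_rc'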
@ -83,16 +93,6 @@ def _CleanStalePackages(board, package_array):
   RunCommand(['sudo', 'eclean', '-d', 'packages'], redirect_stderr=True)


-def _BestEBuild(ebuilds):
-  """Returns the newest EBuild from a list of EBuild objects."""
-  from portage.versions import vercmp
-  winner = ebuilds[0]
-  for ebuild in ebuilds[1:]:
-    if vercmp(winner.version, ebuild.version) < 0:
-      winner = ebuild
-  return winner
-
-
 def _FindUprevCandidates(files):
   """Return a list of uprev candidates from specified list of files.
@ -108,7 +108,7 @@ def _FindUprevCandidates(files):
   unstable_ebuilds = []
   for path in files:
     if path.endswith('.ebuild') and not os.path.islink(path):
-      ebuild = _EBuild(path)
+      ebuild = EBuild(path)
       if ebuild.is_workon:
         workon_dir = True
       if ebuild.is_stable:
@ -121,7 +121,7 @@ def _FindUprevCandidates(files):
   if len(unstable_ebuilds) > 1:
     Die('Found multiple unstable ebuilds in %s' % os.path.dirname(path))
   if len(stable_ebuilds) > 1:
-    stable_ebuilds = [_BestEBuild(stable_ebuilds)]
+    stable_ebuilds = [BestEBuild(stable_ebuilds)]

     # Print a warning if multiple stable ebuilds are found in the same
     # directory. Storing multiple stable ebuilds is error-prone because
@ -166,15 +166,15 @@ def _BuildEBuildDictionary(overlays, all, packages):
         overlays[overlay].append(ebuild)


-def _CheckOnStabilizingBranch():
+def _CheckOnStabilizingBranch(stable_branch):
   """Returns true if the git branch is on the stabilizing branch."""
   current_branch = _SimpleRunCommand('git branch | grep \*').split()[1]
-  return current_branch == _STABLE_BRANCH_NAME
+  return current_branch == stable_branch


 def _CheckSaneArguments(package_list, command):
   """Checks to make sure the flags are sane.  Dies if arguments are not sane."""
-  if not command in _COMMAND_DICTIONARY.keys():
+  if not command in COMMAND_DICTIONARY.keys():
     _PrintUsageAndDie('%s is not a valid command' % command)
   if not gflags.FLAGS.packages and command == 'commit' and not gflags.FLAGS.all:
     _PrintUsageAndDie('Please specify at least one package')
@ -185,19 +185,13 @@ def _CheckSaneArguments(package_list, command):
   gflags.FLAGS.srcroot = os.path.abspath(gflags.FLAGS.srcroot)


-def _Clean():
-  """Cleans up uncommitted changes on either stabilizing branch or master."""
-  _SimpleRunCommand('git reset HEAD --hard')
-  _SimpleRunCommand('git checkout %s' % gflags.FLAGS.tracking_branch)
-
-
 def _PrintUsageAndDie(error_message=''):
   """Prints optional error_message the usage and returns an error exit code."""
   command_usage = 'Commands: \n'
   # Add keys and usage information from dictionary.
-  commands = sorted(_COMMAND_DICTIONARY.keys())
+  commands = sorted(COMMAND_DICTIONARY.keys())
   for command in commands:
-    command_usage += '  %s: %s\n' % (command, _COMMAND_DICTIONARY[command])
+    command_usage += '  %s: %s\n' % (command, COMMAND_DICTIONARY[command])
   commands_str = '|'.join(commands)
   Warning('Usage: %s FLAGS [%s]\n\n%s\nFlags:%s' % (sys.argv[0], commands_str,
                                                     command_usage, gflags.FLAGS))
@ -206,40 +200,6 @@ def _PrintUsageAndDie(error_message=''):
   else:
     sys.exit(1)

-def _PushChange():
-  """Pushes changes to the git repository.
-
-  Pushes locals commits from calls to CommitChange to the remote git
-  repository specified by os.pwd.
-
-  Raises:
-    OSError: Error occurred while pushing.
-  """
-
-  # TODO(sosa) - Add logic for buildbot to check whether other slaves have
-  # completed and push this change only if they have.
-
-  # Sanity check to make sure we're on a stabilizing branch before pushing.
-  if not _CheckOnStabilizingBranch():
-    Info('Not on branch %s so no work found to push.  Exiting' % \
-        _STABLE_BRANCH_NAME)
-    return
-
-  description = _SimpleRunCommand('git log --format=format:%s%n%n%b ' +
-                                  gflags.FLAGS.tracking_branch + '..')
-  description = 'Marking set of ebuilds as stable\n\n%s' % description
-  merge_branch_name = 'merge_branch'
-  _SimpleRunCommand('git remote update')
-  merge_branch = _GitBranch(merge_branch_name)
-  merge_branch.CreateBranch()
-  if not merge_branch.Exists():
-    Die('Unable to create merge branch.')
-  _SimpleRunCommand('git merge --squash %s' % _STABLE_BRANCH_NAME)
-  _SimpleRunCommand('git commit -m "%s"' % description)
-  # Ugh. There has got to be an easier way to push to a tracking branch
-  _SimpleRunCommand('git config push.default tracking')
-  _SimpleRunCommand('git push')
-
-
 def _SimpleRunCommand(command):
   """Runs a shell command and returns stdout back to caller."""
@ -248,19 +208,79 @@ def _SimpleRunCommand(command):
   stdout = proc_handle.communicate()[0]
   retcode = proc_handle.wait()
   if retcode != 0:
-    raise subprocess.CalledProcessError(retcode, command, output=stdout)
+    _Print(stdout)
+    raise subprocess.CalledProcessError(retcode, command)
   return stdout


 # ======================= End Global Helper Functions ========================


-class _GitBranch(object):
+def Clean(tracking_branch):
+  """Cleans up uncommitted changes.
+
+  Args:
+    tracking_branch: The tracking branch we want to return to after the call.
+  """
+  _SimpleRunCommand('git reset HEAD --hard')
+  _SimpleRunCommand('git checkout %s' % tracking_branch)
+
+
+def PushChange(stable_branch, tracking_branch):
+  """Pushes commits in the stable_branch to the remote git repository.
+
+  Pushes local commits from calls to CommitChange to the remote git
+  repository specified by the current working directory.
+
+  Args:
+    stable_branch: The local branch with commits we want to push.
+    tracking_branch: The tracking branch of the local branch.
+  Raises:
+    OSError: Error occurred while pushing.
+  """
+  num_retries = 5
+
+  # Sanity check to make sure we're on a stabilizing branch before pushing.
+  if not _CheckOnStabilizingBranch(stable_branch):
+    Info('Not on branch %s so no work found to push.  Exiting' % stable_branch)
+    return
+
+  description = _SimpleRunCommand('git log --format=format:%s%n%n%b ' +
+                                  tracking_branch + '..')
+  description = 'Marking set of ebuilds as stable\n\n%s' % description
+  Info('Using description %s' % description)
+  merge_branch_name = 'merge_branch'
+  for push_try in range(num_retries + 1):
+    try:
+      _SimpleRunCommand('git remote update')
+      merge_branch = GitBranch(merge_branch_name, tracking_branch)
+      merge_branch.CreateBranch()
+      if not merge_branch.Exists():
+        Die('Unable to create merge branch.')
+      _SimpleRunCommand('git merge --squash %s' % stable_branch)
+      _SimpleRunCommand('git commit -m "%s"' % description)
+      _SimpleRunCommand('git config push.default tracking')
+      if gflags.FLAGS.dryrun:
+        _SimpleRunCommand('git push --dry-run')
+      else:
+        _SimpleRunCommand('git push')
+
+      break
+    except:
+      if push_try < num_retries:
+        Warning('Failed to push change, performing retry (%s/%s)' % (
+            push_try + 1, num_retries))
+      else:
+        raise
+
+
+class GitBranch(object):
   """Wrapper class for a git branch."""

-  def __init__(self, branch_name):
+  def __init__(self, branch_name, tracking_branch):
     """Sets up variables but does not create the branch."""
     self.branch_name = branch_name
+    self.tracking_branch = tracking_branch

   def CreateBranch(self):
     """Creates a new git branch or replaces an existing one."""
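PushChange now retries the whole remote-update, squash-merge and push sequence, since a push can lose a race against another builder committing to the same overlay; the bare except re-raises only once the retry budget is spent. The control flow in isolation (push_with_retries and the push callable are placeholders; the script above retries immediately, without a delay):

    def push_with_retries(push, num_retries=5):
      """Calls push() up to num_retries + 1 times, re-raising on final failure."""
      for push_try in range(num_retries + 1):
        try:
          push()
          return
        except Exception:
          if push_try < num_retries:
            continue  # try again
          raise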
@ -271,7 +291,7 @@ class _GitBranch(object):
   def _Checkout(self, target, create=True):
     """Function used internally to create and move between branches."""
     if create:
-      git_cmd = 'git checkout -b %s %s' % (target, gflags.FLAGS.tracking_branch)
+      git_cmd = 'git checkout -b %s %s' % (target, self.tracking_branch)
     else:
       git_cmd = 'git checkout %s' % target
     _SimpleRunCommand(git_cmd)
@ -287,30 +307,30 @@ class _GitBranch(object):

     Returns True on success.
     """
-    self._Checkout(gflags.FLAGS.tracking_branch, create=False)
+    self._Checkout(self.tracking_branch, create=False)
     delete_cmd = 'git branch -D %s' % self.branch_name
     _SimpleRunCommand(delete_cmd)


-class _EBuild(object):
-  """Wrapper class for an ebuild."""
+class EBuild(object):
+  """Wrapper class for information about an ebuild."""

   def __init__(self, path):
-    """Initializes all data about an ebuild.
-
-    Uses equery to find the ebuild path and sets data about an ebuild for
-    easy reference.
-    """
+    """Sets up data about an ebuild from its path."""
     from portage.versions import pkgsplit
-    self.ebuild_path = path
-    (self.ebuild_path_no_revision,
-     self.ebuild_path_no_version,
-     self.current_revision) = self._ParseEBuildPath(self.ebuild_path)
-    _, self.category, pkgpath, filename = path.rsplit('/', 3)
-    filename_no_suffix = os.path.join(filename.replace('.ebuild', ''))
-    self.pkgname, version_no_rev, rev = pkgsplit(filename_no_suffix)
+    unused_path, self.category, self.pkgname, filename = path.rsplit('/', 3)
+    unused_pkgname, version_no_rev, rev = pkgsplit(
+        filename.replace('.ebuild', ''))
+    self.ebuild_path_no_version = os.path.join(
+        os.path.dirname(path), self.pkgname)
+    self.ebuild_path_no_revision = '%s-%s' % (self.ebuild_path_no_version,
+                                              version_no_rev)
+    self.current_revision = int(rev.replace('r', ''))
     self.version = '%s-%s' % (version_no_rev, rev)
     self.package = '%s/%s' % (self.category, self.pkgname)
+    self.ebuild_path = path

     self.is_workon = False
     self.is_stable = False
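The rewritten EBuild.__init__ replaces the hand-rolled _ParseEBuildPath with portage's pkgsplit, which divides a name-version-revision string into its three parts and reports a missing revision as 'r0'. Roughly what it returns for the filenames used here (the exact tuples assume stock portage behavior):

    from portage.versions import pkgsplit

    # pkgsplit returns (package_name, version, revision):
    assert pkgsplit('chromeos-chrome-8.0.224.503') == \
        ('chromeos-chrome', '8.0.224.503', 'r0')
    assert pkgsplit('chromeos-chrome-9.0.305.0_alpha-r1') == \
        ('chromeos-chrome', '9.0.305.0_alpha', 'r1')
    # ...hence current_revision = int(rev.replace('r', '')) above.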
@ -324,7 +344,6 @@ class _EBuild(object):

   def GetCommitId(self):
     """Get the commit id for this ebuild."""
-
     # Grab and evaluate CROS_WORKON variables from this ebuild.
     unstable_ebuild = '%s-9999.ebuild' % self.ebuild_path_no_version
     cmd = ('export CROS_WORKON_LOCALNAME="%s" CROS_WORKON_PROJECT="%s"; '
@ -367,39 +386,53 @@ class _EBuild(object):
       Die('Missing commit id for %s' % self.ebuild_path)
     return output.rstrip()

-  @classmethod
-  def _ParseEBuildPath(cls, ebuild_path):
-    """Static method that parses the path of an ebuild
-
-    Returns a tuple containing the (ebuild path without the revision
-    string, without the version string, and the current revision number for
-    the ebuild).
-    """
-    # Get the ebuild name without the revision string.
-    (ebuild_no_rev, _, rev_string) = ebuild_path.rpartition('-')
-
-    # Verify the revision string starts with the revision character.
-    if rev_string.startswith('r'):
-      # Get the ebuild name without the revision and version strings.
-      ebuild_no_version = ebuild_no_rev.rpartition('-')[0]
-      rev_string = rev_string[1:].rpartition('.ebuild')[0]
-    else:
-      # Has no revision so we stripped the version number instead.
-      ebuild_no_version = ebuild_no_rev
-      ebuild_no_rev = ebuild_path.rpartition('9999.ebuild')[0] + '0.0.1'
-      rev_string = '0'
-    revision = int(rev_string)
-    return (ebuild_no_rev, ebuild_no_version, revision)
-
-
 class EBuildStableMarker(object):
   """Class that revs the ebuild and commits locally or pushes the change."""

   def __init__(self, ebuild):
+    assert ebuild
     self._ebuild = ebuild

-  def RevEBuild(self, commit_id='', redirect_file=None):
-    """Revs an ebuild given the git commit id.
+  @classmethod
+  def MarkAsStable(cls, unstable_ebuild_path, new_stable_ebuild_path,
+                   commit_keyword, commit_value, redirect_file=None):
+    """Static function that creates a revved stable ebuild.
+
+    This function assumes you have already figured out the name of the new
+    stable ebuild path and then creates that file from the given unstable
+    ebuild and marks it as stable.  If the commit_value is set, it also
+    sets the commit_keyword=commit_value pair in the ebuild.
+
+    Args:
+      unstable_ebuild_path: The path to the unstable ebuild.
+      new_stable_ebuild_path: The path you want to use for the new stable
+        ebuild.
+      commit_keyword: Optional keyword to set in the ebuild to mark it as
+        stable.
+      commit_value: Value to set the above keyword to.
+      redirect_file: Optionally redirect output of new ebuild somewhere else.
+    """
+    shutil.copyfile(unstable_ebuild_path, new_stable_ebuild_path)
+    for line in fileinput.input(new_stable_ebuild_path, inplace=1):
+      # Has to be done here to get changes to sys.stdout from fileinput.input.
+      if not redirect_file:
+        redirect_file = sys.stdout
+      if line.startswith('KEYWORDS'):
+        # Actually mark this file as stable by removing ~'s.
+        redirect_file.write(line.replace('~', ''))
+      elif line.startswith('EAPI'):
+        # Always add new commit_id after EAPI definition.
+        redirect_file.write(line)
+        if commit_keyword and commit_value:
+          redirect_file.write('%s="%s"\n' % (commit_keyword, commit_value))
+      elif not line.startswith(commit_keyword):
+        # Skip old commit_keyword definition.
+        redirect_file.write(line)
+    fileinput.close()
+
+  def RevWorkOnEBuild(self, commit_id, redirect_file=None):
+    """Revs a workon ebuild given the git commit hash.

     By default this class overwrites a new ebuild given the normal
     ebuild rev'ing logic.  However, a user can specify a redirect_file
@ -418,44 +451,34 @@ class EBuildStableMarker(object):
     Returns:
       True if the revved package is different than the old ebuild.
     """
-    # TODO(sosa): Change to a check.
-    if not self._ebuild:
-      Die('Invalid ebuild given to EBuildStableMarker')
-
-    new_ebuild_path = '%s-r%d.ebuild' % (self._ebuild.ebuild_path_no_revision,
-                                         self._ebuild.current_revision + 1)
+    if self._ebuild.is_stable:
+      new_stable_ebuild_path = '%s-r%d.ebuild' % (
+          self._ebuild.ebuild_path_no_revision,
+          self._ebuild.current_revision + 1)
+    else:
+      # If given unstable ebuild, use 0.0.1 rather than 9999.
+      new_stable_ebuild_path = '%s-0.0.1-r%d.ebuild' % (
+          self._ebuild.ebuild_path_no_version,
+          self._ebuild.current_revision + 1)

-    _Print('Creating new stable ebuild %s' % new_ebuild_path)
-    workon_ebuild = '%s-9999.ebuild' % self._ebuild.ebuild_path_no_version
-    if not os.path.exists(workon_ebuild):
-      Die('Missing 9999 ebuild: %s' % workon_ebuild)
-    shutil.copyfile(workon_ebuild, new_ebuild_path)
-
-    for line in fileinput.input(new_ebuild_path, inplace=1):
-      # Has to be done here to get changes to sys.stdout from fileinput.input.
-      if not redirect_file:
-        redirect_file = sys.stdout
-      if line.startswith('KEYWORDS'):
-        # Actually mark this file as stable by removing ~'s.
-        redirect_file.write(line.replace('~', ''))
-      elif line.startswith('EAPI'):
-        # Always add new commit_id after EAPI definition.
-        redirect_file.write(line)
-        redirect_file.write('CROS_WORKON_COMMIT="%s"\n' % commit_id)
-      elif not line.startswith('CROS_WORKON_COMMIT'):
-        # Skip old CROS_WORKON_COMMIT definition.
-        redirect_file.write(line)
-    fileinput.close()
+    _Print('Creating new stable ebuild %s' % new_stable_ebuild_path)
+    unstable_ebuild_path = ('%s-9999.ebuild' %
+                            self._ebuild.ebuild_path_no_version)
+    if not os.path.exists(unstable_ebuild_path):
+      Die('Missing unstable ebuild: %s' % unstable_ebuild_path)
+
+    self.MarkAsStable(unstable_ebuild_path, new_stable_ebuild_path,
+                      'CROS_WORKON_COMMIT', commit_id, redirect_file)

     old_ebuild_path = self._ebuild.ebuild_path
-    diff_cmd = ['diff', '-Bu', old_ebuild_path, new_ebuild_path]
+    diff_cmd = ['diff', '-Bu', old_ebuild_path, new_stable_ebuild_path]
     if 0 == RunCommand(diff_cmd, exit_code=True, redirect_stdout=True,
                        redirect_stderr=True, print_cmd=gflags.FLAGS.verbose):
-      os.unlink(new_ebuild_path)
+      os.unlink(new_stable_ebuild_path)
       return False
     else:
       _Print('Adding new stable ebuild to git')
-      _SimpleRunCommand('git add %s' % new_ebuild_path)
+      _SimpleRunCommand('git add %s' % new_stable_ebuild_path)

       if self._ebuild.is_stable:
         _Print('Removing old ebuild from git')
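MarkAsStable (like the loop it replaces here) leans on the stdlib fileinput in-place mode: with inplace=1 the original file is moved aside and sys.stdout is rebound to its replacement, so each line the loop writes is kept and each line it skips is dropped. The idiom in a standalone form (set_keyword_stable is a hypothetical name):

    import fileinput
    import sys

    def set_keyword_stable(path):
      """Rewrites path in place, stripping ~ markers from the KEYWORDS line."""
      for line in fileinput.input(path, inplace=1):
        # Inside this loop sys.stdout points at the replacement file.
        if line.startswith('KEYWORDS'):
          sys.stdout.write(line.replace('~', ''))
        else:
          sys.stdout.write(line)
      fileinput.close()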
@ -463,11 +486,9 @@ class EBuildStableMarker(object):

     return True

-  def CommitChange(self, message):
-    """Commits current changes in git locally.
+  @classmethod
+  def CommitChange(cls, message):
+    """Commits current changes in git locally with given commit message.

-    This method will take any changes from invocations to RevEBuild
-    and commits them locally in the git repository that contains os.pwd.
-
     Args:
       message: the commit string to write when committing to git.
@ -475,8 +496,7 @@ class EBuildStableMarker(object):
     Raises:
       OSError: Error occurred while committing.
     """
-    _Print('Committing changes for %s with commit message %s' % \
-        (self._ebuild.package, message))
+    Info('Committing changes with commit message: %s' % message)
     git_commit_cmd = 'git commit -am "%s"' % message
     _SimpleRunCommand(git_commit_cmd)
@ -520,11 +540,11 @@ def main(argv):
     os.chdir(overlay)

     if command == 'clean':
-      _Clean()
+      Clean(gflags.FLAGS.tracking_branch)
     elif command == 'push':
-      _PushChange()
+      PushChange(STABLE_BRANCH_NAME, gflags.FLAGS.tracking_branch)
     elif command == 'commit' and ebuilds:
-      work_branch = _GitBranch(_STABLE_BRANCH_NAME)
+      work_branch = GitBranch(STABLE_BRANCH_NAME, gflags.FLAGS.tracking_branch)
       work_branch.CreateBranch()
       if not work_branch.Exists():
         Die('Unable to create stabilizing branch in %s' % overlay)
@ -536,7 +556,7 @@ def main(argv):
         _Print('Working on %s' % ebuild.package)
         worker = EBuildStableMarker(ebuild)
         commit_id = ebuild.GetCommitId()
-        if worker.RevEBuild(commit_id):
+        if worker.RevWorkOnEBuild(commit_id):
           message = _GIT_COMMIT_MESSAGE % (ebuild.package, commit_id)
           worker.CommitChange(message)
          revved_packages.append(ebuild.package)
@ -549,6 +569,10 @@ def main(argv):

       if revved_packages:
         _CleanStalePackages(gflags.FLAGS.board, revved_packages)
+        if gflags.FLAGS.drop_file:
+          fh = open(gflags.FLAGS.drop_file, 'w')
+          fh.write(' '.join(revved_packages))
+          fh.close()
       else:
         work_branch.Delete()
@ -6,16 +6,45 @@

 """Unit tests for cros_mark_as_stable.py."""

+import fileinput
 import mox
 import os
 import sys
 import unittest

-# Required to include '.' in the python path.
-sys.path.append(os.path.dirname(__file__))
 import cros_mark_as_stable


+class NonClassTests(mox.MoxTestBase):
+  def setUp(self):
+    mox.MoxTestBase.setUp(self)
+    self.mox.StubOutWithMock(cros_mark_as_stable, '_SimpleRunCommand')
+    self._branch = 'test_branch'
+    self._tracking_branch = 'cros/test'
+
+  def testPushChange(self):
+    git_log = 'Marking test_one as stable\nMarking test_two as stable\n'
+    fake_description = 'Marking set of ebuilds as stable\n\n%s' % git_log
+    self.mox.StubOutWithMock(cros_mark_as_stable, '_CheckOnStabilizingBranch')
+    self.mox.StubOutWithMock(cros_mark_as_stable.GitBranch, 'CreateBranch')
+    self.mox.StubOutWithMock(cros_mark_as_stable.GitBranch, 'Exists')
+
+    cros_mark_as_stable._CheckOnStabilizingBranch(self._branch).AndReturn(True)
+    cros_mark_as_stable.GitBranch.CreateBranch()
+    cros_mark_as_stable.GitBranch.Exists().AndReturn(True)
+    cros_mark_as_stable._SimpleRunCommand('git log --format=format:%s%n%n%b ' +
+        self._tracking_branch + '..').AndReturn(git_log)
+    cros_mark_as_stable._SimpleRunCommand('git remote update')
+    cros_mark_as_stable._SimpleRunCommand('git merge --squash %s' %
+                                          self._branch)
+    cros_mark_as_stable._SimpleRunCommand('git commit -m "%s"' %
+                                          fake_description)
+    cros_mark_as_stable._SimpleRunCommand('git config push.default tracking')
+    cros_mark_as_stable._SimpleRunCommand('git push')
+    self.mox.ReplayAll()
+    cros_mark_as_stable.PushChange(self._branch, self._tracking_branch)
+    self.mox.VerifyAll()
+
+
 class GitBranchTest(mox.MoxTestBase):

   def setUp(self):
@ -23,10 +52,11 @@ class GitBranchTest(mox.MoxTestBase):
|
|||||||
# Always stub RunCommmand out as we use it in every method.
|
# Always stub RunCommmand out as we use it in every method.
|
||||||
self.mox.StubOutWithMock(cros_mark_as_stable, '_SimpleRunCommand')
|
self.mox.StubOutWithMock(cros_mark_as_stable, '_SimpleRunCommand')
|
||||||
self._branch = 'test_branch'
|
self._branch = 'test_branch'
|
||||||
|
self._tracking_branch = 'cros/test'
|
||||||
|
|
||||||
def testCreateBranchNoPrevious(self):
|
def testCreateBranchNoPrevious(self):
|
||||||
# Test init with no previous branch existing.
|
# Test init with no previous branch existing.
|
||||||
branch = cros_mark_as_stable._GitBranch(self._branch)
|
branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch)
|
||||||
self.mox.StubOutWithMock(branch, 'Exists')
|
self.mox.StubOutWithMock(branch, 'Exists')
|
||||||
self.mox.StubOutWithMock(branch, '_Checkout')
|
self.mox.StubOutWithMock(branch, '_Checkout')
|
||||||
branch.Exists().AndReturn(False)
|
branch.Exists().AndReturn(False)
|
||||||
@ -37,7 +67,7 @@ class GitBranchTest(mox.MoxTestBase):
|
|||||||
|
|
||||||
def testCreateBranchWithPrevious(self):
|
def testCreateBranchWithPrevious(self):
|
||||||
# Test init with previous branch existing.
|
# Test init with previous branch existing.
|
||||||
branch = cros_mark_as_stable._GitBranch(self._branch)
|
branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch)
|
||||||
self.mox.StubOutWithMock(branch, 'Exists')
|
self.mox.StubOutWithMock(branch, 'Exists')
|
||||||
self.mox.StubOutWithMock(branch, 'Delete')
|
self.mox.StubOutWithMock(branch, 'Delete')
|
||||||
self.mox.StubOutWithMock(branch, '_Checkout')
|
self.mox.StubOutWithMock(branch, '_Checkout')
|
||||||
@ -51,35 +81,36 @@ class GitBranchTest(mox.MoxTestBase):
|
|||||||
def testCheckoutCreate(self):
|
def testCheckoutCreate(self):
|
||||||
# Test init with no previous branch existing.
|
# Test init with no previous branch existing.
|
||||||
cros_mark_as_stable._SimpleRunCommand(
|
cros_mark_as_stable._SimpleRunCommand(
|
||||||
'git checkout -b %s cros/master' % self._branch)
|
'git checkout -b %s %s' % (self._branch, self._tracking_branch))
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
branch = cros_mark_as_stable._GitBranch(self._branch)
|
branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch)
|
||||||
branch._Checkout(self._branch)
|
branch._Checkout(self._branch)
|
||||||
self.mox.VerifyAll()
|
self.mox.VerifyAll()
|
||||||
|
|
||||||
def testCheckoutNoCreate(self):
|
def testCheckoutNoCreate(self):
|
||||||
# Test init with previous branch existing.
|
# Test init with previous branch existing.
|
||||||
cros_mark_as_stable._SimpleRunCommand('git checkout cros/master')
|
cros_mark_as_stable._SimpleRunCommand('git checkout %s' % (
|
||||||
|
self._tracking_branch))
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
branch = cros_mark_as_stable._GitBranch(self._branch)
|
branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch)
|
||||||
branch._Checkout('cros/master', False)
|
branch._Checkout(self._tracking_branch, False)
|
||||||
self.mox.VerifyAll()
|
self.mox.VerifyAll()
|
||||||
|
|
||||||
def testDelete(self):
|
def testDelete(self):
|
||||||
branch = cros_mark_as_stable._GitBranch(self._branch)
|
branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch)
|
||||||
self.mox.StubOutWithMock(branch, '_Checkout')
|
self.mox.StubOutWithMock(branch, '_Checkout')
|
||||||
branch._Checkout('cros/master', create=False)
|
branch._Checkout(self._tracking_branch, create=False)
|
||||||
cros_mark_as_stable._SimpleRunCommand('git branch -D ' + self._branch)
|
cros_mark_as_stable._SimpleRunCommand('git branch -D ' + self._branch)
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
branch.Delete()
|
branch.Delete()
|
||||||
self.mox.VerifyAll()
|
self.mox.VerifyAll()
|
||||||
|
|
||||||
def testExists(self):
|
def testExists(self):
|
||||||
branch = cros_mark_as_stable._GitBranch(self._branch)
|
branch = cros_mark_as_stable.GitBranch(self._branch, self._tracking_branch)
|
||||||
|
|
||||||
# Test if branch exists that is created
|
# Test if branch exists that is created
|
||||||
cros_mark_as_stable._SimpleRunCommand('git branch').AndReturn(
|
cros_mark_as_stable._SimpleRunCommand('git branch').AndReturn(
|
||||||
'%s %s' % (self._branch, 'cros/master'))
|
'%s %s' % (self._branch, self._tracking_branch))
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
self.assertTrue(branch.Exists())
|
self.assertTrue(branch.Exists())
|
||||||
self.mox.VerifyAll()
|
self.mox.VerifyAll()
|
||||||
@ -90,45 +121,34 @@ class EBuildTest(mox.MoxTestBase):
|
|||||||
def setUp(self):
|
def setUp(self):
|
||||||
mox.MoxTestBase.setUp(self)
|
mox.MoxTestBase.setUp(self)
|
||||||
|
|
||||||
def testInit(self):
|
|
||||||
self.mox.StubOutWithMock(cros_mark_as_stable._EBuild, '_ParseEBuildPath')
|
|
||||||
|
|
||||||
ebuild_path = '/overlay/cat/test_package/test_package-0.0.1-r1.ebuild'
|
|
||||||
cros_mark_as_stable._EBuild._ParseEBuildPath(
|
|
||||||
ebuild_path).AndReturn(['/overlay/cat/test_package-0.0.1',
|
|
||||||
'/overlay/cat/test_package',
|
|
||||||
1])
|
|
||||||
self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input')
|
|
||||||
mock_file = ['EAPI=2', 'CROS_WORKON_COMMIT=old_id',
|
|
||||||
'KEYWORDS=\"~x86 ~arm\"', 'src_unpack(){}']
|
|
||||||
cros_mark_as_stable.fileinput.input(ebuild_path).AndReturn(mock_file)
|
|
||||||
|
|
||||||
self.mox.ReplayAll()
|
|
||||||
ebuild = cros_mark_as_stable._EBuild(ebuild_path)
|
|
||||||
self.mox.VerifyAll()
|
|
||||||
self.assertEquals(ebuild.package, 'cat/test_package')
|
|
||||||
self.assertEquals(ebuild.ebuild_path, ebuild_path)
|
|
||||||
self.assertEquals(ebuild.ebuild_path_no_revision,
|
|
||||||
'/overlay/cat/test_package-0.0.1')
|
|
||||||
self.assertEquals(ebuild.ebuild_path_no_version,
|
|
||||||
'/overlay/cat/test_package')
|
|
||||||
self.assertEquals(ebuild.current_revision, 1)
|
|
||||||
|
|
||||||
def testParseEBuildPath(self):
|
def testParseEBuildPath(self):
|
||||||
# Test with ebuild with revision number.
|
# Test with ebuild with revision number.
|
||||||
no_rev, no_version, revision = cros_mark_as_stable._EBuild._ParseEBuildPath(
|
fake_ebuild_path = '/path/to/test_package/test_package-0.0.1-r1.ebuild'
|
||||||
'/path/test_package-0.0.1-r1.ebuild')
|
self.mox.StubOutWithMock(fileinput, 'input')
|
||||||
self.assertEquals(no_rev, '/path/test_package-0.0.1')
|
fileinput.input(fake_ebuild_path).AndReturn('')
|
||||||
self.assertEquals(no_version, '/path/test_package')
|
self.mox.ReplayAll()
|
||||||
self.assertEquals(revision, 1)
|
fake_ebuild = cros_mark_as_stable.EBuild(fake_ebuild_path)
|
||||||
|
self.mox.VerifyAll()
|
||||||
|
self.assertEquals(fake_ebuild.ebuild_path_no_revision,
|
||||||
|
'/path/to/test_package/test_package-0.0.1')
|
||||||
|
self.assertEquals(fake_ebuild.ebuild_path_no_version,
|
||||||
|
'/path/to/test_package/test_package')
|
||||||
|
self.assertEquals(fake_ebuild.current_revision, 1)
|
||||||
|
|
||||||
def testParseEBuildPathNoRevisionNumber(self):
|
def testParseEBuildPathNoRevisionNumber(self):
|
||||||
# Test with ebuild without revision number.
|
# Test with ebuild without revision number.
|
||||||
no_rev, no_version, revision = cros_mark_as_stable._EBuild._ParseEBuildPath(
|
fake_ebuild_path = '/path/to/test_package/test_package-9999.ebuild'
|
||||||
'/path/test_package-9999.ebuild')
|
self.mox.StubOutWithMock(fileinput, 'input')
|
||||||
self.assertEquals(no_rev, '/path/test_package-0.0.1')
|
fileinput.input(fake_ebuild_path).AndReturn('')
|
||||||
self.assertEquals(no_version, '/path/test_package')
|
self.mox.ReplayAll()
|
||||||
self.assertEquals(revision, 0)
|
fake_ebuild = cros_mark_as_stable.EBuild(fake_ebuild_path)
|
||||||
|
self.mox.VerifyAll()
|
||||||
|
|
||||||
|
self.assertEquals(fake_ebuild.ebuild_path_no_revision,
|
||||||
|
'/path/to/test_package/test_package-9999')
|
||||||
|
self.assertEquals(fake_ebuild.ebuild_path_no_version,
|
||||||
|
'/path/to/test_package/test_package')
|
||||||
|
self.assertEquals(fake_ebuild.current_revision, 0)
|
||||||
|
|
||||||
|
|
||||||
class EBuildStableMarkerTest(mox.MoxTestBase):
|
class EBuildStableMarkerTest(mox.MoxTestBase):
|
||||||
@ -138,7 +158,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
|
|||||||
self.mox.StubOutWithMock(cros_mark_as_stable, '_SimpleRunCommand')
|
self.mox.StubOutWithMock(cros_mark_as_stable, '_SimpleRunCommand')
|
||||||
self.mox.StubOutWithMock(cros_mark_as_stable, 'RunCommand')
|
self.mox.StubOutWithMock(cros_mark_as_stable, 'RunCommand')
|
||||||
self.mox.StubOutWithMock(os, 'unlink')
|
self.mox.StubOutWithMock(os, 'unlink')
|
||||||
self.m_ebuild = self.mox.CreateMock(cros_mark_as_stable._EBuild)
|
self.m_ebuild = self.mox.CreateMock(cros_mark_as_stable.EBuild)
|
||||||
self.m_ebuild.is_stable = True
|
self.m_ebuild.is_stable = True
|
||||||
self.m_ebuild.package = 'test_package'
|
self.m_ebuild.package = 'test_package'
|
||||||
self.m_ebuild.current_revision = 1
|
self.m_ebuild.current_revision = 1
|
||||||
@ -147,7 +167,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
|
|||||||
self.m_ebuild.ebuild_path = '/path/test_package-0.0.1-r1.ebuild'
|
self.m_ebuild.ebuild_path = '/path/test_package-0.0.1-r1.ebuild'
|
||||||
self.revved_ebuild_path = '/path/test_package-0.0.1-r2.ebuild'
|
self.revved_ebuild_path = '/path/test_package-0.0.1-r2.ebuild'
|
||||||
|
|
||||||
def testRevEBuild(self):
|
def testRevWorkOnEBuild(self):
|
||||||
self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input')
|
self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input')
|
||||||
self.mox.StubOutWithMock(cros_mark_as_stable.os.path, 'exists')
|
self.mox.StubOutWithMock(cros_mark_as_stable.os.path, 'exists')
|
||||||
self.mox.StubOutWithMock(cros_mark_as_stable.shutil, 'copyfile')
|
self.mox.StubOutWithMock(cros_mark_as_stable.shutil, 'copyfile')
|
||||||
@ -177,7 +197,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
|
|||||||
|
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
|
marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
|
||||||
marker.RevEBuild('my_id', redirect_file=m_file)
|
marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
|
||||||
self.mox.VerifyAll()
|
self.mox.VerifyAll()
|
||||||
|
|
||||||
def testRevUnchangedEBuild(self):
|
def testRevUnchangedEBuild(self):
|
||||||
@ -209,7 +229,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
|
|||||||
|
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
|
marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
|
||||||
marker.RevEBuild('my_id', redirect_file=m_file)
|
marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
|
||||||
self.mox.VerifyAll()
|
self.mox.VerifyAll()
|
||||||
|
|
||||||
def testRevMissingEBuild(self):
|
def testRevMissingEBuild(self):
|
||||||
@ -226,7 +246,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
|
|||||||
|
|
||||||
ebuild_9999 = self.m_ebuild.ebuild_path_no_version + '-9999.ebuild'
|
ebuild_9999 = self.m_ebuild.ebuild_path_no_version + '-9999.ebuild'
|
||||||
cros_mark_as_stable.os.path.exists(ebuild_9999).AndReturn(False)
|
cros_mark_as_stable.os.path.exists(ebuild_9999).AndReturn(False)
|
||||||
cros_mark_as_stable.Die("Missing 9999 ebuild: %s" % ebuild_9999)
|
cros_mark_as_stable.Die("Missing unstable ebuild: %s" % ebuild_9999)
|
||||||
cros_mark_as_stable.shutil.copyfile(ebuild_9999, self.revved_ebuild_path)
|
cros_mark_as_stable.shutil.copyfile(ebuild_9999, self.revved_ebuild_path)
|
||||||
cros_mark_as_stable.fileinput.input(self.revved_ebuild_path,
|
cros_mark_as_stable.fileinput.input(self.revved_ebuild_path,
|
||||||
inplace=1).AndReturn(mock_file)
|
inplace=1).AndReturn(mock_file)
|
||||||
@ -244,7 +264,7 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
|
|||||||
|
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
|
marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
|
||||||
marker.RevEBuild('my_id', redirect_file=m_file)
|
marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
|
||||||
self.mox.VerifyAll()
|
self.mox.VerifyAll()
|
||||||
|
|
||||||
|
|
||||||
@ -257,14 +277,6 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
|
|||||||
marker.CommitChange(mock_message)
|
marker.CommitChange(mock_message)
|
||||||
self.mox.VerifyAll()
|
self.mox.VerifyAll()
|
||||||
|
|
||||||
def testPushChange(self):
|
|
||||||
#cros_mark_as_stable._SimpleRunCommand('git push')
|
|
||||||
#self.mox.ReplayAll()
|
|
||||||
#marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
|
|
||||||
#marker.PushChange()
|
|
||||||
#self.mox.VerifyAll()
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class _Package(object):
|
class _Package(object):
|
||||||
def __init__(self, package):
|
def __init__(self, package):
|
||||||
@@ -19,6 +19,8 @@ DEFINE_string board "${DEFAULT_BOARD}" \
 "Target board of which tests were built"
 DEFINE_string build_root "${DEFAULT_BUILD_ROOT}" \
 "Root of build output"
+DEFINE_string package_file "" \
+"File with space-separated list of packages to run unit tests" f
 DEFINE_string packages "" \
 "Optional space-separated list of packages to run unit tests" p

@@ -55,11 +57,19 @@ set -e

 [ -z "${FLAGS_board}" ] && die "--board required"

-# If no packages are specified we run all unit tests for chromeos-base
-# packages.
-if [ -n "${FLAGS_packages}" ]; then
-PACKAGE_LIST="${FLAGS_packages}"
+# Create package list from package file and list of packages.
+if [ -n "${FLAGS_package_file}" ]; then
+if [ -f "${FLAGS_package_file}" ]; then
+PACKAGE_LIST="$(cat ${FLAGS_package_file})"
 else
+warn "Missing package file."
+fi
+fi
+
+[ -n "${FLAGS_packages}" ] && PACKAGE_LIST="${PACKAGE_LIST} ${FLAGS_packages}"
+
+# If we didn't specify packages, find all packages.
+if [ -z "${FLAGS_package_file}" -a -z "${FLAGS_packages}" ]; then
 PACKAGE_LIST=$( ./get_package_list chromeos --board="${FLAGS_board}" |
 egrep '^chromeos-base' )
 fi
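
Note: the flag handling above merges up to three sources into PACKAGE_LIST — the contents of --package_file, any --packages values, and, when both are absent, the chromeos-base packages reported by get_package_list. A standalone sketch of that merge, with hypothetical file and package names:

  #!/bin/bash
  # Sketch of the package-list assembly above; runnable on its own.
  # "packages.txt" and the package name below are examples only.
  package_file="packages.txt"              # analogous to --package_file
  packages="chromeos-base/update_engine"   # analogous to --packages

  PACKAGE_LIST=""
  if [ -n "${package_file}" ] && [ -f "${package_file}" ]; then
    PACKAGE_LIST="$(cat "${package_file}")"
  fi
  [ -n "${packages}" ] && PACKAGE_LIST="${PACKAGE_LIST} ${packages}"
  echo "Would run unit tests for: ${PACKAGE_LIST}"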
@@ -71,7 +81,8 @@ for package in ${PACKAGE_LIST}; do
 warn "Skipping package ${package} since it is blacklisted."
 continue
 fi
-EBUILD_PATH=$( equery-${FLAGS_board} which ${package} 2> /dev/null )
+EBUILD_PATH=$( equery-${FLAGS_board} which ${package} 2> /dev/null ) || \
+warn "${package} not found"
 if [ -n "${EBUILD_PATH}" ]; then
 if check_src_test "${EBUILD_PATH}"; then
 run_unit_test "${EBUILD_PATH}" || record_test_failure "${package}"

@@ -278,6 +278,9 @@ setup_env
 # Use git:8 chars of sha1
 REVISION=$(cd ${FLAGS_trunk}/src/scripts ; git rev-parse --short=8 HEAD)
 CHROOT_PASSTHRU="CHROMEOS_REVISION=$REVISION BUILDBOT_BUILD=$FLAGS_build_number CHROMEOS_OFFICIAL=$CHROMEOS_OFFICIAL"
+CHROOT_PASSTHRU="${CHROOT_PASSTHRU} \
+CHROMEOS_RELEASE_APPID=${CHROMEOS_RELEASE_APPID:-"{DEV-BUILD}"}"

 if [ -d "$HOME/.subversion" ]; then
 # Bind mounting .subversion into chroot
 info "mounting ~/.subversion into chroot"
@@ -36,6 +36,8 @@ DEFINE_integer devserver_port 8080 \
 DEFINE_boolean for_vm ${FLAGS_FALSE} "Image is for a vm."
 DEFINE_string image "" \
 "Update with this image path that is in this source checkout." i
+DEFINE_string payload "" \
+"Update with this update payload, ignoring specified images."
 DEFINE_string src_image "" \
 "Create a delta update by passing in the image on the remote machine."
 DEFINE_boolean update_stateful ${FLAGS_TRUE} \

@@ -132,6 +134,11 @@ function start_dev_server {
 --image $(reinterpret_path_for_chroot ${IMAGE_PATH})"
 fi

+if [ -n "${FLAGS_payload}" ]; then
+devserver_flags="${devserver_flags} \
+--payload $(reinterpret_path_for_chroot ${FLAGS_payload})"
+fi

 [ ${FLAGS_for_vm} -eq ${FLAGS_TRUE} ] && \
 devserver_flags="${devserver_flags} --for_vm"
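
Note: with the new --payload flag the devserver can serve a pre-built update payload instead of generating one from an image. A hypothetical invocation — the device address, payload path, and the availability of a --remote flag are assumptions, not taken from this diff:

  # Hypothetical: update a test device from an existing payload.
  ./image_to_live.sh \
    --remote=192.168.0.42 \
    --payload=../build/images/x86-generic/latest/update.gz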
@@ -13,6 +13,11 @@ _STDOUT_IS_TTY = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()

 # TODO(sosa): Move logging to logging module.

+class RunCommandException(Exception):
+"""Raised when there is an error in RunCommand."""
+pass


 def GetCallerName():
 """Returns the name of the calling module with __main__."""
 top_frame = inspect.stack()[-1][0]

@@ -21,24 +26,30 @@ def GetCallerName():

 def RunCommand(cmd, print_cmd=True, error_ok=False, error_message=None,
 exit_code=False, redirect_stdout=False, redirect_stderr=False,
-cwd=None, input=None, enter_chroot=False):
+cwd=None, input=None, enter_chroot=False, num_retries=0):
 """Runs a shell command.

-Keyword arguments:
+Arguments:
-cmd - cmd to run. Should be input to subprocess.Popen. If a string,
+cmd: cmd to run. Should be input to subprocess.Popen. If a string,
 converted to an array using split().
-print_cmd -- prints the command before running it.
+print_cmd: prints the command before running it.
-error_ok -- does not raise an exception on error.
+error_ok: does not raise an exception on error.
-error_message -- prints out this message when an error occurs.
+error_message: prints out this message when an error occurs.
-exit_code -- returns the return code of the shell command.
+exit_code: returns the return code of the shell command.
-redirect_stdout -- returns the stdout.
+redirect_stdout: returns the stdout.
-redirect_stderr -- holds stderr output until input is communicated.
+redirect_stderr: holds stderr output until input is communicated.
-cwd -- the working directory to run this cmd.
+cwd: the working directory to run this cmd.
-input -- input to pipe into this command through stdin.
+input: input to pipe into this command through stdin.
-enter_chroot -- this command should be run from within the chroot. If set,
+enter_chroot: this command should be run from within the chroot. If set,
 cwd must point to the scripts directory.
+num_retries: the number of retries to perform before dying.

+Returns:
+If exit_code is True, returns the return code of the shell command.
+Else returns the output of the shell command.

 Raises:
-Exception: Raises generic exception on error with optional error_message.
+Exception: Raises RunCommandException on error with optional error_message.
 """
 # Set default for variables.
 stdout = None

@@ -57,21 +68,27 @@ def RunCommand(cmd, print_cmd=True, error_ok=False, error_message=None,
 Info('PROGRAM(%s) -> RunCommand: %r in dir %s' %
 (GetCallerName(), cmd, cwd))

+for retry_count in range(num_retries + 1):
 try:
 proc = subprocess.Popen(cmd, cwd=cwd, stdin=stdin,
 stdout=stdout, stderr=stderr)
 (output, error) = proc.communicate(input)
-if exit_code:
+if exit_code and retry_count == num_retries:
 return proc.returncode

-if not error_ok and proc.returncode:
-raise Exception('Command "%r" failed.\n' % (cmd) +
+if proc.returncode == 0:
+break
+
+raise RunCommandException('Command "%r" failed.\n' % (cmd) +
 (error_message or error or output or ''))
 except Exception, e:
-if not error_ok:
+if not error_ok and retry_count == num_retries:
-raise
+raise RunCommandException(e)
 else:
 Warning(str(e))
+if print_cmd:
+Info('PROGRAM(%s) -> RunCommand: retrying %r in dir %s' %
+(GetCallerName(), cmd, cwd))

 return output
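
Note: the retry semantics introduced above — attempt the command num_retries + 1 times, warn on intermediate failures, and only raise after the final attempt — have a direct shell analogue. A minimal illustrative sketch, not part of the tree:

  # Minimal shell analogue of RunCommand's retry loop.
  run_with_retries() {
    local num_retries="$1"; shift
    local attempt
    for attempt in $(seq 0 "${num_retries}"); do
      if "$@"; then
        return 0    # success: stop retrying
      fi
      echo "WARNING: '$*' failed (attempt $((attempt + 1)))" >&2
    done
    echo "ERROR: '$*' failed after $((num_retries + 1)) attempts" >&2
    return 1
  }

  run_with_retries 3 git remote update   # example: retry a flaky network op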
@@ -8,19 +8,20 @@
 # especially for being redistributed into platforms without complete Chromium OS
 # developing environment.

-# Check if given command is available in current system
-has_command() {
+# Checks if given command is available in current system
+image_has_command() {
 type "$1" >/dev/null 2>&1
 }

-err_die() {
+# Prints error message and exit as 1 (error)
+image_die() {
 echo "ERROR: $@" >&2
 exit 1
 }

-# Finds the best gzip compressor and invoke it.
-gzip_compress() {
-if has_command pigz; then
+# Finds the best gzip compressor and invoke it
+image_gzip_compress() {
+if image_has_command pigz; then
 # echo " ** Using parallel gzip **" >&2
 # Tested with -b 32, 64, 128(default), 256, 1024, 16384, and -b 32 (max
 # window size of Deflate) seems to be the best in output size.

@@ -30,43 +31,58 @@ gzip_compress() {
 fi
 }

+# Finds the best bzip2 compressor and invoke it
+image_bzip2_compress() {
+if image_has_command pbzip2; then
+pbzip2 "$@"
+else
+bzip2 "$@"
+fi
+}

 # Finds if current system has tools for part_* commands
-has_part_tools() {
-has_command cgpt || has_command parted
+image_has_part_tools() {
+image_has_command cgpt || image_has_command parted
 }

 # Finds the best partition tool and print partition offset
-part_offset() {
+image_part_offset() {
 local file="$1"
 local partno="$2"
+local unpack_file="$(dirname "$file")/unpack_partitions.sh"

-if has_command cgpt; then
+# TODO parted is available on most Linux so we may deprecate other code path
+if image_has_command cgpt; then
 cgpt show -b -i "$partno" "$file"
-elif has_command parted; then
-parted -m "$file" unit s print |
-grep "^$partno:" | cut -d ':' -f 2 | sed 's/s$//'
+elif image_has_command parted; then
+parted -m "$file" unit s print | awk -F ':' "/^$partno:/ { print int(\$2) }"
+elif [ -f "$unpack_file" ]; then
+awk "/ $partno *Label:/ { print \$2 }" "$unpack_file"
 else
 exit 1
 fi
 }

 # Finds the best partition tool and print partition size
-part_size() {
+image_part_size() {
 local file="$1"
 local partno="$2"
+local unpack_file="$(dirname "$file")/unpack_partitions.sh"

-if has_command cgpt; then
+# TODO parted is available on most Linux so we may deprecate other code path
+if image_has_command cgpt; then
 cgpt show -s -i "$partno" "$file"
-elif has_command parted; then
-parted -m "$file" unit s print |
-grep "^$partno:" | cut -d ':' -f 4 | sed 's/s$//'
+elif image_has_command parted; then
+parted -m "$file" unit s print | awk -F ':' "/^$partno:/ { print int(\$4) }"
+elif [ -s "$unpack_file" ]; then
+awk "/ $partno *Label:/ { print \$3 }" "$unpack_file"
 else
 exit 1
 fi
 }
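
Note: both helpers print a value in 512-byte sectors on stdout, falling back from cgpt to parted to a previously generated unpack_partitions.sh. A standalone sketch of the same lookup for one partition, using the commands exactly as they appear above (the image path is an example):

  # Inspect partition 3 of a disk image the way image_part_offset /
  # image_part_size do.
  IMAGE=chromiumos_image.bin
  PARTNO=3
  if command -v cgpt >/dev/null; then
    offset=$(cgpt show -b -i "$PARTNO" "$IMAGE")   # start, in sectors
    size=$(cgpt show -s -i "$PARTNO" "$IMAGE")     # length, in sectors
  else
    offset=$(parted -m "$IMAGE" unit s print | awk -F ':' "/^$PARTNO:/ { print int(\$2) }")
    size=$(parted -m "$IMAGE" unit s print | awk -F ':' "/^$PARTNO:/ { print int(\$4) }")
  fi
  echo "partition $PARTNO: offset=${offset}s size=${size}s ($((size * 512)) bytes)"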
 # Dumps a file by given offset and size (in sectors)
-dump_partial_file() {
+image_dump_partial_file() {
 local file="$1"
 local offset="$2"
 local sectors="$3"

@@ -82,10 +98,10 @@ dump_partial_file() {
 bs=$((bs * buffer_ratio))
 fi

-if has_command pv; then
+if image_has_command pv; then
 dd if="$file" bs=$bs skip="$offset" count="$sectors" \
 oflag=sync status=noxfer 2>/dev/null |
-pv -ptreb -B 4m -s $((sectors * $bs))
+pv -ptreb -B $bs -s $((sectors * bs))
 else
 dd if="$file" bs=$bs skip="$offset" count="$sectors" \
 oflag=sync status=noxfer 2>/dev/null

@@ -93,14 +109,62 @@ dump_partial_file() {
 }

 # Dumps a specific partition from given image file
-dump_partition() {
+image_dump_partition() {
 local file="$1"
 local part_num="$2"
-local offset="$(part_offset "$file" "$part_num")" ||
-err_die "failed to dump partition #$part_num from: $file"
-local size="$(part_size "$file" "$part_num")" ||
-err_die "failed to dump partition #$part_num from: $file"
+local offset="$(image_part_offset "$file" "$part_num")" ||
+image_die "failed to find partition #$part_num from: $file"
+local size="$(image_part_size "$file" "$part_num")" ||
+image_die "failed to find partition #$part_num from: $file"

-dump_partial_file "$file" "$offset" "$size"
+image_dump_partial_file "$file" "$offset" "$size"
 }

+# Maps a specific partition from given image file to a loop device
+image_map_partition() {
+local file="$1"
+local part_num="$2"
+local offset="$(image_part_offset "$file" "$part_num")" ||
+image_die "failed to find partition #$part_num from: $file"
+local size="$(image_part_size "$file" "$part_num")" ||
+image_die "failed to find partition #$part_num from: $file"
+
+losetup --offset $((offset * 512)) --sizelimit=$((size * 512)) \
+-f --show "$file"
+}
+
+# Unmaps a loop device created by image_map_partition
+image_unmap_partition() {
+local map_point="$1"
+
+losetup -d "$map_point"
+}
+
+# Mounts a specific partition inside a given image file
+image_mount_partition() {
+local file="$1"
+local part_num="$2"
+local mount_point="$3"
+local mount_opt="$4"
+local offset="$(image_part_offset "$file" "$part_num")" ||
+image_die "failed to find partition #$part_num from: $file"
+local size="$(image_part_size "$file" "$part_num")" ||
+image_die "failed to find partition #$part_num from: $file"
+
+if [ -z "$mount_opt" ]; then
+# by default, mount as read-only.
+mount_opt=",ro"
+fi
+
+mount \
+-o "loop,offset=$((offset * 512)),sizelimit=$((size * 512)),$mount_opt" \
+"$file" \
+"$mount_point"
+}
+
+# Unmounts a partition mount point by mount_partition
+image_umount_partition() {
+local mount_point="$1"
+
+umount -d "$mount_point"
+}
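
Note: a usage sketch for the loop-device and mount helpers added above, assuming this library has been sourced and the commands run with root privileges; the image path and partition number are examples:

  IMAGE=chromiumos_image.bin   # example image path
  MNT="$(mktemp -d)"

  # Mount the rootfs (partition 3) read-only, inspect it, then clean up.
  image_mount_partition "$IMAGE" 3 "$MNT" ""   # empty opts default to ",ro"
  ls "$MNT"
  image_umount_partition "$MNT"

  # Alternatively, expose the partition as a block device without mounting.
  LOOP_DEV="$(image_map_partition "$IMAGE" 3)"
  dumpe2fs -h "$LOOP_DEV"   # e.g. read the filesystem superblock
  image_unmap_partition "$LOOP_DEV"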
@@ -39,32 +39,33 @@ DEFINE_string subfolder "" \
 FLAGS "$@" || exit 1
 eval set -- "${FLAGS_ARGV}"

-if [ ! -f "${FLAGS_release}" ] ; then
+if [ ! -f "${FLAGS_release}" ]; then
 echo "Cannot find image file ${FLAGS_release}"
 exit 1
 fi

-if [ ! -f "${FLAGS_factory}" ] ; then
+if [ ! -f "${FLAGS_factory}" ]; then
 echo "Cannot find image file ${FLAGS_factory}"
 exit 1
 fi

-if [ ! -z "${FLAGS_firmware_updater}" ] && \
-[ ! -f "${FLAGS_firmware_updater}" ] ; then
+if [ -n "${FLAGS_firmware_updater}" ] &&
+[ ! -f "${FLAGS_firmware_updater}" ]; then
 echo "Cannot find firmware file ${FLAGS_firmware_updater}"
 exit 1
 fi

 # Convert args to paths. Need eval to un-quote the string so that shell
 # chars like ~ are processed; just doing FOO=`readlink -f ${FOO}` won't work.
-OMAHA_DIR=${SRC_ROOT}/platform/dev
-OMAHA_DATA_DIR=${OMAHA_DIR}/static/
+OMAHA_DIR="${SRC_ROOT}/platform/dev"
+OMAHA_CONF="${OMAHA_DIR}/miniomaha.conf"
+OMAHA_DATA_DIR="${OMAHA_DIR}/static/"

 # Note: The subfolder flag can only append configs. That means you will need
 # to have unique board IDs for every time you run. If you delete miniomaha.conf
 # you can still use this flag and it will start fresh.
-if [ -n "${FLAGS_subfolder}" ] ; then
-OMAHA_DATA_DIR=${OMAHA_DIR}/static/${FLAGS_subfolder}/
+if [ -n "${FLAGS_subfolder}" ]; then
+OMAHA_DATA_DIR="${OMAHA_DIR}/static/${FLAGS_subfolder}/"
 fi

 if [ ${INSIDE_CHROOT} -eq 0 ]; then

@@ -74,20 +75,20 @@ if [ ${INSIDE_CHROOT} -eq 0 ]; then
 fi

 # Use this image as the source image to copy
-RELEASE_DIR=`dirname ${FLAGS_release}`
-FACTORY_DIR=`dirname ${FLAGS_factory}`
-RELEASE_IMAGE=`basename ${FLAGS_release}`
-FACTORY_IMAGE=`basename ${FLAGS_factory}`
+RELEASE_DIR="$(dirname "${FLAGS_release}")"
+FACTORY_DIR="$(dirname "${FLAGS_factory}")"
+RELEASE_IMAGE="$(basename "${FLAGS_release}")"
+FACTORY_IMAGE="$(basename "${FLAGS_factory}")"


 prepare_omaha() {
-sudo rm -rf ${OMAHA_DATA_DIR}/rootfs-test.gz
-sudo rm -rf ${OMAHA_DATA_DIR}/rootfs-release.gz
-rm -rf ${OMAHA_DATA_DIR}/efi.gz
-rm -rf ${OMAHA_DATA_DIR}/oem.gz
-rm -rf ${OMAHA_DATA_DIR}/state.gz
-if [ ! -f "${OMAHA_DATA_DIR}" ] ; then
-mkdir -p ${OMAHA_DATA_DIR}
+sudo rm -rf "${OMAHA_DATA_DIR}/rootfs-test.gz"
+sudo rm -rf "${OMAHA_DATA_DIR}/rootfs-release.gz"
+rm -rf "${OMAHA_DATA_DIR}/efi.gz"
+rm -rf "${OMAHA_DATA_DIR}/oem.gz"
+rm -rf "${OMAHA_DATA_DIR}/state.gz"
+if [ ! -d "${OMAHA_DATA_DIR}" ]; then
+mkdir -p "${OMAHA_DATA_DIR}"
 fi
 }

@@ -102,12 +103,12 @@ prepare_dir() {
 compress_and_hash_memento_image() {
 local input_file="$1"

-if has_part_tools; then
-sudo "${SCRIPTS_DIR}/mk_memento_images.sh" "$input_file" 2 3 |
+if [ -n "${IMAGE_IS_UNPACKED}" ]; then
+sudo "${SCRIPTS_DIR}/mk_memento_images.sh" part_2 part_3 |
 grep hash |
 awk '{print $4}'
 else
-sudo "${SCRIPTS_DIR}/mk_memento_images.sh" part_2 part_3 |
+sudo "${SCRIPTS_DIR}/mk_memento_images.sh" "$input_file" 2 3 |
 grep hash |
 awk '{print $4}'
 fi

@@ -119,12 +120,12 @@ compress_and_hash_file() {

 if [ -z "$input_file" ]; then
 # Runs as a pipe processor
-gzip_compress -c -9 |
+image_gzip_compress -c -9 |
 tee "$output_file" |
 openssl sha1 -binary |
 openssl base64
 else
-gzip_compress -c -9 "$input_file" |
+image_gzip_compress -c -9 "$input_file" |
 tee "$output_file" |
 openssl sha1 -binary |
 openssl base64
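
Note: compress_and_hash_file has two modes, selected by whether its first argument is empty; both write a gzip to the named output file and print a base64-encoded SHA-1. A sketch, assuming the image library above is sourced (file names are examples):

  # File mode: compress an existing file and capture its hash.
  fw_hash="$(compress_and_hash_file "firmware_shellball.sh" "firmware.gz")"

  # Pipe mode: feed partition contents straight through the same helper.
  state_hash="$(image_dump_partition chromiumos_image.bin 1 |
    compress_and_hash_file "" "state.gz")"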
@@ -136,30 +137,38 @@ compress_and_hash_partition() {
 local part_num="$2"
 local output_file="$3"

-if has_part_tools; then
-dump_partition "$input_file" "$part_num" |
-compress_and_hash_file "" "$output_file"
-else
+if [ -n "${IMAGE_IS_UNPACKED}" ]; then
 compress_and_hash_file "part_$part_num" "$output_file"
+else
+image_dump_partition "$input_file" "$part_num" |
+compress_and_hash_file "" "$output_file"
 fi
 }

 # Clean up stale config and data files.
 prepare_omaha

+# Decide if we should unpack partition
+if image_has_part_tools; then
+IMAGE_IS_UNPACKED=
+else
+#TODO(hungte) Currently we run unpack_partitions.sh if part_tools are not
+# found. If the format of unpack_partitions.sh is reliable, we can prevent
+# creating temporary files. See image_part_offset for more information.
+echo "WARNING: cannot find partition tools. Using unpack_partitions.sh." >&2
+IMAGE_IS_UNPACKED=1
+fi

 # Get the release image.
-pushd ${RELEASE_DIR} > /dev/null
+pushd "${RELEASE_DIR}" >/dev/null
 echo "Generating omaha release image from ${FLAGS_release}"
 echo "Generating omaha factory image from ${FLAGS_factory}"
 echo "Output omaha image to ${OMAHA_DATA_DIR}"
-echo "Output omaha config to ${OMAHA_DIR}/miniomaha.conf"
+echo "Output omaha config to ${OMAHA_CONF}"

 prepare_dir

-if ! has_part_tools; then
-#TODO(hungte) we can still avoid running unpack_partitions.sh
-# by $(cat unpack_partitions.sh | grep Label | sed "s/#//" | grep ${name}" |
-# awk '{ print $1}') to fetch offset/size.
+if [ -n "${IMAGE_IS_UNPACKED}" ]; then
 echo "Unpacking image ${RELEASE_IMAGE} ..." >&2
 sudo ./unpack_partitions.sh "${RELEASE_IMAGE}" 2>/dev/null
 fi

@@ -167,24 +176,24 @@ fi
 release_hash="$(compress_and_hash_memento_image "${RELEASE_IMAGE}")"
 sudo chmod a+rw update.gz
 mv update.gz rootfs-release.gz
-mv rootfs-release.gz ${OMAHA_DATA_DIR}
+mv rootfs-release.gz "${OMAHA_DATA_DIR}"
 echo "release: ${release_hash}"

 oem_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 8 "oem.gz")"
-mv oem.gz ${OMAHA_DATA_DIR}
+mv oem.gz "${OMAHA_DATA_DIR}"
 echo "oem: ${oem_hash}"

 efi_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 12 "efi.gz")"
-mv efi.gz ${OMAHA_DATA_DIR}
+mv efi.gz "${OMAHA_DATA_DIR}"
 echo "efi: ${efi_hash}"

-popd > /dev/null
+popd >/dev/null

 # Go to retrieve the factory test image.
-pushd ${FACTORY_DIR} > /dev/null
+pushd "${FACTORY_DIR}" >/dev/null
 prepare_dir

-if ! has_part_tools; then
+if [ -n "${IMAGE_IS_UNPACKED}" ]; then
 echo "Unpacking image ${FACTORY_IMAGE} ..." >&2
 sudo ./unpack_partitions.sh "${FACTORY_IMAGE}" 2>/dev/null
 fi

@@ -192,16 +201,16 @@ fi
 test_hash="$(compress_and_hash_memento_image "${FACTORY_IMAGE}")"
 sudo chmod a+rw update.gz
 mv update.gz rootfs-test.gz
-mv rootfs-test.gz ${OMAHA_DATA_DIR}
+mv rootfs-test.gz "${OMAHA_DATA_DIR}"
 echo "test: ${test_hash}"

 state_hash="$(compress_and_hash_partition "${FACTORY_IMAGE}" 1 "state.gz")"
-mv state.gz ${OMAHA_DATA_DIR}
+mv state.gz "${OMAHA_DATA_DIR}"
 echo "state: ${state_hash}"

-popd > /dev/null
+popd >/dev/null

-if [ ! -z ${FLAGS_firmware_updater} ] ; then
+if [ -n "${FLAGS_firmware_updater}" ]; then
 SHELLBALL="${FLAGS_firmware_updater}"
 if [ ! -f "$SHELLBALL" ]; then
 echo "Failed to find firmware updater: $SHELLBALL."

@@ -209,70 +218,70 @@ if [ ! -z ${FLAGS_firmware_updater} ] ; then
 fi

 firmware_hash="$(compress_and_hash_file "$SHELLBALL" "firmware.gz")"
-mv firmware.gz ${OMAHA_DATA_DIR}
+mv firmware.gz "${OMAHA_DATA_DIR}"
 echo "firmware: ${firmware_hash}"
 fi

 # If the file does exist and we are using the subfolder flag we are going to
 # append another config.
-if [ -n "${FLAGS_subfolder}" ] && \
-[ -f "${OMAHA_DIR}"/miniomaha.conf"" ] ; then
+if [ -n "${FLAGS_subfolder}" ] &&
+[ -f "${OMAHA_CONF}" ]; then
 # Remove the ']' from the last line of the file so we can add another config.
-while [ -s "${OMAHA_DIR}/miniomaha.conf" ]; do
+while [ -s "${OMAHA_CONF}" ]; do
 # If the last line is null
-if [ -z "$(tail -1 "${OMAHA_DIR}/miniomaha.conf")" ]; then
-sed -i '$d' "${OMAHA_DIR}/miniomaha.conf"
-elif [ "$(tail -1 "${OMAHA_DIR}/miniomaha.conf")" != ']' ]; then
-sed -i '$d' "${OMAHA_DIR}/miniomaha.conf"
+if [ -z "$(tail -1 "${OMAHA_CONF}")" ]; then
+sed -i '$d' "${OMAHA_CONF}"
+elif [ "$(tail -1 "${OMAHA_CONF}")" != ']' ]; then
+sed -i '$d' "${OMAHA_CONF}"
 else
 break
 fi
 done

 # Remove the last ]
-if [ "$(tail -1 "${OMAHA_DIR}/miniomaha.conf")" = ']' ]; then
-sed -i '$d' "${OMAHA_DIR}/miniomaha.conf"
+if [ "$(tail -1 "${OMAHA_CONF}")" = ']' ]; then
+sed -i '$d' "${OMAHA_CONF}"
 fi

 # If the file is empty, create it from scratch
-if [ ! -s "${OMAHA_DIR}/miniomaha.conf" ]; then
-echo "config = [" > "${OMAHA_DIR}/miniomaha.conf"
+if [ ! -s "${OMAHA_CONF}" ]; then
+echo "config = [" >"${OMAHA_CONF}"
 fi
 else
-echo "config = [" > "${OMAHA_DIR}/miniomaha.conf"
+echo "config = [" >"${OMAHA_CONF}"
 fi

-if [ -n "${FLAGS_subfolder}" ] ; then
+if [ -n "${FLAGS_subfolder}" ]; then
 subfolder="${FLAGS_subfolder}/"
 fi

 echo -n "{
 'qual_ids': set([\"${FLAGS_board}\"]),
-'factory_image': '"${subfolder}"rootfs-test.gz',
+'factory_image': '${subfolder}rootfs-test.gz',
 'factory_checksum': '${test_hash}',
-'release_image': '"${subfolder}"rootfs-release.gz',
+'release_image': '${subfolder}rootfs-release.gz',
 'release_checksum': '${release_hash}',
-'oempartitionimg_image': '"${subfolder}"oem.gz',
+'oempartitionimg_image': '${subfolder}oem.gz',
 'oempartitionimg_checksum': '${oem_hash}',
-'efipartitionimg_image': '"${subfolder}"efi.gz',
+'efipartitionimg_image': '${subfolder}efi.gz',
 'efipartitionimg_checksum': '${efi_hash}',
-'stateimg_image': '"${subfolder}"state.gz',
+'stateimg_image': '${subfolder}state.gz',
-'stateimg_checksum': '${state_hash}'," >> ${OMAHA_DIR}/miniomaha.conf
+'stateimg_checksum': '${state_hash}'," >>"${OMAHA_CONF}"

-if [ ! -z "${FLAGS_firmware_updater}" ] ; then
+if [ -n "${FLAGS_firmware_updater}" ] ; then
 echo -n "
-'firmware_image': '"${subfolder}"firmware.gz',
+'firmware_image': '${subfolder}firmware.gz',
-'firmware_checksum': '${firmware_hash}'," >> ${OMAHA_DIR}/miniomaha.conf
+'firmware_checksum': '${firmware_hash}'," >>"${OMAHA_CONF}"
 fi

 echo -n "
 },
 ]
-" >> ${OMAHA_DIR}/miniomaha.conf
+" >>"${OMAHA_CONF}"

-echo "The miniomaha server lives in src/platform/dev"
-echo "to validate the configutarion, run:"
-echo " python2.6 devserver.py --factory_config miniomaha.conf \
---validate_factory_config"
-echo "To run the server:"
-echo " python2.6 devserver.py --factory_config miniomaha.conf"
+echo "The miniomaha server lives in src/platform/dev.
+To validate the configuration, run:
+python2.6 devserver.py --factory_config miniomaha.conf \
+--validate_factory_config
+To run the server:
+python2.6 devserver.py --factory_config miniomaha.conf"
@@ -28,13 +28,13 @@ if [ "$CROS_GENERATE_UPDATE_PAYLOAD_CALLED" != "1" ]; then
 echo " Please run that script with --help to see how to use it."
 fi

-if ! has_command pigz; then
+if ! image_has_command pigz; then
 (echo "WARNING:"
 echo " Your system does not have pigz (parallel gzip) installed."
 echo " COMPRESSING WILL BE VERY SLOW. It is recommended to install pigz"
-if has_command apt-get; then
+if image_has_command apt-get; then
 echo " by 'sudo apt-get install pigz'."
-elif has_command emerge; then
+elif image_has_command emerge; then
 echo " by 'sudo emerge pigz'."
 fi) >&2
 fi

@@ -58,14 +58,14 @@ else
 # chromiumos_img kern_part_no rootfs_part_no
 KPART="$1"
 ROOT_PART="$1"
-KPART_OFFSET="$(part_offset "$KPART" "$2")" ||
-err_die "cannot retrieve kernel partition offset"
-KPART_SECTORS="$(part_size "$KPART" "$2")" ||
-err_die "cannot retrieve kernel partition size"
-ROOT_OFFSET="$(part_offset "$ROOT_PART" "$3")" ||
-err_die "cannot retrieve root partition offset"
-ROOT_SECTORS="$(part_size "$ROOT_PART" "$3")" ||
-err_die "cannot retrieve root partition size"
+KPART_OFFSET="$(image_part_offset "$KPART" "$2")" ||
+image_die "cannot retrieve kernel partition offset"
+KPART_SECTORS="$(image_part_size "$KPART" "$2")" ||
+image_die "cannot retrieve kernel partition size"
+ROOT_OFFSET="$(image_part_offset "$ROOT_PART" "$3")" ||
+image_die "cannot retrieve root partition offset"
+ROOT_SECTORS="$(image_part_size "$ROOT_PART" "$3")" ||
+image_die "cannot retrieve root partition size"
 KPART_SIZE=$((KPART_SECTORS * 512))
 fi

@@ -91,10 +91,10 @@ KPART_SIZE_SIGNATURE="$(printf "%016x" "$KPART_SIZE" |
 CS_AND_RET_CODES="$(
 (echo -en "$KPART_SIZE_SIGNATURE"
 echo "Compressing kernel..." >&2
-dump_partial_file "$KPART" "$KPART_OFFSET" "$KPART_SECTORS"
+image_dump_partial_file "$KPART" "$KPART_OFFSET" "$KPART_SECTORS"
 echo "Compressing rootfs..." >&2
-dump_partial_file "$ROOT_PART" "$ROOT_OFFSET" "$ROOT_SECTORS") |
+image_dump_partial_file "$ROOT_PART" "$ROOT_OFFSET" "$ROOT_SECTORS") |
-gzip_compress -9 -c |
+image_gzip_compress -c -9 |
 tee "$FINAL_OUT_FILE" |
 openssl sha1 -binary |
 openssl base64 |
|
@ -6,36 +6,26 @@
|
|||||||
|
|
||||||
echo "Applying patch to init scripts."
|
echo "Applying patch to init scripts."
|
||||||
|
|
||||||
touch ${ROOT_FS_DIR}/root/.factory_test
|
touch "${ROOT_FS_DIR}/root/.factory_test"
|
||||||
patch -d ${ROOT_FS_DIR} -Np1 <<EOF
|
|
||||||
|
patch -d "${ROOT_FS_DIR}" -Np1 <<EOF
|
||||||
--- old/etc/init/ui.conf 2010-04-28 21:28:38.886069000 -0700
|
--- old/etc/init/ui.conf 2010-04-28 21:28:38.886069000 -0700
|
||||||
+++ new/etc/init/ui.conf 2010-04-28 21:29:42.676163000 -0700
|
+++ new/etc/init/ui.conf 2010-04-28 21:29:42.676163000 -0700
|
||||||
@@ -5,7 +5,7 @@
|
@@ -11 +11 @@
|
||||||
description "Chrome OS user interface"
|
|
||||||
author "chromium-os-dev@googlegroups.com"
|
|
||||||
|
|
||||||
-start on stopping startup
|
-start on stopping startup
|
||||||
+start on never
|
+start on never
|
||||||
stop on starting halt or starting reboot
|
|
||||||
|
|
||||||
respawn
|
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
patch -d ${ROOT_FS_DIR} -Np1 <<EOF
|
patch -d "${ROOT_FS_DIR}" -Np1 <<EOF
|
||||||
diff -Naur old/etc/init/dump-boot-stats.conf new/etc/init/dump-boot-stats.conf
|
diff -Naur old/etc/init/boot-complete.conf new/etc/init/boot-complete.conf
|
||||||
--- old/etc/init/boot-complete.conf 2010-07-21 11:22:30.000000000 +0800
|
--- old/etc/init/boot-complete.conf 2010-07-21 11:22:30.000000000 +0800
|
||||||
+++ new/etc/init/boot-complete.conf 2010-07-21 22:13:36.000000000 +0800
|
+++ new/etc/init/boot-complete.conf 2010-07-21 22:13:36.000000000 +0800
|
||||||
@@ -7,7 +7,7 @@
|
@@ -7 +7 @@
|
||||||
# Processing required during boot after login prompt is displayed.
|
|
||||||
# Primarily, this means calculating boot time statistics.
|
|
||||||
|
|
||||||
-start on login-prompt-ready
|
-start on login-prompt-ready
|
||||||
+start on stopping startup
|
+start on started udev
|
||||||
|
|
||||||
task
|
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
cat > ${ROOT_FS_DIR}/etc/init/factory.conf <<EOF
|
cat >"${ROOT_FS_DIR}/etc/init/factory.conf" <<EOF
|
||||||
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
|
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
|
||||||
# Use of this source code is governed by a BSD-style license that can be
|
# Use of this source code is governed by a BSD-style license that can be
|
||||||
# found in the LICENSE file.
|
# found in the LICENSE file.
|
||||||
@ -43,24 +33,24 @@ cat > ${ROOT_FS_DIR}/etc/init/factory.conf <<EOF
|
|||||||
description "Chrome OS factory startup stub"
|
description "Chrome OS factory startup stub"
|
||||||
author "chromium-os-dev@googlegroups.com"
|
author "chromium-os-dev@googlegroups.com"
|
||||||
|
|
||||||
start on started udev
|
start on stopped udev-addon
|
||||||
stop on starting halt or starting reboot
|
stop on starting halt or starting reboot
|
||||||
|
|
||||||
script
|
script
|
||||||
cd /usr/local/autotest
|
cd /usr/local/autotest
|
||||||
eval \$(./site_tests/suite_Factory/startx.sh)
|
eval \$(./site_tests/suite_Factory/startx.sh)
|
||||||
date >> /var/log/factory.log
|
date >>/var/log/factory.log
|
||||||
if [ ! -e factory_started ]; then
|
if [ ! -e factory_started ]; then
|
||||||
touch factory_started
|
touch factory_started
|
||||||
cp -f site_tests/suite_Factory/control .
|
cp -f site_tests/suite_Factory/control .
|
||||||
./bin/autotest control >> /var/log/factory.log 2>&1
|
./bin/autotest control >>/var/log/factory.log 2>&1
|
||||||
else
|
else
|
||||||
./tools/autotest >> /var/log/factory.log 2>&1
|
./tools/autotest >>/var/log/factory.log 2>&1
|
||||||
fi
|
fi
|
||||||
end script
|
end script
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
cat > ${ROOT_FS_DIR}/etc/init/factorylog.conf <<EOF
|
cat >"${ROOT_FS_DIR}/etc/init/factorylog.conf" <<EOF
|
||||||
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
|
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
|
||||||
# Use of this source code is governed by a BSD-style license that can be
|
# Use of this source code is governed by a BSD-style license that can be
|
||||||
# found in the LICENSE file.
|
# found in the LICENSE file.
|
||||||
@@ -70,21 +60,33 @@ stop on starting halt or starting reboot
 
 respawn
 script
-tail -n 48 -F /var/log/factory.log > /dev/tty3
+tail -n 48 -F /var/log/factory.log >/dev/tty3
 end script
 EOF
 
-patch -d ${ROOT_FS_DIR} -Np1 <<EOF
+if [ -r "${ROOT_FS_DIR}/etc/init/chrontel.conf" ]; then
+patch -d "${ROOT_FS_DIR}" -Np1 <<EOF
 diff -Nau old/etc/init/chrontel.conf new/etc/init/
 --- old/etc/init/chrontel.conf 2010-08-05 16:32:55.000000000 -0700
 +++ new/etc/init/chrontel.conf 2010-08-05 16:32:45.000000000 -0700
-@@ -7,7 +7,7 @@
-
-# start as early as possible to allow login screen on hdmi
-# Need udev to have connected the i2c before we can use it
+@@ -7 +7 @@
 -start on started udev
 +start on never
-stop on starting halt or starting reboot
-
-# sadly, these can't reference each other.
 EOF
+fi
+
+if [ -r "${ROOT_FS_DIR}/etc/init/nvrm.conf" ]; then
+patch -d "${ROOT_FS_DIR}" -Np1 << EOF
+--- old/etc/init/nvrm.conf 2010-11-19 14:57:16.000000000 -0800
++++ new/etc/init/nvrm.conf 2010-11-19 16:52:48.000000000 -0800
+@@ -2,7 +2,7 @@
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+-start on starting ui
++start on starting factory
+
+respawn
+
+EOF
+fi
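
The chrontel and nvrm init jobs only exist on some boards, so the patches above are now guarded and skipped when the target file is absent. A minimal sketch of the guard-then-patch pattern, with a hypothetical file name and patch body:

# Guard-then-patch sketch; file name and patch body are hypothetical.
if [ -r "${ROOT_FS_DIR}/etc/init/example.conf" ]; then
patch -d "${ROOT_FS_DIR}" -Np1 <<EOF
--- old/etc/init/example.conf
+++ new/etc/init/example.conf
@@ -1 +1 @@
-start on started udev
+start on never
EOF
fi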
@@ -6,4 +6,4 @@
 
 echo "Modifying Release Description for Factory."
 FILE="${ROOT_FS_DIR}/etc/lsb-release"
-sed -i 's/Test/Factory/' $FILE
+sed -i 's/Test/Factory/' "${FILE}"
@@ -13,7 +13,7 @@ if [ -f "${GLOBAL_CONFIG}" ]; then
 exit 1
 fi
 
-cat > "${GLOBAL_CONFIG}" <<EOF
+cat >"${GLOBAL_CONFIG}" <<EOF
 [CLIENT]
 drop_caches: False
 drop_caches_between_iterations: False
@@ -6,16 +6,18 @@
 
 TEST_DIR="${ROOT_FS_DIR}/usr/local/autotest/site_tests/hardware_Components"
 
-pushd ${TEST_DIR} 1> /dev/null
+if [ -d "${TEST_DIR}" ]; then
+pushd "${TEST_DIR}" >/dev/null
 
 # Remove the DB directories belonging to other boards.
 KEEPDB="data_${BOARD}"
-ls -d data_* 2> /dev/null | grep -v ${KEEPDB} | xargs rm -fr
+ls -d data_* 2>/dev/null | grep -v "${KEEPDB}" | xargs rm -fr
 
 # Ensure there is DB directory in x86-agz and x86-mario.
-if [ ! -d ${KEEPDB} -a \
-\( "${BOARD}" = "x86-agz" -o "${BOARD}" = "x86-mario" \) ]; then
+if [ ! -d "${KEEPDB}" ] &&
+[ "${BOARD}" = "x86-agz" -o "${BOARD}" = "x86-mario" ]; then
 echo "No component DB directory found at: ${KEEPDB}"
 fi
 
-popd 1> /dev/null
+popd >/dev/null
+fi
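
The board check also moves from a single test expression with -a and escaped parentheses to two chained [ ] commands, which reads more simply and no longer relies on the test parser for operator grouping. A sketch of the two forms, with hypothetical values:

# Hypothetical values for illustration only.
BOARD="x86-mario"
KEEPDB="data_${BOARD}"
# Old form: one test, -a plus escaped grouping parentheses.
[ ! -d "${KEEPDB}" -a \( "${BOARD}" = "x86-agz" -o "${BOARD}" = "x86-mario" \) ] && echo match
# New form: two tests chained with &&.
[ ! -d "${KEEPDB}" ] && [ "${BOARD}" = "x86-agz" -o "${BOARD}" = "x86-mario" ] && echo match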
mod_for_factory_scripts/600customizeRelease (new executable file)
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+TEST_DIR="${ROOT_FS_DIR}/usr/local/autotest/site_tests/suite_Factory"
+
+if [ -d "${TEST_DIR}" ]; then
+pushd "${TEST_DIR}" >/dev/null
+
+# If there is a customize_$BOARD script for this board, let's run it.
+# This allows adding settings for specific factories or systems.
+CUSTOMIZE="customize_${BOARD}"
+if [ -e "${CUSTOMIZE}" ]; then
+echo "Running ${CUSTOMIZE}"
+"./${CUSTOMIZE}"
+fi
+
+# We don't need the customize script anymore.
+rm -f customize_*
+
+popd >/dev/null
+fi
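
The new hook runs an optional per-board customize_${BOARD} script shipped inside suite_Factory and then deletes all customize_* files so they never reach the final image. A hypothetical customize_x86-mario, purely for illustration:

#!/bin/bash
# Hypothetical per-board hook; real factory customizations are not
# part of this change.
echo "factory: applying x86-mario tweaks" >>/var/log/factory.log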
@@ -5,9 +5,11 @@
 # found in the LICENSE file.
 
 echo "Modifying image for factory test..."
+set -e
 
-for SCRIPT in \
-${GCLIENT_ROOT}/src/scripts/mod_for_factory_scripts/[0-9][0-9][0-9]*[!$~]
+SCRIPT_BASE="${GCLIENT_ROOT}/src/scripts/mod_for_factory_scripts/"
+for SCRIPT in "${SCRIPT_BASE}"[0-9][0-9][0-9]*[!$~]
 do
-${SCRIPT}
+echo "Apply $(basename "${SCRIPT}")..."
+bash -e "${SCRIPT}"
 done
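
Each numbered mod script now runs in its own bash -e, so the first failing command aborts that script, and the new set -e in the driver stops the whole run instead of silently continuing. A sketch of the behavior, with a hypothetical script directory:

# Hypothetical driver sketch: a non-zero exit in any child script
# terminates the loop because of set -e in the caller.
set -e
for SCRIPT in /tmp/mod_scripts/[0-9][0-9][0-9]*; do
echo "Apply $(basename "${SCRIPT}")..."
bash -e "${SCRIPT}"
done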
@@ -284,6 +284,10 @@ install_recovery_kernel() {
 count=$kern_a_size \
 conv=notrunc
 
+# Set the 'Success' flag to 1 (to prevent the firmware from updating
+# the 'Tries' flag).
+sudo $GPT add -i 2 -S 1 "$RECOVERY_IMAGE"
+
 # Repeat for the legacy bioses.
 # Replace vmlinuz.A with the recovery version
 local sysroot="${FLAGS_build_root}/${FLAGS_board}"
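
$GPT here is presumably the cgpt tool; -i 2 addresses the KERN-A partition entry and -S 1 marks it successful so the firmware leaves the tries counter alone. A hedged sketch of setting and then inspecting the flag, assuming cgpt is available and operating on a local image file:

# Assumes cgpt is available and recovery.bin is a GPT disk image.
sudo cgpt add -i 2 -S 1 recovery.bin   # mark KERN-A as known-good
sudo cgpt show -i 2 recovery.bin       # inspect the partition entry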
@@ -144,6 +144,7 @@ install_autotest() {
 --exclude=site_tests/platform_StackProtector \
 --exclude=deps/chrome_test \
 --exclude=site_tests/desktopui_BrowserTest \
+--exclude=site_tests/desktopui_PageCyclerTests \
 --exclude=site_tests/desktopui_UITest \
 --exclude=.svn \
 ${AUTOTEST_SRC}/client/* "${stateful_root}/${autotest_client}"
@@ -20,6 +20,8 @@ DEFINE_string board "$DEFAULT_BOARD" \
 "The board for which the image was built." b
 DEFINE_boolean read_only $FLAGS_FALSE \
 "Mount in read only mode -- skips stateful items."
+DEFINE_boolean safe $FLAGS_FALSE \
+"Mount rootfs in read only mode."
 DEFINE_boolean unmount $FLAGS_FALSE \
 "Unmount previously mounted dir." u
 DEFINE_string from "/dev/sdc" \
@@ -64,9 +66,12 @@ function unmount_image() {
 
 function get_usb_partitions() {
 local ro_flag=""
+local safe_flag=""
 [ ${FLAGS_read_only} -eq ${FLAGS_TRUE} ] && ro_flag="-o ro"
+[ ${FLAGS_read_only} -eq ${FLAGS_TRUE} -o \
+${FLAGS_safe} -eq ${FLAGS_TRUE} ] && safe_flag="-o ro -t ext2"
 
-sudo mount ${ro_flag} "${FLAGS_from}3" "${FLAGS_rootfs_mountpt}"
+sudo mount ${safe_flag} "${FLAGS_from}3" "${FLAGS_rootfs_mountpt}"
 sudo mount ${ro_flag} "${FLAGS_from}1" "${FLAGS_stateful_mountpt}"
 if [[ -n "${FLAGS_esp_mountpt}" ]]; then
 sudo mount ${ro_flag} "${FLAGS_from}12" "${FLAGS_esp_mountpt}"
@@ -79,8 +84,15 @@ function get_gpt_partitions() {
 # Mount the rootfs partition using a loopback device.
 local offset=$(partoffset "${FLAGS_from}/${filename}" 3)
 local ro_flag=""
+local safe_flag=""
+
 if [ ${FLAGS_read_only} -eq ${FLAGS_TRUE} ]; then
 ro_flag="-o ro"
+fi
+
+if [ ${FLAGS_read_only} -eq ${FLAGS_TRUE} -o \
+${FLAGS_safe} -eq ${FLAGS_TRUE} ]; then
+safe_flag="-o ro -t ext2"
 else
 # Make sure any callers can actually mount and modify the fs
 # if desired.
@@ -88,7 +100,7 @@ function get_gpt_partitions() {
 enable_rw_mount "${FLAGS_from}/${filename}" "$(( offset * 512 ))"
 fi
 
-sudo mount ${ro_flag} -o loop,offset=$(( offset * 512 )) \
+sudo mount ${safe_flag} -o loop,offset=$(( offset * 512 )) \
 "${FLAGS_from}/${filename}" "${FLAGS_rootfs_mountpt}"
 
 # Mount the stateful partition using a loopback device.
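
The new --safe flag forces the rootfs mount to read-only ext2 while leaving the stateful partition writable; mounting a journalled filesystem as ext2 is the usual trick to keep a read-only mount from replaying the journal and modifying the image. A hypothetical invocation using the flags defined above:

# Hypothetical invocation; device and mount points depend on the
# local setup.
./mount_gpt_image.sh --safe --from /dev/sdc \
--rootfs_mountpt /tmp/m --stateful_mountpt /tmp/s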
@@ -271,25 +271,24 @@ function main() {
 info "Running chrome autotest ${control_file}"
 fi
 
-export AUTOSERV_TEST_ARGS="${FLAGS_args}"
-export AUTOSERV_ARGS="-m ${FLAGS_remote} \
---ssh-port ${FLAGS_ssh_port} \
+local autoserv_test_args="${FLAGS_args}"
+if [ -n "${autoserv_test_args}" ]; then
+autoserv_test_args="-a \"${autoserv_test_args}\""
+fi
+local autoserv_args="-m ${FLAGS_remote} --ssh-port ${FLAGS_ssh_port} \
 ${option} ${control_file} -r ${results_dir} ${verbose}"
 if [ ${FLAGS_build} -eq ${FLAGS_FALSE} ]; then
 cat > "${TMP}/run_test.sh" <<EOF
-export AUTOSERV_TEST_ARGS="${AUTOSERV_TEST_ARGS}"
-export AUTOSERV_ARGS="${AUTOSERV_ARGS}"
-cd /home/${USER}/trunk/src/scripts
-./autotest_run.sh --board "${FLAGS_board}"
+cd /build/${FLAGS_board}/usr/local/autotest
+sudo chmod a+w ./server/{tests,site_tests}
+echo ./server/autoserv ${autoserv_args} ${autoserv_test_args}
+./server/autoserv ${autoserv_args} ${autoserv_test_args}
 EOF
 chmod a+rx "${TMP}/run_test.sh"
 ${ENTER_CHROOT} ${TMP_INSIDE_CHROOT}/run_test.sh >&2
 else
 cp "${BUILD_DIR}/environment" "${TMP}/run_test.sh"
 GRAPHICS_BACKEND=${GRAPHICS_BACKEND:-OPENGL}
-if [ -n "${AUTOSERV_TEST_ARGS}" ]; then
-AUTOSERV_TEST_ARGS="-a \"${AUTOSERV_TEST_ARGS}\""
-fi
 cat >> "${TMP}/run_test.sh" <<EOF
 export GCLIENT_ROOT=/home/${USER}/trunk
 export GRAPHICS_BACKEND=${GRAPHICS_BACKEND}
@@ -297,7 +296,8 @@ export SSH_AUTH_SOCK=${SSH_AUTH_SOCK} TMPDIR=/tmp SSH_AGENT_PID=${SSH_AGENT_PID}
 export SYSROOT=/build/${FLAGS_board}
 tc-export CC CXX PKG_CONFIG
 cd ${INSIDE_BUILD_DIR}
-./server/autoserv ${AUTOSERV_ARGS} ${AUTOSERV_TEST_ARGS}
+echo ./server/autoserv ${autoserv_args} ${autoserv_test_args}
+./server/autoserv ${autoserv_args} ${autoserv_test_args}
 EOF
 sudo cp "${TMP}/run_test.sh" "${BUILD_DIR}"
 sudo chmod a+rx "${BUILD_DIR}/run_test.sh"
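
FLAGS_args can hold several space-separated test arguments, so they are wrapped into one quoted -a option before being spliced into the autoserv command line; autoserv then receives them as a single argument. A sketch of the expansion, with hypothetical values:

# Hypothetical values to show the -a wrapping.
autoserv_test_args="--iterations 3 --quick"
autoserv_test_args="-a \"${autoserv_test_args}\""
echo ./server/autoserv -m 192.168.0.2 ${autoserv_test_args}
# prints: ./server/autoserv -m 192.168.0.2 -a "--iterations 3 --quick"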
@@ -1,2 +1 @@
 chromeos-base/pam_offline
-chromeos-base/update_engine
@@ -180,6 +180,8 @@ if [[ "${FLAGS_arch}" = "x86" ]]; then
 if [[ ${FLAGS_install_syslinux} -eq ${FLAGS_TRUE} ]]; then
 sudo umount "${ESP_FS_DIR}"
 sudo syslinux -d /syslinux "${ESP_DEV}"
+# mount again for cleanup to free resource gracefully
+sudo mount -o ro "${ESP_DEV}" "${ESP_FS_DIR}"
 fi
 elif [[ "${FLAGS_arch}" = "arm" ]]; then
 # Copy u-boot script to ESP partition
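
syslinux writes to the raw ESP block device, so the partition has to be unmounted while the bootloader is installed; remounting it read-only afterwards lets the shared cleanup path unmount it normally instead of failing. The sequence in isolation, with a hypothetical device and mount point:

# Hypothetical device/mount point for illustration.
sudo umount /mnt/esp
sudo syslinux -d /syslinux /dev/sdc12
sudo mount -o ro /dev/sdc12 /mnt/esp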