Merge branch 'master' of ssh://gitrw.chromium.org:9222/crosutils

This commit is contained in:
Scott Zawalski 2011-01-04 13:21:33 -08:00
commit 3dfb64fd55
18 changed files with 548 additions and 238 deletions

View File

@ -74,10 +74,10 @@ def RepoSync(buildroot, retries=_DEFAULT_RETRIES):
# The --trace option ensures that repo shows the output from git. This
# is needed so that the buildbot can kill us if git is not making
# progress.
RunCommand(['repo', '--trace', 'sync'], cwd=buildroot)
RunCommand(['repo', 'forall', '-c', 'git', 'config',
'url.ssh://git@gitrw.chromium.org:9222.insteadof',
'http://git.chromium.org/git'], cwd=buildroot)
RunCommand(['repo', '--trace', 'sync'], cwd=buildroot)
retries = 0
except:
retries -= 1
@ -367,14 +367,6 @@ def _Build(buildroot, emptytree):
RunCommand(cmd, cwd=cwd, enter_chroot=True)
def _BuildChrome(buildroot, board, chrome_atom_to_build):
"""Wrapper for emerge call to build Chrome."""
cwd = os.path.join(buildroot, 'src', 'scripts')
RunCommand(['emerge-%s' % board,
'=%s' % chrome_atom_to_build],
cwd=cwd, enter_chroot=True)
def _EnableLocalAccount(buildroot):
cwd = os.path.join(buildroot, 'src', 'scripts')
# Set local account for test images.
@ -696,13 +688,8 @@ def main():
buildconfig['board'], rev_overlays)
_EnableLocalAccount(buildroot)
# Doesn't rebuild without acquiring more source.
if options.sync:
_Build(buildroot, emptytree)
if chrome_atom_to_build:
_BuildChrome(buildroot, buildconfig['board'], chrome_atom_to_build)
if buildconfig['unittests'] and options.tests:
_RunUnitTests(buildroot)

View File

@ -85,14 +85,14 @@ class AUTest(object):
if self.use_delta_updates:
try:
self.source_image = src_image
self._UpdateImageReportError(image)
self._UpdateImageReportError(image, stateful_change)
except:
Warning('Delta update failed, disabling delta updates and retrying.')
self.use_delta_updates = False
self.source_image = ''
self._UpdateImageReportError(image)
self._UpdateImageReportError(image, stateful_change)
else:
self._UpdateImageReportError(image)
self._UpdateImageReportError(image, stateful_change)
def _UpdateImageReportError(self, image_path, stateful_change='old',
proxy_port=None):
@ -355,9 +355,19 @@ class AUTest(object):
self.data_size += len(data)
return data
self._AttemptUpdateWithFilter(DelayedFilter())
def SimpleTest(self):
"""A simple update that updates the target image to itself.
We explicitly don't use the test prefix so that it isn't run by default. Can be
run using test_prefix option.
"""
self.PrepareBase(target_image_path)
self.UpdateImage(target_image_path)
self.VerifyImage(100)
class RealAUTest(unittest.TestCase, AUTest):
"""Test harness for updating real images."""
@ -436,12 +446,10 @@ class VirtualAUTest(unittest.TestCase, AUTest):
if os.path.exists(pid_file):
Warning('Existing %s found. Deleting and killing process' %
pid_file)
pid = RunCommand(['sudo', 'cat', pid_file], redirect_stdout=True,
enter_chroot=False)
if pid:
RunCommand(['sudo', 'kill', pid.strip()], error_ok=True,
enter_chroot=False)
RunCommand(['sudo', 'rm', pid_file], enter_chroot=False)
RunCommand(['./cros_stop_vm', '--kvm_pid=%s' % pid_file],
cwd=self.crosutilsbin)
assert not os.path.exists(pid_file)
def setUp(self):
"""Unit test overriden method. Is called before every test."""

View File

@ -264,9 +264,12 @@ def MarkChromeEBuildAsStable(stable_candidate, unstable_ebuild, chrome_rev,
new_ebuild_path = base_path + ('%s-r1.ebuild' % portage_suffix)
# Mark latest release and sticky branches as stable.
mark_stable = chrome_rev != TIP_OF_TRUNK
cros_mark_as_stable.EBuildStableMarker.MarkAsStable(
unstable_ebuild.ebuild_path, new_ebuild_path, 'CROS_SVN_COMMIT', commit,
make_stable=False)
make_stable=mark_stable)
new_ebuild = ChromeEBuild(new_ebuild_path)
if stable_candidate and (
stable_candidate.chrome_version == new_ebuild.chrome_version):
@ -321,12 +324,21 @@ def main():
commit_to_use = _GetTipOfTrunkSvnRevision()
elif chrome_rev == LATEST_RELEASE:
version_to_uprev = _GetLatestRelease()
# Don't rev on stable branch for latest_release.
if re.match('%s\.\d+' % sticky_branch, version_to_uprev):
Info('Latest release is sticky branch. Nothing to do.')
return
else:
version_to_uprev = _GetLatestRelease(sticky_branch)
stable_candidate = FindChromeUprevCandidate(stable_ebuilds, chrome_rev,
sticky_branch)
if stable_candidate:
Info('Stable candidate found %s' % stable_candidate)
else:
Info('No stable candidate found.')
os.chdir(overlay_dir)
work_branch = cros_mark_as_stable.GitBranch(
cros_mark_as_stable.STABLE_BRANCH_NAME, options.tracking_branch)

View File

@ -41,7 +41,7 @@ if [ -n "${FLAGS_payload}" ]; then
fi
if [ -n "${FLAGS_proxy_port}" ]; then
IMAGE_ARGS="--proxy_port=${FLAGS_proxy_port}"
IMAGE_ARGS="${IMAGE_ARGS} --proxy_port=${FLAGS_proxy_port}"
fi
$(dirname $0)/../image_to_live.sh \

24
bin/cros_stop_vm Executable file
View File

@ -0,0 +1,24 @@
#!/bin/bash
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Simple wrapper script to stop a vm specified from a pid file.
. "$(dirname $0)/../common.sh"
. "$(dirname $0)/../lib/cros_vm_lib.sh"
set -e
# Parse command line.
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
# Requires pid file to be set.
if [ -z "${FLAGS_kvm_pid}" ]; then
die "Must specify file with pid of kvm to kill."
fi
KVM_PID_FILE="${FLAGS_kvm_pid}"
stop_kvm

View File

@ -22,6 +22,7 @@ from cros_build_lib import RunCommand
from cros_build_lib import Warning
_IMAGE_TO_EXTRACT = 'chromiumos_test_image.bin'
_NEW_STYLE_VERSION = '0.9.131.0'
class HTMLDirectoryParser(HTMLParser.HTMLParser):
"""HTMLParser for parsing the default apache file index."""
@ -216,6 +217,13 @@ def GrabZipAndExtractImage(zip_url, download_folder, image_name) :
fh.write(zip_url)
fh.close()
version = zip_url.split('/')[-2]
if not _GreaterVersion(version, _NEW_STYLE_VERSION) == version:
# If the version isn't ready for new style, touch file to use old style.
old_style_touch_path = os.path.join(download_folder, '.use_e1000')
fh = open(old_style_touch_path, 'w+')
fh.close()
def RunAUTestHarness(board, channel, latest_url_base, zip_server_base,
no_graphics, type, remote):
@ -299,9 +307,5 @@ def main():
if __name__ == '__main__':
try:
main()
except Exception:
print "Got exception."
traceback.print_exc(file=sys.stdout)

View File

@ -146,6 +146,7 @@ cros_secure
kern_guid=%U
tpm_tis.force=1
tpm_tis.interrupts=0
nmi_watchdog=1
EOF
WORK="${WORK} ${FLAGS_working_dir}/config.txt"

View File

@ -129,21 +129,59 @@ CHROOT_TRUNK_DIR="/home/$USER/trunk"
# Install make for portage ebuilds. Used by build_image and gmergefs.
# TODO: Is /usr/local/autotest-chrome still used by anyone?
DEFAULT_INSTALL_MASK="/usr/include /usr/man /usr/share/man /usr/share/doc \
/usr/share/gtk-doc /usr/share/gtk-2.0 /usr/lib/gtk-2.0/include \
/usr/share/info /usr/share/aclocal /usr/lib/gcc /usr/lib/pkgconfig \
/usr/share/pkgconfig /usr/share/gettext /usr/share/readline /etc/runlevels \
/usr/share/openrc /lib/rc *.a *.la /etc/init.d /usr/lib/debug
/usr/local/autotest /usr/local/autotest-chrome"
DEFAULT_INSTALL_MASK="
*.a
*.la
/etc/init.d
/etc/runlevels
/lib/rc
/usr/bin/Xnest
/usr/bin/Xvfb
/usr/include
/usr/lib/debug
/usr/lib/gcc
/usr/lib/gtk-2.0/include
/usr/lib/pkgconfig
/usr/local/autotest
/usr/local/autotest-chrome
/usr/man
/usr/share/aclocal
/usr/share/doc
/usr/share/gettext
/usr/share/gtk-2.0
/usr/share/gtk-doc
/usr/share/info
/usr/share/man
/usr/share/openrc
/usr/share/pkgconfig
/usr/share/readline
"
FACTORY_INSTALL_MASK="/opt/google/chrome /opt/google/o3d /opt/netscape \
/opt/google/talkplugin /opt/Qualcomm /opt/Synaptics \
/usr/lib/dri /usr/lib/python2.6/test \
/usr/share/chewing /usr/share/fonts \
/usr/share/ibus-pinyin /usr/share/libhangul /usr/share/locale \
/usr/share/m17n /usr/share/mime /usr/share/sounds /usr/share/tts \
/usr/share/X11 /usr/share/zoneinfo /usr/lib/debug
/usr/local/autotest /usr/local/autotest-chrome /usr/local/autotest-pkgs"
FACTORY_INSTALL_MASK="
/opt/Qualcomm
/opt/Synaptics
/opt/google/chrome
/opt/google/o3d
/opt/google/talkplugin
/opt/netscape
/usr/lib/debug
/usr/lib/dri
/usr/lib/python2.6/test
/usr/local/autotest
/usr/local/autotest-chrome
/usr/local/autotest-pkgs
/usr/share/X11
/usr/share/chewing
/usr/share/fonts
/usr/share/ibus-pinyin
/usr/share/libhangul
/usr/share/locale
/usr/share/m17n
/usr/share/mime
/usr/share/sounds
/usr/share/tts
/usr/share/zoneinfo
"
# Check to ensure not running old scripts
V_REVERSE=''
@ -234,12 +272,9 @@ function restart_in_chroot_if_needed {
# NB: Pass in ARGV: restart_in_chroot_if_needed "$@"
if [ $INSIDE_CHROOT -ne 1 ]
then
local abspath=$(readlink -f "$0")
# strip everything up to (and including) /src/scripts/ from abspath
local path_from_scripts="${abspath##*/src/scripts/}"
# Equivalent to enter_chroot.sh -- <current command>
exec $SCRIPTS_DIR/enter_chroot.sh -- \
"$CHROOT_TRUNK_DIR/src/scripts/$path_from_scripts" "$@"
exit
$CHROOT_TRUNK_DIR/src/scripts/$(basename $0) "$@"
fi
}

View File

@ -78,15 +78,15 @@ def _Print(message):
Info(message)
def _CleanStalePackages(board, package_array):
def _CleanStalePackages(board, package_atoms):
"""Cleans up stale package info from a previous build."""
Info('Cleaning up stale packages %s.' % package_array)
Info('Cleaning up stale packages %s.' % package_atoms)
unmerge_board_cmd = ['emerge-%s' % board, '--unmerge']
unmerge_board_cmd.extend(package_array)
unmerge_board_cmd.extend(package_atoms)
RunCommand(unmerge_board_cmd)
unmerge_host_cmd = ['sudo', 'emerge', '--unmerge']
unmerge_host_cmd.extend(package_array)
unmerge_host_cmd.extend(package_atoms)
RunCommand(unmerge_host_cmd)
RunCommand(['eclean-%s' % board, '-d', 'packages'], redirect_stderr=True)
@ -252,7 +252,7 @@ def PushChange(stable_branch, tracking_branch):
merge_branch_name = 'merge_branch'
for push_try in range(num_retries + 1):
try:
_SimpleRunCommand('git remote update')
_SimpleRunCommand('repo sync .')
merge_branch = GitBranch(merge_branch_name, tracking_branch)
merge_branch.CreateBranch()
if not merge_branch.Exists():
@ -319,15 +319,15 @@ class EBuild(object):
"""Sets up data about an ebuild from its path."""
from portage.versions import pkgsplit
unused_path, self.category, self.pkgname, filename = path.rsplit('/', 3)
unused_pkgname, version_no_rev, rev = pkgsplit(
unused_pkgname, self.version_no_rev, rev = pkgsplit(
filename.replace('.ebuild', ''))
self.ebuild_path_no_version = os.path.join(
os.path.dirname(path), self.pkgname)
self.ebuild_path_no_revision = '%s-%s' % (self.ebuild_path_no_version,
version_no_rev)
self.version_no_rev)
self.current_revision = int(rev.replace('r', ''))
self.version = '%s-%s' % (version_no_rev, rev)
self.version = '%s-%s' % (self.version_no_rev, rev)
self.package = '%s/%s' % (self.category, self.pkgname)
self.ebuild_path = path
@ -454,17 +454,19 @@ class EBuildStableMarker(object):
OSError: Error occurred while creating a new ebuild.
IOError: Error occurred while writing to the new revved ebuild file.
Returns:
True if the revved package is different than the old ebuild.
If the revved package is different than the old ebuild, return the full
revved package name, including the version number. Otherwise, return None.
"""
if self._ebuild.is_stable:
new_stable_ebuild_path = '%s-r%d.ebuild' % (
self._ebuild.ebuild_path_no_revision,
self._ebuild.current_revision + 1)
stable_version_no_rev = self._ebuild.version_no_rev
else:
# If given unstable ebuild, use 0.0.1 rather than 9999.
new_stable_ebuild_path = '%s-0.0.1-r%d.ebuild' % (
self._ebuild.ebuild_path_no_version,
stable_version_no_rev = '0.0.1'
new_version = '%s-r%d' % (stable_version_no_rev,
self._ebuild.current_revision + 1)
new_stable_ebuild_path = '%s-%s.ebuild' % (
self._ebuild.ebuild_path_no_version, new_version)
_Print('Creating new stable ebuild %s' % new_stable_ebuild_path)
unstable_ebuild_path = ('%s-9999.ebuild' %
@ -480,7 +482,7 @@ class EBuildStableMarker(object):
if 0 == RunCommand(diff_cmd, exit_code=True, redirect_stdout=True,
redirect_stderr=True, print_cmd=gflags.FLAGS.verbose):
os.unlink(new_stable_ebuild_path)
return False
return None
else:
_Print('Adding new stable ebuild to git')
_SimpleRunCommand('git add %s' % new_stable_ebuild_path)
@ -489,7 +491,7 @@ class EBuildStableMarker(object):
_Print('Removing old ebuild from git')
_SimpleRunCommand('git rm %s' % old_ebuild_path)
return True
return '%s-%s' % (self._ebuild.package, new_version)
@classmethod
def CommitChange(cls, message):
@ -556,16 +558,18 @@ def main(argv):
# Contains the array of packages we actually revved.
revved_packages = []
new_package_atoms = []
for ebuild in ebuilds:
try:
_Print('Working on %s' % ebuild.package)
worker = EBuildStableMarker(ebuild)
commit_id = ebuild.GetCommitId()
if worker.RevWorkOnEBuild(commit_id):
new_package = worker.RevWorkOnEBuild(commit_id)
if new_package:
message = _GIT_COMMIT_MESSAGE % (ebuild.package, commit_id)
worker.CommitChange(message)
revved_packages.append(ebuild.package)
new_package_atoms.append('=%s' % new_package)
except (OSError, IOError):
Warning('Cannot rev %s\n' % ebuild.package,
'Note you will have to go into %s '
@ -573,7 +577,7 @@ def main(argv):
raise
if revved_packages:
_CleanStalePackages(gflags.FLAGS.board, revved_packages)
_CleanStalePackages(gflags.FLAGS.board, new_package_atoms)
if gflags.FLAGS.drop_file:
fh = open(gflags.FLAGS.drop_file, 'w')
fh.write(' '.join(revved_packages))

View File

@ -33,7 +33,7 @@ class NonClassTests(mox.MoxTestBase):
cros_mark_as_stable.GitBranch.Exists().AndReturn(True)
cros_mark_as_stable._SimpleRunCommand('git log --format=format:%s%n%n%b ' +
self._tracking_branch + '..').AndReturn(git_log)
cros_mark_as_stable._SimpleRunCommand('git remote update')
cros_mark_as_stable._SimpleRunCommand('repo sync .')
cros_mark_as_stable._SimpleRunCommand('git merge --squash %s' %
self._branch)
cros_mark_as_stable._SimpleRunCommand('git commit -m "%s"' %
@ -129,6 +129,7 @@ class EBuildTest(mox.MoxTestBase):
self.mox.ReplayAll()
fake_ebuild = cros_mark_as_stable.EBuild(fake_ebuild_path)
self.mox.VerifyAll()
self.assertEquals(fake_ebuild.version_no_rev, '0.0.1')
self.assertEquals(fake_ebuild.ebuild_path_no_revision,
'/path/to/test_package/test_package-0.0.1')
self.assertEquals(fake_ebuild.ebuild_path_no_version,
@ -144,6 +145,7 @@ class EBuildTest(mox.MoxTestBase):
fake_ebuild = cros_mark_as_stable.EBuild(fake_ebuild_path)
self.mox.VerifyAll()
self.assertEquals(fake_ebuild.version_no_rev, '9999')
self.assertEquals(fake_ebuild.ebuild_path_no_revision,
'/path/to/test_package/test_package-9999')
self.assertEquals(fake_ebuild.ebuild_path_no_version,
@ -160,12 +162,14 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
self.mox.StubOutWithMock(os, 'unlink')
self.m_ebuild = self.mox.CreateMock(cros_mark_as_stable.EBuild)
self.m_ebuild.is_stable = True
self.m_ebuild.package = 'test_package'
self.m_ebuild.package = 'test_package/test_package'
self.m_ebuild.version_no_rev = '0.0.1'
self.m_ebuild.current_revision = 1
self.m_ebuild.ebuild_path_no_revision = '/path/test_package-0.0.1'
self.m_ebuild.ebuild_path_no_version = '/path/test_package'
self.m_ebuild.ebuild_path = '/path/test_package-0.0.1-r1.ebuild'
self.revved_ebuild_path = '/path/test_package-0.0.1-r2.ebuild'
self.unstable_ebuild_path = '/path/test_package-9999.ebuild'
def testRevWorkOnEBuild(self):
self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input')
@ -197,8 +201,9 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
self.mox.ReplayAll()
marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
result = marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
self.mox.VerifyAll()
self.assertEqual(result, 'test_package/test_package-0.0.1-r2')
def testRevUnchangedEBuild(self):
self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input')
@ -229,8 +234,9 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
self.mox.ReplayAll()
marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
result = marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
self.mox.VerifyAll()
self.assertEqual(result, None)
def testRevMissingEBuild(self):
self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input')
@ -239,6 +245,11 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
self.mox.StubOutWithMock(cros_mark_as_stable, 'Die')
m_file = self.mox.CreateMock(file)
revved_ebuild_path = self.m_ebuild.ebuild_path
self.m_ebuild.ebuild_path = self.unstable_ebuild_path
self.m_ebuild.is_stable = False
self.m_ebuild.current_revision = 0
# Prepare mock fileinput. This tests to make sure both the commit id
# and keywords are changed correctly.
mock_file = ['EAPI=2', 'CROS_WORKON_COMMIT=old_id',
@ -247,25 +258,24 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
ebuild_9999 = self.m_ebuild.ebuild_path_no_version + '-9999.ebuild'
cros_mark_as_stable.os.path.exists(ebuild_9999).AndReturn(False)
cros_mark_as_stable.Die("Missing unstable ebuild: %s" % ebuild_9999)
cros_mark_as_stable.shutil.copyfile(ebuild_9999, self.revved_ebuild_path)
cros_mark_as_stable.fileinput.input(self.revved_ebuild_path,
cros_mark_as_stable.shutil.copyfile(ebuild_9999, revved_ebuild_path)
cros_mark_as_stable.fileinput.input(revved_ebuild_path,
inplace=1).AndReturn(mock_file)
m_file.write('EAPI=2')
m_file.write('CROS_WORKON_COMMIT="my_id"\n')
m_file.write('KEYWORDS="x86 arm"')
m_file.write('src_unpack(){}')
diff_cmd = ['diff', '-Bu', self.m_ebuild.ebuild_path,
self.revved_ebuild_path]
diff_cmd = ['diff', '-Bu', self.unstable_ebuild_path, revved_ebuild_path]
cros_mark_as_stable.RunCommand(diff_cmd, exit_code=True,
print_cmd=False, redirect_stderr=True,
redirect_stdout=True).AndReturn(1)
cros_mark_as_stable._SimpleRunCommand('git add ' + self.revved_ebuild_path)
cros_mark_as_stable._SimpleRunCommand('git rm ' + self.m_ebuild.ebuild_path)
cros_mark_as_stable._SimpleRunCommand('git add ' + revved_ebuild_path)
self.mox.ReplayAll()
marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
result = marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
self.mox.VerifyAll()
self.assertEqual(result, 'test_package/test_package-0.0.1-r1')
def testCommitChange(self):

View File

@ -34,23 +34,53 @@ DEFINE_boolean unmount $FLAGS_FALSE "Only tear down mounts."
DEFINE_boolean ssh_agent $FLAGS_TRUE "Import ssh agent."
# More useful help
FLAGS_HELP="USAGE: $0 [flags] [VAR=value] [-- \"command\"]
FLAGS_HELP="USAGE: $0 [flags] [VAR=value] [-- command [arg1] [arg2] ...]
One or more VAR=value pairs can be specified to export variables into
the chroot environment. For example:
$0 FOO=bar BAZ=bel
If [-- \"command\"] is present, runs the command inside the chroot,
after changing directory to /$USER/trunk/src/scripts. Note that the
command should be enclosed in quotes to prevent interpretation by the
shell before getting into the chroot. For example:
If [-- command] is present, runs the command inside the chroot,
after changing directory to /$USER/trunk/src/scripts. Note that neither
the command nor args should include single quotes. For example:
$0 -- \"./build_platform_packages.sh\"
$0 -- ./build_platform_packages.sh
Otherwise, provides an interactive shell.
"
# Double up on the first '--' argument. Why? For enter_chroot, we want to
# emulate the behavior of sudo for setting environment vars. That is, we want:
# ./enter_chroot [flags] [VAR=val] [-- command]
# ...but shflags ends up eating the '--' out of the command line and gives
# us back "VAR=val" and "command" together in one chunk. By doubling up, we
# end up getting what we want back from shflags.
#
# Examples of how people might be using enter_chroot:
# 1. ./enter_chroot [chroot_flags] VAR1=val1 VAR2=val2 -- cmd arg1 arg2
# Set env vars and run cmd w/ args
# 2. ./enter_chroot [chroot_flags] VAR1=val1 VAR2=val2
# Set env vars and run shell
# 3. ./enter_chroot [chroot_flags] -- cmd arg1 arg2
# Run cmd w/ args
# 4. ./enter_chroot [chroot_flags] VAR1=val1 VAR2=val2 cmd arg1 arg2
# Like #1 _if_ args aren't flags (if they are, enter_chroot will claim them)
# 5. ./enter_chroot [chroot_flags] cmd arg1 arg2
# Like #3 _if_ args aren't flags (if they are, enter_chroot will claim them)
_FLAGS_FIXED=''
_SAW_DASHDASH=0
while [ $# -gt 0 ]; do
_FLAGS_FIXED="${_FLAGS_FIXED:+${_FLAGS_FIXED} }'$1'"
if [ $_SAW_DASHDASH -eq 0 ] && [[ "$1" == "--" ]]; then
_FLAGS_FIXED="${_FLAGS_FIXED:+${_FLAGS_FIXED} }'--'"
_SAW_DASHDASH=1
fi
shift
done
eval set -- "${_FLAGS_FIXED}"
# Parse command line flags
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
@ -303,9 +333,9 @@ git config -f ${FLAGS_chroot}/home/${USER}/.gitconfig --replace-all user.email \
# Run command or interactive shell. Also include the non-chrooted path to
# the source trunk for scripts that may need to print it (e.g.
# build_image.sh).
sudo chroot "$FLAGS_chroot" sudo -i -u $USER $CHROOT_PASSTHRU \
sudo -- chroot "$FLAGS_chroot" sudo -i -u $USER $CHROOT_PASSTHRU \
EXTERNAL_TRUNK_PATH="${FLAGS_trunk}" LANG=C SSH_AGENT_PID="${SSH_AGENT_PID}" \
SSH_AUTH_SOCK="${SSH_AUTH_SOCK}" -- "$@"
SSH_AUTH_SOCK="${SSH_AUTH_SOCK}" "$@"
# Remove trap and explicitly unmount
trap - EXIT

View File

@ -329,6 +329,11 @@ function verify_image {
fi
}
function find_root_dev {
remote_sh "rootdev -s"
echo ${REMOTE_OUT}
}
function main() {
assert_outside_chroot
@ -356,6 +361,8 @@ function main() {
remote_reboot
fi
local initial_root_dev=$(find_root_dev)
if [ -z "${FLAGS_update_url}" ]; then
# Start local devserver if no update url specified.
start_dev_server
@ -386,6 +393,13 @@ function main() {
remote_sh "grep ^CHROMEOS_RELEASE_DESCRIPTION= /etc/lsb-release"
if [ ${FLAGS_verify} -eq ${FLAGS_TRUE} ]; then
verify_image
if [ "${initial_root_dev}" == "$(find_root_dev)" ]; then
# At this point, the software version didn't change, but we didn't
# switch partitions either. Means it was an update to the same version
# that failed.
die "The root partition did NOT change. The update failed."
fi
else
local release_description=$(echo ${REMOTE_OUT} | cut -d '=' -f 2)
info "Update was successful and rebooted to $release_description"

View File

@ -168,3 +168,23 @@ image_umount_partition() {
umount -d "$mount_point"
}
# Copy a partition from one image to another.
image_partition_copy() {
local src="$1"
local srcpart="$2"
local dst="$3"
local dstpart="$4"
local srcoffset=$(image_part_offset "${src}" "${srcpart}")
local dstoffset=$(image_part_offset "${dst}" "${dstpart}")
local length=$(image_part_size "${src}" "${srcpart}")
local dstlength=$(image_part_size "${dst}" "${dstpart}")
if [ "${length}" -gt "${dstlength}" ]; then
exit 1
fi
image_dump_partition "${src}" "${srcpart}" |
dd of="${dst}" bs=512 seek="${dstoffset}" conv=notrunc
}

View File

@ -70,16 +70,23 @@ function start_kvm() {
snapshot="-snapshot"
fi
local net_option="-net nic,model=virtio"
if [ -f "$(dirname $1)/.use_e1000" ]; then
info "Detected older image, using e1000 instead of virtio."
net_option="-net nic,model=e1000"
fi
sudo kvm -m 1024 \
-vga std \
-pidfile "${KVM_PID_FILE}" \
-daemonize \
-net nic,model=virtio \
${net_option} \
${nographics} \
${snapshot} \
-net user,hostfwd=tcp::${FLAGS_ssh_port}-:22 \
-hda "${1}"
info "KVM started with pid stored in ${KVM_PID_FILE}"
LIVE_VM_IMAGE="${1}"
fi
}

View File

@ -34,6 +34,11 @@ DEFINE_string release "" \
"Directory and file containing release image: /path/chromiumos_image.bin"
DEFINE_string subfolder "" \
"If set, the name of the subfolder to put the payload items inside"
DEFINE_string diskimg "" \
"If set, the name of the diskimage file to output"
DEFINE_boolean preserve ${FLAGS_FALSE} \
"If set, reuse the diskimage file, if available"
DEFINE_integer sectors 31277232 "Size of image in sectors"
# Parse command line
FLAGS "$@" || exit 1
@ -80,6 +85,35 @@ FACTORY_DIR="$(dirname "${FLAGS_factory}")"
RELEASE_IMAGE="$(basename "${FLAGS_release}")"
FACTORY_IMAGE="$(basename "${FLAGS_factory}")"
prepare_img() {
local outdev="$FLAGS_diskimg"
local sectors="$FLAGS_sectors"
local force_full="true"
# We'll need some code to put in the PMBR, for booting on legacy BIOS.
echo "Fetch PMBR"
local pmbrcode="$(mktemp -d)/gptmbr.bin"
sudo dd bs=512 count=1 if="${FLAGS_release}" of="${pmbrcode}" status=noxfer
echo "Prepare base disk image"
# Create an output file if requested, or if none exists.
if [ -b "${outdev}" ] ; then
echo "Using block device ${outdev}"
elif [ ! -e "${outdev}" -o \
"$(stat -c %s ${outdev})" != "$(( ${sectors} * 512 ))" -o \
"$FLAGS_preserve" = "$FLAGS_FALSE" ]; then
echo "Generating empty image file"
image_dump_partial_file /dev/zero 0 "${sectors}" |
dd of="${outdev}" bs=8M
else
echo "Reusing $outdev"
fi
# Create GPT partition table.
install_gpt "${outdev}" 0 0 "${pmbrcode}" 0 "${force_full}"
# Activate the correct partition.
cgpt add -i 2 -S 1 -P 1 "${outdev}"
}
prepare_omaha() {
sudo rm -rf "${OMAHA_DATA_DIR}/rootfs-test.gz"
@ -145,9 +179,6 @@ compress_and_hash_partition() {
fi
}
# Clean up stale config and data files.
prepare_omaha
# Decide if we should unpack partition
if image_has_part_tools; then
IMAGE_IS_UNPACKED=
@ -159,58 +190,98 @@ else
IMAGE_IS_UNPACKED=1
fi
# Get the release image.
pushd "${RELEASE_DIR}" >/dev/null
echo "Generating omaha release image from ${FLAGS_release}"
echo "Generating omaha factory image from ${FLAGS_factory}"
echo "Output omaha image to ${OMAHA_DATA_DIR}"
echo "Output omaha config to ${OMAHA_CONF}"
generate_img() {
local outdev="$FLAGS_diskimg"
local sectors="$FLAGS_sectors"
prepare_dir
prepare_img
if [ -n "${IMAGE_IS_UNPACKED}" ]; then
# Get the release image.
pushd "${RELEASE_DIR}" >/dev/null
echo "Release Kernel"
image_partition_copy "${RELEASE_IMAGE}" 2 "${outdev}" 4
echo "Release Rootfs"
image_partition_copy "${RELEASE_IMAGE}" 3 "${outdev}" 5
echo "OEM partition"
image_partition_copy "${RELEASE_IMAGE}" 8 "${outdev}" 8
popd >/dev/null
# Go to retrieve the factory test image.
pushd "${FACTORY_DIR}" >/dev/null
echo "Factory Kernel"
image_partition_copy "${FACTORY_IMAGE}" 2 "${outdev}" 2
echo "Factory Rootfs"
image_partition_copy "${FACTORY_IMAGE}" 3 "${outdev}" 3
echo "Factory Stateful"
image_partition_copy "${FACTORY_IMAGE}" 1 "${outdev}" 1
echo "EFI Partition"
image_partition_copy "${FACTORY_IMAGE}" 12 "${outdev}" 12
echo "Generated Image at $outdev."
echo "Done"
}
generate_omaha() {
# Clean up stale config and data files.
prepare_omaha
# Get the release image.
pushd "${RELEASE_DIR}" >/dev/null
echo "Generating omaha release image from ${FLAGS_release}"
echo "Generating omaha factory image from ${FLAGS_factory}"
echo "Output omaha image to ${OMAHA_DATA_DIR}"
echo "Output omaha config to ${OMAHA_CONF}"
prepare_dir
if [ -n "${IMAGE_IS_UNPACKED}" ]; then
echo "Unpacking image ${RELEASE_IMAGE} ..." >&2
sudo ./unpack_partitions.sh "${RELEASE_IMAGE}" 2>/dev/null
fi
fi
release_hash="$(compress_and_hash_memento_image "${RELEASE_IMAGE}")"
sudo chmod a+rw update.gz
mv update.gz rootfs-release.gz
mv rootfs-release.gz "${OMAHA_DATA_DIR}"
echo "release: ${release_hash}"
release_hash="$(compress_and_hash_memento_image "${RELEASE_IMAGE}")"
sudo chmod a+rw update.gz
mv update.gz rootfs-release.gz
mv rootfs-release.gz "${OMAHA_DATA_DIR}"
echo "release: ${release_hash}"
oem_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 8 "oem.gz")"
mv oem.gz "${OMAHA_DATA_DIR}"
echo "oem: ${oem_hash}"
oem_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 8 "oem.gz")"
mv oem.gz "${OMAHA_DATA_DIR}"
echo "oem: ${oem_hash}"
efi_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 12 "efi.gz")"
mv efi.gz "${OMAHA_DATA_DIR}"
echo "efi: ${efi_hash}"
popd >/dev/null
popd >/dev/null
# Go to retrieve the factory test image.
pushd "${FACTORY_DIR}" >/dev/null
prepare_dir
# Go to retrieve the factory test image.
pushd "${FACTORY_DIR}" >/dev/null
prepare_dir
if [ -n "${IMAGE_IS_UNPACKED}" ]; then
if [ -n "${IMAGE_IS_UNPACKED}" ]; then
echo "Unpacking image ${FACTORY_IMAGE} ..." >&2
sudo ./unpack_partitions.sh "${FACTORY_IMAGE}" 2>/dev/null
fi
fi
test_hash="$(compress_and_hash_memento_image "${FACTORY_IMAGE}")"
sudo chmod a+rw update.gz
mv update.gz rootfs-test.gz
mv rootfs-test.gz "${OMAHA_DATA_DIR}"
echo "test: ${test_hash}"
test_hash="$(compress_and_hash_memento_image "${FACTORY_IMAGE}")"
sudo chmod a+rw update.gz
mv update.gz rootfs-test.gz
mv rootfs-test.gz "${OMAHA_DATA_DIR}"
echo "test: ${test_hash}"
state_hash="$(compress_and_hash_partition "${FACTORY_IMAGE}" 1 "state.gz")"
mv state.gz "${OMAHA_DATA_DIR}"
echo "state: ${state_hash}"
state_hash="$(compress_and_hash_partition "${FACTORY_IMAGE}" 1 "state.gz")"
mv state.gz "${OMAHA_DATA_DIR}"
echo "state: ${state_hash}"
popd >/dev/null
efi_hash="$(compress_and_hash_partition "${FACTORY_IMAGE}" 12 "efi.gz")"
mv efi.gz "${OMAHA_DATA_DIR}"
echo "efi: ${efi_hash}"
if [ -n "${FLAGS_firmware_updater}" ]; then
popd >/dev/null
if [ -n "${FLAGS_firmware_updater}" ]; then
SHELLBALL="${FLAGS_firmware_updater}"
if [ ! -f "$SHELLBALL" ]; then
echo "Failed to find firmware updater: $SHELLBALL."
@ -220,13 +291,14 @@ if [ -n "${FLAGS_firmware_updater}" ]; then
firmware_hash="$(compress_and_hash_file "$SHELLBALL" "firmware.gz")"
mv firmware.gz "${OMAHA_DATA_DIR}"
echo "firmware: ${firmware_hash}"
fi
fi
# If the file does exist and we are using the subfolder flag we are going to
# append another config.
if [ -n "${FLAGS_subfolder}" ] &&
# If the file does exist and we are using the subfolder flag we are going to
# append another config.
if [ -n "${FLAGS_subfolder}" ] &&
[ -f "${OMAHA_CONF}" ]; then
# Remove the ']' from the last line of the file so we can add another config.
# Remove the ']' from the last line of the file
# so we can add another config.
while [ -s "${OMAHA_CONF}" ]; do
# If the last line is null
if [ -z "$(tail -1 "${OMAHA_CONF}")" ]; then
@ -247,15 +319,15 @@ if [ -n "${FLAGS_subfolder}" ] &&
if [ ! -s "${OMAHA_CONF}" ]; then
echo "config = [" >"${OMAHA_CONF}"
fi
else
else
echo "config = [" >"${OMAHA_CONF}"
fi
fi
if [ -n "${FLAGS_subfolder}" ]; then
if [ -n "${FLAGS_subfolder}" ]; then
subfolder="${FLAGS_subfolder}/"
fi
fi
echo -n "{
echo -n "{
'qual_ids': set([\"${FLAGS_board}\"]),
'factory_image': '${subfolder}rootfs-test.gz',
'factory_checksum': '${test_hash}',
@ -268,20 +340,28 @@ echo -n "{
'stateimg_image': '${subfolder}state.gz',
'stateimg_checksum': '${state_hash}'," >>"${OMAHA_CONF}"
if [ -n "${FLAGS_firmware_updater}" ] ; then
if [ -n "${FLAGS_firmware_updater}" ] ; then
echo -n "
'firmware_image': '${subfolder}firmware.gz',
'firmware_checksum': '${firmware_hash}'," >>"${OMAHA_CONF}"
fi
fi
echo -n "
echo -n "
},
]
" >>"${OMAHA_CONF}"
echo "The miniomaha server lives in src/platform/dev.
echo "The miniomaha server lives in src/platform/dev.
To validate the configuration, run:
python2.6 devserver.py --factory_config miniomaha.conf \
--validate_factory_config
To run the server:
python2.6 devserver.py --factory_config miniomaha.conf"
}
# Main
if [ -n "$FLAGS_diskimg" ]; then
generate_img
else
generate_omaha
fi

View File

@ -20,8 +20,8 @@ patch -d "${ROOT_FS_DIR}" -Np1 <<EOF
diff -Naur old/etc/init/boot-complete.conf new/etc/init/boot-complete.conf
--- old/etc/init/boot-complete.conf 2010-07-21 11:22:30.000000000 +0800
+++ new/etc/init/boot-complete.conf 2010-07-21 22:13:36.000000000 +0800
@@ -7 +7 @@
-start on login-prompt-ready
@@ -15 +15 @@
-start on login-prompt-visible
+start on started udev
EOF
@ -31,7 +31,7 @@ cat >"${ROOT_FS_DIR}/etc/init/factory.conf" <<EOF
# found in the LICENSE file.
description "Chrome OS factory startup stub"
author "chromium-os-dev@googlegroups.com"
author "chromium-os-dev@chromium.org"
start on stopped udev-addon
stop on starting halt or starting reboot
@ -55,6 +55,9 @@ cat >"${ROOT_FS_DIR}/etc/init/factorylog.conf" <<EOF
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
description "Print Chrome OS factory log to tty3"
author "chromium-os-dev@chromium.org"
start on started factory
stop on starting halt or starting reboot

View File

@ -76,7 +76,8 @@ if "PORTAGE_USERNAME" not in os.environ:
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.depgraph import depgraph as emerge_depgraph
from _emerge.depgraph import _frozen_depgraph_config
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
@ -479,24 +480,9 @@ class DepGraphGenerator(object):
cur_iuse, now_use, now_iuse)
return not flags
def GenDependencyTree(self, remote_pkgs):
"""Get dependency tree info from emerge.
TODO(): Update cros_extract_deps to also use this code.
Returns:
Dependency tree
"""
start = time.time()
def CreateDepgraph(self, emerge, packages):
"""Create an emerge depgraph object."""
# Setup emerge options.
#
# We treat dependency info a bit differently than emerge itself. Unless
# you're using --usepkgonly, we disable --getbinpkg and --usepkg here so
# that emerge will look at the dependencies of the source ebuilds rather
# than the binary dependencies. This helps ensure that we have the option
# of merging a package from source, if we want to switch to it with
# --workon and the dependencies have changed.
emerge = self.emerge
emerge_opts = emerge.opts.copy()
# Enable --emptytree so that we get the full tree, which we need for
@ -507,12 +493,86 @@ class DepGraphGenerator(object):
emerge_opts["--tree"] = True
emerge_opts["--emptytree"] = True
# Tell emerge not to worry about use flags yet. We handle those inside
# parallel_emerge itself. Further, when we use the --force-remote-binary
# flag, we don't emerge to reject a package just because it has different
# use flags.
emerge_opts.pop("--newuse", None)
emerge_opts.pop("--reinstall", None)
# Set up parameters.
params = create_depgraph_params(emerge_opts, emerge.action)
frozen_config = _frozen_depgraph_config(emerge.settings, emerge.trees,
emerge_opts, emerge.spinner)
backtrack_max = emerge_opts.get('--backtrack', 5)
runtime_pkg_mask = None
allow_backtracking = backtrack_max > 0
# Try up to backtrack_max times to create a working depgraph. Each time we
# run into a conflict, mask the offending package and try again.
# TODO(davidjames): When Portage supports --force-remote-binary directly,
# switch back to using the backtrack_depgraph function.
for i in range(backtrack_max + 1):
if i == backtrack_max:
# Looks like we hit the backtracking limit. Run the dependency
# calculation one more time (from scratch) to show the original error
# message.
runtime_pkg_mask = None
allow_backtracking = False
# Create a depgraph object.
depgraph = emerge_depgraph(emerge.settings, emerge.trees, emerge_opts,
params, emerge.spinner, frozen_config=frozen_config,
allow_backtracking=allow_backtracking,
runtime_pkg_mask=runtime_pkg_mask)
if i == 0:
for cpv in self.forced_remote_binary_packages:
# If --force-remote-binary was specified, we want to use this package
# regardless of its use flags. Unfortunately, Portage doesn't support
# ignoring use flags for just one package. To convince Portage to
# install the package, we trick Portage into thinking the package has
# the right use flags.
# TODO(davidjames): Update Portage to support --force-remote-binary
# directly, so that this hack isn't necessary.
pkg = depgraph._pkg(cpv, "binary", emerge.root_config)
pkgsettings = frozen_config.pkgsettings[pkg.root]
pkgsettings.setcpv(pkg)
pkg.use.enabled = pkgsettings["PORTAGE_USE"].split()
# Select the packages we want.
success, favorites = depgraph.select_files(packages)
if success:
break
elif depgraph.need_restart():
# Looks like we found some packages that can't be installed due to
# conflicts. Try again, masking out the conflicting packages.
runtime_pkg_mask = depgraph.get_runtime_pkg_mask()
elif allow_backtracking and i > 0:
# Looks like we tried all the possible combinations, and we still can't
# solve the graph. Stop backtracking, so that we can report an error
# message.
runtime_pkg_mask = None
allow_backtracking = False
else:
break
# Delete the --tree option, because we don't really want to display a
# tree. We just wanted to get emerge to leave uninstall instructions on
# the graph. Later, when we display the graph, we'll want standard-looking
# output, so removing the --tree option is important.
frozen_config.myopts.pop("--tree", None)
emerge.depgraph = depgraph
# Is it impossible to honor the user's request? Bail!
if not success:
depgraph.display_problems()
sys.exit(1)
def GenDependencyTree(self, remote_pkgs):
"""Get dependency tree info from emerge.
TODO(): Update cros_extract_deps to also use this code.
Returns:
Dependency tree
"""
start = time.time()
emerge = self.emerge
# Create a list of packages to merge
packages = set(emerge.cmdline_packages[:])
@ -527,9 +587,17 @@ class DepGraphGenerator(object):
full_pkgname in self.force_remote_binary):
forced_pkgs.setdefault(full_pkgname, []).append(pkg)
# Add forced binary packages to the dependency list. This is necessary
# to ensure that the install plan contains the right package.
#
# Putting the forced binary package at the beginning of the list is an
# optimization that helps avoid unnecessary backtracking (e.g., if
# Portage first selects the wrong version, and then backtracks later, it
# takes a bit longer and uses up an unnecessary backtrack iteration.)
packages = list(packages)
for pkgs in forced_pkgs.values():
forced_package = portage.versions.best(pkgs)
packages.add("=%s" % forced_package)
packages.insert(0, "=%s" % forced_package)
self.forced_remote_binary_packages.add(forced_package)
# Tell emerge to be quiet. We print plenty of info ourselves so we don't
@ -544,18 +612,8 @@ class DepGraphGenerator(object):
if "--quiet" not in emerge.opts:
print "Calculating deps..."
# Ask portage to build a dependency graph. with the options we specified
# above.
params = create_depgraph_params(emerge_opts, emerge.action)
success, depgraph, _ = backtrack_depgraph(
emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
packages, emerge.spinner)
emerge.depgraph = depgraph
# Is it impossible to honor the user's request? Bail!
if not success:
depgraph.display_problems()
sys.exit(1)
self.CreateDepgraph(emerge, packages)
depgraph = emerge.depgraph
# Build our own tree from the emerge digraph.
deps_tree = {}
@ -604,11 +662,6 @@ class DepGraphGenerator(object):
vardb = frozen_config.trees[root]["vartree"].dbapi
pkgsettings = frozen_config.pkgsettings[root]
# It's time to start worrying about use flags, if necessary.
for flag in ("--newuse", "--reinstall"):
if flag in emerge.opts:
emerge_opts[flag] = emerge.opts[flag]
deps_info = {}
for pkg in depgraph.altlist():
if isinstance(pkg, Package):
@ -636,12 +689,6 @@ class DepGraphGenerator(object):
deps_info[str(pkg.cpv)] = {"idx": len(deps_info),
"optional": optional}
# Delete the --tree option, because we don't really want to display a
# tree. We just wanted to get emerge to leave uninstall instructions on
# the graph. Later, when we display the graph, we'll want standard-looking
# output, so removing the --tree option is important.
frozen_config.myopts.pop("--tree", None)
seconds = time.time() - start
if "--quiet" not in emerge.opts:
print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)

View File

@ -14,6 +14,8 @@
DEFINE_string board "" "Override board reported by target"
DEFINE_string partition "" "Override kernel partition reported by target"
DEFINE_boolean modules false "Update modules on target"
DEFINE_boolean firmware false "Update firmware on target"
function cleanup {
cleanup_remote_access
@ -74,6 +76,28 @@ function main() {
remote_sh dd if=/tmp/new_kern.bin of="${FLAGS_partition}"
if [[ ${FLAGS_modules} -eq ${FLAGS_TRUE} ]]; then
echo "copying modules"
cmd="tar -C /build/${FLAGS_board}/lib/modules -cjf new_modules.tar ."
./enter_chroot.sh -- ${cmd}
remote_cp_to new_modules.tar /tmp/
remote_sh mount -o remount,rw /
remote_sh tar -C /lib/modules -xjf /tmp/new_modules.tar
fi
if [[ ${FLAGS_firmware} -eq ${FLAGS_TRUE} ]]; then
echo "copying firmware"
cmd="tar -C /build/${FLAGS_board}/lib/firmware -cjf new_firmware.tar ."
./enter_chroot.sh -- ${cmd}
remote_cp_to new_firmware.tar /tmp/
remote_sh mount -o remount,rw /
remote_sh tar -C /lib/firmware -xjf /tmp/new_firmware.tar
fi
remote_reboot
remote_sh uname -r -v