commit 3dfb64fd55
Merge branch 'master' of ssh://gitrw.chromium.org:9222/crosutils
@@ -74,10 +74,10 @@ def RepoSync(buildroot, retries=_DEFAULT_RETRIES):
 # The --trace option ensures that repo shows the output from git. This
 # is needed so that the buildbot can kill us if git is not making
 # progress.
+RunCommand(['repo', '--trace', 'sync'], cwd=buildroot)
 RunCommand(['repo', 'forall', '-c', 'git', 'config',
 'url.ssh://git@gitrw.chromium.org:9222.insteadof',
 'http://git.chromium.org/git'], cwd=buildroot)
-RunCommand(['repo', '--trace', 'sync'], cwd=buildroot)
 retries = 0
 except:
 retries -= 1
@@ -367,14 +367,6 @@ def _Build(buildroot, emptytree):
 RunCommand(cmd, cwd=cwd, enter_chroot=True)
 
 
-def _BuildChrome(buildroot, board, chrome_atom_to_build):
-"""Wrapper for emerge call to build Chrome."""
-cwd = os.path.join(buildroot, 'src', 'scripts')
-RunCommand(['emerge-%s' % board,
-'=%s' % chrome_atom_to_build],
-cwd=cwd, enter_chroot=True)
-
-
 def _EnableLocalAccount(buildroot):
 cwd = os.path.join(buildroot, 'src', 'scripts')
 # Set local account for test images.
@@ -696,13 +688,8 @@ def main():
 buildconfig['board'], rev_overlays)
 
 _EnableLocalAccount(buildroot)
-# Doesn't rebuild without acquiring more source.
-if options.sync:
 _Build(buildroot, emptytree)
-
-if chrome_atom_to_build:
-_BuildChrome(buildroot, buildconfig['board'], chrome_atom_to_build)
 
 if buildconfig['unittests'] and options.tests:
 _RunUnitTests(buildroot)
 
@@ -85,14 +85,14 @@ class AUTest(object):
 if self.use_delta_updates:
 try:
 self.source_image = src_image
-self._UpdateImageReportError(image)
+self._UpdateImageReportError(image, stateful_change)
 except:
 Warning('Delta update failed, disabling delta updates and retrying.')
 self.use_delta_updates = False
 self.source_image = ''
-self._UpdateImageReportError(image)
+self._UpdateImageReportError(image, stateful_change)
 else:
-self._UpdateImageReportError(image)
+self._UpdateImageReportError(image, stateful_change)
 
 def _UpdateImageReportError(self, image_path, stateful_change='old',
 proxy_port=None):
@@ -355,9 +355,19 @@ class AUTest(object):
 self.data_size += len(data)
 return data
 
 
 self._AttemptUpdateWithFilter(DelayedFilter())
 
+def SimpleTest(self):
+"""A simple update that updates the target image to itself.
+
+We explicitly don't use test prefix so that isn't run by default. Can be
+run using test_prefix option.
+"""
+self.PrepareBase(target_image_path)
+self.UpdateImage(target_image_path)
+self.VerifyImage(100)
+
 
 class RealAUTest(unittest.TestCase, AUTest):
 """Test harness for updating real images."""
 
@@ -436,12 +446,10 @@ class VirtualAUTest(unittest.TestCase, AUTest):
 if os.path.exists(pid_file):
 Warning('Existing %s found. Deleting and killing process' %
 pid_file)
-pid = RunCommand(['sudo', 'cat', pid_file], redirect_stdout=True,
-enter_chroot=False)
-if pid:
-RunCommand(['sudo', 'kill', pid.strip()], error_ok=True,
-enter_chroot=False)
-RunCommand(['sudo', 'rm', pid_file], enter_chroot=False)
+RunCommand(['./cros_stop_vm', '--kvm_pid=%s' % pid_file],
+cwd=self.crosutilsbin)
+assert not os.path.exists(pid_file)
 
 def setUp(self):
 """Unit test overriden method. Is called before every test."""
@@ -264,9 +264,12 @@ def MarkChromeEBuildAsStable(stable_candidate, unstable_ebuild, chrome_rev,
 
 new_ebuild_path = base_path + ('%s-r1.ebuild' % portage_suffix)
 
+# Mark latest release and sticky branches as stable.
+mark_stable = chrome_rev != TIP_OF_TRUNK
+
 cros_mark_as_stable.EBuildStableMarker.MarkAsStable(
 unstable_ebuild.ebuild_path, new_ebuild_path, 'CROS_SVN_COMMIT', commit,
-make_stable=False)
+make_stable=mark_stable)
 new_ebuild = ChromeEBuild(new_ebuild_path)
 if stable_candidate and (
 stable_candidate.chrome_version == new_ebuild.chrome_version):
@@ -321,12 +324,21 @@ def main():
 commit_to_use = _GetTipOfTrunkSvnRevision()
 elif chrome_rev == LATEST_RELEASE:
 version_to_uprev = _GetLatestRelease()
+# Don't rev on stable branch for latest_release.
+if re.match('%s\.\d+' % sticky_branch, version_to_uprev):
+Info('Latest release is sticky branch. Nothing to do.')
+return
 else:
 version_to_uprev = _GetLatestRelease(sticky_branch)
 
 stable_candidate = FindChromeUprevCandidate(stable_ebuilds, chrome_rev,
 sticky_branch)
 
+if stable_candidate:
+Info('Stable candidate found %s' % stable_candidate)
+else:
+Info('No stable candidate found.')
+
 os.chdir(overlay_dir)
 work_branch = cros_mark_as_stable.GitBranch(
 cros_mark_as_stable.STABLE_BRANCH_NAME, options.tracking_branch)
@@ -41,7 +41,7 @@ if [ -n "${FLAGS_payload}" ]; then
 fi
 
 if [ -n "${FLAGS_proxy_port}" ]; then
-IMAGE_ARGS="--proxy_port=${FLAGS_proxy_port}"
+IMAGE_ARGS="${IMAGE_ARGS} --proxy_port=${FLAGS_proxy_port}"
 fi
 
 $(dirname $0)/../image_to_live.sh \
bin/cros_stop_vm (new executable file, 24 lines)
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Simple wrapper scipt to stop a vm specified from a pid file.
+
+. "$(dirname $0)/../common.sh"
+. "$(dirname $0)/../lib/cros_vm_lib.sh"
+
+set -e
+
+# Parse command line.
+FLAGS "$@" || exit 1
+eval set -- "${FLAGS_ARGV}"
+
+# Requires pid file to be set.
+if [ -z "${FLAGS_kvm_pid}" ]; then
+die "Must specify file with pid of kvm to kill."
+fi
+
+KVM_PID_FILE="${FLAGS_kvm_pid}"
+stop_kvm
bin/ctest.py (12 lines changed)
@@ -22,6 +22,7 @@ from cros_build_lib import RunCommand
 from cros_build_lib import Warning
 
 _IMAGE_TO_EXTRACT = 'chromiumos_test_image.bin'
+_NEW_STYLE_VERSION = '0.9.131.0'
 
 class HTMLDirectoryParser(HTMLParser.HTMLParser):
 """HTMLParser for parsing the default apache file index."""
@@ -216,6 +217,13 @@ def GrabZipAndExtractImage(zip_url, download_folder, image_name) :
 fh.write(zip_url)
 fh.close()
 
+version = zip_url.split('/')[-2]
+if not _GreaterVersion(version, _NEW_STYLE_VERSION) == version:
+# If the version isn't ready for new style, touch file to use old style.
+old_style_touch_path = os.path.join(download_folder, '.use_e1000')
+fh = open(old_style_touch_path, 'w+')
+fh.close()
+
 
 def RunAUTestHarness(board, channel, latest_url_base, zip_server_base,
 no_graphics, type, remote):
|
|||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
try:
|
|
||||||
main()
|
main()
|
||||||
except Exception:
|
|
||||||
print "Got exception."
|
|
||||||
traceback.print_exc(file=sys.stdout)
|
|
||||||
|
|
||||||
|
@@ -146,6 +146,7 @@ cros_secure
 kern_guid=%U
 tpm_tis.force=1
 tpm_tis.interrupts=0
+nmi_watchdog=1
 EOF
 WORK="${WORK} ${FLAGS_working_dir}/config.txt"
 
common.sh (73 lines changed)
@@ -129,21 +129,59 @@ CHROOT_TRUNK_DIR="/home/$USER/trunk"
 
 # Install make for portage ebuilds. Used by build_image and gmergefs.
 # TODO: Is /usr/local/autotest-chrome still used by anyone?
-DEFAULT_INSTALL_MASK="/usr/include /usr/man /usr/share/man /usr/share/doc \
-/usr/share/gtk-doc /usr/share/gtk-2.0 /usr/lib/gtk-2.0/include \
-/usr/share/info /usr/share/aclocal /usr/lib/gcc /usr/lib/pkgconfig \
-/usr/share/pkgconfig /usr/share/gettext /usr/share/readline /etc/runlevels \
-/usr/share/openrc /lib/rc *.a *.la /etc/init.d /usr/lib/debug
-/usr/local/autotest /usr/local/autotest-chrome"
+DEFAULT_INSTALL_MASK="
+*.a
+*.la
+/etc/init.d
+/etc/runlevels
+/lib/rc
+/usr/bin/Xnest
+/usr/bin/Xvfb
+/usr/include
+/usr/lib/debug
+/usr/lib/gcc
+/usr/lib/gtk-2.0/include
+/usr/lib/pkgconfig
+/usr/local/autotest
+/usr/local/autotest-chrome
+/usr/man
+/usr/share/aclocal
+/usr/share/doc
+/usr/share/gettext
+/usr/share/gtk-2.0
+/usr/share/gtk-doc
+/usr/share/info
+/usr/share/man
+/usr/share/openrc
+/usr/share/pkgconfig
+/usr/share/readline
+"
 
-FACTORY_INSTALL_MASK="/opt/google/chrome /opt/google/o3d /opt/netscape \
-/opt/google/talkplugin /opt/Qualcomm /opt/Synaptics \
-/usr/lib/dri /usr/lib/python2.6/test \
-/usr/share/chewing /usr/share/fonts \
-/usr/share/ibus-pinyin /usr/share/libhangul /usr/share/locale \
-/usr/share/m17n /usr/share/mime /usr/share/sounds /usr/share/tts \
-/usr/share/X11 /usr/share/zoneinfo /usr/lib/debug
-/usr/local/autotest /usr/local/autotest-chrome /usr/local/autotest-pkgs"
+FACTORY_INSTALL_MASK="
+/opt/Qualcomm
+/opt/Synaptics
+/opt/google/chrome
+/opt/google/o3d
+/opt/google/talkplugin
+/opt/netscape
+/usr/lib/debug
+/usr/lib/dri
+/usr/lib/python2.6/test
+/usr/local/autotest
+/usr/local/autotest-chrome
+/usr/local/autotest-pkgs
+/usr/share/X11
+/usr/share/chewing
+/usr/share/fonts
+/usr/share/ibus-pinyin
+/usr/share/libhangul
+/usr/share/locale
+/usr/share/m17n
+/usr/share/mime
+/usr/share/sounds
+/usr/share/tts
+/usr/share/zoneinfo
+"
 
 # Check to ensure not running old scripts
 V_REVERSE='[7m'
@@ -234,12 +272,9 @@ function restart_in_chroot_if_needed {
 # NB: Pass in ARGV: restart_in_chroot_if_needed "$@"
 if [ $INSIDE_CHROOT -ne 1 ]
 then
-local abspath=$(readlink -f "$0")
-# strip everything up to (and including) /src/scripts/ from abspath
-local path_from_scripts="${abspath##*/src/scripts/}"
+# Equivalent to enter_chroot.sh -- <current command>
 exec $SCRIPTS_DIR/enter_chroot.sh -- \
-"$CHROOT_TRUNK_DIR/src/scripts/$path_from_scripts" "$@"
-exit
+$CHROOT_TRUNK_DIR/src/scripts/$(basename $0) "$@"
 fi
 }
 
@@ -78,15 +78,15 @@ def _Print(message):
 Info(message)
 
 
-def _CleanStalePackages(board, package_array):
+def _CleanStalePackages(board, package_atoms):
 """Cleans up stale package info from a previous build."""
-Info('Cleaning up stale packages %s.' % package_array)
+Info('Cleaning up stale packages %s.' % package_atoms)
 unmerge_board_cmd = ['emerge-%s' % board, '--unmerge']
-unmerge_board_cmd.extend(package_array)
+unmerge_board_cmd.extend(package_atoms)
 RunCommand(unmerge_board_cmd)
 
 unmerge_host_cmd = ['sudo', 'emerge', '--unmerge']
-unmerge_host_cmd.extend(package_array)
+unmerge_host_cmd.extend(package_atoms)
 RunCommand(unmerge_host_cmd)
 
 RunCommand(['eclean-%s' % board, '-d', 'packages'], redirect_stderr=True)
@@ -252,7 +252,7 @@ def PushChange(stable_branch, tracking_branch):
 merge_branch_name = 'merge_branch'
 for push_try in range(num_retries + 1):
 try:
-_SimpleRunCommand('git remote update')
+_SimpleRunCommand('repo sync .')
 merge_branch = GitBranch(merge_branch_name, tracking_branch)
 merge_branch.CreateBranch()
 if not merge_branch.Exists():
@@ -319,15 +319,15 @@ class EBuild(object):
 """Sets up data about an ebuild from its path."""
 from portage.versions import pkgsplit
 unused_path, self.category, self.pkgname, filename = path.rsplit('/', 3)
-unused_pkgname, version_no_rev, rev = pkgsplit(
+unused_pkgname, self.version_no_rev, rev = pkgsplit(
 filename.replace('.ebuild', ''))
 
 self.ebuild_path_no_version = os.path.join(
 os.path.dirname(path), self.pkgname)
 self.ebuild_path_no_revision = '%s-%s' % (self.ebuild_path_no_version,
-version_no_rev)
+self.version_no_rev)
 self.current_revision = int(rev.replace('r', ''))
-self.version = '%s-%s' % (version_no_rev, rev)
+self.version = '%s-%s' % (self.version_no_rev, rev)
 self.package = '%s/%s' % (self.category, self.pkgname)
 self.ebuild_path = path
 
|
|||||||
OSError: Error occurred while creating a new ebuild.
|
OSError: Error occurred while creating a new ebuild.
|
||||||
IOError: Error occurred while writing to the new revved ebuild file.
|
IOError: Error occurred while writing to the new revved ebuild file.
|
||||||
Returns:
|
Returns:
|
||||||
True if the revved package is different than the old ebuild.
|
If the revved package is different than the old ebuild, return the full
|
||||||
|
revved package name, including the version number. Otherwise, return None.
|
||||||
"""
|
"""
|
||||||
if self._ebuild.is_stable:
|
if self._ebuild.is_stable:
|
||||||
new_stable_ebuild_path = '%s-r%d.ebuild' % (
|
stable_version_no_rev = self._ebuild.version_no_rev
|
||||||
self._ebuild.ebuild_path_no_revision,
|
|
||||||
self._ebuild.current_revision + 1)
|
|
||||||
else:
|
else:
|
||||||
# If given unstable ebuild, use 0.0.1 rather than 9999.
|
# If given unstable ebuild, use 0.0.1 rather than 9999.
|
||||||
new_stable_ebuild_path = '%s-0.0.1-r%d.ebuild' % (
|
stable_version_no_rev = '0.0.1'
|
||||||
self._ebuild.ebuild_path_no_version,
|
|
||||||
|
new_version = '%s-r%d' % (stable_version_no_rev,
|
||||||
self._ebuild.current_revision + 1)
|
self._ebuild.current_revision + 1)
|
||||||
|
new_stable_ebuild_path = '%s-%s.ebuild' % (
|
||||||
|
self._ebuild.ebuild_path_no_version, new_version)
|
||||||
|
|
||||||
_Print('Creating new stable ebuild %s' % new_stable_ebuild_path)
|
_Print('Creating new stable ebuild %s' % new_stable_ebuild_path)
|
||||||
unstable_ebuild_path = ('%s-9999.ebuild' %
|
unstable_ebuild_path = ('%s-9999.ebuild' %
|
||||||
@ -480,7 +482,7 @@ class EBuildStableMarker(object):
|
|||||||
if 0 == RunCommand(diff_cmd, exit_code=True, redirect_stdout=True,
|
if 0 == RunCommand(diff_cmd, exit_code=True, redirect_stdout=True,
|
||||||
redirect_stderr=True, print_cmd=gflags.FLAGS.verbose):
|
redirect_stderr=True, print_cmd=gflags.FLAGS.verbose):
|
||||||
os.unlink(new_stable_ebuild_path)
|
os.unlink(new_stable_ebuild_path)
|
||||||
return False
|
return None
|
||||||
else:
|
else:
|
||||||
_Print('Adding new stable ebuild to git')
|
_Print('Adding new stable ebuild to git')
|
||||||
_SimpleRunCommand('git add %s' % new_stable_ebuild_path)
|
_SimpleRunCommand('git add %s' % new_stable_ebuild_path)
|
||||||
@ -489,7 +491,7 @@ class EBuildStableMarker(object):
|
|||||||
_Print('Removing old ebuild from git')
|
_Print('Removing old ebuild from git')
|
||||||
_SimpleRunCommand('git rm %s' % old_ebuild_path)
|
_SimpleRunCommand('git rm %s' % old_ebuild_path)
|
||||||
|
|
||||||
return True
|
return '%s-%s' % (self._ebuild.package, new_version)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def CommitChange(cls, message):
|
def CommitChange(cls, message):
|
||||||
@@ -556,16 +558,18 @@ def main(argv):
 
 # Contains the array of packages we actually revved.
 revved_packages = []
+new_package_atoms = []
 for ebuild in ebuilds:
 try:
 _Print('Working on %s' % ebuild.package)
 worker = EBuildStableMarker(ebuild)
 commit_id = ebuild.GetCommitId()
-if worker.RevWorkOnEBuild(commit_id):
+new_package = worker.RevWorkOnEBuild(commit_id)
+if new_package:
 message = _GIT_COMMIT_MESSAGE % (ebuild.package, commit_id)
 worker.CommitChange(message)
 revved_packages.append(ebuild.package)
+new_package_atoms.append('=%s' % new_package)
 except (OSError, IOError):
 Warning('Cannot rev %s\n' % ebuild.package,
 'Note you will have to go into %s '
@@ -573,7 +577,7 @@ def main(argv):
 raise
 
 if revved_packages:
-_CleanStalePackages(gflags.FLAGS.board, revved_packages)
+_CleanStalePackages(gflags.FLAGS.board, new_package_atoms)
 if gflags.FLAGS.drop_file:
 fh = open(gflags.FLAGS.drop_file, 'w')
 fh.write(' '.join(revved_packages))
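Note: with the change above, RevWorkOnEBuild() now returns a full package-version string, and main() turns it into an emerge atom for _CleanStalePackages. An illustration using the fixture values from the unit tests below (the board name is made up):

    new_package = 'test_package/test_package-0.0.1-r2'  # returned by RevWorkOnEBuild()
    revved_packages = ['test_package/test_package']      # drop_file still records names without versions
    new_package_atoms = ['=%s' % new_package]
    # _CleanStalePackages('x86-generic', new_package_atoms) then runs, e.g.:
    #   emerge-x86-generic --unmerge =test_package/test_package-0.0.1-r2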
@@ -33,7 +33,7 @@ class NonClassTests(mox.MoxTestBase):
 cros_mark_as_stable.GitBranch.Exists().AndReturn(True)
 cros_mark_as_stable._SimpleRunCommand('git log --format=format:%s%n%n%b ' +
 self._tracking_branch + '..').AndReturn(git_log)
-cros_mark_as_stable._SimpleRunCommand('git remote update')
+cros_mark_as_stable._SimpleRunCommand('repo sync .')
 cros_mark_as_stable._SimpleRunCommand('git merge --squash %s' %
 self._branch)
 cros_mark_as_stable._SimpleRunCommand('git commit -m "%s"' %
@@ -129,6 +129,7 @@ class EBuildTest(mox.MoxTestBase):
 self.mox.ReplayAll()
 fake_ebuild = cros_mark_as_stable.EBuild(fake_ebuild_path)
 self.mox.VerifyAll()
+self.assertEquals(fake_ebuild.version_no_rev, '0.0.1')
 self.assertEquals(fake_ebuild.ebuild_path_no_revision,
 '/path/to/test_package/test_package-0.0.1')
 self.assertEquals(fake_ebuild.ebuild_path_no_version,
@@ -144,6 +145,7 @@ class EBuildTest(mox.MoxTestBase):
 fake_ebuild = cros_mark_as_stable.EBuild(fake_ebuild_path)
 self.mox.VerifyAll()
 
+self.assertEquals(fake_ebuild.version_no_rev, '9999')
 self.assertEquals(fake_ebuild.ebuild_path_no_revision,
 '/path/to/test_package/test_package-9999')
 self.assertEquals(fake_ebuild.ebuild_path_no_version,
@@ -160,12 +162,14 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
 self.mox.StubOutWithMock(os, 'unlink')
 self.m_ebuild = self.mox.CreateMock(cros_mark_as_stable.EBuild)
 self.m_ebuild.is_stable = True
-self.m_ebuild.package = 'test_package'
+self.m_ebuild.package = 'test_package/test_package'
+self.m_ebuild.version_no_rev = '0.0.1'
 self.m_ebuild.current_revision = 1
 self.m_ebuild.ebuild_path_no_revision = '/path/test_package-0.0.1'
 self.m_ebuild.ebuild_path_no_version = '/path/test_package'
 self.m_ebuild.ebuild_path = '/path/test_package-0.0.1-r1.ebuild'
 self.revved_ebuild_path = '/path/test_package-0.0.1-r2.ebuild'
+self.unstable_ebuild_path = '/path/test_package-9999.ebuild'
 
 def testRevWorkOnEBuild(self):
 self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input')
@@ -197,8 +201,9 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
 
 self.mox.ReplayAll()
 marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
-marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
+result = marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
 self.mox.VerifyAll()
+self.assertEqual(result, 'test_package/test_package-0.0.1-r2')
 
 def testRevUnchangedEBuild(self):
 self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input')
@@ -229,8 +234,9 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
 
 self.mox.ReplayAll()
 marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
-marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
+result = marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
 self.mox.VerifyAll()
+self.assertEqual(result, None)
 
 def testRevMissingEBuild(self):
 self.mox.StubOutWithMock(cros_mark_as_stable.fileinput, 'input')
@@ -239,6 +245,11 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
 self.mox.StubOutWithMock(cros_mark_as_stable, 'Die')
 m_file = self.mox.CreateMock(file)
 
+revved_ebuild_path = self.m_ebuild.ebuild_path
+self.m_ebuild.ebuild_path = self.unstable_ebuild_path
+self.m_ebuild.is_stable = False
+self.m_ebuild.current_revision = 0
+
 # Prepare mock fileinput. This tests to make sure both the commit id
 # and keywords are changed correctly.
 mock_file = ['EAPI=2', 'CROS_WORKON_COMMIT=old_id',
@@ -247,25 +258,24 @@ class EBuildStableMarkerTest(mox.MoxTestBase):
 ebuild_9999 = self.m_ebuild.ebuild_path_no_version + '-9999.ebuild'
 cros_mark_as_stable.os.path.exists(ebuild_9999).AndReturn(False)
 cros_mark_as_stable.Die("Missing unstable ebuild: %s" % ebuild_9999)
-cros_mark_as_stable.shutil.copyfile(ebuild_9999, self.revved_ebuild_path)
-cros_mark_as_stable.fileinput.input(self.revved_ebuild_path,
+cros_mark_as_stable.shutil.copyfile(ebuild_9999, revved_ebuild_path)
+cros_mark_as_stable.fileinput.input(revved_ebuild_path,
 inplace=1).AndReturn(mock_file)
 m_file.write('EAPI=2')
 m_file.write('CROS_WORKON_COMMIT="my_id"\n')
 m_file.write('KEYWORDS="x86 arm"')
 m_file.write('src_unpack(){}')
-diff_cmd = ['diff', '-Bu', self.m_ebuild.ebuild_path,
-self.revved_ebuild_path]
+diff_cmd = ['diff', '-Bu', self.unstable_ebuild_path, revved_ebuild_path]
 cros_mark_as_stable.RunCommand(diff_cmd, exit_code=True,
 print_cmd=False, redirect_stderr=True,
 redirect_stdout=True).AndReturn(1)
-cros_mark_as_stable._SimpleRunCommand('git add ' + self.revved_ebuild_path)
-cros_mark_as_stable._SimpleRunCommand('git rm ' + self.m_ebuild.ebuild_path)
+cros_mark_as_stable._SimpleRunCommand('git add ' + revved_ebuild_path)
 
 self.mox.ReplayAll()
 marker = cros_mark_as_stable.EBuildStableMarker(self.m_ebuild)
-marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
+result = marker.RevWorkOnEBuild('my_id', redirect_file=m_file)
 self.mox.VerifyAll()
+self.assertEqual(result, 'test_package/test_package-0.0.1-r1')
 
 
 def testCommitChange(self):
@@ -34,23 +34,53 @@ DEFINE_boolean unmount $FLAGS_FALSE "Only tear down mounts."
 DEFINE_boolean ssh_agent $FLAGS_TRUE "Import ssh agent."
 
 # More useful help
-FLAGS_HELP="USAGE: $0 [flags] [VAR=value] [-- \"command\"]
+FLAGS_HELP="USAGE: $0 [flags] [VAR=value] [-- command [arg1] [arg2] ...]
 
 One or more VAR=value pairs can be specified to export variables into
 the chroot environment. For example:
 
 $0 FOO=bar BAZ=bel
 
-If [-- \"command\"] is present, runs the command inside the chroot,
-after changing directory to /$USER/trunk/src/scripts. Note that the
-command should be enclosed in quotes to prevent interpretation by the
-shell before getting into the chroot. For example:
+If [-- command] is present, runs the command inside the chroot,
+after changing directory to /$USER/trunk/src/scripts. Note that neither
+the command nor args should include single quotes. For example:
 
-$0 -- \"./build_platform_packages.sh\"
+$0 -- ./build_platform_packages.sh
 
 Otherwise, provides an interactive shell.
 "
 
+# Double up on the first '--' argument. Why? For enter_chroot, we want to
+# emulate the behavior of sudo for setting environment vars. That is, we want:
+# ./enter_chroot [flags] [VAR=val] [-- command]
+# ...but shflags ends up eating the '--' out of the command line and gives
+# us back "VAR=val" and "command" together in one chunk. By doubling up, we
+# end up getting what we want back from shflags.
+#
+# Examples of how people might be using enter_chroot:
+# 1. ./enter_chroot [chroot_flags] VAR1=val1 VAR2=val2 -- cmd arg1 arg2
+# Set env vars and run cmd w/ args
+# 2. ./enter_chroot [chroot_flags] VAR1=val1 VAR2=val2
+# Set env vars and run shell
+# 3. ./enter_chroot [chroot_flags] -- cmd arg1 arg2
+# Run cmd w/ args
+# 4. ./enter_chroot [chroot_flags] VAR1=val1 VAR2=val2 cmd arg1 arg2
+# Like #1 _if_ args aren't flags (if they are, enter_chroot will claim them)
+# 5. ./enter_chroot [chroot_flags] cmd arg1 arg2
+# Like #3 _if_ args aren't flags (if they are, enter_chroot will claim them)
+_FLAGS_FIXED=''
+_SAW_DASHDASH=0
+while [ $# -gt 0 ]; do
+_FLAGS_FIXED="${_FLAGS_FIXED:+${_FLAGS_FIXED} }'$1'"
+if [ $_SAW_DASHDASH -eq 0 ] && [[ "$1" == "--" ]]; then
+_FLAGS_FIXED="${_FLAGS_FIXED:+${_FLAGS_FIXED} }'--'"
+_SAW_DASHDASH=1
+fi
+shift
+done
+eval set -- "${_FLAGS_FIXED}"
+
 
 # Parse command line flags
 FLAGS "$@" || exit 1
 eval set -- "${FLAGS_ARGV}"
@@ -303,9 +333,9 @@ git config -f ${FLAGS_chroot}/home/${USER}/.gitconfig --replace-all user.email \
 # Run command or interactive shell. Also include the non-chrooted path to
 # the source trunk for scripts that may need to print it (e.g.
 # build_image.sh).
-sudo chroot "$FLAGS_chroot" sudo -i -u $USER $CHROOT_PASSTHRU \
+sudo -- chroot "$FLAGS_chroot" sudo -i -u $USER $CHROOT_PASSTHRU \
 EXTERNAL_TRUNK_PATH="${FLAGS_trunk}" LANG=C SSH_AGENT_PID="${SSH_AGENT_PID}" \
-SSH_AUTH_SOCK="${SSH_AUTH_SOCK}" -- "$@"
+SSH_AUTH_SOCK="${SSH_AUTH_SOCK}" "$@"
 
 # Remove trap and explicitly unmount
 trap - EXIT
@@ -329,6 +329,11 @@ function verify_image {
 fi
 }
 
+function find_root_dev {
+remote_sh "rootdev -s"
+echo ${REMOTE_OUT}
+}
+
 function main() {
 assert_outside_chroot
 
@@ -356,6 +361,8 @@ function main() {
 remote_reboot
 fi
 
+local initial_root_dev=$(find_root_dev)
+
 if [ -z "${FLAGS_update_url}" ]; then
 # Start local devserver if no update url specified.
 start_dev_server
@@ -386,6 +393,13 @@ function main() {
 remote_sh "grep ^CHROMEOS_RELEASE_DESCRIPTION= /etc/lsb-release"
 if [ ${FLAGS_verify} -eq ${FLAGS_TRUE} ]; then
 verify_image
+
+if [ "${initial_root_dev}" == "$(find_root_dev)" ]; then
+# At this point, the software version didn't change, but we didn't
+# switch partitions either. Means it was an update to the same version
+# that failed.
+die "The root partition did NOT change. The update failed."
+fi
 else
 local release_description=$(echo ${REMOTE_OUT} | cut -d '=' -f 2)
 info "Update was successful and rebooted to $release_description"
|
|||||||
|
|
||||||
umount -d "$mount_point"
|
umount -d "$mount_point"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Copy a partition from one image to another.
|
||||||
|
image_partition_copy() {
|
||||||
|
local src="$1"
|
||||||
|
local srcpart="$2"
|
||||||
|
local dst="$3"
|
||||||
|
local dstpart="$4"
|
||||||
|
|
||||||
|
local srcoffset=$(image_part_offset "${src}" "${srcpart}")
|
||||||
|
local dstoffset=$(image_part_offset "${dst}" "${dstpart}")
|
||||||
|
local length=$(image_part_size "${src}" "${srcpart}")
|
||||||
|
local dstlength=$(image_part_size "${dst}" "${dstpart}")
|
||||||
|
|
||||||
|
if [ "${length}" -gt "${dstlength}" ]; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
image_dump_partition "${src}" "${srcpart}" |
|
||||||
|
dd of="${dst}" bs=512 seek="${dstoffset}" conv=notrunc
|
||||||
|
}
|
||||||
|
@ -70,16 +70,23 @@ function start_kvm() {
|
|||||||
snapshot="-snapshot"
|
snapshot="-snapshot"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
local net_option="-net nic,model=virtio"
|
||||||
|
if [ -f "$(dirname $1)/.use_e1000" ]; then
|
||||||
|
info "Detected older image, using e1000 instead of virtio."
|
||||||
|
net_option="-net nic,model=e1000"
|
||||||
|
fi
|
||||||
|
|
||||||
sudo kvm -m 1024 \
|
sudo kvm -m 1024 \
|
||||||
-vga std \
|
-vga std \
|
||||||
-pidfile "${KVM_PID_FILE}" \
|
-pidfile "${KVM_PID_FILE}" \
|
||||||
-daemonize \
|
-daemonize \
|
||||||
-net nic,model=virtio \
|
${net_option} \
|
||||||
${nographics} \
|
${nographics} \
|
||||||
${snapshot} \
|
${snapshot} \
|
||||||
-net user,hostfwd=tcp::${FLAGS_ssh_port}-:22 \
|
-net user,hostfwd=tcp::${FLAGS_ssh_port}-:22 \
|
||||||
-hda "${1}"
|
-hda "${1}"
|
||||||
|
|
||||||
|
info "KVM started with pid stored in ${KVM_PID_FILE}"
|
||||||
LIVE_VM_IMAGE="${1}"
|
LIVE_VM_IMAGE="${1}"
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
@ -34,6 +34,11 @@ DEFINE_string release "" \
|
|||||||
"Directory and file containing release image: /path/chromiumos_image.bin"
|
"Directory and file containing release image: /path/chromiumos_image.bin"
|
||||||
DEFINE_string subfolder "" \
|
DEFINE_string subfolder "" \
|
||||||
"If set, the name of the subfolder to put the payload items inside"
|
"If set, the name of the subfolder to put the payload items inside"
|
||||||
|
DEFINE_string diskimg "" \
|
||||||
|
"If set, the name of the diskimage file to output"
|
||||||
|
DEFINE_boolean preserve ${FLAGS_FALSE} \
|
||||||
|
"If set, reuse the diskimage file, if available"
|
||||||
|
DEFINE_integer sectors 31277232 "Size of image in sectors"
|
||||||
|
|
||||||
# Parse command line
|
# Parse command line
|
||||||
FLAGS "$@" || exit 1
|
FLAGS "$@" || exit 1
|
||||||
@ -80,6 +85,35 @@ FACTORY_DIR="$(dirname "${FLAGS_factory}")"
|
|||||||
RELEASE_IMAGE="$(basename "${FLAGS_release}")"
|
RELEASE_IMAGE="$(basename "${FLAGS_release}")"
|
||||||
FACTORY_IMAGE="$(basename "${FLAGS_factory}")"
|
FACTORY_IMAGE="$(basename "${FLAGS_factory}")"
|
||||||
|
|
||||||
|
prepare_img() {
|
||||||
|
local outdev="$FLAGS_diskimg"
|
||||||
|
local sectors="$FLAGS_sectors"
|
||||||
|
local force_full="true"
|
||||||
|
|
||||||
|
# We'll need some code to put in the PMBR, for booting on legacy BIOS.
|
||||||
|
echo "Fetch PMBR"
|
||||||
|
local pmbrcode="$(mktemp -d)/gptmbr.bin"
|
||||||
|
sudo dd bs=512 count=1 if="${FLAGS_release}" of="${pmbrcode}" status=noxfer
|
||||||
|
|
||||||
|
echo "Prepare base disk image"
|
||||||
|
# Create an output file if requested, or if none exists.
|
||||||
|
if [ -b "${outdev}" ] ; then
|
||||||
|
echo "Using block device ${outdev}"
|
||||||
|
elif [ ! -e "${outdev}" -o \
|
||||||
|
"$(stat -c %s ${outdev})" != "$(( ${sectors} * 512 ))" -o \
|
||||||
|
"$FLAGS_preserve" = "$FLAGS_FALSE" ]; then
|
||||||
|
echo "Generating empty image file"
|
||||||
|
image_dump_partial_file /dev/zero 0 "${sectors}" |
|
||||||
|
dd of="${outdev}" bs=8M
|
||||||
|
else
|
||||||
|
echo "Reusing $outdev"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create GPT partition table.
|
||||||
|
install_gpt "${outdev}" 0 0 "${pmbrcode}" 0 "${force_full}"
|
||||||
|
# Activate the correct partition.
|
||||||
|
cgpt add -i 2 -S 1 -P 1 "${outdev}"
|
||||||
|
}
|
||||||
|
|
||||||
prepare_omaha() {
|
prepare_omaha() {
|
||||||
sudo rm -rf "${OMAHA_DATA_DIR}/rootfs-test.gz"
|
sudo rm -rf "${OMAHA_DATA_DIR}/rootfs-test.gz"
|
||||||
@ -145,9 +179,6 @@ compress_and_hash_partition() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# Clean up stale config and data files.
|
|
||||||
prepare_omaha
|
|
||||||
|
|
||||||
# Decide if we should unpack partition
|
# Decide if we should unpack partition
|
||||||
if image_has_part_tools; then
|
if image_has_part_tools; then
|
||||||
IMAGE_IS_UNPACKED=
|
IMAGE_IS_UNPACKED=
|
||||||
@ -159,6 +190,46 @@ else
|
|||||||
IMAGE_IS_UNPACKED=1
|
IMAGE_IS_UNPACKED=1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
generate_img() {
|
||||||
|
local outdev="$FLAGS_diskimg"
|
||||||
|
local sectors="$FLAGS_sectors"
|
||||||
|
|
||||||
|
prepare_img
|
||||||
|
|
||||||
|
# Get the release image.
|
||||||
|
pushd "${RELEASE_DIR}" >/dev/null
|
||||||
|
|
||||||
|
echo "Release Kernel"
|
||||||
|
image_partition_copy "${RELEASE_IMAGE}" 2 "${outdev}" 4
|
||||||
|
|
||||||
|
echo "Release Rootfs"
|
||||||
|
image_partition_copy "${RELEASE_IMAGE}" 3 "${outdev}" 5
|
||||||
|
|
||||||
|
echo "OEM parition"
|
||||||
|
image_partition_copy "${RELEASE_IMAGE}" 8 "${outdev}" 8
|
||||||
|
|
||||||
|
popd >/dev/null
|
||||||
|
|
||||||
|
# Go to retrieve the factory test image.
|
||||||
|
pushd "${FACTORY_DIR}" >/dev/null
|
||||||
|
|
||||||
|
echo "Factory Kernel"
|
||||||
|
image_partition_copy "${FACTORY_IMAGE}" 2 "${outdev}" 2
|
||||||
|
echo "Factory Rootfs"
|
||||||
|
image_partition_copy "${FACTORY_IMAGE}" 3 "${outdev}" 3
|
||||||
|
echo "Factory Stateful"
|
||||||
|
image_partition_copy "${FACTORY_IMAGE}" 1 "${outdev}" 1
|
||||||
|
echo "EFI Partition"
|
||||||
|
image_partition_copy "${FACTORY_IMAGE}" 12 "${outdev}" 12
|
||||||
|
|
||||||
|
echo "Generated Image at $outdev."
|
||||||
|
echo "Done"
|
||||||
|
}
|
||||||
|
|
||||||
|
generate_omaha() {
|
||||||
|
# Clean up stale config and data files.
|
||||||
|
prepare_omaha
|
||||||
|
|
||||||
# Get the release image.
|
# Get the release image.
|
||||||
pushd "${RELEASE_DIR}" >/dev/null
|
pushd "${RELEASE_DIR}" >/dev/null
|
||||||
echo "Generating omaha release image from ${FLAGS_release}"
|
echo "Generating omaha release image from ${FLAGS_release}"
|
||||||
@ -183,10 +254,6 @@ oem_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 8 "oem.gz")"
|
|||||||
mv oem.gz "${OMAHA_DATA_DIR}"
|
mv oem.gz "${OMAHA_DATA_DIR}"
|
||||||
echo "oem: ${oem_hash}"
|
echo "oem: ${oem_hash}"
|
||||||
|
|
||||||
efi_hash="$(compress_and_hash_partition "${RELEASE_IMAGE}" 12 "efi.gz")"
|
|
||||||
mv efi.gz "${OMAHA_DATA_DIR}"
|
|
||||||
echo "efi: ${efi_hash}"
|
|
||||||
|
|
||||||
popd >/dev/null
|
popd >/dev/null
|
||||||
|
|
||||||
# Go to retrieve the factory test image.
|
# Go to retrieve the factory test image.
|
||||||
@ -208,6 +275,10 @@ state_hash="$(compress_and_hash_partition "${FACTORY_IMAGE}" 1 "state.gz")"
|
|||||||
mv state.gz "${OMAHA_DATA_DIR}"
|
mv state.gz "${OMAHA_DATA_DIR}"
|
||||||
echo "state: ${state_hash}"
|
echo "state: ${state_hash}"
|
||||||
|
|
||||||
|
efi_hash="$(compress_and_hash_partition "${FACTORY_IMAGE}" 12 "efi.gz")"
|
||||||
|
mv efi.gz "${OMAHA_DATA_DIR}"
|
||||||
|
echo "efi: ${efi_hash}"
|
||||||
|
|
||||||
popd >/dev/null
|
popd >/dev/null
|
||||||
|
|
||||||
if [ -n "${FLAGS_firmware_updater}" ]; then
|
if [ -n "${FLAGS_firmware_updater}" ]; then
|
||||||
@ -226,7 +297,8 @@ fi
|
|||||||
# append another config.
|
# append another config.
|
||||||
if [ -n "${FLAGS_subfolder}" ] &&
|
if [ -n "${FLAGS_subfolder}" ] &&
|
||||||
[ -f "${OMAHA_CONF}" ]; then
|
[ -f "${OMAHA_CONF}" ]; then
|
||||||
# Remove the ']' from the last line of the file so we can add another config.
|
# Remove the ']' from the last line of the file
|
||||||
|
# so we can add another config.
|
||||||
while [ -s "${OMAHA_CONF}" ]; do
|
while [ -s "${OMAHA_CONF}" ]; do
|
||||||
# If the last line is null
|
# If the last line is null
|
||||||
if [ -z "$(tail -1 "${OMAHA_CONF}")" ]; then
|
if [ -z "$(tail -1 "${OMAHA_CONF}")" ]; then
|
||||||
@ -285,3 +357,11 @@ To validate the configutarion, run:
|
|||||||
--validate_factory_config
|
--validate_factory_config
|
||||||
To run the server:
|
To run the server:
|
||||||
python2.6 devserver.py --factory_config miniomaha.conf"
|
python2.6 devserver.py --factory_config miniomaha.conf"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main
|
||||||
|
if [ -n "$FLAGS_diskimg" ]; then
|
||||||
|
generate_img
|
||||||
|
else
|
||||||
|
generate_omaha
|
||||||
|
fi
|
||||||
|
@@ -20,8 +20,8 @@ patch -d "${ROOT_FS_DIR}" -Np1 <<EOF
 diff -Naur old/etc/init/boot-complete.conf new/etc/init/boot-complete.conf
 --- old/etc/init/boot-complete.conf 2010-07-21 11:22:30.000000000 +0800
 +++ new/etc/init/boot-complete.conf 2010-07-21 22:13:36.000000000 +0800
-@@ -7 +7 @@
+@@ -15 +15 @@
--start on login-prompt-ready
+-start on login-prompt-visible
 +start on started udev
 EOF
 
@@ -31,7 +31,7 @@ cat >"${ROOT_FS_DIR}/etc/init/factory.conf" <<EOF
 # found in the LICENSE file.
 
 description "Chrome OS factory startup stub"
-author "chromium-os-dev@googlegroups.com"
+author "chromium-os-dev@chromium.org"
 
 start on stopped udev-addon
 stop on starting halt or starting reboot
@@ -55,6 +55,9 @@ cat >"${ROOT_FS_DIR}/etc/init/factorylog.conf" <<EOF
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+description "Print Chrome OS factory log to tty3"
+author "chromium-os-dev@chromium.org"
+
 start on started factory
 stop on starting halt or starting reboot
 
parallel_emerge (143 lines changed)
@@ -76,7 +76,8 @@ if "PORTAGE_USERNAME" not in os.environ:
 from _emerge.actions import adjust_configs
 from _emerge.actions import load_emerge_config
 from _emerge.create_depgraph_params import create_depgraph_params
-from _emerge.depgraph import backtrack_depgraph
+from _emerge.depgraph import depgraph as emerge_depgraph
+from _emerge.depgraph import _frozen_depgraph_config
 from _emerge.main import emerge_main
 from _emerge.main import parse_opts
 from _emerge.Package import Package
@@ -479,24 +480,9 @@ class DepGraphGenerator(object):
 cur_iuse, now_use, now_iuse)
 return not flags
 
-def GenDependencyTree(self, remote_pkgs):
+def CreateDepgraph(self, emerge, packages):
-"""Get dependency tree info from emerge.
+"""Create an emerge depgraph object."""
-
-TODO(): Update cros_extract_deps to also use this code.
-Returns:
-Dependency tree
-"""
-start = time.time()
-
 # Setup emerge options.
-#
-# We treat dependency info a bit differently than emerge itself. Unless
-# you're using --usepkgonly, we disable --getbinpkg and --usepkg here so
-# that emerge will look at the dependencies of the source ebuilds rather
-# than the binary dependencies. This helps ensure that we have the option
-# of merging a package from source, if we want to switch to it with
-# --workon and the dependencies have changed.
-emerge = self.emerge
 emerge_opts = emerge.opts.copy()
 
 # Enable --emptytree so that we get the full tree, which we need for
@@ -507,12 +493,86 @@ class DepGraphGenerator(object):
 emerge_opts["--tree"] = True
 emerge_opts["--emptytree"] = True
 
-# Tell emerge not to worry about use flags yet. We handle those inside
-# parallel_emerge itself. Further, when we use the --force-remote-binary
-# flag, we don't emerge to reject a package just because it has different
-# use flags.
-emerge_opts.pop("--newuse", None)
-emerge_opts.pop("--reinstall", None)
+# Set up parameters.
+params = create_depgraph_params(emerge_opts, emerge.action)
+frozen_config = _frozen_depgraph_config(emerge.settings, emerge.trees,
+emerge_opts, emerge.spinner)
+backtrack_max = emerge_opts.get('--backtrack', 5)
+runtime_pkg_mask = None
+allow_backtracking = backtrack_max > 0
+
+# Try up to backtrack_max times to create a working depgraph. Each time we
+# run into a conflict, mask the offending package and try again.
+# TODO(davidjames): When Portage supports --force-remote-binary directly,
+# switch back to using the backtrack_depgraph function.
+for i in range(backtrack_max + 1):
+if i == backtrack_max:
+# Looks like we hit the backtracking limit. Run the dependency
+# calculation one more time (from scratch) to show the original error
+# message.
+runtime_pkg_mask = None
+allow_backtracking = False
+
+# Create a depgraph object.
+depgraph = emerge_depgraph(emerge.settings, emerge.trees, emerge_opts,
+params, emerge.spinner, frozen_config=frozen_config,
+allow_backtracking=allow_backtracking,
+runtime_pkg_mask=runtime_pkg_mask)
+
+if i == 0:
+for cpv in self.forced_remote_binary_packages:
+# If --force-remote-binary was specified, we want to use this package
+# regardless of its use flags. Unfortunately, Portage doesn't support
+# ignoring use flags for just one package. To convince Portage to
+# install the package, we trick Portage into thinking the package has
+# the right use flags.
+# TODO(davidjames): Update Portage to support --force-remote-binary
+# directly, so that this hack isn't necessary.
+pkg = depgraph._pkg(cpv, "binary", emerge.root_config)
+pkgsettings = frozen_config.pkgsettings[pkg.root]
+pkgsettings.setcpv(pkg)
+pkg.use.enabled = pkgsettings["PORTAGE_USE"].split()
+
+# Select the packages we want.
+success, favorites = depgraph.select_files(packages)
+if success:
+break
+elif depgraph.need_restart():
+# Looks like we found some packages that can't be installed due to
+# conflicts. Try again, masking out the conflicting packages.
+runtime_pkg_mask = depgraph.get_runtime_pkg_mask()
+elif allow_backtracking and i > 0:
+# Looks like we tried all the possible combinations, and we still can't
+# solve the graph. Stop backtracking, so that we can report an error
+# message.
+runtime_pkg_mask = None
+allow_backtracking = False
+else:
+break
+
+# Delete the --tree option, because we don't really want to display a
+# tree. We just wanted to get emerge to leave uninstall instructions on
+# the graph. Later, when we display the graph, we'll want standard-looking
+# output, so removing the --tree option is important.
+frozen_config.myopts.pop("--tree", None)
+
+emerge.depgraph = depgraph
+
+# Is it impossible to honor the user's request? Bail!
+if not success:
+depgraph.display_problems()
+sys.exit(1)
+
+def GenDependencyTree(self, remote_pkgs):
+"""Get dependency tree info from emerge.
+
+TODO(): Update cros_extract_deps to also use this code.
+Returns:
+Dependency tree
+"""
+start = time.time()
+
+emerge = self.emerge
+
 # Create a list of packages to merge
 packages = set(emerge.cmdline_packages[:])
@@ -527,9 +587,17 @@ class DepGraphGenerator(object):
 full_pkgname in self.force_remote_binary):
 forced_pkgs.setdefault(full_pkgname, []).append(pkg)
 
+# Add forced binary packages to the dependency list. This is necessary
+# to ensure that the install plan contains the right package.
+#
+# Putting the forced binary package at the beginning of the list is an
+# optimization that helps avoid unnecessary backtracking (e.g., if
+# Portage first selects the wrong version, and then backtracks later, it
+# takes a bit longer and uses up an unnecessary backtrack iteration.)
+packages = list(packages)
 for pkgs in forced_pkgs.values():
 forced_package = portage.versions.best(pkgs)
-packages.add("=%s" % forced_package)
+packages.insert(0, "=%s" % forced_package)
 self.forced_remote_binary_packages.add(forced_package)
 
 # Tell emerge to be quiet. We print plenty of info ourselves so we don't
@@ -544,18 +612,8 @@ class DepGraphGenerator(object):
 if "--quiet" not in emerge.opts:
 print "Calculating deps..."
 
-# Ask portage to build a dependency graph. with the options we specified
-# above.
-params = create_depgraph_params(emerge_opts, emerge.action)
-success, depgraph, _ = backtrack_depgraph(
-emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
-packages, emerge.spinner)
-emerge.depgraph = depgraph
-
-# Is it impossible to honor the user's request? Bail!
-if not success:
-depgraph.display_problems()
-sys.exit(1)
+self.CreateDepgraph(emerge, packages)
+depgraph = emerge.depgraph
 
 # Build our own tree from the emerge digraph.
 deps_tree = {}
@@ -604,11 +662,6 @@ class DepGraphGenerator(object):
 vardb = frozen_config.trees[root]["vartree"].dbapi
 pkgsettings = frozen_config.pkgsettings[root]
 
-# It's time to start worrying about use flags, if necessary.
-for flag in ("--newuse", "--reinstall"):
-if flag in emerge.opts:
-emerge_opts[flag] = emerge.opts[flag]
-
 deps_info = {}
 for pkg in depgraph.altlist():
 if isinstance(pkg, Package):
@@ -636,12 +689,6 @@ class DepGraphGenerator(object):
 deps_info[str(pkg.cpv)] = {"idx": len(deps_info),
 "optional": optional}
 
-# Delete the --tree option, because we don't really want to display a
-# tree. We just wanted to get emerge to leave uninstall instructions on
-# the graph. Later, when we display the graph, we'll want standard-looking
-# output, so removing the --tree option is important.
-frozen_config.myopts.pop("--tree", None)
-
 seconds = time.time() - start
 if "--quiet" not in emerge.opts:
 print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)
@@ -14,6 +14,8 @@
 
 DEFINE_string board "" "Override board reported by target"
 DEFINE_string partition "" "Override kernel partition reported by target"
+DEFINE_boolean modules false "Update modules on target"
+DEFINE_boolean firmware false "Update firmware on target"
 
 function cleanup {
 cleanup_remote_access
@@ -74,6 +76,28 @@ function main() {
 
 remote_sh dd if=/tmp/new_kern.bin of="${FLAGS_partition}"
 
+if [[ ${FLAGS_modules} -eq ${FLAGS_TRUE} ]]; then
+echo "copying modules"
+cmd="tar -C /build/${FLAGS_board}/lib/modules -cjf new_modules.tar ."
+./enter_chroot.sh -- ${cmd}
+
+remote_cp_to new_modules.tar /tmp/
+
+remote_sh mount -o remount,rw /
+remote_sh tar -C /lib/modules -xjf /tmp/new_modules.tar
+fi
+
+if [[ ${FLAGS_firmware} -eq ${FLAGS_TRUE} ]]; then
+echo "copying firmware"
+cmd="tar -C /build/${FLAGS_board}/lib/firmware -cjf new_firmware.tar ."
+./enter_chroot.sh -- ${cmd}
+
+remote_cp_to new_firmware.tar /tmp/
+
+remote_sh mount -o remount,rw /
+remote_sh tar -C /lib/firmware -xjf /tmp/new_firmware.tar
+fi
+
 remote_reboot
 
 remote_sh uname -r -v