Merge branch 'master' of ssh://gitrw.chromium.org:9222/crosutils
commit 6c74d5f8f3
@ -194,6 +194,8 @@ def _UprevFromRevisionList(buildroot, tracking_branch, revision_list, board):
   package_str = package_str.strip()
 
   cwd = os.path.join(buildroot, 'src', 'scripts')
+  # TODO(davidjames): --foo="bar baz" only works here because we're using
+  # enter_chroot.
   RunCommand(['./cros_mark_as_stable',
               '--board=%s' % board,
               '--tracking_branch="%s"' % tracking_branch,
@ -205,6 +207,8 @@ def _UprevFromRevisionList(buildroot, tracking_branch, revision_list, board):
 def _UprevAllPackages(buildroot, tracking_branch, board):
   """Uprevs all packages that have been updated since last uprev."""
   cwd = os.path.join(buildroot, 'src', 'scripts')
+  # TODO(davidjames): --foo="bar baz" only works here because we're using
+  # enter_chroot.
   RunCommand(['./cros_mark_as_stable', '--all',
               '--board=%s' % board,
               '--tracking_branch="%s"' % tracking_branch, 'commit'],
@ -230,7 +234,7 @@ def _GitCleanup(buildroot, board, tracking_branch):
   if os.path.exists(cwd):
     RunCommand(['./cros_mark_as_stable', '--srcroot=..',
                 '--board=%s' % board,
-                '--tracking_branch="%s"' % tracking_branch, 'clean'],
+                '--tracking_branch=%s' % tracking_branch, 'clean'],
                 cwd=cwd, error_ok=True)
 
 
@ -391,9 +395,9 @@ def _UprevPush(buildroot, tracking_branch, board, overlays):
   overlays = [public_overlay, private_overlay]
   RunCommand(['./cros_mark_as_stable', '--srcroot=..',
               '--board=%s' % board,
-              '--overlays="%s"' % " ".join(overlays),
-              '--tracking_branch="%s"' % tracking_branch,
-              '--push_options="--bypass-hooks -f"', 'push'],
+              '--overlays=%s' % " ".join(overlays),
+              '--tracking_branch=%s' % tracking_branch,
+              '--push_options=--bypass-hooks -f', 'push'],
               cwd=cwd)
 
 
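The quote removals and the new TODO comments above both come down to how RunCommand handles its argument list. When an argv list is passed straight to exec with no shell in between, embedded double quotes are not stripped; they reach the program as literal characters, so '--tracking_branch="%s"' only behaves as intended on the enter_chroot path, where the command line is presumably re-parsed by a shell inside the chroot. A small standalone illustration, not taken from the build scripts themselves:

# Illustration only: list-style arguments bypass the shell, so embedded
# quotes are passed through verbatim.
import subprocess
import sys

# The quotes become part of the argument value:
subprocess.call([sys.executable, '-c', 'import sys; print(sys.argv[1])',
                 '--tracking_branch="origin/master"'])
# prints: --tracking_branch="origin/master"

# Without embedded quotes the value arrives clean, even though it contains
# spaces, because the list element is already a single argv entry:
subprocess.call([sys.executable, '-c', 'import sys; print(sys.argv[1])',
                 '--push_options=--bypass-hooks -f'])
# prints: --push_options=--bypass-hooks -f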
@ -63,6 +63,21 @@ class AUTest(object):
 
     return int(percent_passed)
 
+  # TODO(sosa) - Remove try and convert function to DeltaUpdateImage().
+  def TryDeltaAndFallbackToFull(self, src_image, image, stateful_change='old'):
+    """Tries the delta update first if set and falls back to full update."""
+    if self.use_delta_updates:
+      try:
+        self.source_image = src_image
+        self.UpdateImage(image)
+      except:
+        Warning('Delta update failed, disabling delta updates and retrying.')
+        self.use_delta_updates = False
+        self.source_image = ''
+        self.UpdateImage(image)
+    else:
+      self.UpdateImage(image)
+
   def PrepareBase(self):
     """Prepares target with base_image_path."""
     pass
@ -130,28 +145,14 @@ class AUTest(object):
     # with the dev channel.
     percent_passed = self.VerifyImage(10)
 
-    if self.use_delta_updates: self.source_image = base_image_path
-
     # Update to - all tests should pass on new image.
     Info('Updating from base image on vm to target image.')
-    try:
-      self.UpdateImage(target_image_path)
-    except:
-      if self.use_delta_updates:
-        Warning('Delta update failed, disabling delta updates and retrying.')
-        self.use_delta_updates = False
-        self.source_image = ''
-        self.UpdateImage(target_image_path)
-      else:
-        raise
-
+    self.TryDeltaAndFallbackToFull(base_image_path, target_image_path)
     self.VerifyImage(100)
 
-    if self.use_delta_updates: self.source_image = target_image_path
-
     # Update from - same percentage should pass that originally passed.
     Info('Updating from updated image on vm back to base image.')
-    self.UpdateImage(base_image_path)
+    self.TryDeltaAndFallbackToFull(target_image_path, base_image_path)
     self.VerifyImage(percent_passed)
 
   def testFullUpdateWipeStateful(self):
@ -167,28 +168,14 @@ class AUTest(object):
     # with the dev channel.
     percent_passed = self.VerifyImage(10)
 
-    if self.use_delta_updates: self.source_image = base_image_path
-
     # Update to - all tests should pass on new image.
     Info('Updating from base image on vm to target image and wiping stateful.')
-    try:
-      self.UpdateImage(target_image_path, 'clean')
-    except:
-      if self.use_delta_updates:
-        Warning('Delta update failed, disabling delta updates and retrying.')
-        self.use_delta_updates = False
-        self.source_image = ''
-        self.UpdateImage(target_image_path)
-      else:
-        raise
-
+    self.TryDeltaAndFallbackToFull(base_image_path, target_image_path, 'clean')
     self.VerifyImage(100)
 
-    if self.use_delta_updates: self.source_image = target_image_path
-
     # Update from - same percentage should pass that originally passed.
     Info('Updating from updated image back to base image and wiping stateful.')
-    self.UpdateImage(base_image_path, 'clean')
+    self.TryDeltaAndFallbackToFull(target_image_path, base_image_path, 'clean')
     self.VerifyImage(percent_passed)
 
 
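Both test hunks above swap their inline try/except blocks for calls to the new TryDeltaAndFallbackToFull helper. The following is a minimal, self-contained sketch of what that fallback does at runtime; the FakeHarness class is hypothetical and the original's bare except is narrowed to Exception here:

# Hypothetical stand-in for AUTest, just enough to trace the fallback path.
class FakeHarness(object):
  def __init__(self, fail_delta):
    self.use_delta_updates = True
    self.source_image = ''
    self.calls = []
    self._fail_delta = fail_delta

  def UpdateImage(self, image):
    # Record (source_image, image) so the call sequence is visible.
    self.calls.append((self.source_image, image))
    if self._fail_delta and self.source_image:
      raise RuntimeError('simulated delta failure')

  def TryDeltaAndFallbackToFull(self, src_image, image):
    # Same control flow as the new AUTest helper, minus logging.
    if self.use_delta_updates:
      try:
        self.source_image = src_image
        self.UpdateImage(image)
      except Exception:
        self.use_delta_updates = False
        self.source_image = ''
        self.UpdateImage(image)
    else:
      self.UpdateImage(image)

harness = FakeHarness(fail_delta=True)
harness.TryDeltaAndFallbackToFull('base_image.bin', 'target_image.bin')
print(harness.calls)
# [('base_image.bin', 'target_image.bin'), ('', 'target_image.bin')]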
bin/cros_run_parallel_vm_tests (new symbolic link, 1 line)
@ -0,0 +1 @@
cros_run_parallel_vm_tests.py
bin/cros_run_parallel_vm_tests.py (new executable file, 163 lines)
@ -0,0 +1,163 @@
#!/usr/bin/python
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs tests on VMs in parallel."""

import optparse
import os
import subprocess
import sys
import tempfile

sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from cros_build_lib import Die
from cros_build_lib import Info


_DEFAULT_BASE_SSH_PORT = 9222

class ParallelTestRunner(object):
  """Runs tests on VMs in parallel.

  This class is a simple wrapper around cros_run_vm_test that provides an easy
  way to spawn several test instances in parallel and aggregate the results when
  the tests complete.
  """

  def __init__(self, tests, base_ssh_port=_DEFAULT_BASE_SSH_PORT, board=None,
               image_path=None, order_output=False, results_dir_root=None):
    """Constructs and initializes the test runner class.

    Args:
      tests: A list of test names (see run_remote_tests.sh).
      base_ssh_port: The base SSH port. Spawned VMs listen to localhost SSH
        ports incrementally allocated starting from the base one.
      board: The target board. If none, cros_run_vm_tests will use the default
        board.
      image_path: Full path to the VM image. If none, cros_run_vm_tests will use
        the latest image.
      order_output: If True, output of individual VMs will be piped to
        temporary files and emitted at the end.
      results_dir_root: The results directory root. If provided, the results
        directory root for each test will be created under it with the SSH port
        appended to the test name.
    """
    self._tests = tests
    self._base_ssh_port = base_ssh_port
    self._board = board
    self._image_path = image_path
    self._order_output = order_output
    self._results_dir_root = results_dir_root

  def _SpawnTests(self):
    """Spawns VMs and starts the test runs on them.

    Runs all tests in |self._tests|. Each test is executed on a separate VM.

    Returns:
      A list of test process info objects containing the following dictionary
      entries:
        'test': the test name;
        'proc': the Popen process instance for this test run.
    """
    ssh_port = self._base_ssh_port
    spawned_tests = []
    # Test runs shouldn't need anything from stdin. However, it seems that
    # running with stdin leaves the terminal in a bad state so redirect from
    # /dev/null.
    dev_null = open('/dev/null')
    for test in self._tests:
      args = [ os.path.join(os.path.dirname(__file__), 'cros_run_vm_test'),
               '--snapshot', # The image is shared so don't modify it.
               '--no_graphics',
               '--ssh_port=%d' % ssh_port,
               '--test_case=%s' % test ]
      if self._board: args.append('--board=%s' % self._board)
      if self._image_path: args.append('--image_path=%s' % self._image_path)
      if self._results_dir_root:
        args.append('--results_dir_root=%s/%s.%d' %
                    (self._results_dir_root, test, ssh_port))
      Info('Running %r...' % args)
      output = None
      if self._order_output:
        output = tempfile.NamedTemporaryFile(prefix='parallel_vm_test_')
        Info('Piping output to %s.' % output.name)
      proc = subprocess.Popen(args, stdin=dev_null, stdout=output,
                              stderr=output)
      test_info = { 'test': test,
                    'proc': proc,
                    'output': output }
      spawned_tests.append(test_info)
      ssh_port = ssh_port + 1
    return spawned_tests

  def _WaitForCompletion(self, spawned_tests):
    """Waits for tests to complete and returns a list of failed tests.

    If the test output was piped to a file, dumps the file contents to stdout.

    Args:
      spawned_tests: A list of test info objects (see _SpawnTests).

    Returns:
      A list of failed test names.
    """
    failed_tests = []
    for test_info in spawned_tests:
      proc = test_info['proc']
      proc.wait()
      if proc.returncode: failed_tests.append(test_info['test'])
      output = test_info['output']
      if output:
        test = test_info['test']
        Info('------ START %s:%s ------' % (test, output.name))
        output.seek(0)
        for line in output:
          print line,
        Info('------ END %s:%s ------' % (test, output.name))
    return failed_tests

  def Run(self):
    """Runs the tests in |self._tests| on separate VMs in parallel."""
    spawned_tests = self._SpawnTests()
    failed_tests = self._WaitForCompletion(spawned_tests)
    if failed_tests: Die('Tests failed: %r' % failed_tests)


def main():
  usage = 'Usage: %prog [options] tests...'
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('--base_ssh_port', type='int',
                    default=_DEFAULT_BASE_SSH_PORT,
                    help='Base SSH port. Spawned VMs listen to localhost SSH '
                    'ports incrementally allocated starting from the base one. '
                    '[default: %default]')
  parser.add_option('--board',
                    help='The target board. If none specified, '
                    'cros_run_vm_test will use the default board.')
  parser.add_option('--image_path',
                    help='Full path to the VM image. If none specified, '
                    'cros_run_vm_test will use the latest image.')
  parser.add_option('--order_output', action='store_true', default=False,
                    help='Rather than emitting interleaved progress output '
                    'from the individual VMs, accumulate the outputs in '
                    'temporary files and dump them at the end.')
  parser.add_option('--results_dir_root',
                    help='Root results directory. If none specified, each test '
                    'will store its results in a separate /tmp directory.')
  (options, args) = parser.parse_args()

  if not args:
    parser.print_help()
    Die('no tests provided')

  runner = ParallelTestRunner(args, options.base_ssh_port, options.board,
                              options.image_path, options.order_output,
                              options.results_dir_root)
  runner.Run()


if __name__ == '__main__':
  main()
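As a rough usage sketch, not part of the change itself: the runner can be driven through the flags defined in main() or by using the class directly. The test names and paths below are illustrative, and the import assumes the bin directory is on sys.path:

# Hypothetical invocation of the new runner; test names and paths are made up.
# Command-line equivalent:
#   bin/cros_run_parallel_vm_tests --order_output \
#       --results_dir_root=/tmp/parallel_results test_one test_two
from cros_run_parallel_vm_tests import ParallelTestRunner

runner = ParallelTestRunner(['test_one', 'test_two'],
                            order_output=True,
                            results_dir_root='/tmp/parallel_results')
runner.Run()  # Calls Die() with the list of failed tests if any VM run fails.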
@ -494,7 +494,9 @@ def main(argv):
   _BuildEBuildDictionary(overlays, gflags.FLAGS.all, package_list)
 
   for overlay, ebuilds in overlays.items():
-    if not os.path.exists(overlay): continue
+    if not os.path.exists(overlay):
+      Warning("Skipping %s" % overlay)
+      continue
     os.chdir(overlay)
 
     if command == 'clean':
@ -65,7 +65,7 @@ if [ -b "$FLAGS_image" ]; then
 else
   max_kern_size=32768
   dd if=/dev/zero of="${FLAGS_image}" bs=512 count=0 \
-    seek=$((1 + max_kern_size + header_offset + stateful_sectors))
+    seek=$((1 + max_kern_size + (2 * header_offset) + stateful_sectors))
   sudo=""
 fi
 
@ -99,7 +99,7 @@ kernel_sectors=$((kernel_bytes / 512))
 kernel_sectors=$(roundup $kernel_sectors)
 
 $sudo $GPT create $FLAGS_image
-trap "rm $FLAGS_image" ERR
+trap "rm $FLAGS_image; echo 'An error occurred! Rerun with -v for details.'" ERR
 
 offset=$header_offset
 $sudo $GPT add -b $offset -s $stateful_sectors \
@ -119,4 +119,4 @@ $sudo $GPT boot -p -b "$PMBRCODE" -i 1 $FLAGS_image 1>&2
 
 $sudo $GPT show $FLAGS_image
 
-echo "Done."
+echo "Emitted $FLAGS_image successfully!"
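The seek change above grows the sparse image by one extra header_offset worth of sectors, presumably to leave room for the backup GPT structures at the end of the disk; the hunk itself only shows the arithmetic. A quick size check in Python with illustrative values (only max_kern_size=32768 comes from the script):

# Back-of-the-envelope image size check; header_offset and stateful_sectors
# are hypothetical values, not read from the script or its flags.
SECTOR_BYTES = 512
max_kern_size = 32768                 # sectors, from the hunk above
header_offset = 34                    # hypothetical GPT header + partition table
stateful_sectors = 2 * 1024 * 1024    # hypothetical 1 GiB stateful partition

old_sectors = 1 + max_kern_size + header_offset + stateful_sectors
new_sectors = 1 + max_kern_size + (2 * header_offset) + stateful_sectors
print('old image: %d sectors (%.1f MiB)' %
      (old_sectors, old_sectors * SECTOR_BYTES / 1048576.0))
print('new image: %d sectors (%.1f MiB)' %
      (new_sectors, new_sectors * SECTOR_BYTES / 1048576.0))
# The difference is exactly header_offset sectors of extra room at the end.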
@ -136,7 +136,7 @@ function main() {
   if [[ ${INSIDE_CHROOT} -eq 0 ]]; then
     if [[ -n "${FLAGS_results_dir_root}" ]]; then
       TMP=${FLAGS_chroot}${FLAGS_results_dir_root}
-      mkdir -m 777 ${TMP}
+      mkdir -p -m 777 ${TMP}
     else
       TMP=$(mktemp -d ${FLAGS_chroot}/tmp/run_remote_tests.XXXX)
     fi
@ -144,7 +144,7 @@ function main() {
   else
     if [[ -n "${FLAGS_results_dir_root}" ]]; then
       TMP=${FLAGS_results_dir_root}
-      mkdir -m 777 ${TMP}
+      mkdir -p -m 777 ${TMP}
     else
       TMP=$(mktemp -d /tmp/run_remote_tests.XXXX)
     fi
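The -p flag makes both branches tolerant of result directories whose parents do not exist yet, for example the nested <root>/<test>.<port> paths that the new parallel runner requests (and presumably forwards down to this script). A short Python analogue of the same fix, with an illustrative path:

import os

# Plain mkdir (os.mkdir) fails when intermediate directories are missing;
# mkdir -p (os.makedirs) creates the whole chain in one call.
os.makedirs('/tmp/parallel_results/test_one.9222', 0o777)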