Remove testing utilities and fix symlinks.

For run_remote_tests this required "fixing" restart_in_chroot_if_needed.  Right now that function only works for scripts being called from src/scripts.  This change makes it more generic so it works for any script called from within our source tree.

I borrowed reinterpret_path_for_chroot from image_to_live; a separate CL will remove it from there.

Change-Id: If717beccd777ac178366d58b91521b9a62d55d85

BUG=chromium-os:11172
TEST=Ran them

Review URL: http://codereview.chromium.org/6730012
Chris Sosa 2011-03-24 16:06:59 -07:00
parent afa1b4ca27
commit fd2cdec118
10 changed files with 38 additions and 1223 deletions
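
For context, the calling pattern that restart_in_chroot_if_needed expects is unchanged; what changes is that the calling script may now live anywhere in the source tree. A minimal, hypothetical sketch of such a caller (the path, file name, and relative location of common.sh are illustrative only, not part of this change):

#!/bin/bash
# Hypothetical script at src/platform/crostestutils/example_test.sh,
# i.e. inside the source tree but outside src/scripts.
SCRIPT_ROOT="$(dirname "$(readlink -f "$0")")/../../scripts"
. "${SCRIPT_ROOT}/common.sh" || (echo "Unable to load common.sh" && exit 1)

# With this change common.sh derives the in-chroot path from the script's
# location in the source tree, so the re-exec below works from here too.
restart_in_chroot_if_needed "$@"

main() {
  info "Now running inside the chroot as $0"
}
main "$@"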


@@ -1 +1 @@
cros_run_parallel_vm_tests.py
../../platform/crostestutils/utils_py/cros_run_parallel_vm_tests.py


@@ -1,175 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs tests on VMs in parallel."""
import optparse
import os
import subprocess
import sys
import tempfile
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from cros_build_lib import Die
from cros_build_lib import Info
_DEFAULT_BASE_SSH_PORT = 9222
class ParallelTestRunner(object):
"""Runs tests on VMs in parallel.
This class is a simple wrapper around cros_run_vm_test that provides an easy
way to spawn several test instances in parallel and aggregate the results when
the tests complete. Only uses emerged autotest packages, as trying to pull
from the caller's source tree creates races that cause tests to fail.
"""
def __init__(self, tests, base_ssh_port=_DEFAULT_BASE_SSH_PORT, board=None,
image_path=None, order_output=False, quiet=False,
results_dir_root=None):
"""Constructs and initializes the test runner class.
Args:
tests: A list of test names (see run_remote_tests.sh).
base_ssh_port: The base SSH port. Spawned VMs listen to localhost SSH
ports incrementally allocated starting from the base one.
board: The target board. If none, cros_run_vm_test will use the default
board.
image_path: Full path to the VM image. If none, cros_run_vm_test will use
the latest image.
order_output: If True, output of individual VMs will be piped to
temporary files and emitted at the end.
quiet: Emits no output from the VMs. Forces --order_output to be false,
and requires specifying --results_dir_root
results_dir_root: The results directory root. If provided, the results
directory root for each test will be created under it with the SSH port
appended to the test name.
"""
self._tests = tests
self._base_ssh_port = base_ssh_port
self._board = board
self._image_path = image_path
self._order_output = order_output
self._quiet = quiet
self._results_dir_root = results_dir_root
def _SpawnTests(self):
"""Spawns VMs and starts the test runs on them.
Runs all tests in |self._tests|. Each test is executed on a separate VM.
Returns:
A list of test process info objects containing the following dictionary
entries:
'test': the test name;
'proc': the Popen process instance for this test run.
"""
ssh_port = self._base_ssh_port
spawned_tests = []
for test in self._tests:
args = [ os.path.join(os.path.dirname(__file__), 'cros_run_vm_test'),
'--snapshot', # The image is shared so don't modify it.
'--no_graphics',
'--use_emerged',
'--ssh_port=%d' % ssh_port ]
if self._board: args.append('--board=%s' % self._board)
if self._image_path: args.append('--image_path=%s' % self._image_path)
results_dir = None
if self._results_dir_root:
results_dir = '%s/%s.%d' % (self._results_dir_root, test, ssh_port)
args.append('--results_dir_root=%s' % results_dir)
args.append(test)
Info('Running %r...' % args)
output = None
if self._quiet:
output = open('/dev/null', mode='w')
Info('Log files are in %s' % results_dir)
elif self._order_output:
output = tempfile.NamedTemporaryFile(prefix='parallel_vm_test_')
Info('Piping output to %s.' % output.name)
proc = subprocess.Popen(args, stdout=output, stderr=output)
test_info = { 'test': test,
'proc': proc,
'output': output }
spawned_tests.append(test_info)
ssh_port = ssh_port + 1
return spawned_tests
def _WaitForCompletion(self, spawned_tests):
"""Waits for tests to complete and returns a list of failed tests.
If the test output was piped to a file, dumps the file contents to stdout.
Args:
spawned_tests: A list of test info objects (see _SpawnTests).
Returns:
A list of failed test names.
"""
failed_tests = []
for test_info in spawned_tests:
proc = test_info['proc']
proc.wait()
if proc.returncode: failed_tests.append(test_info['test'])
output = test_info['output']
if output and not self._quiet:
test = test_info['test']
Info('------ START %s:%s ------' % (test, output.name))
output.seek(0)
for line in output:
print line,
Info('------ END %s:%s ------' % (test, output.name))
return failed_tests
def Run(self):
"""Runs the tests in |self._tests| on separate VMs in parallel."""
spawned_tests = self._SpawnTests()
failed_tests = self._WaitForCompletion(spawned_tests)
if failed_tests: Die('Tests failed: %r' % failed_tests)
def main():
usage = 'Usage: %prog [options] tests...'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--base_ssh_port', type='int',
default=_DEFAULT_BASE_SSH_PORT,
help='Base SSH port. Spawned VMs listen to localhost SSH '
'ports incrementally allocated starting from the base one. '
'[default: %default]')
parser.add_option('--board',
help='The target board. If none specified, '
'cros_run_vm_test will use the default board.')
parser.add_option('--image_path',
help='Full path to the VM image. If none specified, '
'cros_run_vm_test will use the latest image.')
parser.add_option('--order_output', action='store_true', default=False,
help='Rather than emitting interleaved progress output '
'from the individual VMs, accumulate the outputs in '
'temporary files and dump them at the end.')
parser.add_option('--quiet', action='store_true', default=False,
help='Emits no output from the VMs. Forces --order_output '
'to be false, and requires specifying --results_dir_root.')
parser.add_option('--results_dir_root',
help='Root results directory. If none specified, each test '
'will store its results in a separate /tmp directory.')
(options, args) = parser.parse_args()
if not args:
parser.print_help()
Die('no tests provided')
if options.quiet:
options.order_output = False
if not options.results_dir_root:
Die('--quiet requires --results_dir_root')
runner = ParallelTestRunner(args, options.base_ssh_port, options.board,
options.image_path, options.order_output,
options.quiet, options.results_dir_root)
runner.Run()
if __name__ == '__main__':
main()


@@ -1,110 +0,0 @@
#!/bin/bash
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Runs a given test case under a VM.
# --- BEGIN COMMON.SH BOILERPLATE ---
# Load common CrOS utilities. Inside the chroot this file is installed in
# /usr/lib/crosutils. Outside the chroot we find it relative to the script's
# location.
find_common_sh() {
local common_paths=(/usr/lib/crosutils "$(dirname "$(readlink -f "$0")")/..")
local path
SCRIPT_ROOT=
for path in "${common_paths[@]}"; do
if [ -r "${path}/common.sh" ]; then
SCRIPT_ROOT=${path}
break
fi
done
}
find_common_sh
. "${SCRIPT_ROOT}/common.sh" || (echo "Unable to load common.sh" && exit 1)
# --- END COMMON.SH BOILERPLATE ---
. "${SCRIPT_ROOT}/lib/cros_vm_lib.sh" || die "Unable to load cros_vm_lib.sh"
. "${SCRIPT_ROOT}/lib/cros_vm_constants.sh" || \
die "Unable to load cros_vm_constants.sh"
get_default_board
DEFINE_string board "$DEFAULT_BOARD" \
"The board for which you built autotest." b
DEFINE_string image_path "" "Full path of the VM image"
DEFINE_string results_dir_root "" "alternate root results directory"
DEFINE_string test_case "" "Name of the test case to run"
DEFINE_boolean use_emerged ${FLAGS_FALSE} \
"Force use of emerged autotest packages"
DEFINE_string verify_chrome_version "" \
"Verify that this chrome version matches that on vm."
set -e
# Returns normally if the given $1 is a valid chrome version.
chrome_version_is_valid() {
local chrome_version="$1"
echo ${chrome_version} | egrep '^[0-9]+.[0-9]+.[0-9]+.[0-9]+$' &> /dev/null
}
# Parse command line.
FLAGS "$@" || exit 1
# Use latest if not specified.
if [ -z "${FLAGS_image_path}" ]; then
LATEST_IMAGE="$(${SCRIPTS_DIR}/get_latest_image.sh \
--board=${FLAGS_board})/${DEFAULT_QEMU_IMAGE}"
info "Using latest vm image ${LATEST_IMAGE}"
FLAGS_image_path=${LATEST_IMAGE}
fi
[ -e "${FLAGS_image_path}" ] || die "Image ${FLAGS_image_path} does not exist."
if [ -n "${FLAGS_test_case}" ]; then
warn "Use of --test_case=<test> is being deprecated. Just pass test names \
as separate command line arguments."
fi
if [ -z "${FLAGS_test_case}" ] && [ -z "${FLAGS_ARGV}" ]; then
die "You must specify a test case."
fi
USE_EMERGED=
if [[ ${FLAGS_use_emerged} -eq ${FLAGS_TRUE} ]]; then
USE_EMERGED="--use_emerged"
fi
tests=( )
[ -n "${FLAGS_test_case}" ] && tests=( "${FLAGS_test_case}" )
for test in ${FLAGS_ARGV}; do
tests=( "${tests[@]}" "$(remove_quotes "${test}")" )
done
trap stop_kvm EXIT
start_kvm "${FLAGS_image_path}"
retry_until_ssh
if [ -n "${FLAGS_verify_chrome_version}" ]; then
info "Verifying version of Chrome matches what we expect."
if chrome_version_is_valid "${FLAGS_verify_chrome_version}"; then
chrome_version_on_vm=$("${SCRIPTS_DIR}/bin/cros_get_chrome_version" \
--remote=127.0.0.1 \
--ssh_port=${FLAGS_ssh_port})
[[ ${chrome_version_on_vm} == ${FLAGS_verify_chrome_version} ]] || \
warn "CHROME_VERSION is no longer set.This check will be removed"
else
warn "${FLAGS_verify_chrome_version} is not a valid Chrome version"
fi
fi
"${SCRIPTS_DIR}/run_remote_tests.sh" \
--board=${FLAGS_board} \
--ssh_port=${FLAGS_ssh_port} \
--remote=127.0.0.1 \
--results_dir_root="${FLAGS_results_dir_root}" \
${USE_EMERGED} \
"${tests[@]}"

bin/cros_run_vm_test (symbolic link)

@@ -0,0 +1 @@
../../platform/crostestutils/cros_run_vm_test


@@ -1,79 +0,0 @@
#!/bin/bash
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Updates an existing vm image with another image.
# --- BEGIN COMMON.SH BOILERPLATE ---
# Load common CrOS utilities. Inside the chroot this file is installed in
# /usr/lib/crosutils. Outside the chroot we find it relative to the script's
# location.
find_common_sh() {
local common_paths=(/usr/lib/crosutils "$(dirname "$(readlink -f "$0")")/..")
local path
SCRIPT_ROOT=
for path in "${common_paths[@]}"; do
if [ -r "${path}/common.sh" ]; then
SCRIPT_ROOT=${path}
break
fi
done
}
find_common_sh
. "${SCRIPT_ROOT}/common.sh" || (echo "Unable to load common.sh" && exit 1)
# --- END COMMON.SH BOILERPLATE ---
. "${SCRIPT_ROOT}/lib/cros_vm_lib.sh" || die "Unable to load cros_vm_lib.sh"
DEFINE_string payload "" "Full name of the payload to update with."
DEFINE_string proxy_port "" \
"Have the client request from this proxy instead of devserver."
DEFINE_string src_image "" \
"Create a delta update by passing in the image on the remote machine."
DEFINE_string stateful_update_flag "" "Flags to pass to stateful update." s
DEFINE_string image "" "Path of the image to update to." u
DEFINE_string update_log "update_engine.log" \
"Path to log for the update_engine."
DEFINE_string update_url "" "Full url of an update image."
DEFINE_string vm_image_path "" "Path of the VM image to update from." v
set -e
# Parse command line.
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
[ -n "${FLAGS_vm_image_path}" ] || \
die "You must specify a path to a vm image."
trap stop_kvm EXIT
start_kvm "${FLAGS_vm_image_path}"
retry_until_ssh
if [ -n "${FLAGS_image}" ]; then
IMAGE_ARGS="--image=$(readlink -f ${FLAGS_image})"
fi
if [ -n "${FLAGS_payload}" ]; then
IMAGE_ARGS="--payload=${FLAGS_payload}"
fi
if [ -n "${FLAGS_proxy_port}" ]; then
IMAGE_ARGS="${IMAGE_ARGS} --proxy_port=${FLAGS_proxy_port}"
fi
"${SCRIPTS_DIR}/image_to_live.sh" \
--for_vm \
--remote=127.0.0.1 \
--ssh_port=${FLAGS_ssh_port} \
--stateful_update_flag=${FLAGS_stateful_update_flag} \
--src_image="${FLAGS_src_image}" \
--update_log="${FLAGS_update_log}" \
--update_url="${FLAGS_update_url}" \
--verify \
${IMAGE_ARGS}

bin/cros_run_vm_update (symbolic link)

@@ -0,0 +1 @@
../../platform/crostestutils/cros_run_vm_update


@@ -305,11 +305,10 @@ function make_pkg_common {
 function restart_in_chroot_if_needed {
   # NB: Pass in ARGV: restart_in_chroot_if_needed "$@"
   if [ $INSIDE_CHROOT -ne 1 ]; then
-    local abspath=$(readlink -f "$0")
-    # strip everything up to (and including) /scripts/ from abspath
-    local path_from_scripts="${abspath##*/scripts/}"
+    # Get inside_chroot path for script.
+    local chroot_path="$(reinterpret_path_for_chroot "$0")"
     exec $SCRIPTS_DIR/enter_chroot.sh -- \
-      "$CHROOT_TRUNK_DIR/src/scripts/$path_from_scripts" "$@"
+      "$chroot_path" "$@"
   fi
 }
@@ -696,3 +695,31 @@ check_for_tool() {
exit 1
fi
}
# Reinterprets path from outside the chroot for use inside.
# Returns "" if "" given.
# $1 - The path to reinterpret.
function reinterpret_path_for_chroot() {
if [ $INSIDE_CHROOT -ne 1 ]; then
if [ -z "${1}" ]; then
echo ""
else
local path_abs_path=$(readlink -f "${1}")
local gclient_root_abs_path=$(readlink -f "${GCLIENT_ROOT}")
# Strip the repository root from the path.
local relative_path=$(echo ${path_abs_path} \
| sed s:${gclient_root_abs_path}/::)
if [ "${relative_path}" = "${path_abs_path}" ]; then
die "Error reinterpreting path. Path ${1} is not within source tree."
fi
# Prepend the chroot repository path.
echo "/home/${USER}/trunk/${relative_path}"
fi
else
# Path is already inside the chroot :).
echo "${1}"
fi
}
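
As a concrete illustration of the mapping the new helper performs (the paths below are hypothetical examples; the /home/${USER}/trunk prefix is the chroot mount point hard-coded above):

# Outside the chroot, with GCLIENT_ROOT=/home/build/chromiumos:
reinterpret_path_for_chroot \
    /home/build/chromiumos/src/platform/crostestutils/run_remote_tests.sh
# prints /home/${USER}/trunk/src/platform/crostestutils/run_remote_tests.sh

reinterpret_path_for_chroot /etc/passwd
# dies: "Error reinterpreting path. Path /etc/passwd is not within source tree."

# Inside the chroot the argument is simply echoed back unchanged.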


@@ -1,132 +0,0 @@
#!/bin/bash
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script builds and runs Chromium OS unit tests. Note that this script
# utilizes the src_test stanza in chromeos-base packages. These stanzas
# should both build and run the unit tests.
# This script requires that you run build_packages first.
# --- BEGIN COMMON.SH BOILERPLATE ---
# Load common CrOS utilities. Inside the chroot this file is installed in
# /usr/lib/crosutils. Outside the chroot we find it relative to the script's
# location.
find_common_sh() {
local common_paths=(/usr/lib/crosutils $(dirname "$(readlink -f "$0")"))
local path
SCRIPT_ROOT=
for path in "${common_paths[@]}"; do
if [ -r "${path}/common.sh" ]; then
SCRIPT_ROOT=${path}
break
fi
done
}
find_common_sh
. "${SCRIPT_ROOT}/common.sh" || (echo "Unable to load common.sh" && exit 1)
# --- END COMMON.SH BOILERPLATE ---
get_default_board
# Flags
DEFINE_string board "${DEFAULT_BOARD}" \
"Target board of which tests were built"
DEFINE_string build_root "${DEFAULT_BUILD_ROOT}" \
"Root of build output"
DEFINE_string package_file "" \
"File with space-separated list of packages to run unit tests" f
DEFINE_string packages "" \
"Optional space-separated list of packages to run unit tests" p
DEFINE_boolean withdebug "${FLAGS_TRUE}" \
"Build debug versions of Chromium-OS-specific packages."
# Total count of packages with unit tests.
TEST_COUNT=0
# Number of unit test failures.
TEST_FAILURES=0
# List of packages with no unit tests.
NO_UNITTESTS=""
# List of packages that have unit test failures.
FAILED_PACKAGES=""
function check_src_test() {
egrep '^src_test()' "${1}" > /dev/null
}
function record_test_failure() {
TEST_FAILURES=$(( TEST_FAILURES + 1 ))
FAILED_PACKAGES="${FAILED_PACKAGES} ${1}"
}
function run_unit_test() {
FEATURES="-buildpkg -digest noauto" \
ebuild-${FLAGS_board} "${1}" clean unpack prepare configure test
}
# Parse command line and die if unexpected parameters given.
FLAGS_HELP="usage: ${0} [flags]"
FLAGS "${@}" || exit 1
eval set -- "${FLAGS_ARGV}"
check_flags_only_and_allow_null_arg "${@}" && set --
set -e
[ -z "${FLAGS_board}" ] && die "--board required"
# Create package list from package file and list of packages.
if [ -n "${FLAGS_package_file}" ]; then
if [ -f "${FLAGS_package_file}" ]; then
PACKAGE_LIST="$(cat ${FLAGS_package_file})"
else
warn "Missing package file."
fi
fi
[ -n "${FLAGS_packages}" ] && PACKAGE_LIST="${PACKAGE_LIST} ${FLAGS_packages}"
# If we didn't specify packages, find all packages.
if [ -z "${FLAGS_package_file}" -a -z "${FLAGS_packages}" ]; then
PACKAGE_LIST=$( ./get_package_list chromeos --board="${FLAGS_board}" |
egrep '^chromeos-base' )
fi
BLACK_LIST_FILE="${SCRIPTS_DIR}/unit_test_black_list.txt"
if [ "${FLAGS_withdebug}" -eq "${FLAGS_FALSE}" ]; then
export USE="${USE} -cros-debug"
fi
for package in ${PACKAGE_LIST}; do
if grep -xq "${package}" "${BLACK_LIST_FILE}"; then
warn "Skipping package ${package} since it is blacklisted."
continue
fi
EBUILD_PATH=$( equery-${FLAGS_board} which ${package} 2> /dev/null ) || \
warn "${package} not found"
if [ -n "${EBUILD_PATH}" ]; then
if check_src_test "${EBUILD_PATH}"; then
run_unit_test "${EBUILD_PATH}" || record_test_failure "${package}"
else
NO_UNITTESTS="${NO_UNITTESTS} ${package}"
fi
TEST_COUNT=$(( TEST_COUNT + 1 ))
fi
done
if [ -n "${NO_UNITTESTS}" ]; then
warn "The following packages have no unit tests:"
warn "${NO_UNITTESTS}"
fi
if [ ${TEST_FAILURES} -ne 0 ]; then
error "Run completed with ${TEST_FAILURES}/${TEST_COUNT} test failures."
error "Following packages had failing tests:"
die "${FAILED_PACKAGES}"
else
info "All unit tests passed."
fi

cros_run_unit_tests (symbolic link)

@@ -0,0 +1 @@
../platform/crostestutils/cros_run_unit_tests


@@ -1 +1 @@
generate_test_report.py
../platform/crostestutils/utils_py/generate_test_report.py


@@ -1,327 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses and displays the contents of one or more autoserv result directories.
This script parses the contents of one or more autoserv results folders and
generates test reports.
"""
import glob
import optparse
import os
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
from cros_build_lib import Color, Die
_STDOUT_IS_TTY = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
# List of crashes which are okay to ignore. This list should almost always be
# empty. If you add an entry, mark it with a TODO(<your name>) and the issue
# filed for the crash.
_CRASH_WHITELIST = {}
class ReportGenerator(object):
"""Collects and displays data from autoserv results directories.
This class collects status and performance data from one or more autoserv
result directories and generates test reports.
"""
_KEYVAL_INDENT = 2
def __init__(self, options, args):
self._options = options
self._args = args
self._color = Color(options.color)
def _CollectPerf(self, testdir):
"""Parses keyval file under testdir.
If testdir contains a result folder, process the keyval file and return
a dictionary of perf keyval pairs.
Args:
testdir: The autoserv test result directory.
Returns:
If the perf option is disabled or there's no keyval file under
testdir, returns an empty dictionary. Otherwise, returns a dictionary of
parsed keyvals. Duplicate keys are uniquified by their instance number.
"""
perf = {}
if not self._options.perf:
return perf
keyval_file = os.path.join(testdir, 'results', 'keyval')
if not os.path.isfile(keyval_file):
return perf
instances = {}
for line in open(keyval_file):
match = re.search(r'^(.+){perf}=(.+)$', line)
if match:
key = match.group(1)
val = match.group(2)
# If the same key name was generated multiple times, uniquify all
# instances other than the first one by adding the instance count
# to the key name.
key_inst = key
instance = instances.get(key, 0)
if instance:
key_inst = '%s{%d}' % (key, instance)
instances[key] = instance + 1
perf[key_inst] = val
return perf
def _CollectResult(self, testdir):
"""Adds results stored under testdir to the self._results dictionary.
If testdir contains 'status.log' or 'status' files, assume it's a test
result directory and add the results data to the self._results dictionary.
The test directory name is used as a key into the results dictionary.
Args:
testdir: The autoserv test result directory.
"""
status_file = os.path.join(testdir, 'status.log')
if not os.path.isfile(status_file):
status_file = os.path.join(testdir, 'status')
if not os.path.isfile(status_file):
return
# Remove false positives that are missing a debug dir.
if not os.path.exists(os.path.join(testdir, 'debug')):
return
status_raw = open(status_file, 'r').read()
status = 'FAIL'
if (re.search(r'GOOD.+completed successfully', status_raw) and
not re.search(r'ABORT|ERROR|FAIL|TEST_NA', status_raw)):
status = 'PASS'
perf = self._CollectPerf(testdir)
if testdir.startswith(self._options.strip):
testdir = testdir.replace(self._options.strip, '', 1)
crashes = []
regex = re.compile('Received crash notification for ([-\w]+).+ (sig \d+)')
for match in regex.finditer(status_raw):
if (match.group(1) in _CRASH_WHITELIST and
match.group(2) in _CRASH_WHITELIST[match.group(1)]):
continue
crashes.append('%s %s' % match.groups())
self._results[testdir] = {'crashes': crashes,
'status': status,
'perf': perf}
def _CollectResultsRec(self, resdir):
"""Recursively collect results into the self._results dictionary.
Args:
resdir: results/test directory to parse results from and recurse into.
"""
self._CollectResult(resdir)
for testdir in glob.glob(os.path.join(resdir, '*')):
self._CollectResultsRec(testdir)
def _CollectResults(self):
"""Parses results into the self._results dictionary.
Initializes a dictionary (self._results) with test folders as keys and
result data (status, perf keyvals) as values.
"""
self._results = {}
for resdir in self._args:
if not os.path.isdir(resdir):
Die('\'%s\' does not exist' % resdir)
self._CollectResultsRec(resdir)
if not self._results:
Die('no test directories found')
def GetTestColumnWidth(self):
"""Returns the test column width based on the test data.
Aligns the test results by formatting the test directory entry based on
the longest test directory or perf key string stored in the self._results
dictionary.
Returns:
The width for the test column.
"""
width = len(max(self._results, key=len))
for result in self._results.values():
perf = result['perf']
if perf:
perf_key_width = len(max(perf, key=len))
width = max(width, perf_key_width + self._KEYVAL_INDENT)
return width + 1
def _GenerateReportText(self):
"""Prints a result report to stdout.
Prints a result table to stdout. Each row of the table contains the test
result directory and the test result (PASS, FAIL). If the perf option is
enabled, each test entry is followed by perf keyval entries from the test
results.
"""
tests = self._results.keys()
tests.sort()
tests_with_errors = []
width = self.GetTestColumnWidth()
line = ''.ljust(width + 5, '-')
crashes = {}
tests_pass = 0
print line
for test in tests:
# Emit the test/status entry first
test_entry = test.ljust(width)
result = self._results[test]
status_entry = result['status']
if status_entry == 'PASS':
color = Color.GREEN
tests_pass += 1
else:
color = Color.RED
tests_with_errors.append(test)
status_entry = self._color.Color(color, status_entry)
print test_entry + status_entry
# Emit the perf keyvals entries. There will be no entries if the
# --no-perf option is specified.
perf = result['perf']
perf_keys = perf.keys()
perf_keys.sort()
for perf_key in perf_keys:
perf_key_entry = perf_key.ljust(width - self._KEYVAL_INDENT)
perf_key_entry = perf_key_entry.rjust(width)
perf_value_entry = self._color.Color(Color.BOLD, perf[perf_key])
print perf_key_entry + perf_value_entry
# Ignore top-level entry, since it's just a combination of all the
# individual results.
if result['crashes'] and test != tests[0]:
for crash in result['crashes']:
if not crash in crashes:
crashes[crash] = set([])
crashes[crash].add(test)
print line
total_tests = len(tests)
percent_pass = 100 * tests_pass / total_tests
pass_str = '%d/%d (%d%%)' % (tests_pass, total_tests, percent_pass)
print 'Total PASS: ' + self._color.Color(Color.BOLD, pass_str)
if self._options.crash_detection:
print ''
if crashes:
print self._color.Color(Color.RED, 'Crashes detected during testing:')
print line
for crash_name, crashed_tests in sorted(crashes.iteritems()):
print self._color.Color(Color.RED, crash_name)
for crashed_test in crashed_tests:
print ' '*self._KEYVAL_INDENT + crashed_test
print line
print 'Total unique crashes: ' + self._color.Color(Color.BOLD,
str(len(crashes)))
else:
print self._color.Color(Color.GREEN,
'No crashes detected during testing.')
# Print out error log for failed tests.
if self._options.print_debug:
for test in tests_with_errors:
debug_file_regex = os.path.join(self._options.strip, test, 'debug',
'%s*.ERROR' % os.path.basename(test))
for path in glob.glob(debug_file_regex):
try:
fh = open(path)
print >> sys.stderr, (
'\n========== ERROR FILE %s FOR TEST %s ==============\n' % (
path, test))
out = fh.read()
while out:
print >> sys.stderr, out
out = fh.read()
print >> sys.stderr, (
'\n=========== END ERROR FILE %s FOR TEST %s ===========\n' % (
path, test))
fh.close()
except:
print 'Could not open %s' % path
# Sometimes the builders exit before these buffers are flushed.
sys.stderr.flush()
sys.stdout.flush()
def Run(self):
"""Runs report generation."""
self._CollectResults()
self._GenerateReportText()
for v in self._results.itervalues():
if v['status'] != 'PASS' or (self._options.crash_detection
and v['crashes']):
sys.exit(1)
def main():
usage = 'Usage: %prog [options] result-directories...'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--color', dest='color', action='store_true',
default=_STDOUT_IS_TTY,
help='Use color for text reports [default if TTY stdout]')
parser.add_option('--no-color', dest='color', action='store_false',
help='Don\'t use color for text reports')
parser.add_option('--no-crash-detection', dest='crash_detection',
action='store_false', default=True,
help='Don\'t report crashes or error out when detected')
parser.add_option('--perf', dest='perf', action='store_true',
default=True,
help='Include perf keyvals in the report [default]')
parser.add_option('--no-perf', dest='perf', action='store_false',
help='Don\'t include perf keyvals in the report')
parser.add_option('--strip', dest='strip', type='string', action='store',
default='results.',
help='Strip a prefix from test directory names'
' [default: \'%default\']')
parser.add_option('--no-strip', dest='strip', const='', action='store_const',
help='Don\'t strip a prefix from test directory names')
parser.add_option('--no-debug', dest='print_debug', action='store_false',
default=True,
help='Don\'t print out logs when tests fail.')
(options, args) = parser.parse_args()
if not args:
parser.print_help()
Die('no result directories provided')
generator = ReportGenerator(options, args)
generator.Run()
if __name__ == '__main__':
main()


@@ -1,340 +0,0 @@
#!/bin/bash
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to run client or server tests on a live remote image.
# Load common constants. This should be the first executable line.
# The path to common.sh should be relative to your script's location.
# --- BEGIN COMMON.SH BOILERPLATE ---
# Load common CrOS utilities. Inside the chroot this file is installed in
# /usr/lib/crosutils. Outside the chroot we find it relative to the script's
# location.
find_common_sh() {
local common_paths=(/usr/lib/crosutils $(dirname "$(readlink -f "$0")"))
local path
SCRIPT_ROOT=
for path in "${common_paths[@]}"; do
if [ -r "${path}/common.sh" ]; then
SCRIPT_ROOT=${path}
break
fi
done
}
find_common_sh
. "${SCRIPT_ROOT}/common.sh" || (echo "Unable to load common.sh" && exit 1)
# --- END COMMON.SH BOILERPLATE ---
. "${SCRIPT_ROOT}/remote_access.sh" || die "Unable to load remote_access.sh"
DEFINE_string args "" \
"Command line arguments for test. Quoted and space separated if multiple." a
DEFINE_string board "" \
"The board for which you are building autotest"
DEFINE_boolean build ${FLAGS_FALSE} "Build tests while running" b
DEFINE_string chroot "${DEFAULT_CHROOT_DIR}" "alternate chroot location" c
DEFINE_boolean cleanup ${FLAGS_FALSE} "Clean up temp directory"
DEFINE_integer iterations 1 "Iterations to run every top level test" i
DEFINE_string results_dir_root "" "alternate root results directory"
DEFINE_boolean verbose ${FLAGS_FALSE} "Show verbose autoserv output" v
DEFINE_boolean use_emerged ${FLAGS_FALSE} \
"Force use of emerged autotest packages"
RAN_ANY_TESTS=${FLAGS_FALSE}
function stop_ssh_agent() {
# Call this function from the exit trap of the main script.
# Iff we started ssh-agent, be nice and clean it up.
# Note, only works if called from the main script - no subshells.
if [[ 1 -eq ${OWN_SSH_AGENT} ]]; then
kill ${SSH_AGENT_PID} 2>/dev/null
unset OWN_SSH_AGENT SSH_AGENT_PID SSH_AUTH_SOCK
fi
}
function start_ssh_agent() {
local tmp_private_key=$TMP/autotest_key
if [ -z "$SSH_AGENT_PID" ]; then
eval $(ssh-agent)
OWN_SSH_AGENT=1
else
OWN_SSH_AGENT=0
fi
cp $FLAGS_private_key $tmp_private_key
chmod 0400 $tmp_private_key
ssh-add $tmp_private_key
}
function cleanup() {
# Always remove the build path in case it was used.
[[ -n "${BUILD_DIR}" ]] && sudo rm -rf "${BUILD_DIR}"
if [[ $FLAGS_cleanup -eq ${FLAGS_TRUE} ]] || \
[[ ${RAN_ANY_TESTS} -eq ${FLAGS_FALSE} ]]; then
rm -rf "${TMP}"
else
ln -nsf "${TMP}" /tmp/run_remote_tests.latest
echo ">>> Details stored under ${TMP}"
fi
stop_ssh_agent
cleanup_remote_access
}
# Determine if a control is for a client or server test. Echos
# either "server" or "client".
# Arguments:
# $1 - control file path
function read_test_type() {
local control_file=$1
# Assume a line starts with TEST_TYPE =
local test_type=$(egrep -m1 \
'^[[:space:]]*TEST_TYPE[[:space:]]*=' "${control_file}")
if [[ -z "${test_type}" ]]; then
die "Unable to find TEST_TYPE line in ${control_file}"
fi
test_type=$(python -c "${test_type}; print TEST_TYPE.lower()")
if [[ "${test_type}" != "client" ]] && [[ "${test_type}" != "server" ]]; then
die "Unknown type of test (${test_type}) in ${control_file}"
fi
echo ${test_type}
}
function create_tmp() {
# Set global TMP for remote_access.sh's sake
# and if --results_dir_root is specified,
# set TMP and create dir appropriately
if [[ -n "${FLAGS_results_dir_root}" ]]; then
TMP=${FLAGS_results_dir_root}
mkdir -p -m 777 ${TMP}
else
TMP=$(mktemp -d /tmp/run_remote_tests.XXXX)
fi
}
function prepare_build_env() {
info "Pilfering toolchain shell environment from Portage."
local ebuild_dir="${TMP}/chromeos-base/autotest-build"
mkdir -p "${ebuild_dir}"
local E_only="autotest-build-9999.ebuild"
cat > "${ebuild_dir}/${E_only}" <<EOF
inherit toolchain-funcs
SLOT="0"
EOF
local E="chromeos-base/autotest-build/${E_only}"
"ebuild-${FLAGS_board}" --skip-manifest "${ebuild_dir}/${E_only}" \
clean unpack 2>&1 > /dev/null
local P_tmp="/build/${FLAGS_board}/tmp/portage/"
local E_dir="${E%%/*}/${E_only%.*}"
export BUILD_ENV="${P_tmp}/${E_dir}/temp/environment"
}
function autodetect_build() {
if [ ${FLAGS_use_emerged} -eq ${FLAGS_TRUE} ]; then
AUTOTEST_DIR="/build/${FLAGS_board}/usr/local/autotest"
FLAGS_build=${FLAGS_FALSE}
if [ ! -d "${AUTOTEST_DIR}" ]; then
die \
"Could not find pre-installed autotest, you need to emerge-${FLAGS_board} \
autotest autotest-tests (or use --build)."
fi
info \
"As requested, using emerged autotests already installed at ${AUTOTEST_DIR}."
return
fi
if [ ${FLAGS_build} -eq ${FLAGS_FALSE} ] &&
cros_workon --board=${FLAGS_board} list |
grep -q autotest; then
AUTOTEST_DIR="${SRC_ROOT}/third_party/autotest/files"
FLAGS_build=${FLAGS_TRUE}
if [ ! -d "${AUTOTEST_DIR}" ]; then
die \
"Detected cros_workon autotest but ${AUTOTEST_DIR} does not exist. Run \
repo sync autotest."
fi
info \
"Detected cros_workon autotests. Building and running your autotests from \
${AUTOTEST_DIR}. To use emerged autotest, pass --use_emerged."
return
fi
# flag use_emerged should be false once the code reaches here.
if [ ${FLAGS_build} -eq ${FLAGS_TRUE} ]; then
AUTOTEST_DIR="${SRC_ROOT}/third_party/autotest/files"
if [ ! -d "${AUTOTEST_DIR}" ]; then
die \
"Build flag was turned on but ${AUTOTEST_DIR} is not found. Run cros_workon \
start autotest and repo sync to continue."
fi
info "Build and run autotests from ${AUTOTEST_DIR}."
else
AUTOTEST_DIR="/build/${FLAGS_board}/usr/local/autotest"
if [ ! -d "${AUTOTEST_DIR}" ]; then
die \
"Autotest was not emerged. Run emerge-${FLAGS_board} autotest \
autotest-tests to continue."
fi
info "Using emerged autotests already installed at ${AUTOTEST_DIR}."
fi
}
function main() {
cd "${SCRIPTS_DIR}"
FLAGS "$@" || exit 1
if [[ -z "${FLAGS_ARGV}" ]]; then
echo "Usage: $0 --remote=[hostname] [regexp...]:"
echo "Each regexp pattern must uniquely match a control file. For example:"
echo " $0 --remote=MyMachine BootPerfServer"
exit 1
fi
# Check the validity of the user-specified result directory
# It must be within the /tmp directory
if [[ -n "${FLAGS_results_dir_root}" ]]; then
SUBSTRING=${FLAGS_results_dir_root:0:5}
if [[ ${SUBSTRING} != "/tmp/" ]]; then
echo "User-specified result directory must be within the /tmp directory"
echo "ex: --results_dir_root=/tmp/<result_directory>"
exit 1
fi
fi
set -e
create_tmp
trap cleanup EXIT
remote_access_init
# autotest requires that an ssh-agent already be running
start_ssh_agent
learn_board
autodetect_build
local control_files_to_run=""
local chrome_autotests="${CHROME_ROOT}/src/chrome/test/chromeos/autotest/files"
# Now search for tests which unambiguously include the given identifier
local search_path=$(echo {client,server}/{tests,site_tests})
# Include chrome autotest in the search path
if [ -n "${CHROME_ROOT}" ]; then
search_path="${search_path} ${chrome_autotests}/client/site_tests"
fi
pushd ${AUTOTEST_DIR} > /dev/null
for test_request in $FLAGS_ARGV; do
test_request=$(remove_quotes "${test_request}")
! finds=$(find ${search_path} -maxdepth 2 -type f \( -name control.\* -or \
-name control \) | egrep -v "~$" | egrep "${test_request}")
if [[ -z "${finds}" ]]; then
die "Cannot find match for \"${test_request}\""
fi
local matches=$(echo "${finds}" | wc -l)
if [[ ${matches} -gt 1 ]]; then
echo ">>> \"${test_request}\" is an ambiguous pattern. Disambiguate by" \
"passing one of these patterns instead:"
for FIND in ${finds}; do
echo " ^${FIND}\$"
done
exit 1
fi
for i in $(seq 1 $FLAGS_iterations); do
control_files_to_run="${control_files_to_run} '${finds}'"
done
done
echo ""
if [[ -z "${control_files_to_run}" ]]; then
die "Found no control files"
fi
[ ${FLAGS_build} -eq ${FLAGS_TRUE} ] && prepare_build_env
info "Running the following control files:"
for control_file in ${control_files_to_run}; do
info " * ${control_file}"
done
for control_file in ${control_files_to_run}; do
# Assume a line starts with TEST_TYPE =
control_file=$(remove_quotes "${control_file}")
local test_type=$(read_test_type "${AUTOTEST_DIR}/${control_file}")
# Check if the control file is an absolute path (i.e. chrome autotests case)
if [[ ${control_file:0:1} == "/" ]]; then
test_type=$(read_test_type "${control_file}")
fi
local option
if [[ "${test_type}" == "client" ]]; then
option="-c"
else
option="-s"
fi
echo ""
info "Running ${test_type} test ${control_file}"
local control_file_name=$(basename "${control_file}")
local short_name=$(basename "$(dirname "${control_file}")")
# testName/control --> testName
# testName/control.bvt --> testName.bvt
# testName/control.regression --> testName.regression
# testName/some_control --> testName.some_control
if [[ "${control_file_name}" != control ]]; then
if [[ "${control_file_name}" == control.* ]]; then
short_name=${short_name}.${control_file_name/control./}
else
short_name=${short_name}.${control_file_name}
fi
fi
local results_dir_name="${short_name}"
local results_dir="${TMP}/${results_dir_name}"
rm -rf "${results_dir}"
local verbose=""
if [[ ${FLAGS_verbose} -eq $FLAGS_TRUE ]]; then
verbose="--verbose"
fi
RAN_ANY_TESTS=${FLAGS_TRUE}
# Remove chrome autotest location prefix from control_file if needed
if [[ ${control_file:0:${#chrome_autotests}} == \
"${chrome_autotests}" ]]; then
control_file="${control_file:${#chrome_autotests}+1}"
info "Running chrome autotest ${control_file}"
fi
local autoserv_args="-m ${FLAGS_remote} --ssh-port ${FLAGS_ssh_port} \
${option} ${control_file} -r ${results_dir} ${verbose}"
if [ -n "${FLAGS_args}" ]; then
autoserv_args="${autoserv_args} --args=${FLAGS_args}"
fi
sudo chmod a+w ./server/{tests,site_tests}
echo ./server/autoserv ${autoserv_args}
if [ ${FLAGS_build} -eq ${FLAGS_TRUE} ]; then
# run autoserv in subshell
(. ${BUILD_ENV} && tc-export CC CXX PKG_CONFIG &&
./server/autoserv ${autoserv_args})
else
./server/autoserv ${autoserv_args}
fi
done
popd > /dev/null
echo ""
info "Test results:"
./generate_test_report "${TMP}" --strip="${TMP}/"
print_time_elapsed
}
restart_in_chroot_if_needed "$@"
main "$@"

run_remote_tests.sh (symbolic link)

@@ -0,0 +1 @@
../platform/crostestutils/run_remote_tests.sh


@@ -1,54 +0,0 @@
#!/bin/bash
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Run remote access test to ensure ssh access to a host is working. Exits with
# a code of 0 if successful and non-zero otherwise. Used by test infrastructure
# scripts.
# --- BEGIN COMMON.SH BOILERPLATE ---
# Load common CrOS utilities. Inside the chroot this file is installed in
# /usr/lib/crosutils. Outside the chroot we find it relative to the script's
# location.
find_common_sh() {
local common_paths=(/usr/lib/crosutils $(dirname "$(readlink -f "$0")"))
local path
SCRIPT_ROOT=
for path in "${common_paths[@]}"; do
if [ -r "${path}/common.sh" ]; then
SCRIPT_ROOT=${path}
break
fi
done
}
find_common_sh
. "${SCRIPT_ROOT}/common.sh" || (echo "Unable to load common.sh" && exit 1)
# --- END COMMON.SH BOILERPLATE ---
. "${SCRIPT_ROOT}/remote_access.sh" || die "Unable to load remote_access.sh"
function cleanup {
cleanup_remote_access
rm -rf "${TMP}"
}
function main() {
cd "${SCRIPTS_DIR}"
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
set -e
trap cleanup EXIT
TMP=$(mktemp -d /tmp/ssh_test.XXXX)
remote_access_init
}
main $@

ssh_test.sh (symbolic link)

@@ -0,0 +1 @@
../platform/crostestutils/ssh_test.sh