Move bootperf-bin from src/scripts to src/platform/crostestutils

BUG=chromium-os:16682
TEST=None

Change-Id: I28fe5985b1ca43c8584a84c03ab7d1db844f3092
Reviewed-on: http://gerrit.chromium.org/gerrit/2996
Tested-by: Richard Barnette <jrbarnette@chromium.org>
Reviewed-by: Chris Sosa <sosa@chromium.org>
Reviewed-by: Anush Elangovan <anush@chromium.org>
J. Richard Barnette 2011-06-22 09:39:43 -07:00 committed by Richard Barnette
parent 8d87dbfb28
commit a655cf045d
6 changed files with 0 additions and 805 deletions


@@ -1,244 +0,0 @@
#!/bin/bash
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Wrapper to run the platform_BootPerfServer autotest, and store the
# results for later analysis by the 'showbootdata' script.
#
# NOTE: This script must be run from inside the chromeos build
# chroot environment.
#
# --- BEGIN COMMON.SH BOILERPLATE ---
# Load common CrOS utilities. Inside the chroot this file is installed in
# /usr/lib/crosutils. Outside the chroot we find it relative to the script's
# location.
find_common_sh() {
  local common_paths=(/usr/lib/crosutils "$(dirname "$(readlink -f "$0")")/..")
  local path

  SCRIPT_ROOT=
  for path in "${common_paths[@]}"; do
    if [ -r "${path}/common.sh" ]; then
      SCRIPT_ROOT=${path}
      break
    fi
  done
}

find_common_sh
. "${SCRIPT_ROOT}/common.sh" || (echo "Unable to load common.sh" && exit 1)
# --- END COMMON.SH BOILERPLATE ---
# TODO(jrbarnette) Although run_remote_tests.sh works outside the
# chroot, the log files it produces will be stored inside the
# chroot. So, from outside the chroot, this script doesn't work.
# Yes, I think it's a bug, too. You're welcome to fix it, if you
# think it's so easy. :-)
assert_inside_chroot
DEFINE_string output_dir "" "output directory for results" o
DEFINE_boolean keep_logs "$FLAGS_FALSE" "keep autotest results" k
RUN_TEST="$SCRIPTS_DIR/run_remote_tests.sh"
TEST=server/site_tests/platform_BootPerfServer/control
TMP_RESULTS="/tmp/bootperf.$(date '+%Y%j%H%M').$$"
RESULTS_DIR=platform_BootPerfServer/platform_BootPerfServer/results
RESULTS_KEYVAL=$RESULTS_DIR/keyval
RESULTS_SUMMARY_FILES=(
  $RESULTS_DIR
  platform_BootPerfServer/keyval
  platform_BootPerfServer/platform_BootPerfServer/keyval
  platform_BootPerfServer/platform_BootPerfServer/platform_BootPerf/keyval
  platform_BootPerfServer/platform_BootPerfServer/status
  platform_BootPerfServer/platform_BootPerfServer/status.log
  platform_BootPerfServer/status
  platform_BootPerfServer/status.log
  platform_BootPerfServer/sysinfo/cmdline
  platform_BootPerfServer/sysinfo/cpuinfo
  platform_BootPerfServer/sysinfo/modules
  platform_BootPerfServer/sysinfo/uname
  platform_BootPerfServer/sysinfo/version
)

# Structure of a results directory:
#   $RUNDIR.$ITER/            - directory
#     $RUNDIR_LOG             - file
#     $RUNDIR_SUMMARY/        - directory
#     $RUNDIR_ALL_RESULTS/    - optional directory
#   $KEYVAL_SUMMARY           - file
#
# An example listing appears after the definitions below.  If you add
# any other content under the results directory, you'll probably need
# to change extra_files(), below.
RUNDIR=run
RUNDIR_LOG=log.txt
RUNDIR_SUMMARY=summary
RUNDIR_ALL_RESULTS=logs
KEYVAL_SUMMARY=results_keyval
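
# As an illustration (hypothetical listing), after two invocations of
# this script with <count>=2, the output directory would contain:
#
#   run.000/  run.001/  run.002/  run.003/  results_keyval
#
# where each run.NNN directory holds log.txt and summary/, and
# results_keyval accumulates the keyvals from all runs.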
# Usage/help function. This function is known to the shflags library,
# and mustn't be renamed.
flags_help() {
  cat <<END_USAGE >&2
usage: $(basename $0) [ <options> ] <ip-address> [ <count> ]
Options:
  --output_dir <directory>
  -o <directory>        Specify output directory for results

  --[no]keep_logs
  -k                    Keep [don't keep] autotest log files

Summary:
  Run the platform_BootPerfServer autotest, and store results in the
  given destination directory.  The test target is specified by
  <ip-address>.

  By default, the test is run once; if <count> is given, the test is
  run that many times.  Note that the platform_BootPerfServer test
  reboots the target 10 times, so the total number of reboots will
  be 10*<count>.

  If the destination directory doesn't exist, it is created.  If the
  destination directory already holds test results, additional
  results are added in without overwriting earlier results.

  If no destination is specified, the current directory is used,
  provided that the directory is empty, or has been previously used
  as a destination directory for this command.

  By default, only a summary subset of the log files created by
  autotest is preserved; with --keep_logs the (potentially large)
  autotest logs are preserved with the test results.
END_USAGE
  return $FLAGS_TRUE
}
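
# For reference, a typical invocation looks like this (hypothetical
# DUT address and output directory):
#
#   bootperf -o ~/bootperf.results 172.22.75.123 5
#
# This runs platform_BootPerfServer 5 times (50 reboots in all) and
# accumulates the results under ~/bootperf.results.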

usage() {
  if [ $# -gt 0 ]; then
    error "$(basename $0): $*"
    echo >&2
  fi
  flags_help
  exit 1
}

# List any files in the current directory not created as output
# from running this script.
extra_files() {
  ls | grep -v "^$RUNDIR[.]...\$" |
    grep -v $KEYVAL_SUMMARY
}

# Main function to run the boot performance test. Run the boot
# performance test for the given count, putting output into the
# current directory.
#
# Arguments are <ip-address> and <count> arguments, as for the main
# command.
#
# We terminate test runs if "run_remote_tests" ever fails to produce
# the results file; generally this is the result of a serious error
# (e.g. disk full) that won't go away if we just plow on.
run_boot_test() {
  local remote="$1"
  local count="${2:-1}"

  local iter=$(expr "$(echo $RUNDIR.???)" : '.*\(...\)')
  if [ "$iter" != "???" ]; then
    iter=$(echo $iter | awk '{printf "%03d\n", $1 + 1}')
  else
    iter=000
  fi
  i=0
  while [ $i -lt $count ]; do
    local iter_rundir=$RUNDIR.$iter
    local logfile=$(pwd)/$iter_rundir/$RUNDIR_LOG
    local summary_dir=$iter_rundir/$RUNDIR_SUMMARY
    local all_results_dir=$iter_rundir/$RUNDIR_ALL_RESULTS

    mkdir $iter_rundir
    date
    echo " logging in $logfile"

    # BEWARE: The --use_emerged option means that you must manually
    # emerge chromeos-base/autotest-tests if a) you are working on
    # the package, and b) you also want to use this script to test your
    # changes to the package.  (The option is here because IMO the
    # alternative is a bigger nuisance.)
    $RUN_TEST --use_emerged --results_dir_root="$TMP_RESULTS" \
        --remote="$remote" $TEST >$logfile 2>&1
    if [ ! -e "$TMP_RESULTS/$RESULTS_KEYVAL" ]; then
      error "No results file; terminating test runs."
      error "Check $logfile for output from the test run,"
      error "and see $TMP_RESULTS for full test logs and output."
      return
    fi
    mkdir $summary_dir
    tar cf - -C $TMP_RESULTS "${RESULTS_SUMMARY_FILES[@]}" |
        tar xf - -C $summary_dir
    if [ $FLAGS_keep_logs -eq $FLAGS_TRUE ]; then
      mv $TMP_RESULTS $all_results_dir
      chmod 755 $all_results_dir
    else
      rm -rf $TMP_RESULTS
    fi
    i=$(expr $i + 1)
    iter=$(echo $iter | awk '{printf "%03d\n", $1 + 1}')
  done
  date
  cat $RUNDIR.???/$RUNDIR_SUMMARY/$RESULTS_KEYVAL >$KEYVAL_SUMMARY
}

# Main routine - check validity of the (already parsed) command line
# options. 'cd' to the results directory, if it was specified. If
# all the argument checks pass, hand control to run_boot_test.
main() {
  if [ $# -lt 1 ]; then
    usage "Missing target host address"
  elif [ $# -gt 2 ]; then
    usage "Too many arguments"
  fi

  if [ -n "${FLAGS_output_dir}" ]; then
    if [ ! -d "${FLAGS_output_dir}" ]; then
      if ! mkdir "${FLAGS_output_dir}"; then
        usage "Unable to create ${FLAGS_output_dir}"
      fi
    fi
    cd "${FLAGS_output_dir}" ||
      usage "No permissions to chdir to ${FLAGS_output_dir}"
  elif [ -n "$(extra_files)" ]; then
    error "No results directory specified, and current directory"
    error "contains contents other than run results."
    error "You can override this error by using the --output_dir option"
    usage
  fi

  # Check the count argument.
  # N.B. the test [ "$2" -eq "$2" ] tests whether "$2" is valid as a
  # number; when it fails it will also report a syntax error (which
  # we suppress).
  if [ -n "$2" ]; then
    if ! [ "$2" -eq "$2" ] 2>/dev/null || [ "$2" -le 0 ]; then
      usage "<count> argument must be a positive number"
    fi
  fi

  run_boot_test "$@"
}

# shflags defines --help implicitly; if it's used on the command
# line FLAGS will invoke flags_help, set FLAGS_help to TRUE, and
# then return false. To avoid printing help twice, we have to check
# for that case here.
if ! FLAGS "$@"; then
  if [ ${FLAGS_help} -eq ${FLAGS_TRUE} ]; then
    exit 0
  else
    usage
  fi
fi

eval main "${FLAGS_ARGV}"


@@ -1,79 +0,0 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Routines for printing boot time performance test results."""
import fnmatch
import os
import os.path
import re

def PrintRawData(reader, dirlist, use_timestats, keylist):
  """Print 'bootperf' results in "raw data" format."""
  for dir_ in dirlist:
    results = reader(dir_)
    if use_timestats:
      keyset = results.TimeKeySet()
    else:
      keyset = results.DiskKeySet()
    for i in range(0, keyset.num_iterations):
      if len(dirlist) > 1:
        line = "%s %3d" % (results.name, i)
      else:
        line = "%3d" % i
      if keylist is not None:
        markers = keylist
      else:
        markers = keyset.markers
      for stat in markers:
        (_, v) = keyset.PrintableStatistic(keyset.RawData(stat)[i])
        line += " %5s" % str(v)
      print line


def PrintStatisticsSummary(reader, dirlist, use_timestats, keylist):
  """Print 'bootperf' results in "summary of averages" format."""
  if use_timestats:
    header = "%5s %3s %5s %3s %s" % (
        "time", "s%", "dt", "s%", "event")
    format = "%5s %2d%% %5s %2d%% %s"
  else:
    header = "%6s %3s %6s %3s %s" % (
        "diskrd", "s%", "delta", "s%", "event")
    format = "%6s %2d%% %6s %2d%% %s"
  havedata = False
  for dir_ in dirlist:
    results = reader(dir_)
    if use_timestats:
      keyset = results.TimeKeySet()
    else:
      keyset = results.DiskKeySet()
    if keylist is not None:
      markers = keylist
    else:
      markers = keyset.markers
    if havedata:
      print
    if len(dirlist) > 1:
      print "%s" % results.name,
    print "(on %d cycles):" % keyset.num_iterations
    print header
    prevvalue = 0
    prevstat = None
    for stat in markers:
      (valueavg, valuedev) = keyset.Statistics(stat)
      valuepct = int(100 * valuedev / valueavg + 0.5)
      if prevstat:
        (deltaavg, deltadev) = keyset.DeltaStatistics(prevstat, stat)
        deltapct = int(100 * deltadev / deltaavg + 0.5)
      else:
        deltapct = valuepct
      (valstring, val_printed) = keyset.PrintableStatistic(valueavg)
      delta = val_printed - prevvalue
      (deltastring, _) = keyset.PrintableStatistic(delta)
      print format % (valstring, valuepct, "+" + deltastring, deltapct, stat)
      prevvalue = val_printed
      prevstat = stat
    havedata = True


@@ -1,69 +0,0 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Routines for reading performance results from a directory.
The directory should match the format created by the 'bootperf'
script; see comments in that script for a summary of the layout.
"""
import fnmatch
import os
import re
import resultset
_PERF_KEYVAL_PATTERN = re.compile("(.*){perf}=(.*)\n")

def _ReadKeyvalFile(results, file_):
  """Read an autotest keyval file, and process the results.

  The `file_` parameter is a file object with contents in autotest
  perf keyval format:
      <keyname>{perf}=<value>
  Each iteration of the test is terminated with a single blank line,
  including the last iteration.  Each iteration's results are added
  to the `results` parameter, which should be an instance of
  TestResultSet.
  """
  kvd = {}
  for line in iter(file_):
    if line == "\n":
      results.AddIterationResults(kvd)
      kvd = {}
      continue
    m = _PERF_KEYVAL_PATTERN.match(line)
    if m is None:
      continue
    kvd[m.group(1)] = m.group(2)
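
# For illustration, a keyval file covering two iterations might look
# like this (hypothetical values; the key names follow the
# platform_BootPerf naming described in resultset.py):
#
#   seconds_kernel_to_startup{perf}=1.07
#   seconds_kernel_to_login{perf}=8.57
#   (blank line)
#   seconds_kernel_to_startup{perf}=1.09
#   seconds_kernel_to_login{perf}=8.61
#   (blank line)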

_RESULTS_PATH = (
    "summary/platform_BootPerfServer/platform_BootPerfServer/results/keyval")


def ReadResultsDirectory(dir_):
  """Process results from a 'bootperf' output directory.

  The accumulated results are returned in a newly created
  TestResultSet object.
  """
  res_set = resultset.TestResultSet(dir_)
  dirlist = fnmatch.filter(os.listdir(dir_), "run.???")
  dirlist.sort()
  for rundir in dirlist:
    keyval_path = os.path.join(dir_, rundir, _RESULTS_PATH)
    try:
      kvf = open(keyval_path)
    except IOError:
      continue
    _ReadKeyvalFile(res_set, kvf)
  res_set.FinalizeResults()
  return res_set


@@ -1,274 +0,0 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Classes and functions for managing platform_BootPerf results.
Results from the platform_BootPerf test in the ChromiumOS autotest
package are stored in as performance 'keyvals', that is, a mapping
of names to numeric values. For each iteration of the test, one
set of keyvals is recorded.
This module currently tracks two kinds of keyval results, the boot
time results, and the disk read results. These results are stored
with keyval names such as 'seconds_kernel_to_login' and
'rdbytes_kernel_to_login'. Additionally, some older versions of the
test produced keyval names such as 'sectors_read_kernel_to_login'.
These keyvals record an accumulated total measured from a fixed
time in the past (kernel startup), e.g. 'seconds_kernel_to_login'
records the total seconds from kernel startup to login screen
ready.
The boot time keyval names all start with the prefix
'seconds_kernel_to_', and record time in seconds since kernel
startup.
The disk read keyval names all start with the prefix
'rdbytes_kernel_to_', and record bytes read from the boot device
since kernel startup. The obsolete disk keyvals start with the
prefix 'sectors_read_kernel_to_' and record the same statistic
measured in 512-byte sectors.
Boot time and disk kevyal values have a consistent ordering
across iterations. For instance, if in one iteration the value of
'seconds_kernel_to_login' is greater than the value of
'seconds_kernel_to_x_started', then it will be greater in *all*
iterations. This property is a consequence of the underlying
measurement procedure; it is not enforced by this module.
"""
import math

def _ListStats(list_):
  # Utility function - calculate the average and (sample) standard
  # deviation of a list of numbers.  Result is float, even if the
  # input list is full of int's
  sum_ = 0.0
  sumsq = 0.0
  for v in list_:
    sum_ += v
    sumsq += v * v
  n = len(list_)
  avg = sum_ / n
  var = (sumsq - sum_ * avg) / (n - 1)
  if var < 0.0:
    var = 0.0
  dev = math.sqrt(var)
  return (avg, dev)
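
# For example (hand-checked): _ListStats([1.0, 2.0, 3.0]) returns
# (2.0, 1.0), i.e. an average of 2.0 with a sample standard
# deviation of 1.0.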

def _DoCheck(dict_):
  # Utility function - check that all keyvals occur the same
  # number of times.  On success, return the number of occurrences;
  # on failure return None
  check = map(len, dict_.values())
  if not check:
    return None
  for i in range(1, len(check)):
    if check[i] != check[i-1]:
      return None
  return check[0]

def _KeyDelta(dict_, key0, key1):
  # Utility function - return a list of the vector difference between
  # two keyvals.
  return map(lambda a, b: b - a, dict_[key0], dict_[key1])
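
# For example (hypothetical values):
#   _KeyDelta({'a': [1, 4], 'b': [3, 9]}, 'a', 'b') returns [2, 5]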

class TestResultSet(object):
  """A set of boot time and disk usage result statistics.

  Objects of this class consist of two sets of result statistics:
  the boot time statistics and the disk statistics.

  Class TestResultSet does not interpret or store keyval mappings
  directly; iteration results are processed by attached _KeySet
  objects, one for boot time (`_timekeys`), one for disk read
  (`_diskkeys`).  These attached _KeySet objects can be obtained
  with appropriate methods; various methods on these objects will
  calculate statistics on the results, and provide the raw data.
  """

  def __init__(self, name):
    self.name = name
    self._timekeys = _TimeKeySet()
    self._diskkeys = _DiskKeySet()
    self._olddiskkeys = _OldDiskKeySet()

  def AddIterationResults(self, runkeys):
    """Add keyval results from a single iteration.

    A TestResultSet is constructed by repeatedly calling
    AddIterationResults(), iteration by iteration.  Iteration results
    are passed in as a dictionary mapping keyval attributes to values.
    When all iteration results have been added, FinalizeResults()
    makes the results available for analysis.
    """
    self._timekeys.AddRunResults(runkeys)
    self._diskkeys.AddRunResults(runkeys)
    self._olddiskkeys.AddRunResults(runkeys)

  def FinalizeResults(self):
    """Make results available for analysis.

    A TestResultSet is constructed by repeatedly feeding it results,
    iteration by iteration.  Iteration results are passed in as a
    dictionary mapping keyval attributes to values.  When all iteration
    results have been added, FinalizeResults() makes the results
    available for analysis.
    """
    self._timekeys.FinalizeResults()
    if not self._diskkeys.FinalizeResults():
      self._olddiskkeys.FinalizeResults()
      self._diskkeys = self._olddiskkeys
      self._olddiskkeys = None

  def TimeKeySet(self):
    """Return the boot time statistics result set."""
    return self._timekeys

  def DiskKeySet(self):
    """Return the disk read statistics result set."""
    return self._diskkeys

class _KeySet(object):
  """Container for a set of related statistics.

  _KeySet is an abstract superclass for containing collections of
  either boot time or disk read statistics.  Statistics are stored
  as a dictionary (`_keyvals`) mapping keyval names to lists of
  values.

  The mapped keyval names are shortened by stripping the prefix
  that identifies the type of keyval (keyvals that don't start with
  the proper prefix are ignored).  So, for example, with boot time
  keyvals, 'seconds_kernel_to_login' becomes 'login' (and
  'rdbytes_kernel_to_login' is ignored).

  A list of all valid keyval names is stored in the `markers`
  instance variable.  The list is sorted by the natural ordering of
  the underlying values (see the module comments for more details).

  The list of values associated with a given keyval name are indexed
  in the order in which they were added.  So, all values for a given
  iteration are stored at the same index.
  """

  def __init__(self):
    self._keyvals = {}

  def AddRunResults(self, runkeys):
    """Add results for one iteration."""
    for key, value in runkeys.iteritems():
      if not key.startswith(self.PREFIX):
        continue
      shortkey = key[len(self.PREFIX):]
      keylist = self._keyvals.setdefault(shortkey, [])
      keylist.append(self._ConvertVal(value))

  def FinalizeResults(self):
    """Finalize this object's results.

    This method makes available the `markers` and `num_iterations`
    instance variables.  It also ensures that every keyval occurred
    in every iteration by requiring that all keyvals have the same
    number of data points.
    """
    count = _DoCheck(self._keyvals)
    if count is None:
      self.num_iterations = 0
      self.markers = []
      return False
    self.num_iterations = count
    keylist = map(lambda k: (self._keyvals[k][0], k),
                  self._keyvals.keys())
    keylist.sort(key=lambda tp: tp[0])
    self.markers = map(lambda tp: tp[1], keylist)
    return True

  def RawData(self, key):
    """Return the list of values for the given marker key."""
    return self._keyvals[key]

  def DeltaData(self, key0, key1):
    """Return vector difference of the values of the given keys."""
    return _KeyDelta(self._keyvals, key0, key1)

  def Statistics(self, key):
    """Return the average and standard deviation of the key's values."""
    return _ListStats(self._keyvals[key])

  def DeltaStatistics(self, key0, key1):
    """Return the average and standard deviation of the differences
    between two keys.
    """
    return _ListStats(self.DeltaData(key0, key1))

class _TimeKeySet(_KeySet):
  """Concrete subclass of _KeySet for boot time statistics."""

  # TIME_KEY_PREFIX = 'seconds_kernel_to_'
  PREFIX = 'seconds_kernel_to_'

  # Time-based keyvals are reported in seconds and get converted to
  # milliseconds
  TIME_SCALE = 1000

  def _ConvertVal(self, value):
    # We use a "round to nearest int" formula here to make sure we
    # don't lose anything in the conversion from decimal.
    return int(self.TIME_SCALE * float(value) + 0.5)

  def PrintableStatistic(self, value):
    v = int(value + 0.5)
    return ("%d" % v, v)

class _DiskKeySet(_KeySet):
  """Concrete subclass of _KeySet for disk read statistics."""

  PREFIX = 'rdbytes_kernel_to_'

  # Disk read keyvals are reported in bytes and get converted to
  # MBytes (1 MByte = 1 million bytes, not 2**20)
  DISK_SCALE = 1.0e-6

  def _ConvertVal(self, value):
    return self.DISK_SCALE * float(value)

  def PrintableStatistic(self, value):
    v = round(value, 1)
    return ("%.1fM" % v, v)

class _OldDiskKeySet(_DiskKeySet):
  """Concrete subclass of _KeySet for the old-style disk read statistics."""

  # Older versions of platform_BootPerf reported total sectors read
  # using names of the form sectors_read_kernel_to_* (instead of the
  # more recent rdbytes_kernel_to_*), but some of those names
  # exceeded the 30-character limit in the MySQL database schema.
  PREFIX = 'sectors_read_kernel_to_'

  # Old style disk read keyvals are reported in 512-byte sectors and
  # get converted to MBytes (1 MByte = 1 million bytes, not 2**20)
  SECTOR_SCALE = 512 * _DiskKeySet.DISK_SCALE

  def _ConvertVal(self, value):
    return self.SECTOR_SCALE * float(value)
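
# For example, an old-style keyval of 4096 sectors converts to
# 4096 * 512 * 1.0e-6 = 2.097152 MBytes (hypothetical value, for
# illustration).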


@@ -1,139 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A command to display summary statistics from runs of 'bootperf'.
Command line options allow selecting from one of two sets of
performance statistics: The boot time statistics (selected by
--timestats) measure time spent from kernel startup in milliseconds.
The disk statistics (selected by --diskstats) measure total bytes
read from the boot device since kernel startup.
Boot time statistics are recorded as cumulative time (or disk read)
since kernel startup, measured when specific events occur during
boot. Events include such things as 'startup', the moment when the
upstart 'startup' job begins running, and 'login', when the
Chrome OS login screen is displayed. By default, all recorded events
are included in the output; command line options allow restricting
the view to a selected subset of events.
Separate command line options allow selecting from one of two
different display modes. When --averages is selected, the display
shows the average value and sample standard deviation (as a percent
of the average) for all selected events. The --averages display also
calculates the time (or bytes) between adjacent events, and shows
the avearage and sample standard deviation of the differences.
The --rawdata display shows the raw data value associated with each
event for each boot: Each line of output represents the event values
for one boot cycle.
"""
import sys
import optparse
import resultsdir
import perfprinter
_USAGE = "%prog [options] [results-directory ...]"
_DESCRIPTION = """\
Summarize boot time performance results. The result directory
arguments are directories previously specified as output for the
'bootperf' script.
"""

def _SetupOptions():
  optparser = optparse.OptionParser(usage=_USAGE, description=_DESCRIPTION)

  optgroup = optparse.OptionGroup(
      optparser, "Selecting boot time or disk statistics (choose one)")
  optgroup.add_option(
      "-t", "--timestats", action="store_true",
      dest="use_timestats",
      help="use statistics for time since kernel startup (default)")
  optgroup.add_option(
      "-d", "--diskstats", action="store_true",
      dest="use_diskstats",
      help="use statistics for bytes read since kernel startup")
  optparser.add_option_group(optgroup)
  optparser.set_defaults(use_diskstats=False)
  optparser.set_defaults(use_timestats=False)

  optgroup = optparse.OptionGroup(optparser, "Event selection")
  optgroup.add_option(
      "-e", "--event", action="append",
      dest="eventnames",
      help="restrict statistics to the comma-separated list of events")
  optparser.add_option_group(optgroup)

  optgroup = optparse.OptionGroup(
      optparser, "Display mode selection (choose one)")
  optgroup.add_option(
      "-a", "--averages", action="store_true",
      dest="print_averages",
      help="display a summary of the averages of chosen statistics (default)")
  optgroup.add_option(
      "-r", "--rawdata", action="store_true",
      dest="print_raw",
      help="display raw data from all boot iterations")
  optparser.add_option_group(optgroup)
  optparser.set_defaults(print_averages=False)
  optparser.set_defaults(print_raw=False)
  return optparser

def _ProcessDisplayOptions(options):
  display_count = 0
  if options.print_averages:
    display_count += 1
    printfunc = perfprinter.PrintStatisticsSummary
  if options.print_raw:
    display_count += 1
    printfunc = perfprinter.PrintRawData
  if display_count == 0:
    printfunc = perfprinter.PrintStatisticsSummary
  elif display_count > 1:
    print >>sys.stderr, "Can't use -a and -r together.\n"
    return None
  return printfunc

def _ProcessStatsOptions(options):
  if options.use_timestats and options.use_diskstats:
    print >>sys.stderr, "Can't use -t and -d together.\n"
    return None
  return not options.use_diskstats

def _ProcessKeylistOptions(options):
  if not options.eventnames:
    return None
  keylist = []
  for kl in options.eventnames:
    keylist.extend(kl.split(','))
  return keylist

def main(argv):
  optparser = _SetupOptions()
  (options, args) = optparser.parse_args(argv)
  printfunc = _ProcessDisplayOptions(options)
  use_timestats = _ProcessStatsOptions(options)
  keylist = _ProcessKeylistOptions(options)
  if printfunc is None or use_timestats is None:
    optparser.print_help()
    sys.exit(1)
  if not args:
    args = ["."]
  printfunc(resultsdir.ReadResultsDirectory,
            args, use_timestats, keylist)


if __name__ == "__main__":
  main(sys.argv[1:])