Mirror of https://github.com/flatcar/scripts.git, synced 2025-09-22 14:11:07 +02:00
Merge pull request #252 from marineam/clean
Clean out some old ChromeOS junk
This commit is contained in:
commit 38fa0f92a8
@@ -1,2 +0,0 @@
[Hook Scripts]
hook0=../../chromite/bin/cros lint ${PRESUBMIT_FILES}
150
call_autoserv.py
@@ -1,150 +0,0 @@
#!/usr/bin/python

# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Script to run client or server tests on a live remote image.

This script can be used to save results of each test run in timestamped
unique results directory.

"""

import datetime
import glob
import logging
import os
import sys
import time
from optparse import OptionParser

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
conlog = logging.StreamHandler()
conlog.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s | %(message)s")
conlog.setFormatter(formatter)
logger.addHandler(conlog)

def ConnectRemoteMachine(machine_ip):
  os.system("eval `ssh-agent -s`")
  os.system("ssh-add testing_rsa")
  username = os.environ['USER']

  # Removing the machine IP entry from known hosts to avoid identity clash.
  logger.info("Removing machine IP entry from known hosts to avoid identity"
              " clash.")
  host_list = open("/home/%s/.ssh/known_hosts" % username, "r").readlines()
  index = 0
  for host in host_list:
    if machine_ip in host:
      del host_list[index]
      break
    index += 1

  open("/home/%s/.ssh/known_hosts" % username, "w").writelines(host_list)

  # Starting ssh connection to remote test machine.
  logger.info("Starting ssh connection to remote test machine.")
  os.system("ssh root@%s true; echo $? > ssh_result_file" % machine_ip)
  ssh_result = open("ssh_result_file", "r").read()
  logger.info("Status of ssh connection to remote machine : %s" % ssh_result)

  if ssh_result.strip() != '0':
    logger.error("Ssh connection to remote test machine FAILED. Exiting the"
                 " test.")
    sys.exit()

def TestSearch(suite_path, test_name):
  test_path = ""
  filenames = glob.glob(os.path.join(suite_path, test_name))
  for filename in filenames:
    if filename == ("%s/%s" % (suite_path, test_name)):
      test_path = filename
      break
  return test_path

def TriggerTest(test_name, machine_ip):
  # Creating unique time stamped result folder name.
  current_time = datetime.datetime.now()
  result_name = "results." + test_name + current_time.strftime("_%d-%m-%y"
                                                               "_%H:%M")

  # Setting the test path location based on the test_name.
  suite_path = "./autotest/client/site_tests/suite_HWQual"
  test_path = TestSearch(suite_path, "control.%s" % test_name)

  # Looking for test_name under client/site_tests if not under suite_HWQual.
  if test_path == "":
    suite_path = ("./autotest/client/site_tests/%s" % test_name)
    test_path = TestSearch(suite_path, "control")

  # Looking for test_name under server/site_tests if not present under client.
  if test_path == "":
    suite_path = ("./autotest/server/site_tests/%s" % test_name)
    test_path = TestSearch(suite_path, "control")
    # Looking for test_name under server/site_tests/suites.
    if test_path == "":
      suite_path = "./autotest/server/site_tests/suites"
      test_path = TestSearch(suite_path, "control.%s" % test_name)
    # Setting command for server tests.
    run_command = ("./autotest/server/autoserv -r ./autotest/%s -m %s"
                   " -s %s" % (result_name, machine_ip, test_path))
  else:
    run_command = ("./autotest/server/autoserv -r ./autotest/%s -m %s "
                   "-c %s" % (result_name, machine_ip, test_path))

  if test_path == "":
    logger.error("Test not found under client or server directories! Check the "
                 "name of test and do not prefix 'control.' to test name.")
    sys.exit()

  # Making the call to HWQual test.
  logger.info("Starting the HWQual test : %s" % test_path)
  os.system(run_command)

  # Displaying results on test completion.
  test_result = os.system("./generate_test_report ./autotest/%s" % result_name)

  result_path = ("./autotest/%s" % result_name)
  if test_result != 0:
    # Grabbing the results directory as test failed & return value nonzero.
    log_name = ("%s.tar.bz2" % result_path)
    os.system("tar cjf %s %s" % (log_name, result_path))
    logger.info("Logs for the failed test at : %s" % log_name)

  logger.info("Results of test run at : %s" % result_path)

def main(argv):
  # Checking the arguments for help, machine ip and test name.
  parser = OptionParser(usage="USAGE : ./%prog [options]")

  parser.add_option("--ip", dest="dut_ip",
                    help="accepts IP address of device under test <DUT>.")
  parser.add_option("--test", dest="test_name",
                    help="accepts HWQual test name without prefix 'control.'")

  (options, args) = parser.parse_args()

  # Checking for presence of both ip and test parameters.
  if (options.dut_ip == None) or (options.test_name == None):
    parser.error("Argument missing! Both --ip and --test arguments required.")

  # Checking for blank values of both ip and test parameters.
  arg_ip, arg_testname = options.dut_ip, options.test_name
  if (arg_ip == "") or (arg_testname == ""):
    parser.error("Blank values are not accepted for arguments.")

  logger.info("HWQual test to trigger : %s" % arg_testname)
  logger.info("Remote test machine IP : %s" % arg_ip)

  # Setting up ssh connection to remote machine.
  ConnectRemoteMachine(arg_ip)

  # Triggering the HWQual test and result handling.
  TriggerTest(arg_testname, arg_ip)


if __name__ == '__main__':
  main(sys.argv)
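For context, the deleted wrapper above is driven entirely by its two flags. A minimal sketch of an invocation, run from a directory containing a checked-out autotest tree (the IP address and HWQual test name below are placeholders, not taken from this change):

    # Placeholder DUT address and test name; the script prefixes "control." itself.
    ./call_autoserv.py --ip=172.22.71.222 --test=Keyboard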
@@ -1,290 +0,0 @@
#!/bin/bash
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Script to generate minidump symbols in the format required by
# minidump_stackwalk to dump stack information.
#
# NOTE: This script must be run from the chromeos build chroot environment.
#

SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1

# Script must be run inside the chroot
restart_in_chroot_if_needed "$@"

# Flags
DEFINE_string board "$DEFAULT_BOARD" "The board to build packages for."
DEFINE_string minidump_symbol_root "" \
  "Symbol root (defaults to /usr/lib/debug/breakpad for board)"
DEFINE_boolean verbose ${FLAGS_FALSE} "Be verbose."

DUMP_SYMS="dump_syms"
DUMP_SYMS32="dump_syms.32"

ERROR_COUNT=0

debug() {
  if [ ${FLAGS_verbose} -eq ${FLAGS_TRUE} ]; then
    info "$@"
  fi
}

# Each job sets this on their own; we declare it
# globally so the exit trap can always see the last
# setting of it w/in a job worker.
SYM_FILE=
JOB_FILE=
NOTIFIED=

# The master process sets these up, which each worker
# then uses for communicating once they've finished.
CONTROL_PIPE=
CONTROL_PIPE_FD=

_worker_finished() {
  if [ -z "${NOTIFIED}" ]; then
    debug "Sending notification of $BASHPID ${1-1}"
    echo "$BASHPID ${1-1}" > /dev/fd/${CONTROL_PIPE_FD}
    NOTIFIED=1
  fi
}

_cleanup_worker() {
  rm -f "${SYM_FILE}" "${ERR_FILE}"
  _worker_finished 1
}

_cleanup_master() {
  set +eu
  rm -f "${CONTROL_PIPE}"
  if [ ${#JOBS_ARRAY[@]} != 0 ]; then
    kill -s SIGINT "${!JOBS_ARRAY[@]}" &> /dev/null
    wait
    # Clear the array.
    JOBS_ARRAY=( )
  fi
}

declare -A JOBS_ARRAY

finish_job() {
  local finished result
  read -r -u ${CONTROL_PIPE_FD} finished result
  # Bash doesn't allow for zombies, but tell it to clean up its internal
  # bookkeeping. Note bash can be buggy here- if a new process has slipped
  # into that pid, bash doesn't use its internal accounting first, and
  # can throw an error; doesn't matter, thus this form.
  ! wait ${finished} &> /dev/null
  if [ "${result-1}" -ne "0" ]; then
    : $(( ++ERROR_COUNT ))
  fi
  # Bit of a hack, but it works well enough.
  debug "finished ${finished} with result ${result-1}"
  unset JOBS_ARRAY[${finished}]
}

run_job() {
  local debug_file=${1} text_file=${2} newpid

  if [ ${#JOBS_ARRAY[@]} -ge ${NUM_JOBS} ]; then
    # Reclaim a spot.
    finish_job
  fi

  dump_file "${debug_file}" "${text_file}" &
  newpid=$!
  debug "Started ${debug_file} ${text_file} at ${newpid}"
  JOBS_ARRAY[$newpid]=1
}

# Given path to a debug file, return its text file
get_text_for_debug() {
  local debug_file=$1
  local text_dir=$(dirname "${debug_file#$DEBUG_ROOT}")
  local text_path=${SYSROOT}${text_dir}/$(basename "${debug_file}" .debug)
  echo ${text_path}
}

# Given path to a text file, return its debug file
get_debug_for_text() {
  local text_file=$1
  local text_path=${text_file#${SYSROOT}}
  local debug_path=${DEBUG_ROOT}${text_path}.debug
  echo ${debug_path}
}

# Returns true if the file given is a 32-bit ELF file.
is_32b_elf() {
  local elf="$1"
  sudo file "${elf}" | grep -q "ELF 32-bit"
}

# Dump given debug and text file. Returns 1 if any errors, even
# if they can be ignored, but only sets ERROR_COUNT if the error should not
# be ignored (and we should not proceed to upload).
dump_file() {
  trap '_cleanup_worker; exit 1' INT TERM
  trap _cleanup_worker EXIT
  local debug_file="$1"
  local text_file="$2"
  local debug_directory="$(dirname "${debug_file}")"
  local dump_syms_prog="${DUMP_SYMS}"
  # 32-bit dump_syms must be used to dump a 32-bit ELF file
  if is_32b_elf "${text_file}"; then
    dump_syms_prog="${DUMP_SYMS32}"
    debug "Using ${dump_syms_prog} for 32-bit file ${text_file}"
  fi
  SYM_FILE=$(mktemp -t "breakpad.sym.XXXXXX")
  # Dump symbols as root in order to read all files.
  if ! sudo "${dump_syms_prog}" "${text_file}" "${debug_directory}" \
      > "${SYM_FILE}" 2> /dev/null; then
    # Try dumping just the main file to get public-only symbols.
    ERR_FILE=$(mktemp -t "breakpad.err.XXXXXX")
    if ! sudo "${dump_syms_prog}" "${text_file}" > "${SYM_FILE}" \
        2> "${ERR_FILE}"; then
      # A lot of files (like kernel files) contain no debug information, do
      # not consider such occurrences as errors.
      if grep -q "file contains no debugging information" "${ERR_FILE}"; then
        warn "No symbols found for ${text_file}"
        _worker_finished 0
        exit 0
      fi
      error "Unable to dump symbols for ${text_file}:"
      error "$(<"${ERR_FILE}")"
      exit 1
    else
      warn "File ${text_file} did not have debug info, using linkage symbols"
    fi
  fi
  local file_id=$(head -1 ${SYM_FILE} | cut -d' ' -f4)
  local module_name=$(head -1 ${SYM_FILE} | cut -d' ' -f5)
  # Show file upload success and symbol info for easier lookup
  debug "Dumped symbols from ${text_file} for ${module_name}|${file_id}."
  # Sanity check: if we've created the same named file in the /usr/lib/debug
  # directory during the src_compile stage of an ebuild, verify our sym file
  # is the same.
  local installed_sym="${DEBUG_ROOT}"/$(basename "${text_file}").sym
  if [ -e "${installed_sym}" ]; then
    if ! cmp --quiet "${installed_sym}" "${SYM_FILE}"; then
      error "${installed_sym} differs from current sym file:"
      error "$(diff "${installed_sym}" "${SYM_FILE}")"
      : $(( ++ERROR_COUNT ))
      exit 1
    fi
  fi

  local container_dir="${FLAGS_minidump_symbol_root}/${module_name}/${file_id}"
  sudo mkdir -p "${container_dir}"
  sudo mv "${SYM_FILE}" "${container_dir}/${module_name}.sym"
  _worker_finished 0
  exit 0
}

# Convert the given debug file. No return value.
process_file() {
  local debug_file="$1"
  local text_file="$(get_text_for_debug ${debug_file})"
  if [ -h "${debug_file}" ]; then
    # Don't follow symbolic links. In particular, we don't want to bother
    # with the *.debug links in the "debug/.build-id/" directory.
    debug "Skipping symbolic link: ${debug_file}"
    return 0
  fi
  if [ "${text_file##*.}" == "ko" ]; then
    # Skip kernel objects. We can't use their symbols and they sometimes
    # have objects with empty text sections which trigger errors in dump_sym.
    debug "Skipping kernel object: ${text_file}"
    return 0
  fi
  if [ ! -f "${text_file}" ]; then
    # Allow files to not exist, for instance if they are in the INSTALL_MASK.
    warn "Binary does not exist: ${text_file}"
    return 0
  fi

  run_job "${debug_file}" "${text_file}"
}

main() {

  # Parse command line
  FLAGS_HELP="usage: $0 [flags] [<files...>]"
  FLAGS "$@" || exit 1
  eval set -- "${FLAGS_ARGV}"

  switch_to_strict_mode

  [ -n "$FLAGS_board" ] || die_notrace "--board is required."

  SYSROOT="/build/${FLAGS_board}"

  if [[ -z "${FLAGS_minidump_symbol_root}" ]]; then
    FLAGS_minidump_symbol_root="${SYSROOT}/usr/lib/debug/breakpad"
  fi

  info "Writing minidump symbols to ${FLAGS_minidump_symbol_root}"

  DEBUG_ROOT="${SYSROOT}/usr/lib/debug"
  sudo rm -rf "${FLAGS_minidump_symbol_root}"

  # Open our control pipe.
  trap '_cleanup_master; exit 1' INT TERM
  trap _cleanup_master EXIT
  CONTROL_PIPE=$(mktemp -t "breakpad.fifo.XXXXXX")
  rm "${CONTROL_PIPE}"
  mkfifo "${CONTROL_PIPE}"
  exec {CONTROL_PIPE_FD}<>${CONTROL_PIPE}

  # We require our stderr (which error/info/warn go through) to be a
  # pipe for atomic write reasons; thus if it isn't, abuse cat to make it
  # so.
  if [ ! -p /dev/stderr ]; then
    debug "Replacing stderr with a cat process for pipe requirements..."
    exec 2> >(cat 1>&2)
  fi

  if [ -z "${FLAGS_ARGV}" ]; then
    # Sort on size; we want to start the largest first since it's likely going
    # to be the chrome binary (which can take 98% of the runtime when we're
    # running with parallelization for 6 or higher).
    for debug_file in $(find "${DEBUG_ROOT}" -name \*.debug \
        -type f -exec stat -c '%s %n' {} + | sort -gr | cut -d' ' -f2-); do
      process_file "${debug_file}"
    done
  else
    for either_file in ${FLAGS_ARGV}; do
      either_file=${either_file#\'}
      either_file=${either_file%\'}
      if [ ! -h "${either_file}" -a ! -f "${either_file}" ]; then
        error "Specified file ${either_file} does not exist"
        : $(( ++ERROR_COUNT ))
        continue
      fi
      if [ "${either_file##*.}" == "debug" ]; then
        debug_file="${either_file}"
      else
        debug_file="$(get_debug_for_text ${either_file})"
      fi
      process_file "${debug_file}"
    done
  fi

  while [[ ${#JOBS_ARRAY[@]} != 0 ]]; do
    finish_job
  done

  local size=$(sudo find "${FLAGS_minidump_symbol_root}" \
    -type f -name '*.sym' -exec du -b {} + | \
    awk '{t += $1} END {print t}')
  info "Generated ${size:-0}B of unique debug information"

  if [[ ${ERROR_COUNT} == 0 ]]; then
    return 0
  fi
  die_notrace "Encountered ${ERROR_COUNT} problems"
}

main "$@"
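As background on the dump_file step above: it derives the output location from the first MODULE record of each generated .sym file, where fields 4 and 5 are the debug file id and module name. A sketch with an assumed module name and id (placeholder values, not from this change):

    head -1 libfoo.so.sym
    # MODULE Linux x86 6EDC6ACDB282125843FD59DA9C81BD830 libfoo.so        <- assumed example record
    # installed as:
    # ${FLAGS_minidump_symbol_root}/libfoo.so/6EDC6ACDB282125843FD59DA9C81BD830/libfoo.so.sym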
@@ -1,78 +0,0 @@
#!/bin/bash

# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Script to generate stackdumps from BVT failures.

# This can only run inside the chroot since we need minidump_stackwalk.
. "$(dirname $0)/common.sh" || exit 1
assert_inside_chroot "$@"

usage() {
  echo "Usage: $0 url_or_path_to_debug_tgz url_to_bvt_test_results"
}

if [ -z "$1" ] ; then
  usage
  die_notrace "The URL or path to symbols tarball (debug.tgz) is required"
fi

if [ -z "$2" ] ; then
  usage
  die_notrace "The URL to BVT test results is required"
fi

# Die on any errors.
switch_to_strict_mode

BREAKPAD_DIR="debug/breakpad"
STACKS_GENERATED=""
OUTPUT_DIR="$(mktemp -d)"

extract_tarball() {
  info "Extracting breakpad symbols from $1..."
  tar zxf "$1" -C "${OUTPUT_DIR}" "${BREAKPAD_DIR}"
}

generate_stacktrace() {
  echo "$1.txt"
  minidump_stackwalk "$1" "${OUTPUT_DIR}/${BREAKPAD_DIR}" \
    >"$1.txt" 2>/dev/null
}

find_and_generate_stacktraces() {
  find "${OUTPUT_DIR}" -name "*.dmp" |
  while read filename ; do
    generate_stacktrace "${filename}"
  done
}

cleanup() {
  if [ -n "${OUTPUT_DIR}" -a -z "${STACKS_GENERATED}" ] ; then
    rm -rf "${OUTPUT_DIR}"
  fi
}

trap cleanup INT TERM EXIT

info "Downloading minidumps from $2..."
wget -q -nv -r -l 10 -np -A "*.dmp" -P "${OUTPUT_DIR}" $2
if [[ -z "$(find "${OUTPUT_DIR}" -name "*.dmp")" ]] ; then
  die "No minidumps found"
fi

if [[ -f "$1" ]] ; then
  extract_tarball "$1"
else
  info "Downloading symbols tarball from $1..."
  wget -P "${OUTPUT_DIR}" "$1"
  TARBALL="${OUTPUT_DIR}/$(basename $1)"
  extract_tarball "$TARBALL"
  rm -f "$TARBALL"
fi

info "Generating stack traces..."
STACKS_GENERATED=$(find_and_generate_stacktraces)
echo $STACKS_GENERATED
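Per dump, the script above reduces to a single minidump_stackwalk invocation against the extracted symbol tree; a sketch with placeholder paths:

    # chrome.20110315.dmp and the temp directory are placeholders.
    minidump_stackwalk results/chrome.20110315.dmp /tmp/tmp.XXXXXX/debug/breakpad \
      > results/chrome.20110315.dmp.txt 2>/dev/null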
@@ -1 +0,0 @@
../platform/crostestutils/cros_run_unit_tests
@@ -1,2 +0,0 @@
var/*
usr/local/*
@@ -1 +0,0 @@
../platform/crostestutils/utils_py/generate_test_report.py
@@ -1,88 +0,0 @@
#!/bin/bash

# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

check_compiler_flags()
{
  local binary="$1"
  local flags=false
  local fortify=true
  local stack=true
  ${readelf} -p .GCC.command.line "${binary}" | \
  {
    while read flag ; do
      flags=true
      case "${flag}" in
        *"-U_FORTIFY_SOURCE"*)
          fortify=false
          ;;
        *"-fno-stack-protector"*)
          stack=false
          ;;
      esac
    done
    if ! ${flags}; then
      echo "File not built with -frecord-gcc-switches: ${binary}"
      return
    fi
    ${fortify} || echo "File not built with -D_FORTIFY_SOURCE: ${binary}"
    ${stack} || echo "File not built with -fstack-protector: ${binary}"
  }
}

check_linker_flags()
{
  local binary="$1"
  local pie=false
  local relro=false
  local now=false
  local gold=false
  ${readelf} -dlSW "${binary}" | \
  {
    while read line ; do
      case "${line}" in
        *".note.gnu.gold-version"*)
          gold=true
          ;;
        *"Shared object file"*)
          pie=true
          ;;
        *"GNU_RELRO"*)
          relro=true
          ;;
        *"BIND_NOW"*)
          now=true
          ;;
      esac
    done

    ${pie} || echo "File not PIE: ${binary}"
    ${relro} || echo "File not built with -Wl,-z,relro: ${binary}"
    ${now} || echo "File not built with -Wl,-z,now: ${binary}"
    ${gold} || echo "File not built with gold: ${binary}"
  }
}

check_binaries()
{
  local CTARGET="${CTARGET:-${CHOST}}"
  local readelf="${CTARGET}-readelf"
  local binary
  scanelf -y -B -F '%F' -R "${D}" | \
  while read binary ; do
    case "${binary}" in
      *.ko)
        ;;
      ${D}usr/lib/debug/*)
        ;;
      *)
        check_compiler_flags "${binary}"
        check_linker_flags "${binary}"
        ;;
    esac
  done
}

check_binaries
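The same checks can be spot-checked by hand on a single binary with the cross readelf that the hook uses; a sketch assuming an x86_64-cros-linux-gnu toolchain prefix and a placeholder binary path (both assumed, not taken from this change):

    # Any recorded opt-out switches indicate a weakened build.
    x86_64-cros-linux-gnu-readelf -p .GCC.command.line "${D}usr/bin/foo" \
      | grep -e '-U_FORTIFY_SOURCE' -e '-fno-stack-protector'
    # Hardened link flags and the gold note show up in the dynamic/section/program headers.
    x86_64-cros-linux-gnu-readelf -dlSW "${D}usr/bin/foo" \
      | grep -E 'GNU_RELRO|BIND_NOW|gold-version'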
@@ -1 +0,0 @@
../platform/dev/host/image_to_live.sh
230
remote_access.sh
@@ -1,230 +0,0 @@
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Library for setting up remote access and running remote commands.

DEFAULT_PRIVATE_KEY="${GCLIENT_ROOT}/src/scripts/mod_for_test_scripts/\
ssh_keys/testing_rsa"

DEFINE_string remote "" "remote hostname/IP of running Chromium OS instance"
DEFINE_string private_key "$DEFAULT_PRIVATE_KEY" \
  "Private key of root account on remote host"
DEFINE_integer ssh_port 22 \
  "SSH port of the remote machine running Chromium OS instance"
DEFINE_integer ssh_connect_timeout 30 \
  "SSH connect timeout in seconds"
DEFINE_integer ssh_connection_attempts 4 \
  "SSH connection attempts"

ssh_connect_settings() {
  if [[ -n "$SSH_CONNECT_SETTINGS" ]]; then
    # If connection settings were fixed in an environment variable, just return
    # those values.
    echo -n "$SSH_CONNECT_SETTINGS"
  else
    # Otherwise, return the default (or user overridden) settings.
    local settings=(
      "Protocol=2"
      "ConnectTimeout=${FLAGS_ssh_connect_timeout}"
      "ConnectionAttempts=${FLAGS_ssh_connection_attempts}"
      "ServerAliveInterval=10"
      "ServerAliveCountMax=3"
      "StrictHostKeyChecking=no"
    )
    printf -- '-o %s ' "${settings[@]}"
  fi
}

# Copies $1 to $2 on remote host
remote_cp_to() {
  REMOTE_OUT=$(scp -P ${FLAGS_ssh_port} $(ssh_connect_settings) \
    -o UserKnownHostsFile=$TMP_KNOWN_HOSTS -i $TMP_PRIVATE_KEY $1 \
    root@$FLAGS_remote:$2)
  return ${PIPESTATUS[0]}
}

# Raw rsync access to the remote
# Use like: remote_rsync_raw -a /path/from/ root@${FLAGS_remote}:/path/to/
remote_rsync_raw() {
  rsync -e "ssh -p ${FLAGS_ssh_port} $(ssh_connect_settings) \
    -o UserKnownHostsFile=$TMP_KNOWN_HOSTS -i $TMP_PRIVATE_KEY" \
    "$@"
}

# Copies a list of remote files specified in file $1 to local location
# $2. Directory paths in $1 are collapsed into $2.
remote_rsync_from() {
  remote_rsync_raw --no-R --files-from="$1" root@${FLAGS_remote}:/ "$2"
}

# Send a directory from $1 to $2 on remote host
#
# Tries to use rsync -a but will fall back to tar if the remote doesn't
# have rsync.
#
# Use like: remote_send_to /build/board/lib/modules/ /lib/modules/
remote_send_to() {
  if [ ! -d "$1" ]; then
    die "$1 must be a directory"
  fi

  if remote_sh rsync --version >/dev/null 2>&1; then
    remote_rsync_raw -a "$1/" root@${FLAGS_remote}:"$2/"
  else
    tar -C "$1" -cz . | remote_sh tar -C "$2" -xz
  fi
}

_remote_sh() {
  REMOTE_OUT=$(ssh -p ${FLAGS_ssh_port} $(ssh_connect_settings) \
    -o UserKnownHostsFile=$TMP_KNOWN_HOSTS -i $TMP_PRIVATE_KEY \
    root@$FLAGS_remote "$@")
  return ${PIPESTATUS[0]}
}

# Wrapper for ssh that runs the command given by the args on the remote host.
# If an ssh error occurs, re-runs the ssh command.
remote_sh() {
  local ssh_status=0
  _remote_sh "$@" || ssh_status=$?
  # 255 indicates an ssh error.
  if [ ${ssh_status} -eq 255 ]; then
    _remote_sh "$@"
  else
    return ${ssh_status}
  fi
}

remote_sh_raw() {
  ssh -p ${FLAGS_ssh_port} $(ssh_connect_settings) \
    -o UserKnownHostsFile=$TMP_KNOWN_HOSTS -i $TMP_PRIVATE_KEY \
    $EXTRA_REMOTE_SH_ARGS root@$FLAGS_remote "$@"
  return $?
}

remote_sh_allow_changed_host_key() {
  rm -f $TMP_KNOWN_HOSTS
  remote_sh "$@"
}

set_up_remote_access() {
  cp $FLAGS_private_key $TMP_PRIVATE_KEY
  chmod 0400 $TMP_PRIVATE_KEY

  # Verify the client is reachable before continuing
  local output
  local status=0
  if output=$(remote_sh -n "true" 2>&1); then
    :
  else
    status=$?
    echo "Could not initiate first contact with remote host"
    echo "$output"
  fi
  return $status
}

# Ask the target what board it is
learn_board() {
  [ -n "${FLAGS_board}" ] && return
  remote_sh -n grep COREOS_RELEASE_BOARD /etc/lsb-release
  FLAGS_board=$(echo "${REMOTE_OUT}" | cut -d '=' -f 2)
  if [ -z "${FLAGS_board}" ]; then
    error "Board required"
    exit 1
  fi
  info "Target reports board is ${FLAGS_board}"
}

learn_arch() {
  [ -n "${FLAGS_arch}" ] && return
  remote_sh uname -m
  FLAGS_arch=$(echo "${REMOTE_OUT}" | sed -e s/armv7l/arm/ -e s/i686/x86/ )
  if [ -z "${FLAGS_arch}" ]; then
    error "Arch required"
    exit 1
  fi
  info "Target reports arch is ${FLAGS_arch}"
}

# Checks whether a remote device has rebooted successfully.
#
# This uses a rapidly-retried SSH connection, which will wait for at most
# about ten seconds. If the network returns an error (e.g. host unreachable)
# the actual delay may be shorter.
#
# Return values:
#   0: The device has rebooted successfully
#   1: The device has not yet rebooted
#   255: Unable to communicate with the device
_check_if_rebooted() {
  (
    # In my tests SSH seems to be waiting rather longer than would be expected
    # from these parameters. These values produce a ~10 second wait.
    # (in a subshell to avoid clobbering the global settings)
    SSH_CONNECT_SETTINGS="$(sed \
      -e 's/\(ConnectTimeout\)=[0-9]*/\1=2/' \
      -e 's/\(ConnectionAttempts\)=[0-9]*/\1=2/' \
      <<<"$(ssh_connect_settings)")"
    remote_sh_allow_changed_host_key -q -- '[ ! -e /tmp/awaiting_reboot ]'
  )
}

# Triggers a reboot on a remote device and waits for it to complete.
#
# This function will not return until the SSH server on the remote device
# is available after the reboot.
#
remote_reboot() {
  info "Rebooting ${FLAGS_remote}..."
  remote_sh "touch /tmp/awaiting_reboot; reboot"
  local start_time=${SECONDS}

  # Wait for five seconds before we start polling
  sleep 5

  # Add a hard timeout of 5 minutes before giving up.
  local timeout=300
  local timeout_expiry=$(( start_time + timeout ))
  while [ ${SECONDS} -lt ${timeout_expiry} ]; do
    # Used to throttle the loop -- see step_remaining_time at the bottom.
    local step_start_time=${SECONDS}

    local status=0
    _check_if_rebooted || status=$?

    local elapsed=$(( SECONDS - start_time ))
    case ${status} in
      0) printf ' %4ds: reboot complete\n' ${elapsed} >&2 ; return 0 ;;
      1) printf ' %4ds: device has not yet shut down\n' ${elapsed} >&2 ;;
      255) printf ' %4ds: can not connect to device\n' ${elapsed} >&2 ;;
      *) die " internal error" ;;
    esac

    # To keep the loop from spinning too fast, delay until it has taken at
    # least five seconds. When we are actively trying SSH connections this
    # should never happen.
    local step_remaining_time=$(( step_start_time + 5 - SECONDS ))
    if [ ${step_remaining_time} -gt 0 ]; then
      sleep ${step_remaining_time}
    fi
  done
  die "Reboot has not completed after ${timeout} seconds; giving up."
}

# Called by clients before exiting.
# Part of the remote_access.sh interface but now empty.
cleanup_remote_access() {
  true
}

remote_access_init() {
  TMP_PRIVATE_KEY=$TMP/private_key
  TMP_KNOWN_HOSTS=$TMP/known_hosts
  if [ -z "$FLAGS_remote" ]; then
    echo "Please specify --remote=<IP-or-hostname> of the Chromium OS instance"
    exit 1
  fi
  set_up_remote_access
}
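Callers source this library, call remote_access_init, and then use remote_sh and friends; ssh_connect_settings() also honours a pre-set SSH_CONNECT_SETTINGS environment variable, so a caller can pin the connection behaviour for a whole run. A sketch with assumed values (the ssh_test.sh script further down is a real, minimal consumer):

    # Placeholder DUT address; the settings string mirrors the "-o key=value " form
    # that ssh_connect_settings() emits.
    SSH_CONNECT_SETTINGS='-o Protocol=2 -o ConnectTimeout=5 -o ConnectionAttempts=1 ' \
        ./ssh_test.sh --remote=172.22.71.222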
@@ -1 +0,0 @@
../platform/crostestutils/run_remote_tests.sh
@@ -216,7 +216,7 @@ fi

 info "Configuring portage in ${BOARD_ROOT}"
 cmds=(
-  "mkdir -p '${BOARD_ROOT}' '${BOARD_PROFILE}' '${BOARD_ETC}/portage/hooks'"
+  "mkdir -p '${BOARD_ROOT}' '${BOARD_PROFILE}'"
   "ROOT='${BOARD_ROOT}' eselect profile set --force '${PORTAGE_PROFILE}'"
 )

@@ -231,9 +231,6 @@ cmds+=(
     '${BOARD_ETC}/make.conf.common'"
   "touch '${BOARD_ROOT}/etc/make.conf.user'"
 )
-for d in "${SCRIPTS_DIR}"/hooks/*; do
-  cmds+=( "ln -sfT '${d}' '${BOARD_ROOT}/etc/portage/hooks/${d##*/}'" )
-done
 sudo_multi "${cmds[@]}"

 sudo_clobber "${BOARD_SETUP}" <<EOF
@@ -1,50 +0,0 @@
#!/bin/sh

# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This should be able to run outside the chroot on a standard Ubuntu system.

BINFILE="$1"

if [ -z "$BINFILE" ]; then
  echo "usage: $0 .../path/to/file.bin"
  exit 1
fi

parted "$BINFILE" unit s print | awk -v BF="$BINFILE" '
  /KERN-|ROOT-/ {
    # Common things
    printf "Partition " $1 " (" $NF "): "
    start=substr($2, 0, length($2) - 1)   # strip trailing "s"
  }

  /KERN-/ {
    cnt=substr($4, 0, length($4) - 1)
    system("dd if=\"" BF "\" bs=512 skip=" start " count=" cnt \
           " 2>/dev/null | openssl dgst -sha256 -binary | openssl base64")
  }

  /ROOT-/ {
    # we have rootfs. find the filesystem size
    "mktemp" | getline tmpfile
    close("mktemp")
    system("dd if=" BF " bs=512 skip=" start \
           " count=400 of=" tmpfile " 2>/dev/null")   # copy superblock
    blkcnt = 0
    cmd = "dumpe2fs " tmpfile " 2>/dev/null | grep \"Block count\" | \
          sed \"s/[^0-9]*//\""
    cmd | getline blkcnt
    close(cmd)
    system("rm -f " tmpfile)
    if (blkcnt > 0) {
      blkcnt *= 8   # 4096 byte blocks -> 512 byte sectors
      system("dd if=\"" BF "\" bs=512 skip=" start " count=" blkcnt \
             " 2>/dev/null | openssl dgst -sha256 -binary | openssl base64")
    } else {
      print "invalid filesystem"
    }
  }

'
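For a single partition, the awk program above boils down to one dd-into-openssl pipeline; a sketch with placeholder sector offsets (read the real start and count from `parted file.bin unit s print`):

    # skip/count below are placeholder 512-byte sector values.
    dd if=file.bin bs=512 skip=20480 count=32768 2>/dev/null \
      | openssl dgst -sha256 -binary | openssl base64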
35
ssh_test.sh
@@ -1,35 +0,0 @@
#!/bin/bash

# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Run remote access test to ensure ssh access to a host is working. Exits with
# a code of 0 if successful and non-zero otherwise. Used by test infrastructure
# scripts.

SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1
. "${SCRIPT_ROOT}/remote_access.sh" || exit 1

cleanup() {
  cleanup_remote_access
  rm -rf "${TMP}"
}

main() {
  cd "${SCRIPTS_DIR}"

  FLAGS "$@" || exit 1
  eval set -- "${FLAGS_ARGV}"

  switch_to_strict_mode

  trap cleanup EXIT

  TMP=$(mktemp -d /tmp/ssh_test.XXXX)

  remote_access_init
}

main $@
240
upload_symbols
@@ -1,240 +0,0 @@
#!/bin/bash
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Script to upload all debug symbols required for crash reporting
# purposes. This script need only be used to upload release builds
# symbols or to debug crashes on non-release builds (in which case try
# to only upload the symbols for those executables involved).

SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1

# Script must be run inside the chroot if not in "testing" mode.
if [[ "$1" != "--testing" ]]; then
  restart_in_chroot_if_needed "$@"
fi

# Flags
DEFINE_string board "$DEFAULT_BOARD" "The board to build packages for."
DEFINE_string breakpad_root "" "Root directory for breakpad symbols."
DEFINE_boolean official_build ${FLAGS_FALSE} "Point to official symbol server."
DEFINE_boolean regenerate ${FLAGS_FALSE} "Regenerate all symbols."
# Default of 290M is relative to current 300M limit the Crash server enforces.
DEFINE_integer strip_cfi 290000000 "Strip CFI data for files above this size."
DEFINE_boolean testing ${FLAGS_FALSE} \
  "Run in testing mode (should be first argument)."
DEFINE_boolean verbose ${FLAGS_FALSE} "Be verbose."
DEFINE_boolean yes ${FLAGS_FALSE} "Answer yes to all prompts."

# Number of seconds to wait before retrying an upload. The delay will double
# for each subsequent retry of the same symbol file.
INITIAL_RETRY_DELAY=1
# Allow up to 7 attempts to upload a symbol file (total delay may be
# 1+2+4+8+16+32=63 seconds).
MAX_RETRIES=6
# Number of total errors, ${TOTAL_ERROR_COUNT}, before retries are no longer
# attempted. This is used to avoid lots of errors causing unreasonable delays.
MAX_TOTAL_ERRORS_FOR_RETRY=3 # don't bother retrying after 3 errors

# Testing parameters. These are only relevant if the "--testing" command-line
# option is passed.
# Specifies how many attempts should pretend to give an error before
# succeeding. NOTE: If this number is greater than ${TEST_MAX_RETRIES}, then
# it will never succeed.
TEST_ERRORS_FOR_THIS_MANY_ATTEMPTS=3
# Overrides ${MAX_RETRIES} in "testing" mode.
TEST_MAX_RETRIES=2
# Overrides ${MAX_TOTAL_ERRORS_FOR_RETRY} in "testing" mode.
TEST_MAX_TOTAL_ERRORS_FOR_RETRY=2

SYM_UPLOAD="sym_upload"

TOTAL_ERROR_COUNT=0

OUT_DIR=$(mktemp -d "/tmp/err.XXXX")

cleanup() {
  rm -rf "${OUT_DIR}"
}

really_upload() {
  if [ ${FLAGS_yes} -eq ${FLAGS_TRUE} ]; then
    return 0
  fi
  echo "Uploading symbols for an entire Chromium OS build is really only "
  echo "necessary for release builds and in a few cases for developers "
  echo "to debug problems. It will take considerable time to run. For "
  echo "developer debugging purposes, consider instead passing specific files "
  echo "to upload."
  read -p "Are you sure you want to upload all build symbols (y/N)? " SURE
  SURE="${SURE:0:1}" # Get just the first character
  if [ "${SURE}" != "y" ]; then
    echo "Ok, better safe than sorry."
    return 1
  fi
  return 0
}

# Upload the given symbol file to given URL.
upload_file() {
  local symbol_file="$1"
  local upload_url="$2"
  local upload_file="${symbol_file}"
  # If the symbols size is too big, strip out the call frame info. The CFI is
  # unnecessary for 32b x86 targets where the frame pointer is used (as all of
  # ours have) and it accounts for over half the size of the symbols uploaded.
  if [ ${FLAGS_strip_cfi} -ne 0 ]; then
    local symbol_size="$(stat -c%s ${symbol_file})"
    if [ ${symbol_size} -gt ${FLAGS_strip_cfi} ]; then
      if [ ${FLAGS_verbose} -eq ${FLAGS_TRUE} ]; then
        warn "Stripping CFI for ${symbol_file} due to size ${symbol_size} > \
${FLAGS_strip_cfi}."
      fi
      upload_file="${OUT_DIR}/stripped.sym"
      sed '/^STACK CFI/d' < "${symbol_file}" > "${upload_file}"
    fi
  fi
  if [ ${FLAGS_verbose} -eq ${FLAGS_TRUE} ]; then
    info "Uploading ${symbol_file}"
  fi
  local upload_size="$(stat -c%s ${upload_file})"
  if [ ${upload_size} -gt ${FLAGS_strip_cfi} ]; then
    # Emit an annotation in order to flag the current step in buildbots.
    # NOTE: Must be on a line by itself.
    echo "@@@STEP_WARNINGS@@@" >&2
    error "Upload file ${upload_file} is awfully large, risking rejection by \
symbol server (${upload_size} > ${FLAGS_strip_cfi})"
    let ++TOTAL_ERROR_COUNT
  fi

  # Upload the symbol file, allowing for ${MAX_RETRIES} number of retries
  # before giving an error. However, don't retry if the total errors have
  # reached ${MAX_TOTAL_ERRORS_FOR_RETRY}.
  local success=0
  local attempts=0
  local retry_delay=${INITIAL_RETRY_DELAY}
  while [ ${attempts} -le ${MAX_RETRIES} ]; do
    if [ ${attempts} -gt 0 ]; then
      if [ ${TOTAL_ERROR_COUNT} -ge ${MAX_TOTAL_ERRORS_FOR_RETRY} ]; then
        warn "Not retrying to upload symbols in ${symbol_file} \
due to too many total errors"
        break
      fi
      warn "Retry #${attempts} to upload symbols in ${symbol_file} \
(sleeping ${retry_delay} seconds)"
      sleep "${retry_delay}"
      let retry_delay=retry_delay*2
    fi
    # In testing mode show command that would be run.
    if [ ${FLAGS_testing} -eq ${FLAGS_TRUE} ]; then
      echo "TEST: ${SYM_UPLOAD}" "${upload_file}" "${upload_url}"
    fi
    # Run the sym_upload command, redirecting its output to files so we can
    # check them.
    {
      if [ ${FLAGS_testing} -eq ${FLAGS_FALSE} ]; then
        "${SYM_UPLOAD}" "${upload_file}" "${upload_url}"
      elif [ ${attempts} -lt ${TEST_ERRORS_FOR_THIS_MANY_ATTEMPTS} ]; then
        # Pretend to fail with an error message.
        (echo "INFO: Testing info message";
         echo "ERROR: Testing error message" >&2;
         exit 1)
      else
        # Pretend to succeed.
        (echo "Successfully sent the symbol file.")
      fi
    } > "${OUT_DIR}/stdout" 2> "${OUT_DIR}/stderr"
    # Check if sym_upload command succeeded.
    if grep -q "Successfully sent the symbol file." "${OUT_DIR}/stdout"; then
      success=1
      break
    fi
    let ++attempts
  done
  if [ ${success} -ne 1 ]; then
    error "Unable to upload symbols in ${symbol_file}:"
    cat "${OUT_DIR}/stderr" >&2
    let ++TOTAL_ERROR_COUNT
    return 0
  fi

  if [ ${FLAGS_verbose} -eq ${FLAGS_TRUE} ]; then
    size=$(wc -c "${upload_file}" | cut -d' ' -f1)
    info "Successfully uploaded ${size}B."
  fi
  return 0
}

main() {
  trap cleanup EXIT

  # Parse command line
  FLAGS_HELP="usage: $0 [flags] [<files...>]"
  FLAGS "$@" || exit 1
  eval set -- "${FLAGS_ARGV}"

  if [ ${FLAGS_testing} -eq ${FLAGS_TRUE} ]; then
    info "Running in testing mode:"
    info " MAX_RETRIES=${TEST_ERRORS_FOR_THIS_MANY_ATTEMPTS}"
    MAX_RETRIES=${TEST_MAX_RETRIES}
    info " MAX_TOTAL_ERRORS_FOR_RETRY=${TEST_MAX_TOTAL_ERRORS_FOR_RETRY}"
    MAX_TOTAL_ERRORS_FOR_RETRY=${TEST_MAX_TOTAL_ERRORS_FOR_RETRY}
  fi

  [ -n "$FLAGS_board" ] || die_notrace "--board is required."

  SYSROOT="/build/${FLAGS_board}"

  local upload_url=""
  if [ $FLAGS_official_build -eq $FLAGS_TRUE ]; then
    upload_url="http://clients2.google.com/cr/symbol"
  else
    upload_url="http://clients2.google.com/cr/staging_symbol"
    warn "This is an unofficial build, uploading to staging server."
  fi
  info "Uploading symbols to ${upload_url} from ${SYSROOT}."

  DEFAULT_BREAKPAD_ROOT="${SYSROOT}/usr/lib/debug/breakpad"
  if [ -z "${FLAGS_breakpad_root}" ]; then
    FLAGS_breakpad_root="${DEFAULT_BREAKPAD_ROOT}"
  else
    if [ ${FLAGS_regenerate} -eq ${FLAGS_TRUE} ]; then
      warn "Assuming --noregenerate when --breakpad_root is specified"
      FLAGS_regenerate=${FLAGS_FALSE}
    fi
  fi

  if [ -z "${FLAGS_ARGV}" ]; then
    if [ ${FLAGS_regenerate} -eq ${FLAGS_TRUE} ]; then
      really_upload || exit 1
      info "Clearing ${DEFAULT_BREAKPAD_ROOT}"
      sudo rm -rf "${DEFAULT_BREAKPAD_ROOT}"
      info "Generating all breakpad symbol files."
      local verbosity=""
      local generate_script="${SCRIPTS_DIR}/cros_generate_breakpad_symbols"
      [ ${FLAGS_verbose} -eq ${FLAGS_TRUE} ] && verbosity="--verbose"
      if ! "${generate_script}" --board=${FLAGS_board} ${verbosity}; then
        error "Some errors while generating symbols; uploading anyway"
        let ++TOTAL_ERROR_COUNT
      fi
    fi

    info "Uploading all breakpad symbol files."
    for sym_file in $(find "${FLAGS_breakpad_root}" -name \*.sym); do
      # sleep for 200ms to avoid DoS'ing symbol server (crosbug.com/26596)
      sleep .2
      upload_file "${sym_file}" "${upload_url}"
    done
  else
    error "Unexpected args ${FLAGS_ARGV}"
  fi

  if [ ${TOTAL_ERROR_COUNT} -ne 0 ]; then
    die "Encountered ${TOTAL_ERROR_COUNT} problem(s)"
  fi
  return 0
}

main "$@"
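With the defaults above (INITIAL_RETRY_DELAY=1, MAX_RETRIES=6, delay doubling before each retry), the worst-case extra wait per symbol file works out as the comment states; a quick sketch of the arithmetic:

    # Sums the sleeps taken before retries 1 through 6.
    delay=1; total=0
    for attempt in 1 2 3 4 5 6; do
      total=$(( total + delay ))
      delay=$(( delay * 2 ))
    done
    echo "${total}s"    # 1+2+4+8+16+32 = 63 seconds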