Mirror of https://github.com/flatcar/scripts.git, synced 2025-09-24 15:11:19 +02:00

commit 13f82d8da1
Merge branch 'master' of ssh://gitrw.chromium.org:9222/crosutils
@@ -13,12 +13,13 @@ sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from cros_build_lib import RunCommand, Info, Warning, ReinterpretPathForChroot

_KVM_PID_FILE = '/tmp/harness_pid'
_SCRIPTS_DIR = os.path.join(os.path.dirname(__file__), '..')
_FULL_VDISK_SIZE = 6072
_FULL_STATEFULFS_SIZE = 2048

# Globals to communicate options to unit tests.
global base_image_path
global board
global remote
global target_image_path

_VERIFY_SUITE = 'suite_Smoke'
@@ -26,6 +27,20 @@ _VERIFY_SUITE = 'suite_Smoke'
class AUTest(object):
  """Abstract interface that defines an Auto Update test."""

  def setUp(self):
    unittest.TestCase.setUp(self)
    # Set these up as they are used often.
    self.crosutils = os.path.join(os.path.dirname(__file__), '..')
    self.crosutilsbin = os.path.join(os.path.dirname(__file__))

  def GetStatefulChangeFlag(self, stateful_change):
    """Returns the flag to pass to image_to_vm for the stateful change."""
    stateful_change_flag = ''
    if stateful_change:
      stateful_change_flag = '--stateful_update_flag=%s' % stateful_change

    return stateful_change_flag

  def PrepareBase(self):
    """Prepares target with base_image_path."""
    pass
@@ -48,6 +63,11 @@ class AUTest(object):
    pass

  def testFullUpdateKeepStateful(self):
    """Tests if we can update normally.

    This test checks that we can update by updating the stateful partition
    rather than wiping it.
    """
    # Prepare and verify the base image has been prepared correctly.
    self.PrepareBase()
    self.VerifyImage()
@@ -62,7 +82,14 @@ class AUTest(object):
    self.UpdateImage(base_image_path)
    self.VerifyImage()

  def testFullUpdateWipeStateful(self):
  # TODO(sosa): Re-enable once we have a good way of checking for version
  # compatibility.
  def NotestFullUpdateWipeStateful(self):
    """Tests if we can update after cleaning the stateful partition.

    This test checks that we can update successfully after wiping the
    stateful partition.
    """
    # Prepare and verify the base image has been prepared correctly.
    self.PrepareBase()
    self.VerifyImage()
@@ -78,6 +105,34 @@ class AUTest(object):
    self.VerifyImage()


class RealAUTest(unittest.TestCase, AUTest):
  """Test harness for updating real images."""

  def setUp(self):
    AUTest.setUp(self)

  def UpdateImage(self, image_path, stateful_change='old'):
    """Updates a remote image using image_to_live.sh."""
    stateful_change_flag = self.GetStatefulChangeFlag(stateful_change)

    RunCommand([
        '%s/image_to_live.sh' % self.crosutils,
        '--image=%s' % image_path,
        '--remote=%s' % remote,
        stateful_change_flag,
        '--verify',
        ], enter_chroot=False)


  def VerifyImage(self):
    """Verifies an image using run_remote_tests.sh with verification suite."""
    RunCommand([
        '%s/run_remote_tests.sh' % self.crosutils,
        '--remote=%s' % remote,
        _VERIFY_SUITE,
        ], error_ok=False, enter_chroot=False)


class VirtualAUTest(unittest.TestCase, AUTest):
  """Test harness for updating virtual machines."""
  vm_image_path = None
@@ -95,7 +150,7 @@ class VirtualAUTest(unittest.TestCase, AUTest):

  def setUp(self):
    """Unit test overridden method. Is called before every test."""

    AUTest.setUp(self)
    self._KillExistingVM(_KVM_PID_FILE)

  def PrepareBase(self):
@@ -105,7 +160,7 @@ class VirtualAUTest(unittest.TestCase, AUTest):
                                           base_image_path))
    if not os.path.exists(self.vm_image_path):
      Info('Qemu image not found, creating one.')
      RunCommand(['%s/image_to_vm.sh' % _SCRIPTS_DIR,
      RunCommand(['%s/image_to_vm.sh' % self.crosutils,
                  '--full',
                  '--from %s' % ReinterpretPathForChroot(
                      os.path.dirname(base_image_path)),
@@ -120,12 +175,9 @@ class VirtualAUTest(unittest.TestCase, AUTest):

  def UpdateImage(self, image_path, stateful_change='old'):
    """Updates VM image with image_path."""
    stateful_change_flag = self.GetStatefulChangeFlag(stateful_change)

    stateful_change_flag = ''
    if stateful_change:
      stateful_change_flag = '--stateful_flags=%s' % stateful_change

    RunCommand(['%s/cros_run_vm_update' % os.path.dirname(__file__),
    RunCommand(['%s/cros_run_vm_update' % self.crosutilsbin,
                '--update_image_path=%s' % image_path,
                '--vm_image_path=%s' % self.vm_image_path,
                '--snapshot',
@@ -136,18 +188,15 @@ class VirtualAUTest(unittest.TestCase, AUTest):

  def VerifyImage(self):
    """Runs vm smoke suite to verify image."""

    # image_to_live already verifies lsb-release matching. This is just
    # for additional steps.

    # TODO(sosa): Compare output with results of base image.
    RunCommand(['%s/cros_run_vm_test' % os.path.dirname(__file__),
    RunCommand(['%s/cros_run_vm_test' % self.crosutilsbin,
                '--image_path=%s' % self.vm_image_path,
                '--snapshot',
                '--persist',
                '--kvm_pid=%s' % _KVM_PID_FILE,
                '--test_case=%s' % _VERIFY_SUITE,
                ], error_ok=True, enter_chroot=False)
                ], error_ok=False, enter_chroot=False)


if __name__ == '__main__':
@@ -155,9 +204,13 @@ if __name__ == '__main__':
  parser.add_option('-b', '--base_image',
                    help='path to the base image.')
  parser.add_option('-t', '--target_image',
                    help='path to the target image')
                    help='path to the target image.')
  parser.add_option('-r', '--board',
                    help='board for the images')
                    help='board for the images.')
  parser.add_option('-p', '--type', default='vm',
                    help='type of test to run: [vm, real]. Default: vm.')
  parser.add_option('-m', '--remote',
                    help='Remote address for real test.')
  # Set the usage to include flags.
  parser.set_usage(parser.format_help())
  # Parse existing sys.argv so we can pass rest to unittest.main.
@@ -176,4 +229,21 @@ if __name__ == '__main__':
  if not board:
    parser.error('Need board to convert base image to vm.')

  unittest.main()
  return_code = 0

  # Only run the test harness we care about.
  if options.type == 'vm':
    suite = unittest.TestLoader().loadTestsFromTestCase(VirtualAUTest)
    return_code = unittest.TextTestRunner(verbosity=2).run(suite)
  elif options.type == 'real':
    if not options.remote:
      parser.error('Real tests require a remote test machine.')
    else:
      remote = options.remote

      suite = unittest.TestLoader().loadTestsFromTestCase(RealAUTest)
      return_code = unittest.TextTestRunner(verbosity=2).run(suite)
  else:
    parser.error('Could not parse harness type %s.' % options.type)

  sys.exit(return_code)
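For reference, a minimal sketch of how the harness above is invoked; the flags come from its option parser, and bin/cros_au_test_harness is the path used by bin/ctest.py later in this commit (image paths and the board name are placeholders):

# Run the VM variant of the AU test harness against two local images.
./bin/cros_au_test_harness --base_image=/path/to/base_image.bin \
    --target_image=/path/to/target_image.bin --board=x86-generic --type=vm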
@@ -28,8 +28,9 @@ if [ $# -lt 2 ]; then
  exit 1
fi

BOOT_DESC_FILE="${1}/boot.desc"
IMAGE="${1}/${2}"
IMAGE_DIR="$(readlink -f "${1}")"
BOOT_DESC_FILE="${IMAGE_DIR}/boot.desc"
IMAGE="${IMAGE_DIR}/${2}"
shift
shift
FLAG_OVERRIDES="${@}"
@@ -99,6 +100,9 @@ DEFINE_string espfs_mountpoint "/tmp/espfs" \
DEFINE_boolean use_dev_keys ${FLAGS_FALSE} \
  "Use developer keys for signing. (Default: false)"

# TODO(sosa): Remove once known images no longer use this in their config.
DEFINE_string arm_extra_bootargs "" "DEPRECATED FLAG. Do not use."

# Parse the boot.desc and any overrides
eval set -- "${BOOT_DESC} ${FLAG_OVERRIDES}"
FLAGS "${@}" || exit 1
@@ -233,6 +237,16 @@ make_image_bootable() {
    -s "${FLAGS_statefulfs_mountpoint}"
}

# Use default of current image location if the output dir doesn't exist.
if [ ! -d ${FLAGS_output_dir} ]; then
  warn "Output dir not found, using ${IMAGE_DIR}."
  FLAGS_output_dir="${IMAGE_DIR}"
  FLAGS_rootfs_hash="${IMAGE_DIR}/rootfs.hash"
  FLAGS_rootfs_mountpoint="${IMAGE_DIR}/rootfs_dir"
  FLAGS_statefulfs_mountpoint="${IMAGE_DIR}/stateful_dir"
  FLAGS_espfs_mountpoint="${IMAGE_DIR}/esp"
fi

# Create the directories if they don't exist.
mkdir -p ${FLAGS_rootfs_mountpoint}
mkdir -p ${FLAGS_statefulfs_mountpoint}
@@ -9,7 +9,7 @@
. "$(dirname $0)/../common.sh"
. "$(dirname $0)/../lib/cros_vm_lib.sh"

DEFINE_string stateful_flags "" "Flags to pass to stateful update." s
DEFINE_string stateful_update_flag "" "Flags to pass to stateful update." s
DEFINE_string update_image_path "" "Path of the image to update to." u
DEFINE_string vm_image_path "" "Path of the VM image to update from." v

@@ -30,7 +30,7 @@ start_kvm "${FLAGS_vm_image_path}"
$(dirname $0)/../image_to_live.sh \
  --remote=${HOSTNAME} \
  --ssh_port=${FLAGS_ssh_port} \
  --stateful_update_flag=${stateful_flags} \
  --stateful_update_flag=${FLAGS_stateful_update_flag} \
  --verify \
  --image=$(readlink -f ${FLAGS_update_image_path})
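For reference, a minimal sketch of a direct invocation of this script, matching the way VirtualAUTest calls it above (paths are placeholders):

# Flags mirror the VirtualAUTest.UpdateImage call in the harness above.
./bin/cros_run_vm_update --update_image_path=/path/to/chromiumos_image.bin \
    --vm_image_path=/path/to/vm_image.bin --snapshot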
bin/ctest.py (new executable file, 195 lines)
@@ -0,0 +1,195 @@
#!/usr/bin/python
#
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Wrapper for tests that are run on builders."""

import fileinput
import optparse
import os
import sys
import urllib

sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from cros_build_lib import Info, RunCommand, ReinterpretPathForChroot

_IMAGE_TO_EXTRACT = 'chromiumos_test_image.bin'


def ModifyBootDesc(download_folder, redirect_file=None):
  """Modifies the boot description of a downloaded image to work with path.

  The default boot.desc from another system is specific to the directory
  it was created in. This modifies the boot description to be compatible
  with the download folder.

  Args:
    download_folder: Absolute path to the download folder.
    redirect_file: For testing. Where to copy new boot desc.
  """
  boot_desc_path = os.path.join(download_folder, 'boot.desc')
  in_chroot_folder = ReinterpretPathForChroot(download_folder)

  for line in fileinput.input(boot_desc_path, inplace=1):
    # Has to be done here to get changes to sys.stdout from fileinput.input.
    if not redirect_file:
      redirect_file = sys.stdout
    split_line = line.split('=')
    if len(split_line) > 1:
      var_part = split_line[0]
      potential_path = split_line[1].replace('"', '').strip()

      if potential_path.startswith('/home') and not 'output_dir' in var_part:
        new_path = os.path.join(in_chroot_folder,
                                os.path.basename(potential_path))
        new_line = '%s="%s"' % (var_part, new_path)
        Info('Replacing line %s with %s' % (line, new_line))
        redirect_file.write('%s\n' % new_line)
        continue
      elif 'output_dir' in var_part:
        # Special case for output_dir.
        new_line = '%s="%s"' % (var_part, in_chroot_folder)
        Info('Replacing line %s with %s' % (line, new_line))
        redirect_file.write('%s\n' % new_line)
        continue

    # Line does not need to be modified.
    redirect_file.write(line)

  fileinput.close()


def GetLatestZipUrl(board, channel, latest_url_base, zip_server_base):
  """Returns the url of the latest image zip for the given arguments.

  Args:
    board: board for the image zip.
    channel: channel for the image zip.
    latest_url_base: base url for latest links.
    zip_server_base: base url for zipped images.
  """
  # Grab the latest image info.
  latest_file_url = os.path.join(latest_url_base, channel,
                                 'LATEST-%s' % board)
  latest_image_file = urllib.urlopen(latest_file_url)
  latest_image = latest_image_file.read()
  latest_image_file.close()

  # Convert bin.gz into zip.
  latest_image = latest_image.replace('.bin.gz', '.zip')
  version = latest_image.split('-')[1]
  zip_base = os.path.join(zip_server_base, channel, board)
  return os.path.join(zip_base, version, latest_image)


def GrabZipAndExtractImage(zip_url, download_folder, image_name) :
  """Downloads the zip and extracts the given image.

  Doesn't re-download if matching version found already in download folder.
  Args:
    zip_url - url for the image.
    download_folder - download folder to store zip file and extracted images.
    image_name - name of the image to extract from the zip file.
  """
  zip_path = os.path.join(download_folder, 'image.zip')
  versioned_url_path = os.path.join(download_folder, 'download_url')
  found_cached = False

  if os.path.exists(versioned_url_path):
    fh = open(versioned_url_path)
    version_url = fh.read()
    fh.close()

    if version_url == zip_url and os.path.exists(os.path.join(download_folder,
                                                              image_name)):
      Info('Using cached %s' % image_name)
      found_cached = True

  if not found_cached:
    Info('Downloading %s' % zip_url)
    RunCommand(['rm', '-rf', download_folder], print_cmd=False)
    os.mkdir(download_folder)
    urllib.urlretrieve(zip_url, zip_path)

    # Using unzip because python implemented unzip in native python so
    # extraction is really slow.
    Info('Unzipping image %s' % image_name)
    RunCommand(['unzip', '-d', download_folder, zip_path],
               print_cmd=False, error_message='Failed to download %s' % zip_url)

    ModifyBootDesc(download_folder)

    # Put url in version file so we don't have to do this every time.
    fh = open(versioned_url_path, 'w+')
    fh.write(zip_url)
    fh.close()


def RunAUTestHarness(board, channel, latest_url_base, zip_server_base):
  """Runs the auto update test harness.

  The auto update test harness encapsulates testing the auto-update mechanism
  for the latest image against the latest official image from the channel. This
  also tests images with suite_Smoke (built-in as part of its verification
  process).

  Args:
    board: the board for the latest image.
    channel: the channel to run the au test harness against.
    latest_url_base: base url for getting latest links.
    zip_server_base: base url for zipped images.
  """
  crosutils_root = os.path.join(os.path.dirname(__file__), '..')
  download_folder = os.path.abspath('latest_download')
  zip_url = GetLatestZipUrl(board, channel, latest_url_base, zip_server_base)
  GrabZipAndExtractImage(zip_url, download_folder, _IMAGE_TO_EXTRACT)

  # Tests go here.
  latest_image = RunCommand(['./get_latest_image.sh', '--board=%s' % board],
                            cwd=crosutils_root, redirect_stdout=True,
                            print_cmd=True)

  RunCommand(['bin/cros_au_test_harness',
              '--base_image=%s' % os.path.join(download_folder,
                                               _IMAGE_TO_EXTRACT),
              '--target_image=%s' % latest_image,
              '--board=%s' % board], cwd=crosutils_root)


def main():
  parser = optparse.OptionParser()
  parser.add_option('-b', '--board',
                    help='board for the image to compare against.')
  parser.add_option('-c', '--channel',
                    help='channel for the image to compare against.')
  parser.add_option('-l', '--latestbase',
                    help='Base url for latest links.')
  parser.add_option('-z', '--zipbase',
                    help='Base url for hosted images.')
  # Set the usage to include flags.
  parser.set_usage(parser.format_help())
  (options, args) = parser.parse_args()

  if args:
    parser.error('Extra args found %s.' % args)

  if not options.board:
    parser.error('Need board for image to compare against.')

  if not options.channel:
    parser.error('Need channel for image to compare against.')

  if not options.latestbase:
    parser.error('Need latest url base to get images.')

  if not options.zipbase:
    parser.error('Need zip url base to get images.')

  RunAUTestHarness(options.board, options.channel, options.latestbase,
                   options.zipbase)


if __name__ == '__main__':
  main()
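For reference, a minimal invocation sketch of this wrapper, reusing the placeholder values from the unit test below (the URL bases are site-specific and not defined by this commit):

# Fetch the latest official image for a board/channel and run the AU harness.
./bin/ctest.py --board=test-board --channel=test-channel \
    --latestbase=http://test-latest/TestOS --zipbase=http://test-zips/archive/TestOS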
bin/ctest_unittest.py (new executable file, 165 lines)
@@ -0,0 +1,165 @@
#!/usr/bin/python
#
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unit tests for ctest."""

import ctest
import mox
import os
import unittest
import urllib

_TEST_BOOT_DESC = """
  --arch="x86"
  --output_dir="/home/chrome-bot/0.8.70.5-a1"
  --espfs_mountpoint="/home/chrome-bot/0.8.70.5-a1/esp"
  --enable_rootfs_verification
"""

class CrosTestTest(mox.MoxTestBase):
  """Test class for CTest."""

  def setUp(self):
    mox.MoxTestBase.setUp(self)
    self.board = 'test-board'
    self.channel = 'test-channel'
    self.version = '1.2.3.4.5'
    self.revision = '7ghfa9999-12345'
    self.image_name = 'TestOS-%s-%s' % (self.version, self.revision)
    self.download_folder = 'test_folder'
    self.latestbase = 'http://test-latest/TestOS'
    self.zipbase = 'http://test-zips/archive/TestOS'
    self.image_url = '%s/%s/%s/%s/%s.zip' % (self.zipbase, self.channel,
                                             self.board, self.version,
                                             self.image_name)

  def testModifyBootDesc(self):
    """Tests to make sure we correctly modify a boot desc."""
    in_chroot_path = ctest.ReinterpretPathForChroot(os.path.abspath(
        self.download_folder))
    self.mox.StubOutWithMock(__builtins__, 'open')
    self.mox.StubOutWithMock(ctest.fileinput, 'input')
    m_file = self.mox.CreateMock(file)

    mock_file = _TEST_BOOT_DESC.splitlines(True)
    ctest.fileinput.input('%s/%s' % (os.path.abspath(self.download_folder),
                                     'boot.desc'),
                          inplace=1).AndReturn(mock_file)

    m_file.write('\n')
    m_file.write('  --arch="x86"\n')
    m_file.write('  --output_dir="%s"\n' % in_chroot_path)
    m_file.write('  --espfs_mountpoint="%s/%s"\n' % (in_chroot_path, 'esp'))
    m_file.write('  --enable_rootfs_verification\n')

    self.mox.ReplayAll()
    ctest.ModifyBootDesc(os.path.abspath(self.download_folder), m_file)
    self.mox.VerifyAll()


  def testGetLatestZipUrl(self):
    """Test case that tests GetLatestZipUrl with test urls."""
    self.mox.StubOutWithMock(urllib, 'urlopen')
    m_file = self.mox.CreateMock(file)

    urllib.urlopen('%s/%s/LATEST-%s' % (self.latestbase, self.channel,
                                        self.board)).AndReturn(m_file)
    m_file.read().AndReturn('%s.bin.gz' % self.image_name)
    m_file.close()

    self.mox.ReplayAll()
    self.assertEquals(ctest.GetLatestZipUrl(self.board, self.channel,
                                            self.latestbase, self.zipbase),
                      self.image_url)
    self.mox.VerifyAll()

  def testGrabZipAndExtractImageUseCached(self):
    """Test case where cache holds our image."""
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(__builtins__, 'open')
    m_file = self.mox.CreateMock(file)

    os.path.exists('%s/%s' % (
        self.download_folder, 'download_url')).AndReturn(True)

    open('%s/%s' % (self.download_folder, 'download_url')).AndReturn(m_file)
    m_file.read().AndReturn(self.image_url)
    m_file.close()

    os.path.exists('%s/%s' % (
        self.download_folder, ctest._IMAGE_TO_EXTRACT)).AndReturn(True)

    self.mox.ReplayAll()
    ctest.GrabZipAndExtractImage(self.image_url, self.download_folder,
                                 ctest._IMAGE_TO_EXTRACT)
    self.mox.VerifyAll()

  def CommonDownloadAndExtractImage(self):
    """Common code to mock downloading image, unzipping it and setting url."""
    zip_path = os.path.join(self.download_folder, 'image.zip')
    m_file = self.mox.CreateMock(file)

    ctest.RunCommand(['rm', '-rf', self.download_folder], print_cmd=False)
    os.mkdir(self.download_folder)
    urllib.urlretrieve(self.image_url, zip_path)
    ctest.RunCommand(['unzip', '-d', self.download_folder, zip_path],
                     print_cmd=False, error_message=mox.IgnoreArg())

    ctest.ModifyBootDesc(self.download_folder)

    open('%s/%s' % (self.download_folder, 'download_url'),
         'w+').AndReturn(m_file)
    m_file.write(self.image_url)
    m_file.close()

    self.mox.ReplayAll()
    ctest.GrabZipAndExtractImage(self.image_url, self.download_folder,
                                 ctest._IMAGE_TO_EXTRACT)
    self.mox.VerifyAll()

  def testGrabZipAndExtractImageNoCache(self):
    """Test case where download_url doesn't exist."""
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(os, 'mkdir')
    self.mox.StubOutWithMock(__builtins__, 'open')
    self.mox.StubOutWithMock(ctest, 'RunCommand')
    self.mox.StubOutWithMock(urllib, 'urlretrieve')
    self.mox.StubOutWithMock(ctest, 'ModifyBootDesc')

    m_file = self.mox.CreateMock(file)

    os.path.exists('%s/%s' % (
        self.download_folder, 'download_url')).AndReturn(False)

    self.CommonDownloadAndExtractImage()


  def testGrabZipAndExtractImageWrongCache(self):
    """Test case where download_url exists but doesn't match our url."""
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(os, 'mkdir')
    self.mox.StubOutWithMock(__builtins__, 'open')
    self.mox.StubOutWithMock(ctest, 'RunCommand')
    self.mox.StubOutWithMock(urllib, 'urlretrieve')
    self.mox.StubOutWithMock(ctest, 'ModifyBootDesc')

    m_file = self.mox.CreateMock(file)

    os.path.exists('%s/%s' % (
        self.download_folder, 'download_url')).AndReturn(True)

    open('%s/%s' % (self.download_folder, 'download_url')).AndReturn(m_file)
    m_file.read().AndReturn(self.image_url)
    m_file.close()

    os.path.exists('%s/%s' % (
        self.download_folder, ctest._IMAGE_TO_EXTRACT)).AndReturn(False)

    self.CommonDownloadAndExtractImage()


if __name__ == '__main__':
  unittest.main()
build_image (10 changed lines)
@@ -69,13 +69,13 @@ DEFINE_string usb_disk /dev/sdb3 \

DEFINE_boolean enable_rootfs_verification ${FLAGS_TRUE} \
  "Default all bootloaders to use kernel-based root fs integrity checking."
DEFINE_integer verity_error_behavior 2 \
  "Kernel verified boot error behavior (0: I/O errors, 1: reboot, 2: nothing) \
Default: 2"
DEFINE_integer verity_error_behavior 1 \
  "Kernel verified boot error behavior (0: I/O errors, 1: panic, 2: nothing) \
Default: 1"
DEFINE_integer verity_depth 1 \
  "Kernel verified boot hash tree depth. Default: 1"
DEFINE_integer verity_max_ios 1024 \
  "Number of outstanding I/O operations dm-verity caps at. Default: 1024"
DEFINE_integer verity_max_ios -1 \
  "Number of outstanding I/O operations dm-verity caps at. Default: -1"
DEFINE_string verity_algorithm "sha1" \
  "Cryptographic hash algorithm used for kernel vboot. Default : sha1"
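As an aside, shflags DEFINE_* flags such as the ones above can be overridden on the command line; a hypothetical invocation that restores the previous defaults shown in this diff:

# Override the new verity defaults with the old values (2 and 1024).
./build_image --verity_error_behavior=2 --verity_max_ios=1024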
@@ -19,14 +19,14 @@ HOSTNAME=$(hostname)
# Major/minor versions.
# Primarily for product marketing.
export CHROMEOS_VERSION_MAJOR=0
export CHROMEOS_VERSION_MINOR=8
export CHROMEOS_VERSION_MINOR=9

# Branch number.
# Increment by 1 in a new release branch.
# Increment by 2 in trunk after making a release branch.
# Does not reset on a major/minor change (always increases).
# (Trunk is always odd; branches are always even).
export CHROMEOS_VERSION_BRANCH=75
export CHROMEOS_VERSION_BRANCH=79

# Patch number.
# Increment by 1 each release on a branch.

@@ -56,8 +56,12 @@ cleanup() {
extract_partition_to_temp_file() {
  local filename="$1"
  local partition="$2"
  local temp_file=$(mktemp /tmp/generate_update_payload.XXXXXX)

  local temp_file="$3"
  if [ -z "$temp_file" ]; then
    temp_file=$(mktemp /tmp/cros_generate_update_payload.XXXXXX)
    echo "$temp_file"
  fi

  local offset=$(partoffset "${filename}" ${partition})  # 512-byte sectors
  local length=$(partsize "${filename}" ${partition})  # 512-byte sectors
  local bs=512
@@ -71,7 +75,6 @@ extract_partition_to_temp_file() {
    warn "partition offset or length not at 2MiB boundary"
  fi
  dd if="$filename" of="$temp_file" bs=$bs count="$length" skip="$offset"
  echo "$temp_file"
}

patch_kernel() {
@@ -92,6 +95,25 @@ patch_kernel() {
  STATE_LOOP_DEV=""
}

extract_kern_root() {
  local bin_file="$1"
  local kern_out="$2"
  local root_out="$3"

  if [ -z "$kern_out" ]; then
    die "missing kernel output filename"
  fi
  if [ -z "$root_out" ]; then
    die "missing root output filename"
  fi

  extract_partition_to_temp_file "$bin_file" 2 "$kern_out"
  if [ "$FLAGS_patch_kernel" -eq "$FLAGS_TRUE" ]; then
    patch_kernel "$bin_file" "$kern_out"
  fi
  extract_partition_to_temp_file "$bin_file" 3 "$root_out"
}

DEFINE_string image "" "The image that should be sent to clients."
DEFINE_string src_image "" "Optional: a source image. If specified, this makes\
a delta update."
@@ -100,6 +122,8 @@ DEFINE_string output "" "Output file"
DEFINE_boolean patch_kernel "$FLAGS_FALSE" "Whether or not to patch the kernel \
with the patch from the stateful partition (default: false)"
DEFINE_string private_key "" "Path to private key in .pem format."
DEFINE_boolean extract "$FLAGS_FALSE" "If set, extract old/new kernel/rootfs \
to [old|new]_[kern|root].dat. Useful for debugging (default: false)"

# Parse command line
FLAGS "$@" || exit 1
@@ -114,6 +138,17 @@ fi

locate_gpt

if [ "$FLAGS_extract" -eq "$FLAGS_TRUE" ]; then
  if [ -n "$FLAGS_src_image" ]; then
    extract_kern_root "$FLAGS_src_image" old_kern.dat old_root.dat
  fi
  if [ -n "$FLAGS_image" ]; then
    extract_kern_root "$FLAGS_image" new_kern.dat new_root.dat
  fi
  echo Done extracting kernel/root
  exit 0
fi

DELTA=$FLAGS_TRUE
[ -n "$FLAGS_output" ] || die \
  "Error: you must specify an output filename with --output FILENAME"
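For context, a hypothetical use of the new --extract flow added above (the script name cros_generate_update_payload is inferred from the mktemp prefix in this diff; image paths are placeholders):

# Dump kernel and rootfs partitions from both images for debugging, then exit.
./cros_generate_update_payload --image=new_image.bin --src_image=old_image.bin --extract
# Produces new_kern.dat/new_root.dat and old_kern.dat/old_root.dat.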
@@ -1,20 +0,0 @@
#!/bin/bash

# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

TEST_DIR="${ROOT_FS_DIR}/usr/local/autotest/site_tests/factory_WriteGBB"

pushd ${TEST_DIR} 1> /dev/null

GBB_FILE="gbb_${BOARD}*"
FIRST_GBB_FILE=$(ls $GBB_FILE 2> /dev/null | head -1)
if [ -e "${FIRST_GBB_FILE}" -o "${BOARD}" = "x86-generic" ]; then
  # Remove GBB files belonging to other boards
  ls gbb* 2> /dev/null | grep -v gbb_${BOARD} | xargs rm -f
else
  echo "No GBB file found at: ${GBB_FILE}"
fi

popd 1> /dev/null
@@ -40,6 +40,7 @@ Basic operation:

import codecs
import copy
import errno
import multiprocessing
import os
import Queue
@@ -1308,6 +1309,12 @@ def EmergeWorker(task_queue, job_queue, emerge, package_db):
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    target = task_queue.get()
    if not target:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(target)
      return
    db_pkg = package_db[target]
    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
@@ -1412,14 +1419,33 @@ class JobPrinter(object):

def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""
  SetupWorkerSignals()

  def ExitHandler(signum, frame):
    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations is a map indicating the position we are at in each file.
  # It starts off empty, but is set by the various Print jobs as we go along
  # to indicate where we left off in each file.
  seek_locations = {}
  while True:
    job = queue.get()
    if job:
      job.Print(seek_locations)
    else:
      break
    try:
      job = queue.get()
      if job:
        job.Print(seek_locations)
      else:
        break
    except IOError as ex:
      if ex.errno == errno.EINTR:
        # Looks like we received a signal. Keep printing.
        continue
      raise


class EmergeQueue(object):
@@ -1490,9 +1516,8 @@ class EmergeQueue(object):
      # Notify the user that we are exiting
      self._Print("Exiting on signal %s" % signum)

      # Exit when print worker is done.
      self._print_queue.put(None)
      self._print_worker.join()
      # Kill child threads, then exit.
      self._Exit()
      sys.exit(1)

    # Print out job status when we are killed
@@ -1558,6 +1583,17 @@ class EmergeQueue(object):
      self._Schedule(target)
      self._Print("Retrying emerge of %s." % target)

  def _Exit(self):
    # Tell emerge workers to exit. They all exit when 'None' is pushed
    # to the queue.
    self._emerge_queue.put(None)
    self._pool.close()
    self._pool.join()

    # Now that our workers are finished, we can kill the print queue.
    self._print_queue.put(None)
    self._print_worker.join()

  def Run(self):
    """Run through the scheduled ebuilds.

@@ -1574,9 +1610,8 @@ class EmergeQueue(object):
        if self._retry_queue:
          self._Retry()
        else:
          # Tell the print worker we're done, and wait for it to exit.
          self._print_queue.put(None)
          self._print_worker.join()
          # Tell child threads to exit.
          self._Exit()

          # The dependency map is helpful for debugging failures.
          PrintDepsMap(self._deps_map)
@@ -1637,9 +1672,9 @@ class EmergeQueue(object):
      # Print an update.
      self._Status()

    # Tell the print worker we're done, and wait for it to exit.
    self._print_queue.put(None)
    self._print_worker.join()
    # Tell child threads to exit.
    self._Print("Merge complete")
    self._Exit()


def main():
@@ -227,7 +227,21 @@ function main() {
    fi
    echo ""
    echo_color "yellow" ">>> Running ${type} test " ${control_file}
    local control_file_name=$(basename "${control_file}")
    local short_name=$(basename $(dirname "${control_file}"))

    # testName/control --> testName
    # testName/control.bvt --> testName.bvt
    # testName/control.regression --> testName.regression
    # testName/some_control --> testName.some_control
    if [[ "${control_file_name}" != control ]]; then
      if [[ "${control_file_name}" == control.* ]]; then
        short_name=${short_name}.${control_file_name/control./}
      else
        short_name=${short_name}.${control_file_name}
      fi
    fi

    local results_dir_name="${short_name}"
    local results_dir="${TMP_INSIDE_CHROOT}/${results_dir_name}"
    rm -rf "${results_dir}"