crosutils: Remove dead scripts

Change-Id: I666296627fc87910903b0a4c3f9bcfd7362daa41

BUG=none
TEST=Verify they are not in the developer workflow

Review URL: http://codereview.chromium.org/6287010
Anush Elangovan 2011-01-22 12:13:15 -08:00
parent 60dabf1a79
commit f3e7025a75
10 changed files with 0 additions and 1510 deletions

@@ -1,100 +0,0 @@
#!/bin/bash
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Load common constants. This should be the first executable line.
# The path to common.sh should be relative to your script's location.
. "$(dirname $0)/common.sh"
# Script must be run inside the chroot
restart_in_chroot_if_needed "$@"
DEFINE_string version "" \
"Assume current chroot version is this."
DEFINE_boolean force_latest "${FLAGS_FALSE}" \
"Assume latest version and recreate the version file"
DEFINE_boolean skipfirst "${FLAGS_FALSE}" \
"Skip the first upgrade. This may be dangerous."
FLAGS "$@" || exit 1
VERSION_FILE=~/.version
######################################################################
# Latest version is the version of the last upgrade.d file.
# Name format is ${number}_${short_description}
# Versions must be -n sorted, that is, the first continuous sequence
# of numbers is what counts. 12_ is before 111_, etc.
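# Hypothetical example: if upgrade.d contained 1_make_dev_dirs, 2_fix_perms
# and 12_update_toolchain, LATEST_VERSION below would be 12.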
LATEST_VERSION=$(
ls "$(dirname $0)/upgrade.d" | grep "^[0-9]*_" | \
sort -n | tail -n 1 | cut -f1 -d'_'
)
# If the version file doesn't exist yet, assume version 0 and create it.
if ! [ -f "${VERSION_FILE}" ]; then
warn "Warning: chroot of unknown version, assuming 0"
echo "0" > "${VERSION_FILE}"
fi
CHROOT_VERSION=$(cat "${VERSION_FILE}")
# Check if it's a number.
if ! [ "${CHROOT_VERSION}" -ge "0" ] &> /dev/null; then
error "Your chroot version file ${VERSION_FILE} is bogus: ${CHROOT_VERSION}"
exit 1
fi
if [ -n "${FLAGS_force_latest}" ]; then
echo "${LATEST_VERSION}" > "${VERSION_FILE}"
exit 0
fi
if [ -n "${FLAGS_skipfirst}" ]; then
if [ "${CHROOT_VERSION}" -lt "${LATEST_VERSION}" ]; then
CHROOT_VERSION=$(expr ${CHROOT_VERSION} + 1)
fi
fi
if [ -n "${FLAGS_version}" ]; then
# Check if it's a number.
if [ "${FLAGS_version}" -ge "0" ] &> /dev/null; then
CHROOT_VERSION="${FLAGS_version}"
else
error "Trying to force invalid version: ${FLAGS_version}"
exit 1
fi
fi
if [ "${LATEST_VERSION}" -gt "${CHROOT_VERSION}" ]; then
echo "Outdated chroot found"
pushd "$(dirname $0)/upgrade.d/" 1> /dev/null
for n in $(seq "$(expr ${CHROOT_VERSION} + 1)" "${LATEST_VERSION}"); do
# Deprecation check: deprecation can be done by removing old upgrade
# scripts, forcing chroots that are too old to be re-created.
# This also means that the scripts have to form a continuous sequence.
if ! [ -f ${n}_* ]; then
error "Fatal: Upgrade ${n} doesn't exist."
error "Your chroot is too old, you need to re-create it!"
exit 1
fi
info "Rollup $(echo ${n}_*)"
# Attempt the upgrade.
# NOTE: We source the upgrade scripts because:
# 1) We can impose set -something on them.
# 2) They can reuse local variables and functions (e.g. from common.sh)
# A side effect is that the scripts have to be internally enclosed in
# a code block, otherwise simply running "exit" in any of them would
# terminate the master script, and there would be no way to pass the
# return value from them.
if ! source ${n}_*; then
error "Fatal: failed to upgrade ${n}!"
exit 1
fi
echo "${n}" > "${VERSION_FILE}"
done
popd 1> /dev/null
fi

@@ -1,52 +0,0 @@
#!/bin/bash
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Debug a 32 bit binary on 64 bit linux. Can be run from inside or outside
# the chroot. If inside, then the 32 bit gdb from the chroot is used, otherwise
# the system's 64 bit gdb is used.
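# Hypothetical usage: ./<this script> path/to/32bit_binary arg1 arg2
# (everything after the binary is forwarded to it via gdb's --args).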
. "$(dirname "$0")/common.sh"
# Command line options
DEFINE_string chroot "$DEFAULT_CHROOT_DIR" "Location of chroot"
# Parse command line and update positional args
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
# Die on any errors
set -e
if [ -z "$SYSROOT" ]; then
if [ $INSIDE_CHROOT == 1 ]; then
SYSROOT=/build/x86-generic
else
SYSROOT=$FLAGS_chroot/build/x86-generic
fi
fi
if [ -z "$CHOST" ]; then
CHOST="x86-generic"
fi
SYSROOT="$FLAGS_chroot/build/$CHOST"
LIB_PATHS="/lib32:/usr/lib32:$LIB_PATHS:$SYSROOT/usr/lib:$SYSROOT/lib:."
LIB_PATHS="$LIB_PATHS:$SYSROOT/opt/google/chrome/chromeos"
if [ $INSIDE_CHROOT == 1 ]; then
# If we're inside the chroot, then we'll be running a 32 bit gdb, so we'll
# need the same library path as the target
export LD_LIBRARY_PATH=$LIB_PATHS
GDB="$SYSROOT/usr/bin/gdb"
else
GDB="gdb"
fi
exec $GDB \
--eval-command "set environment LD_LIBRARY_PATH=$LIB_PATHS" \
--eval-command "set sysroot $SYSROOT " \
--eval-command "set prompt (cros-gdb) " \
--args "$@"

@@ -1,11 +0,0 @@
#!/bin/sh
# Temporary hack to fetch SVN repos for ebuilds that haven't switched to
# tarballs and still pull code via SVN.
# O3D selenium tests
svn checkout http://o3d.googlecode.com/svn/trunk/googleclient/third_party/selenium_rc@178 ../third_party/autotest/files/client/site_tests/graphics_O3DSelenium/O3D/third_party/selenium_rc
svn checkout http://src.chromium.org/svn/trunk/src/o3d/tests/selenium@44717 ../third_party/autotest/files/client/site_tests/graphics_O3DSelenium/O3D/o3d/tests/selenium
svn checkout http://src.chromium.org/svn/trunk/src/o3d/samples@46579 ../third_party/autotest/files/client/site_tests/graphics_O3DSelenium/O3D/o3d/samples
svn checkout http://o3d.googlecode.com/svn/trunk/googleclient/o3d_assets/tests@155 ../third_party/autotest/files/client/site_tests/graphics_O3DSelenium/O3D/o3d/o3d_assets/tests
svn checkout http://google-gflags.googlecode.com/svn/trunk@29 ../third_party/autotest/files/client/site_tests/graphics_O3DSelenium/O3D/o3d/third_party/gflags
svn checkout https://cvs.khronos.org/svn/repos/registry/trunk/public/webgl/sdk/tests@11002 ../third_party/autotest/files/client/site_tests/graphics_WebGLConformance/WebGL

@@ -1,144 +0,0 @@
#!/bin/bash
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Print a list of installed packages
#
# This list is used by make_local_repo.sh to construct a local repository
# with only those packages.
#
# Usage:
# list_installed_packages.sh > package_list.txt
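#
# A hypothetical output line (field order matches the header echoed below):
# coreutils 7.4-2ubuntu3 required utils pool/main/c/coreutils/coreutils_7.4-2ubuntu3_i386.deb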
# Die on error
set -e
USAGE='usage: '"$0"' [options]
options:
-v Print verbose output.
-? Print this help.
'
# Handle command line options.
# Note: Can't use shflags, since this must run inside the rootfs image.
VERBOSE=0
# Option processing using getopts
while getopts "v?" OPTVAR
do
case $OPTVAR in
"v")
VERBOSE=1
;;
"?")
echo "$USAGE";
exit 1
;;
esac
done
shift `expr $OPTIND - 1`
# Print information on a single package
function print_deb {
# Positional parameters from calling script. :? means "fail if unset".
DEB_NAME=${1:?}
# Get the installed version of the package.
DEB_VER=`dpkg-query --show -f='${Version}' $DEB_NAME`
# Get information on package from apt-cache. Use a temporary file since
# we need to extract multiple fields.
rm -f /tmp/print_deb
apt-cache show $DEB_NAME > /tmp/print_deb
# The apt cache may have more than one version of the package available.
# For example, if the user has added another repository to
# /etc/apt/sources.list to install/upgrade packages. Use bash arrays to
# hold all the results until we can find information on the version we want.
# TODO: Is there a way to do this using only awk, so we can use /bin/sh
# instead of /bin/bash?
ALL_VER=( `grep '^Version: ' < /tmp/print_deb | awk '{print $2}'` )
ALL_PRIO=( `grep '^Priority: ' < /tmp/print_deb | awk '{print $2}'` )
ALL_SECTION=( `grep '^Section: ' < /tmp/print_deb | awk '{print $2}'` )
ALL_FILENAME=( `grep '^Filename: ' < /tmp/print_deb | awk '{print $2}'` )
rm -f /tmp/print_deb
# Find only the package version the user has installed.
NUM_VER=${#ALL_VER[@]}
FOUND_MATCH=0
for ((I=0; I<$NUM_VER; I++));
do
if [ "${ALL_VER[$I]}" = "$DEB_VER" ]
then
FOUND_MATCH=1
DEB_PRIO="${ALL_PRIO[$I]}"
DEB_SECTION="${ALL_SECTION[$I]}"
DEB_FILENAME="${ALL_FILENAME[$I]}"
fi
done
# Determine if the package filename appears to be from a locally-built
# repository (as created in build_image.sh). Use ! to ignore non-zero
# exit code, since grep exits 1 if no match.
! DEB_FILENAME_IS_LOCAL=`echo $DEB_FILENAME | grep 'local_packages'`
if [ $FOUND_MATCH -eq 0 ]
then
# Can't find information on package in apt cache
if [ $VERBOSE -eq 1 ]
then
echo "Unable to locate package $DEB_NAME version $DEB_VER" 1>&2
echo "in apt cache. It may have been installed directly, or the" 1>&2
echo "cache has been updated since installation and no longer" 1>&2
echo "contains information on that version. Omitting it in the" 1>&2
echo "list, since we can't determine where it came from." 1>&2
fi
echo "# Skipped $DEB_NAME $DEB_VER: not in apt cache"
elif [ "x$DEB_FILENAME" = "x" ]
then
# No filename, so package was installed via dpkg -i.
if [ $VERBOSE -eq 1 ]
then
echo "Package $DEB_NAME appears to have been installed directly" 1>&2
echo "(perhaps using 'dpkg -i'). Omitting it in the list, since we" 1>&2
echo "can't determine where it came from." 1>&2
fi
echo "# Skipped $DEB_NAME $DEB_VER: installed directly"
elif [ "x$DEB_FILENAME_IS_LOCAL" != "x" ]
then
# Package was installed from a local_packages directory.
# For example, chromeos-wm
if [ $VERBOSE -eq 1 ]
then
echo "Package $DEB_NAME appears to have been installed from a local" 1>&2
echo "package repository. Omitting it in the list, since future" 1>&2
echo "installs will also need to be local." 1>&2
fi
echo "# Skipped $DEB_NAME $DEB_VER $DEB_FILENAME: local install"
else
# Package from external repository.
# Don't change the order of these fields; make_local_repo.sh depends
# upon this order.
echo "$DEB_NAME $DEB_VER $DEB_PRIO $DEB_SECTION $DEB_FILENAME"
fi
}
# Header
echo "# Copyright (c) 2009 The Chromium Authors. All rights reserved."
echo "# Use of this source code is governed by a BSD-style license that can be"
echo "# found in the LICENSE file."
echo
echo "# Package list created by list_installed_packages.sh"
echo "# Creation time: `date`"
echo "#"
echo "# Contents of /etc/apt/sources.list:"
cat /etc/apt/sources.list | sed 's/^/# /'
echo "#"
echo "# package_name version priority section repo_filename"
# List all installed packages
for DEB in `dpkg-query --show -f='${Package}\n'`
do
print_deb $DEB
done

@@ -1,207 +0,0 @@
#!/usr/bin/python
#
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a pinned solutions file with relative git repository URLs.
make_relative_solution reads a pinned solution file generated by
'gclient revinfo --snapshot' and writes to stdout a pinned solution file
with relative git repository URLs.
The resulting solution file can be used to check out a fixed version of
a gclient set of repositories. The base URL to fetch from can be changed
by editing one line in the generated solution file.
"""
import optparse
import sys
def ReadSnapshot(filename):
"""Reads a gclient revinfo snapshot file.
Minimal verification of the structure of the file is performed.
Args:
filename: The name of a snapshot file to read.
Returns:
The solutions array parsed from the snapshot file.
"""
env = {}
execfile(filename, env)
assert 'solutions' in env
assert env['solutions']
return env['solutions']
def BaseRepository(url):
"""Finds the base repository path.
This only works if the top level repository is not in a subdirectory relative
to the other repositories on the server.
Args:
url: git repository url
Returns:
The prefix of the URL that does not contain the repository name and SHA.
"""
base, versioned_repository = url.rsplit('/', 1)
assert base and versioned_repository
return base
def WriteBaseURL(base, solution):
print ' "%s": "%s",' % (solution['name'], base)
def IsRelativeRepository(base, url):
return url.startswith(base)
def RelativeRepository(base, url):
if IsRelativeRepository(base, url):
return url[len(base):]
else:
return url
def RelativeDep(base, dep):
path, repository = dep
return (path,
RelativeRepository(base, repository),
IsRelativeRepository(base, repository))
def RelativeDeps(base, solution):
return [RelativeDep(base, dep) for dep in solution['custom_deps'].items()]
def WritePinnedDep(name, dep, indent):
"""Writes a pinned dep.
The output is indented so that the URLs all line up for ease of reading. If
the dep is for a relative git repository then we emit the base_url lookup as
well.
Args:
name: The name of the solution that is being written out.
dep: The relative dep that is to be written out.
indent: The total number of characters to use for the path component.
Returns:
Nothing
"""
path, repository, relative = dep
remainder = path.partition('/')[2]
spaces = indent - len(path)
if remainder == 'deps':
return
if relative:
print ' "%s": %*sbase_url["%s"] + "%s",' % (path,
spaces, '',
name,
repository)
else:
print ' "%s": %*s"%s",' % (path,
spaces, '',
repository)
def WritePinnedSolution(solution):
"""Writes out a pinned and solution file with relative repository paths.
The relative repository paths make it easier for a user to modify where
they are pulling source from.
Args:
solution: gclient solution object.
Returns:
Nothing
"""
base = BaseRepository(solution['url'])
url = RelativeRepository(base, solution['url'])
deps = RelativeDeps(base, solution)
indent = max(len(dep[0]) for dep in deps)
deps.sort(key=lambda dep: dep[1])
print (' { "name" : "%s",\n'
' "url" : base_url["%s"] + "%s",\n'
' "custom_deps" : {') % (solution['name'],
solution['name'],
url)
for dep in deps:
WritePinnedDep(solution['name'], dep, indent)
print (' },\n'
' },')
def main(argv):
usage = 'Usage: %prog [options] filename'
option_parser = optparse.OptionParser(usage=usage)
option_parser.disable_interspersed_args()
option_parser.add_option('-s', '--substitute',
action='store_true',
dest='substitute',
default=False,
help='substitute a new base git URL')
option_parser.add_option('-b', '--base',
dest='base',
default='http://src.chromium.org/git',
metavar='URL',
help='base git URL to substitute [%default]')
options, args = option_parser.parse_args(argv[1:])
if len(args) != 1:
option_parser.print_help()
return 1
filename = args.pop(0)
solutions = ReadSnapshot(filename)
print ('#\n'
'# Autogenerated pinned gclient solution file. This file was\n'
'# created by running make_relative_solution.\n'
'#\n'
'\n'
'base_url = {')
for solution in solutions:
if options.substitute:
base = options.base
else:
base = BaseRepository(solution['url'])
WriteBaseURL(base, solution)
print ('}\n'
'\n'
'solutions = [')
for solution in solutions:
WritePinnedSolution(solution)
print ']\n'
if __name__ == '__main__':
sys.exit(main(sys.argv))

@@ -1,204 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to generate ARM beagleboard SD card image from kernel, root fs
This script must be passed a uImage file and a tarred up root filesystem.
It also needs EITHER an output device or a file + size. If you use a real
device, the entire device will be used. If you specify a file, the file
will be truncated to the given length and be formatted as a disk image.
To copy a disk image to a device (e.g. /dev/sdb):
# dd if=disk_image.img of=/dev/sdb bs=4M
"""
from optparse import OptionParser
import math
import os
import re
import shutil
import subprocess
import sys
def DieWithUsage(exec_path):
print 'usage:', exec_path, ' [-f file] [-s filesize] [-d device] ', \
'path/to/uImage path/to/armel-rootfs.tgz'
print 'You must pass either -d or both -f and -s'
print 'size may end in k, m, or g for kibibytes, mebibytes, or gibibytes.'
print 'This will erase all data on the device or in the file passed.'
print 'This script must be run as root.'
sys.exit(1)
def ParseFilesize(size):
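# Hypothetical examples: '512' -> 512, '4k' -> 4096, '2G' -> 2147483648;
# an empty string yields -1 so the caller can reject it.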
if size == '':
return -1
multiplier = 1
number_part = size[:-1]
last_char = size[-1]
if (last_char == 'k') or (last_char == 'K'):
multiplier = 1024
elif (last_char == 'm') or (last_char == 'M'):
multiplier = 1024 * 1024
elif (last_char == 'g') or (last_char == 'G'):
multiplier = 1024 * 1024 * 1024
else:
number_part = size
return long(number_part) * multiplier
def ParseArgs(argv):
use_file = False
file_size = 0
device_path = ''
uimage_path = ''
rootfs_path = ''
parser = OptionParser()
parser.add_option('-f', action='store', type='string', dest='filename')
parser.add_option('-s', action='store', type='string', dest='filesize')
parser.add_option('-d', action='store', type='string', dest='devname')
(options, args) = parser.parse_args()
# check for valid arg presence
if len(args) != 2:
DieWithUsage(argv[0])
if (options.filename != None) != (options.filesize != None):
DieWithUsage(argv[0])
if not (bool((options.filename != None) and (options.filesize != None)) ^
bool(options.devname != None)):
DieWithUsage(argv[0])
# check the device isn't a partition
if options.devname != None:
if (options.devname[-1] >= '0') and (options.devname[-1] <= '9'):
print 'Looks like you specified a partition device, rather than the ' \
'entire device. try using -d',options.devname[:-1]
DieWithUsage(argv[0])
# if size passed, parse size
if options.filesize != None:
file_size = ParseFilesize(options.filesize)
if file_size < 0:
DieWithUsage(argv[0])
if options.devname != None:
device_path = options.devname
if options.filename != None:
use_file = True
device_path = options.filename
uimage_path = args[0]
rootfs_path = args[1]
# print args
if use_file:
print "file size:", file_size
print "dev path:", device_path
print "uimage:", uimage_path
print 'rootfs:', rootfs_path
return use_file, file_size, device_path, uimage_path, rootfs_path
def CreateSparseFile(path, size):
fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0644)
# Note: os.open raises OSError on failure; it never returns a negative fd.
os.ftruncate(fd, size)
os.close(fd)
# Creates the partition table with the first partition having enough
# space for the uimage, the second partition taking the rest of the space.
def CreatePartitions(uimage_path, device_path):
# get size of first partition in mebibytes
statinfo = os.stat(uimage_path)
first_part_size = int(math.ceil(statinfo.st_size / (1024.0 * 1024.0)) + 1)
System('echo -e ",' + str(first_part_size) \
+ ',c,*\\n,,83,-" | sfdisk -uM \'' + device_path + '\'')
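# Hypothetical worked example: for a 3 MiB uImage the pipeline above feeds
# sfdisk ",4,c,*\n,,83,-": a bootable 4 MiB FAT32 (type c) first partition,
# then a Linux (type 83) partition spanning the remainder.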
# uses losetup to set up two loopback devices for the two partitions
# returns the two loopback device paths
def SetupLoopbackDevices(device_path):
sector_size = 512 # bytes
# get size of partitions
output = subprocess.Popen(['sfdisk', '-d', device_path],
stdout=subprocess.PIPE).communicate()[0]
m = re.search('start=\\s+(\\d+), size=\\s+(\\d+),.*?start=\\s+(\\d+), size=\\s+(\\d+),', output, re.DOTALL)
part1_start = long(m.group(1)) * sector_size
part1_size = long(m.group(2)) * sector_size
part2_start = long(m.group(3)) * sector_size
part2_size = long(m.group(4)) * sector_size
if part1_start < 1 or part1_size < 1 or part2_start < 1 or part2_size < 1:
print 'failed to read partition table'
sys.exit(1)
return SetupLoopbackDevice(device_path, part1_start, part1_size), \
SetupLoopbackDevice(device_path, part2_start, part2_size)
# returns loopback device path
def SetupLoopbackDevice(path, start, size):
# get a device
device = subprocess.Popen(['losetup', '-f'],
stdout=subprocess.PIPE).communicate()[0].rstrip()
if device == '':
print 'can\'t get device'
sys.exit(1)
System('losetup -o ' + str(start) + ' --sizelimit ' + str(size) + ' ' + device + ' ' + path)
return device
def DeleteLoopbackDevice(dev):
System('losetup -d ' + dev)
def FormatDevices(first, second):
System('mkfs.msdos -F 32 ' + first)
System('mkfs.ext3 ' + second)
# returns mounted paths
def MountFilesystems(paths):
i = 0
ret = []
for path in paths:
i = i + 1
mntpoint = 'mnt' + str(i)
System('mkdir ' + mntpoint)
System('mount ' + path + ' ' + mntpoint)
ret.append(mntpoint)
return ret
def UnmountFilesystems(mntpoints):
for mntpoint in mntpoints:
System('umount ' + mntpoint)
os.rmdir(mntpoint)
def System(cmd):
print 'system(' + cmd + ')'
p = subprocess.Popen(cmd, shell=True)
return os.waitpid(p.pid, 0)
def main(argv):
(use_file, file_size, device_path, uimage_path, rootfs_path) = ParseArgs(argv)
if use_file:
CreateSparseFile(device_path, file_size)
CreatePartitions(uimage_path, device_path)
if use_file:
(dev1, dev2) = SetupLoopbackDevices(device_path)
else:
dev1 = device_path + '1'
dev2 = device_path + '2'
FormatDevices(dev1, dev2)
(mnt1, mnt2) = MountFilesystems([dev1, dev2])
# copy data in
shutil.copy(uimage_path, mnt1 + '/uImage')
System('tar xzpf ' + rootfs_path + ' -C ' + mnt2)
UnmountFilesystems([mnt1, mnt2])
if use_file:
DeleteLoopbackDevice(dev1)
DeleteLoopbackDevice(dev2)
print 'all done!'
if use_file:
print 'you may want to run dd if=' + device_path + ' of=/some/device bs=4M'
if __name__ == '__main__':
main(sys.argv)

@@ -1,38 +0,0 @@
#!/bin/bash
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Run a 32 bit binary on 64 bit Linux. Can be run from inside or outside
# the chroot.
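# Hypothetical usage: ./<this script> path/to/32bit_binary arg1 arg2
# (the command runs with LD_LIBRARY_PATH pointing at the 32 bit libraries
# assembled below).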
. "$(dirname "$0")/common.sh"
# Command line options
DEFINE_string chroot "$DEFAULT_CHROOT_DIR" "Location of chroot"
# Parse command line and update positional args
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
# Die on any errors
set -e
if [ -z "$SYSROOT" ]; then
if [ $INSIDE_CHROOT == 1 ]; then
SYSROOT=/build/x86-generic
else
SYSROOT=$FLAGS_chroot/build/x86-generic
fi
fi
if [ -z "$CHOST" ]; then
CHOST=i686-pc-linux-gnu
fi
LIB_PATHS="/lib32:/usr/lib32:$LIB_PATHS:$SYSROOT/usr/lib:$SYSROOT/lib:."
LIB_PATHS="$LIB_PATHS:$SYSROOT/opt/google/chrome/chromeos"
export LD_LIBRARY_PATH=$LIB_PATHS
exec "$@"

@@ -1,128 +0,0 @@
#!/bin/bash
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Save the current state of the tree into a pinned deps file that can later
# be used to reconstruct the same tree.
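# Hypothetical invocation: ./<this script> --depfile bots/pinned.DEPS --commit
# would write ${GCLIENT_ROOT}/deps/bots/pinned.DEPS and push it to deps.git.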
# Load common constants. This should be the first executable line.
# The path to common.sh should be relative to your script's location.
. "$(dirname "$0")/common.sh"
# Script must be run outside the chroot. I am not sure why this is, but
# inside the chroot "gclient" is aliased to a warning about using gclient
# in the chroot.
assert_outside_chroot
# Flags
BASE_URL="http://src.chromium.org/git"
DEFINE_string depfile "" "The path to the depfile to create."
DEFINE_boolean commit ${FLAGS_FALSE} "Commit the resulting depfile."
DEFINE_boolean substitute ${FLAGS_FALSE} "Substitute a new base git URL."
DEFINE_string base ${BASE_URL} "Base git URL to substitute"
# Parse command line
FLAGS_HELP="usage: $0 [flags]"
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
check_flags_only_and_allow_null_arg "$@" && set --
# Die on any errors.
set -e
if [ -z "$FLAGS_depfile" ] ; then
echo "Error: --depfile is required."
exit 1
fi
DEPPATH="${GCLIENT_ROOT}/deps"
DEPFILE="${DEPPATH}/${FLAGS_depfile}"
TEMPFILE=$(tempfile)
DIRNAME=$(dirname "${DEPFILE}")
FILENAME=$(basename "${DEPFILE}")
cleanup() {
# Disable die on error.
set +e
if [ -f "${TEMPFILE}" ]; then
rm "${TEMPFILE}"
fi
if [ -f "${DEPFILE}" ]; then
rm "${DEPFILE}"
fi
# Turn die on error back on.
set -e
}
reset_repository() {
echo "Resetting DEPS repository"
pushd "${DEPPATH}"
[ -d ".git" ] || die "${DEPPATH} is not a git repository."
git reset --hard origin/master
popd
}
generate_depfile() {
echo "Writing pinned DEPS file to ${DEPFILE}"
mkdir -p "${DIRNAME}"
gclient revinfo --snapshot > "${TEMPFILE}"
ARGS=""
if [[ $FLAGS_substitute -eq $FLAGS_TRUE ]]; then
ARGS="${ARGS} -s -b ${FLAGS_base}"
fi
ARGS="${ARGS} ${TEMPFILE}"
"${SCRIPTS_DIR}/make_relative_solution" ${ARGS} > ${DEPFILE}
rm -f "${TEMPFILE}"
}
commit_depfile() {
echo "Commiting pinned DEPS file"
pushd "${DEPPATH}"
git add "${FLAGS_depfile}"
git commit -m "Automated buildbot update of pinned DEPS file." --allow-empty
git reset --hard HEAD
git clean -f
git remote update
git rebase -s ours origin/master
git push
popd
}
#
# Generate a pinned deps file from the current gclient sync and check it into
# the deps.git repository.
#
trap "cleanup" EXIT
if [[ $FLAGS_commit -eq $FLAGS_TRUE ]]; then
reset_repository
fi
generate_depfile
if [[ $FLAGS_commit -eq $FLAGS_TRUE ]]; then
commit_depfile
fi
trap - EXIT

@@ -1,622 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# For Spreadsheets:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata.spreadsheet.service
import gdata.service
import atom.service
import gdata.spreadsheet
import atom
# For Issue Tracker:
import gdata.projecthosting.client
import gdata.projecthosting.data
import gdata.gauth
import gdata.client
import gdata.data
import atom.http_core
import atom.core
# For this script:
import getpass
from optparse import OptionParser
import pickle
from sets import Set
# Settings
credentials_store = 'creds.dat'
class Merger(object):
def __init__(self, ss_key, ss_ws_key, tracker_message, tracker_project,
debug, pretend):
self.ss_key = ss_key
self.ss_ws_key = ss_ws_key
self.tracker_message = tracker_message
self.tracker_project = tracker_project
self.debug_enabled = debug
self.pretend = pretend
self.user_agent = 'adlr-tracker-spreadsheet-merger'
self.it_keys = ['id', 'owner', 'status', 'title']
def debug(self, message):
"""Prints message if debug mode is set."""
if self.debug_enabled:
print message
def print_feed(self, feed):
'Handy for debugging'
for i, entry in enumerate(feed.entry):
print 'id:', entry.id
if isinstance(feed, gdata.spreadsheet.SpreadsheetsCellsFeed):
print '%s %s\n' % (entry.title.text, entry.content.text)
elif isinstance(feed, gdata.spreadsheet.SpreadsheetsListFeed):
print '%s %s %s' % (i, entry.title.text, entry.content.text)
# Print this row's value for each column (the custom dictionary is
# built using the gsx: elements in the entry.)
print 'Contents:'
for key in entry.custom:
print ' %s: %s' % (key, entry.custom[key].text)
print '\n',
else:
print '%s %s\n' % (i, entry.title.text)
def tracker_login(self):
"""Logs user into Tracker, using cached credentials if possible.
Saves credentials after login."""
self.it_client = gdata.projecthosting.client.ProjectHostingClient()
self.it_client.source = self.user_agent
self.load_creds()
if self.tracker_token and self.tracker_user:
print 'Using existing credential for tracker login'
self.it_client.auth_token = self.tracker_token
else:
self.tracker_user = raw_input('Issue Tracker Login:')
password = getpass.getpass('Password:')
self.it_client.ClientLogin(self.tracker_user, password,
source=self.user_agent, service='code',
account_type='GOOGLE')
self.tracker_token = self.it_client.auth_token
self.store_creds()
def spreadsheet_login(self):
"""Logs user into Google Spreadsheets, using cached credentials if possible.
Saves credentials after login."""
self.gd_client = gdata.spreadsheet.service.SpreadsheetsService()
self.gd_client.source = self.user_agent
self.load_creds()
if self.docs_token:
print 'Using existing credential for docs login'
self.gd_client.SetClientLoginToken(self.docs_token)
else:
self.gd_client.email = raw_input('Google Docs Login:')
self.gd_client.password = getpass.getpass('Password:')
self.gd_client.ProgrammaticLogin()
self.docs_token = self.gd_client.GetClientLoginToken()
self.store_creds()
def fetch_spreadsheet_issues(self):
"""Fetches all issues from the user-specified spreadsheet. Returns
them as an array of dictionaries."""
feed = self.gd_client.GetListFeed(self.ss_key, self.ss_ws_key)
issues = []
for entry in feed.entry:
issue = {}
for key in entry.custom:
issue[key] = entry.custom[key].text
issue['__raw_entry'] = entry
issues.append(issue)
return issues
def ids_for_spreadsheet_issues(self, ss_issues):
"""Returns a Set of strings, each string an id from ss_issues"""
ret = Set()
for ss_issue in ss_issues:
ret.add(ss_issue['id'])
return ret
def tracker_issues_for_query_feed(self, feed):
"""Converts a feed object from a query to a list of tracker issue
dictionaries."""
issues = []
for issue in feed.entry:
issue_dict = {}
issue_dict['labels'] = [label.text for label in issue.label]
issue_dict['id'] = issue.id.text.split('/')[-1]
issue_dict['title'] = issue.title.text
issue_dict['status'] = issue.status.text
if issue.owner:
issue_dict['owner'] = issue.owner.username.text
issues.append(issue_dict)
return issues
def fetch_tracker_issues(self, ss_issues):
"""Fetches all relevant issues from traacker and returns them as an array
of dictionaries. Relevance is:
- has an ID that's in ss_issues, OR
- (is Area=Installer AND status is open).
Open status is one of: Unconfirmed, Untriaged, Available, Assigned,
Started, Upstream"""
issues = []
got_results = True
index = 1
while got_results:
query = gdata.projecthosting.client.Query(label='Area-Installer',
max_results=50,
start_index=index)
feed = self.it_client.get_issues('chromium-os', query=query)
if not feed.entry:
got_results = False
index = index + len(feed.entry)
issues.extend(self.tracker_issues_for_query_feed(feed))
# Now, keep only the issues that are open or referenced in ss_issues.
ss_ids = self.ids_for_spreadsheet_issues(ss_issues)
open_statuses = ['Unconfirmed', 'Untriaged', 'Available', 'Assigned',
'Started', 'Upstream']
new_issues = []
for issue in issues:
if issue['status'] in open_statuses or issue['id'] in ss_ids:
new_issues.append(issue)
# Remove id from ss_ids, if it's there
ss_ids.discard(issue['id'])
issues = new_issues
# Now, for each ss_id that didn't turn up in the query, explicitly add it
for id_ in ss_ids:
query = gdata.projecthosting.client.Query(issue_id=id_,
max_results=50,
start_index=1)
feed = self.it_client.get_issues('chromium-os', query=query)
if not feed.entry:
print 'No result for id', id_
continue
issues.extend(self.tracker_issues_for_query_feed(feed))
return issues
def store_creds(self):
"""Stores login credentials to disk."""
obj = {}
if self.docs_token:
obj['docs_token'] = self.docs_token
if self.tracker_token:
obj['tracker_token'] = self.tracker_token
if self.tracker_user:
obj['tracker_user'] = self.tracker_user
try:
f = open(credentials_store, 'w')
pickle.dump(obj, f)
f.close()
except IOError:
print 'Unable to store credentials'
def load_creds(self):
"""Loads login credentials from disk."""
self.docs_token = None
self.tracker_token = None
self.tracker_user = None
try:
f = open(credentials_store, 'r')
obj = pickle.load(f)
f.close()
if obj.has_key('docs_token'):
self.docs_token = obj['docs_token']
if obj.has_key('tracker_token'):
self.tracker_token = obj['tracker_token']
if obj.has_key('tracker_user'):
self.tracker_user = obj['tracker_user']
except IOError:
print 'Unable to load credentials'
def browse(self):
"""Browses Spreadsheets to help the user find the spreadsheet and
worksheet keys"""
print 'Browsing spreadsheets...'
if self.ss_key and self.ss_ws_key:
print 'You already passed in --ss_key and --ss_ws_key. No need to browse.'
return
print 'Logging in...'
self.spreadsheet_login()
if not self.ss_key:
print 'Fetching spreadsheets...'
feed = self.gd_client.GetSpreadsheetsFeed()
print ''
print 'Spreadsheet key - Title'
for entry in feed.entry:
key = entry.id.text.split('/')[-1]
title = entry.title.text
print '"%s" - "%s"' % (key, title)
print ''
print 'Done. Rerun with --ss_key=KEY to browse a list of worksheet keys.'
else:
print 'Fetching worksheets for spreadsheet', self.ss_key
feed = self.gd_client.GetWorksheetsFeed(self.ss_key)
for entry in feed.entry:
key = entry.id.text.split('/')[-1]
title = entry.title.text
print ''
print 'Worksheet key - Title'
print '"%s" - "%s"' % (key, title)
print ''
print 'Done. You now have keys for --ss_key and --ss_ws_key.'
def tracker_issue_for_id(self, issues, id_):
"""Returns the element of issues which has id_ for the key 'id'"""
for issue in issues:
if issue['id'] == id_:
return issue
return None
def spreadsheet_issue_to_tracker_dict(self, ss_issue):
"""Converts a spreadsheet issue to the dict format that is used to
represent a tracker issue."""
ret = {}
ret['project'] = self.tracker_project
ret['title'] = ss_issue['title']
ret['summary'] = self.tracker_message
ret['owner'] = ss_issue['owner']
if ss_issue.get('status') is not None:
ret['status'] = ss_issue['status']
ret['labels'] = []
for (key, value) in ss_issue.items():
if key.endswith('-') and (value is not None):
ret['labels'].append(key.title() + value)
return ret
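# Hypothetical example: a row {'title': 'Fix foo', 'owner': 'bob',
# 'status': 'Available', 'mstone-': 'R8'} yields a tracker dict whose
# 'labels' list is ['Mstone-R8'].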
def label_from_prefix(self, prefix, corpus):
"""Given a corpus (array of lable strings), return the first label
that begins with the specified prefix."""
for label in corpus:
if label.startswith(prefix):
return label
return None
def update_spreadsheet_issue_to_tracker_dict(self, ss_issue, t_issue):
"""Updates a given tracker issue with data from the spreadsheet issue."""
ret = {}
ret['title'] = ss_issue['title']
ret['id'] = ss_issue['id']
ret['summary'] = self.tracker_message
if ss_issue['status'] != t_issue['status']:
ret['status'] = ss_issue['status']
if ss_issue.get('owner'):
if (not t_issue.has_key('owner')) or \
(ss_issue['owner'] != t_issue['owner']):
ret['owner'] = ss_issue['owner']
# labels
ret['labels'] = []
for (key, value) in ss_issue.items():
caps_key = key.title()
if not caps_key.endswith('-'):
continue
ss_label = None
if value:
ss_label = caps_key + value.title()
t_label = self.label_from_prefix(caps_key, t_issue['labels'])
if t_label is None and ss_label is None:
# Nothing
continue
if (t_label is not None) and \
((ss_label is None) or (ss_label != t_label)):
ret['labels'].append('-' + t_label)
if (ss_label is not None) and \
((t_label is None) or (t_label != ss_label)):
ret['labels'].append(ss_label)
return ret
def tracker_issue_has_changed(self, t_issue, ss_issue):
"""Returns True iff ss_issue indicates changes in t_issue that need to be
committed up to the Issue Tracker."""
if t_issue is None:
return True
potential_commit = \
self.update_spreadsheet_issue_to_tracker_dict(ss_issue, t_issue)
if potential_commit.has_key('status') or \
potential_commit.has_key('owner') or \
(len(potential_commit['labels']) > 0):
return True
if potential_commit['title'] != t_issue['title']:
return True
return False
def spreadsheet_to_tracker_commits(self, ss_issues, t_issues):
"""Given the current state of all spreadsheet issues and tracker issues,
returns a list of all commits that need to go to tracker to get it in
line with the spreadsheet."""
ret = []
for ss_issue in ss_issues:
t_issue = self.tracker_issue_for_id(t_issues, ss_issue['id'])
commit = {}
# TODO see if an update is needed at all
if t_issue is None:
commit['type'] = 'append'
commit['dict'] = self.spreadsheet_issue_to_tracker_dict(ss_issue)
commit['__ss_issue'] = ss_issue
else:
if not self.tracker_issue_has_changed(t_issue, ss_issue):
continue
commit['type'] = 'update'
commit['dict'] = \
self.update_spreadsheet_issue_to_tracker_dict(ss_issue, t_issue)
ret.append(commit)
return ret
def fetch_issues(self):
"""Logs into Docs/Tracker, and fetches spreadsheet and tracker issues"""
print 'Logging into Docs...'
self.spreadsheet_login()
print 'Logging into Tracker...'
self.tracker_login()
print 'Fetching spreadsheet issues...'
ss_issues = self.fetch_spreadsheet_issues()
self.debug('Spreadsheet issues: %s' % ss_issues)
print 'Fetching tracker issues...'
t_issues = self.fetch_tracker_issues(ss_issues)
self.debug('Tracker issues: %s' % t_issues)
return (t_issues, ss_issues)
def spreadsheet_to_tracker(self):
"""High-level function to manage migrating data from the spreadsheet
to Tracker."""
(t_issues, ss_issues) = self.fetch_issues()
print 'Calculating deltas...'
commits = self.spreadsheet_to_tracker_commits(ss_issues, t_issues)
self.debug('got commits: %s' % commits)
if not commits:
print 'No deltas. Done.'
return
for commit in commits:
dic = commit['dict']
labels = dic.get('labels')
owner = dic.get('owner')
status = dic.get('status')
if commit['type'] == 'append':
print 'Creating new tracker issue...'
if self.pretend:
print '(Skipping because --pretend is set)'
continue
created = self.it_client.add_issue(self.tracker_project,
dic['title'],
self.tracker_message,
self.tracker_user,
labels=labels,
owner=owner,
status=status)
issue_id = created.id.text.split('/')[-1]
print 'Created issue with id:', issue_id
print 'Write id back to spreadsheet row...'
raw_entry = commit['__ss_issue']['__raw_entry']
ss_issue = commit['__ss_issue']
del ss_issue['__raw_entry']
ss_issue.update({'id': issue_id})
self.gd_client.UpdateRow(raw_entry, ss_issue)
print 'Done.'
else:
print 'Updating issue with id:', dic['id']
if self.pretend:
print '(Skipping because --pretend is set)'
continue
self.it_client.update_issue(self.tracker_project,
dic['id'],
self.tracker_user,
comment=self.tracker_message,
status=status,
owner=owner,
labels=labels)
print 'Done.'
def spreadsheet_issue_for_id(self, issues, id_):
"""Given the array of spreadsheet issues, return the first one that
has id_ for the key 'id'."""
for issue in issues:
if issue['id'] == id_:
return issue
return None
def value_for_key_in_labels(self, label_array, prefix):
"""Given an array of labels and a prefix, return the non-prefix part
of the first label that has that prefix. E.g. if label_array is
["Mstone-R7", "Area-Installer"] and prefix is "Area-", returns
"Installer"."""
for label in label_array:
if label.startswith(prefix):
return label[len(prefix):]
return None
def tracker_issue_to_spreadsheet_issue(self, t_issue, ss_keys):
"""Converts a tracker issue to the format used by spreadsheet, given
the row headings ss_keys."""
new_row = {}
for key in ss_keys:
if key.endswith('-'):
# label
new_row[key] = self.value_for_key_in_labels(t_issue['labels'],
key.title())
# Special cases
if key in self.it_keys and key in t_issue:
new_row[key] = t_issue[key]
return new_row
def spreadsheet_row_needs_update(self, ss_issue, t_issue):
"""Returns True iff the spreadsheet issue passed in needs to be updated
to match data in the tracker issue."""
new_ss_issue = self.tracker_issue_to_spreadsheet_issue(t_issue,
ss_issue.keys())
for key in new_ss_issue.keys():
if not ss_issue.has_key(key):
continue
if new_ss_issue[key] != ss_issue[key]:
return True
return False
def tracker_to_spreadsheet_commits(self, t_issues, ss_issues):
"""Given the current set of spreadsheet and tracker issues, computes
commits needed to go to Spreadsheets to get the spreadsheet in line
with what's in Tracker."""
ret = []
keys = ss_issues[0].keys()
for t_issue in t_issues:
commit = {}
ss_issue = self.spreadsheet_issue_for_id(ss_issues, t_issue['id'])
if ss_issue is None:
# New issue
commit['new_row'] = self.tracker_issue_to_spreadsheet_issue(t_issue,
keys)
commit['type'] = 'append'
elif self.spreadsheet_row_needs_update(ss_issue, t_issue):
commit['__raw_entry'] = ss_issue['__raw_entry']
del ss_issue['__raw_entry']
ss_issue.update(self.tracker_issue_to_spreadsheet_issue(t_issue, keys))
commit['dict'] = ss_issue
commit['type'] = 'update'
else:
continue
ret.append(commit)
return ret
def tracker_to_spreadsheet(self):
"""High-level function to migrate data from Tracker to the spreadsheet."""
(t_issues, ss_issues) = self.fetch_issues()
if len(ss_issues) == 0:
raise Exception('Error: must have at least one non-header row in '\
'spreadsheet')
ss_keys = ss_issues[0].keys()
print 'Calculating deltas...'
ss_commits = self.tracker_to_spreadsheet_commits(t_issues, ss_issues)
self.debug('commits: %s' % ss_commits)
if not ss_commits:
print 'Nothing to commit.'
return
print 'Committing...'
for commit in ss_commits:
self.debug('Operating on commit: %s' % commit)
if commit['type'] == 'append':
print 'Appending new row...'
if not self.pretend:
self.gd_client.InsertRow(commit['new_row'],
self.ss_key, self.ss_ws_key)
else:
print '(Skipped because --pretend set)'
if commit['type'] == 'update':
print 'Updating row...'
if not self.pretend:
self.gd_client.UpdateRow(commit['__raw_entry'], commit['dict'])
else:
print '(Skipped because --pretend set)'
print 'Done.'
def main():
class PureEpilogOptionParser(OptionParser):
def format_epilog(self, formatter):
return self.epilog
parser = PureEpilogOptionParser()
parser.add_option('-a', '--action', dest='action', metavar='ACTION',
help='Action to perform')
parser.add_option('-d', '--debug', action='store_true', dest='debug',
default=False, help='Print debug output.')
parser.add_option('-m', '--message', dest='message', metavar='TEXT',
help='Log message when updating Tracker issues')
parser.add_option('-p', '--pretend', action='store_true', dest='pretend',
default=False, help="Don't commit anything.")
parser.add_option('--ss_key', dest='ss_key', metavar='KEY',
help='Spreadsheets key (find with browse action)')
parser.add_option('--ss_ws_key', dest='ss_ws_key', metavar='KEY',
help='Spreadsheets worksheet key (find with browse action)')
parser.add_option('--tracker_project', dest='tracker_project',
metavar='PROJECT',
help='Tracker project (default: chromium-os)',
default='chromium-os')
parser.epilog = """Actions:
browse -- browse spreadsheets to find spreadsheet and worksheet keys.
ss_to_t -- for each entry in spreadsheet, apply its values to tracker.
If no ID is in the spreadsheet row, a new tracker item is created
and the spreadsheet is updated.
t_to_ss -- for each tracker entry, apply it or add it to the spreadsheet.
This script can be used to migrate issues between Issue Tracker
and Google Spreadsheets. The spreadsheet should have certain columns in any
order: Id, Owner, Title, Status. The spreadsheet may also have columns of the
form 'Key-'. For those columns whose names end in '-', this script assumes the
header and the cell value together form a label that should be applied to the
issue. E.g. if the spreadsheet has a column named 'Mstone-' and a cell under it
containing 'R8', that corresponds to the label 'Mstone-R8' in Issue Tracker.
To migrate data, you must choose on each invocation of this script if you
wish to migrate data from Issue Tracker to a spreadsheet or vice-versa.
When migrating from Tracker, all found issues based on the query
(which is currently hard-coded to "label=Area-Installer") will be inserted
into the spreadsheet (overwriting existing cells if a row with matching ID
is found). Custom columns in the spreadsheet won't be overwritten, so if
the spreadsheet contains extra columns about issues (e.g. time estimates)
they will be preserved.
When migrating from spreadsheet to Tracker, each row in the spreadsheet
is compared to existing tracker issues that match the query
(which is currently hard-coded to "label=Area-Installer"). If the
spreadsheet row has no Id, a new Issue Tracker issue is created and the new
Id is written back to the spreadsheet. If an existing tracker issue exists,
it's updated with the data from the spreadsheet if anything has changed.
Suggested usage:
- Create a spreadsheet with columns Id, Owner, Title, Status, and any label
prefixes as desired.
- Run this script with '-a browse' to browse your spreadsheets and get the
spreadsheet key.
- Run this script again with '-a browse' and --ss_key to get the
worksheet key.
- Run this script with "-a t_to_ss" or "-a ss_to_t" to migrate data in either
direction.
Known issues:
- query is currently hardcoded to label=Area-Installer. That should be
a command-line flag.
- When creating a new issue on tracker, the owner field isn't set. I (adlr)
am not sure why. Workaround: If you rerun this script, though, it will detect
a delta and update the tracker issue with the owner, which seems to succeed.
"""
(options, args) = parser.parse_args()
merger = Merger(options.ss_key, options.ss_ws_key,
options.message, options.tracker_project,
options.debug, options.pretend)
if options.action == 'browse':
merger.browse()
elif options.action == 'ss_to_t':
if not options.message:
print 'Error: when updating tracker, -m MESSAGE required.'
return
merger.spreadsheet_to_tracker()
elif options.action == 't_to_ss':
merger.tracker_to_spreadsheet()
else:
raise Exception('Unknown action requested.')
if __name__ == '__main__':
main()

@@ -1,4 +0,0 @@
{
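# Upgrade fragments like this one are sourced by the updater script, so the
# body lives in a code block: "return" reports status to the caller, and a
# bare "exit" would kill the updater itself.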
info "You have just been upgraded to chroot version 1! Congratulations!"
return 0
}