This commit is contained in:
Brandon Philips 2013-02-05 14:50:09 -08:00
parent 55a2b35b24
commit adbebe18df
62 changed files with 12770 additions and 27 deletions

View File

@ -17,7 +17,7 @@ SLOT="0"
KEYWORDS="amd64 arm x86"
IUSE="cros_host test"
RDEPEND="cros_host? ( app-emulation/qemu-kvm )
RDEPEND="cros_host? ( app-emulation/qemu )
app-portage/gentoolkit
cros_host? ( app-shells/bash )
!cros_host? ( !chromeos-base/gmerge )

View File

@ -15,7 +15,7 @@ SLOT="0"
KEYWORDS="~amd64 ~arm ~x86"
IUSE="cros_host test"
RDEPEND="cros_host? ( app-emulation/qemu-kvm )
RDEPEND="cros_host? ( app-emulation/qemu )
app-portage/gentoolkit
cros_host? ( app-shells/bash )
!cros_host? ( !chromeos-base/gmerge )

View File

@ -0,0 +1,36 @@
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
# TODO(jsalz): Remove this ebuild; it's no longer used.
EAPI="4"
CROS_WORKON_COMMIT="c759366a1dd3d733b12bb2edc3bae9868d38ee5b"
CROS_WORKON_TREE="46e050754b5a2f5392223d734036b7b51dde5b5b"
CROS_WORKON_PROJECT="chromiumos/platform/factory-utils"
inherit cros-workon
DESCRIPTION="Factory development utilities for ChromiumOS"
HOMEPAGE="http://www.chromium.org/"
LICENSE="GPL-2"
SLOT="0"
KEYWORDS="amd64 arm x86"
IUSE="cros_factory_bundle"
CROS_WORKON_LOCALNAME="factory-utils"
RDEPEND=""
# coreos-installer for solving "lib/coreos-common.sh" symlink.
# vboot_reference for binary programs (ex, cgpt).
# TODO: coreos-base/coreos-installer[cros_host]
DEPEND="
coreos-base/vboot_reference"
# No-op: this meta-ebuild only declares dependencies; nothing to build.
src_compile() {
true
}
# No-op: nothing to install; the dependencies provide all files.
src_install() {
true
}

View File

@ -0,0 +1,33 @@
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
# TODO(jsalz): Remove this ebuild; it's no longer used.
EAPI="4"
CROS_WORKON_PROJECT="chromiumos/platform/factory-utils"
inherit cros-workon
DESCRIPTION="Factory development utilities for ChromiumOS"
HOMEPAGE="http://www.chromium.org/"
LICENSE="GPL-2"
SLOT="0"
KEYWORDS="~amd64 ~arm ~x86"
IUSE="cros_factory_bundle"
CROS_WORKON_LOCALNAME="factory-utils"
RDEPEND=""
# chromeos-installer for solving "lib/chromeos-common.sh" symlink.
# vboot_reference for binary programs (ex, cgpt).
DEPEND="chromeos-base/chromeos-installer[cros_host]
chromeos-base/vboot_reference"
# No-op: this meta-ebuild only declares dependencies; nothing to build.
src_compile() {
true
}
# No-op: nothing to install; the dependencies provide all files.
src_install() {
true
}

View File

@ -18,10 +18,10 @@ KEYWORDS="amd64 arm x86"
CROS_WORKON_LOCALNAME="crostestutils"
RDEPEND="app-emulation/qemu-kvm
RDEPEND="app-emulation/qemu
app-portage/gentoolkit
app-shells/bash
chromeos-base/cros-devutils
coreos-base/cros-devutils
dev-util/crosutils
"

View File

@ -16,10 +16,10 @@ KEYWORDS="~amd64 ~arm ~x86"
CROS_WORKON_LOCALNAME="crostestutils"
RDEPEND="app-emulation/qemu-kvm
RDEPEND="app-emulation/qemu
app-portage/gentoolkit
app-shells/bash
chromeos-base/cros-devutils
coreos-base/cros-devutils
dev-util/crosutils
"

View File

@ -22,13 +22,10 @@ RDEPEND="${RDEPEND}
app-arch/lzop
app-arch/pigz
app-admin/sudo
dev-embedded/cbootimage
dev-embedded/tegrarcm
dev-embedded/u-boot-tools
dev-util/ccache
dev-util/crosutils
>=sys-apps/dtc-1.3.0-r5
sys-boot/bootstub
sys-boot/grub
sys-boot/syslinux
sys-devel/crossdev
@ -38,16 +35,14 @@ RDEPEND="${RDEPEND}
# Host dependencies for building cross-compiled packages.
# TODO: chromeos-base/chromeos-installer
RDEPEND="${RDEPEND}
app-admin/eselect-opengl
app-admin/eselect-mesa
app-arch/cabextract
>=app-arch/pbzip2-1.1.1-r1
app-arch/rpm2targz
app-arch/sharutils
app-arch/unzip
app-crypt/nss
app-emulation/qemu-kvm
!app-emulation/qemu-user
sys-libs/nss-db
dev-libs/nss
app-emulation/qemu
app-i18n/ibus
app-text/texi2html
coreos-base/google-breakpad
@ -78,7 +73,6 @@ RDEPEND="${RDEPEND}
dev-python/mako
dev-python/netifaces
dev-python/pygobject
dev-python/pygtk
dev-python/pyinotify
dev-python/pyopenssl
dev-python/python-daemon
@ -91,14 +85,12 @@ RDEPEND="${RDEPEND}
dev-util/gdbus-codegen
dev-util/gperf
dev-util/gtk-doc
dev-util/hdctools
>=dev-util/gtk-doc-am-1.13
>=dev-util/intltool-0.30
dev-util/scons
>=dev-vcs/git-1.7.2
dev-vcs/subversion[-dso]
>=media-libs/freetype-2.2.1
media-libs/mesa
net-misc/gsutil
sys-apps/usbutils
!sys-apps/nih-dbus-tool
@ -140,10 +132,9 @@ RDEPEND="${RDEPEND}
"
# Host dependencies that are needed to create and sign images
# TODO: sys-apps/mosys
RDEPEND="${RDEPEND}
>=coreos-base/vboot_reference-1.0-r174
coreos-base/verity
sys-apps/mosys
sys-fs/libfat
"
@ -153,8 +144,8 @@ RDEPEND="${RDEPEND}
"
# Host dependencies that are needed for delta_generator.
# TODO: coreos-base/update_engine
RDEPEND="${RDEPEND}
coreos-base/update_engine
"
# Host dependencies to run unit tests within the chroot

View File

@ -0,0 +1,55 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Determine which systems must use the old (v2) preamble header format, and
# create a config file forcing that format for those systems.
#
# This is only needed to provide backward compatibility for already-shipping
# systems.
EAPI="4"
inherit cros-board
DESCRIPTION="Chrome OS verified boot tools config"
LICENSE="GPL-2"
SLOT="0"
KEYWORDS="amd64 arm x86"
# These are the ONLY systems that should require v2. All others should adapt
# to the new format.
OLD_BOARDS=(
lumpy
lumpy64
stumpy
stumpy64
tegra2
x86-alex
x86-alex32
x86-mario
x86-mario64
x86-zgb
x86-zgb32
)
S=${WORKDIR}
# Generate vbutil option files that pin the preamble header format for the
# current board: 2 (legacy) when the board is listed in OLD_BOARDS, 3
# otherwise.
src_compile() {
local b
b=$(get_current_board_no_variant)
mkdir -p "config"
if has "${b}" "${OLD_BOARDS[@]}" ; then
fmt=2
else
fmt=3
fi
# Each option file holds a two-line argument pair: "--format" then the number.
printf -- '--format\n%s\n' "${fmt}" > "config/vbutil_firmware.options"
printf -- '--format\n%s\n' "${fmt}" > "config/vbutil_kernel.options"
}
# Install the generated vbutil option files under /usr/share/vboot/config.
src_install() {
insinto /usr/share/vboot/config
doins config/vbutil_{firmware,kernel}.options
}

View File

@ -17,8 +17,8 @@ IUSE="32bit_au minimal rbtest tpmtests cros_host"
LIBCHROME_VERS="125070"
# TODO: chromeos-base/libchrome:${LIBCHROME_VERS}[cros-debug=]
RDEPEND="app-crypt/trousers
chromeos-base/libchrome:${LIBCHROME_VERS}[cros-debug=]
!minimal? ( dev-libs/libyaml )
dev-libs/glib
dev-libs/openssl
@ -30,7 +30,7 @@ DEPEND="${RDEPEND}
# We need the config in place before we run, but don't need to rebuild this
# package every time.
RDEPEND="${RDEPEND}
!cros_host? ( chromeos-base/vboot_reference-config )"
!cros_host? ( coreos-base/vboot_reference-config )"
_src_compile_main() {
mkdir "${S}"/build-main

View File

@ -0,0 +1,38 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
EAPI=2
CROS_WORKON_COMMIT="c1fd42203b778dc7a4950a4422a01f839b84bc04"
CROS_WORKON_TREE="fac7e77188abf62a8c29d1aab875e6bc72d892b2"
CROS_WORKON_PROJECT="chromiumos/platform/crosutils"
CROS_WORKON_LOCALNAME="../scripts/"
inherit python cros-workon
DESCRIPTION="Chromium OS build utilities"
HOMEPAGE="http://www.chromium.org/chromium-os"
LICENSE="BSD"
SLOT="0"
KEYWORDS="amd64"
IUSE=""
# Strip symlinks and repo-only files (watchlists, review settings, bundled
# shflags) from the checkout so they are not installed.
src_configure() {
find . -type l -exec rm {} \; &&
rm -fr WATCHLISTS inherit-review-settings-ok lib/shflags ||
die "Couldn't clean directory."
}
# Install executables into /usr/lib/crosutils, python modules into the
# python site dir, and the remaining libraries into /usr/lib/crosutils/lib.
src_install() {
# Install package files
exeinto /usr/lib/crosutils
doexe * || die "Could not install shared files."
insinto "$(python_get_sitedir)"
doins lib/*.py || die "Could not install python files."
# Remove the python files so the generic lib install below skips them.
rm -f lib/*.py
# Install libraries
insinto /usr/lib/crosutils/lib
doins lib/* || die "Could not install library files"
}

View File

@ -0,0 +1,36 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
EAPI=2
CROS_WORKON_PROJECT="chromiumos/platform/crosutils"
CROS_WORKON_LOCALNAME="../scripts/"
inherit python cros-workon
DESCRIPTION="Chromium OS build utilities"
HOMEPAGE="http://www.chromium.org/chromium-os"
LICENSE="BSD"
SLOT="0"
KEYWORDS="~amd64"
IUSE=""
# Strip symlinks and repo-only files (watchlists, review settings, bundled
# shflags) from the checkout so they are not installed.
src_configure() {
find . -type l -exec rm {} \; &&
rm -fr WATCHLISTS inherit-review-settings-ok lib/shflags ||
die "Couldn't clean directory."
}
# Install executables into /usr/lib/crosutils, python modules into the
# python site dir, and the remaining libraries into /usr/lib/crosutils/lib.
src_install() {
# Install package files
exeinto /usr/lib/crosutils
doexe * || die "Could not install shared files."
insinto "$(python_get_sitedir)"
doins lib/*.py || die "Could not install python files."
# Remove the python files so the generic lib install below skips them.
rm -f lib/*.py
# Install libraries
insinto /usr/lib/crosutils/lib
doins lib/* || die "Could not install library files"
}

View File

@ -0,0 +1,43 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# $Header: $
# @ECLASS: appid.eclass
# @MAINTAINER:
# ChromiumOS Build Team
# @BUGREPORTS:
# Please report bugs via http://crosbug.com/new (with label Area-Build)
# @VCSURL: http://git.chromium.org/gitweb/?p=chromiumos/overlays/chromiumos-overlay.git;a=blob;f=eclass/@ECLASS@
# @BLURB: Eclass for setting up the omaha appid field in /etc/lsb-release
# @FUNCTION: doappid
# @USAGE: <appid>
# @DESCRIPTION:
# Initializes /etc/lsb-release with the appid. Note that appid is really
# just a UUID in the canonical {8-4-4-4-12} format (all uppercase). e.g.
# {01234567-89AB-CDEF-0123-456789ABCDEF}
# Record the Omaha update appid in /etc/lsb-release.
# Usage: doappid <appid>
# The appid must be a UUID in canonical {8-4-4-4-12} format with uppercase
# hex digits; the historical mario appid is grandfathered in even though it
# violates that rule. Dies on a malformed appid or if /etc/lsb-release has
# already been created by something else.
doappid() {
[[ $# -eq 1 && -n $1 ]] || die "Usage: ${FUNCNAME} <appid>"
local appid=$1
# Validate the UUID is formatted correctly. Except for mario --
# it was created before we had strict rules, and so it violates :(.
if [[ ${appid} != '{87efface-864d-49a5-9bb3-4b050a7c227a}' ]] ; then
local uuid_regex='[{][0-9A-F]{8}-([0-9A-F]{4}-){3}[0-9A-F]{12}[}]'
# Delete one well-formed UUID from the input; any leftover characters
# mean the input was not exactly a single canonical UUID.
local filtered_appid=$(echo "${appid}" | LC_ALL=C sed -r "s:${uuid_regex}::")
if [[ -n ${filtered_appid} ]] ; then
eerror "Invalid appid: ${appid} -> ${filtered_appid}"
eerror " - must start with '{' and end with '}'"
eerror " - must be all upper case"
eerror " - be a valid UUID (8-4-4-4-12 hex digits)"
die "invalid appid: ${appid}"
fi
fi
dodir /etc
local lsb="${D}/etc/lsb-release"
# Refuse to clobber: no other ebuild step should have written this yet.
[[ -e ${lsb} ]] && die "${lsb} already exists!"
echo "CHROMEOS_RELEASE_APPID=${appid}" > "${lsb}" || die "creating ${lsb} failed!"
}

View File

@ -0,0 +1,56 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Eclass for handling autotest deps packages
#
EAPI=2
inherit autotest
AUTOTEST_CONFIG_LIST=""
AUTOTEST_DEPS_LIST=""
AUTOTEST_PROFILERS_LIST=""
#
# In order to build only deps (call their setup function), we need to have
# a test that calls their setup() in its own setup(). This is done by
# creating a "fake" test, prebuilding it, and then deleting it after install.
#
AUTOTEST_FORCE_TEST_LIST="myfaketest"
# Prepare a deps-only autotest build: after the normal prepare step,
# generate a fake client test ("myfaketest") whose setup() calls
# setup_dep() for every entry in AUTOTEST_DEPS_LIST, so prebuilding the
# fake test builds all deps. The fake test is removed again at install.
autotest-deponly_src_prepare() {
autotest_src_prepare
pushd "${AUTOTEST_WORKDIR}/client/site_tests/" 1> /dev/null || die
mkdir myfaketest
cd myfaketest
# NOTE: Here we create a fake test case, that does not do anything except for
# setup of all deps.
cat << ENDL > myfaketest.py
from autotest_lib.client.bin import test, utils
class myfaketest(test.test):
def setup(self):
ENDL
# Append one setup_dep() call per dep to the generated setup() body.
for item in ${AUTOTEST_DEPS_LIST}; do
echo " self.job.setup_dep(['${item}'])" >> myfaketest.py
done
chmod a+x myfaketest.py
popd 1> /dev/null
}
# Install as usual, then delete the fake test so it never ships in the image.
autotest-deponly_src_install() {
autotest_src_install
rm -rf ${D}/usr/local/autotest/client/site_tests/myfaketest || die
}
EXPORT_FUNCTIONS src_prepare src_install

View File

@ -0,0 +1,443 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Eclass for handling autotest test packages
#
RDEPEND="( autotest? ( >=chromeos-base/autotest-0.0.1-r3 ) )"
IUSE="+buildcheck autotest opengles"
# Ensure the configures run by autotest pick up the right config.site
export CONFIG_SITE="/usr/share/config.site"
export AUTOTEST_WORKDIR="${WORKDIR}/autotest-work"
# @ECLASS-VARIABLE: AUTOTEST_CLIENT_*
# @DESCRIPTION:
# Location of the appropriate test directory inside ${S}
: ${AUTOTEST_CLIENT_TESTS:=client/tests}
: ${AUTOTEST_CLIENT_SITE_TESTS:=client/site_tests}
: ${AUTOTEST_CONFIG:=client/config}
: ${AUTOTEST_DEPS:=client/deps}
: ${AUTOTEST_PROFILERS:=client/profilers}
# @ECLASS-VARIABLE: AUTOTEST_*_LIST
# @DESCRIPTION:
# The list of deps/configs/profilers provided with this package
: ${AUTOTEST_CONFIG_LIST:=}
: ${AUTOTEST_DEPS_LIST:=}
: ${AUTOTEST_PROFILERS_LIST:=}
# @ECLASS-VARIABLE: AUTOTEST_FORCE_LIST
# @DESCRIPTION:
# Sometimes we just want to forget about useflags and build what's inside
: ${AUTOTEST_FORCE_TEST_LIST:=}
# @ECLASS-VARIABLE: AUTOTEST_FILE_MASK
# @DESCRIPTION:
# The list of 'find' expressions to find in the resulting image and delete
: ${AUTOTEST_FILE_MASK:=}
# Copy via hardlink when the filesystem allows it (cheap); otherwise fall
# back to a regular copy with the same arguments.
fast_cp() {
	if ! cp -l "$@"; then
		cp "$@"
	fi
}
# Print the list of tests to build, space separated.
# AUTOTEST_FORCE_TEST_LIST, when set, overrides everything; otherwise the
# list is derived from IUSE_TESTS by stripping the tests_ prefix (and any
# +/- default markers) and keeping only tests whose tests_<name> USE flag
# is enabled.
get_test_list() {
if [ -n "${AUTOTEST_FORCE_TEST_LIST}" ]; then
# list forced
echo "${AUTOTEST_FORCE_TEST_LIST}"
return
fi
# we cache the result of this operation in AUTOTEST_TESTS,
# because it's expensive and does not change over the course of one ebuild run
# NOTE(review): the AUTOTEST_TESTS caching described above is not actually
# implemented in this function — confirm whether the comment is stale.
local result="${IUSE_TESTS[*]//[+-]tests_/}"
result="${result//tests_/}"
# Keep only the tests whose USE flag is switched on.
result=$(for test in ${result}; do use tests_${test} && echo -n "${test} "; done)
echo "${result}"
}
# Pythonify the list of packages by doing the equivalent of ','.join(args)
# Join all arguments with commas — the equivalent of Python's ','.join(args).
pythonify_test_list() {
	local joined="" word
	for word in "$@"; do
		if [ -z "${joined}" ]; then
			joined="${word}"
		else
			joined="${joined},${word}"
		fi
	done
	echo ${joined}
}
# Create python package init files for top level test case dirs.
# Create __init__.py in each directory listed in $1 (space separated) and
# in each of its immediate subdirectories, so Python treats them as
# packages.
touch_init_py() {
	local base sub
	for base in ${1}; do
		# find with -maxdepth 1 also yields ${base} itself; touching its
		# __init__.py twice is harmless.
		for sub in $(find ${base} -maxdepth 1 -type d); do
			touch ${sub}/__init__.py
		done
		touch ${base}/__init__.py
	done
}
# Exports a CROS_WORKON_SUBDIRS_TO_COPY variable to ensure that only the
# necessary files will be copied. This cannot be applied globally, as some
# ebuilds may not have tests only.
autotest_restrict_workon_subdirs() {
# Collect every autotest source location (client/server tests and
# site_tests, plus config/deps/profilers) into the array cros-workon reads.
CROS_WORKON_SUBDIRS_TO_COPY=()
local var
for var in AUTOTEST_{CLIENT,SERVER}_{TESTS,SITE_TESTS} \
AUTOTEST_{CONFIG,DEPS,PROFILERS}; do
CROS_WORKON_SUBDIRS_TO_COPY+=( ${!var} )
done
}
# Export the cross-compilation toolchain variables (CC, CXX, AR, ...) for
# the autotest build, and disable PIE on hardened x86 toolchains.
setup_cross_toolchain() {
tc-export CC CXX AR RANLIB LD NM STRIP PKG_CONFIG
export CCFLAGS="$CFLAGS"
# TODO(fes): Check for /etc/hardened for now instead of the hardened
# use flag because we aren't enabling hardened on the target board.
# Rather, right now we're using hardened only during toolchain compile.
# Various tests/etc. use %ebx in here, so we have to turn off PIE when
# using the hardened compiler
if use x86 ; then
if use hardened ; then
#CC="${CC} -nopie"
append-flags -nopie
fi
fi
}
# Populate ${1} with a usable autotest tree based on the SYSROOT install:
# most content is symlinked in, but binaries that inspect their own path
# (utils/server entries, autotest, autotest_client) are real copies so
# autotest does not notice it is running outside /usr/local.
create_autotest_workdir() {
local dst=${1}
# create a working enviroment for pre-building
ln -sf "${SYSROOT}"/usr/local/autotest/{conmux,tko,global_config.ini,shadow_config.ini} "${dst}"/
# NOTE: in order to make autotest not notice it's running from /usr/local/, we need
# to make sure the binaries are real, because they do the path magic
local root_path base_path
for base_path in utils server; do
root_path="${SYSROOT}/usr/local/autotest/${base_path}"
mkdir -p "${dst}/${base_path}"
for entry in $(ls "${root_path}"); do
# Take all important binaries from SYSROOT install, make a copy.
if [ -d "${root_path}/${entry}" ]; then
# Ignore anything that has already been put in place by
# something else. This will typically be server/{site_tests,tests}.
if ! [ -e "${dst}/${base_path}/${entry}" ]; then
ln -sf "${root_path}/${entry}" "${dst}/${base_path}/"
fi
else
cp -f ${root_path}/${entry} ${dst}/${base_path}/
fi
done
done
for base_path in client client/bin; do
root_path="${SYSROOT}/usr/local/autotest/${base_path}"
mkdir -p "${dst}/${base_path}"
# Skip bin, because it is processed separately, and test-provided dirs
# Also don't symlink to packages, because that kills the build
for entry in $(ls "${root_path}" | \
grep -v "\(bin\|tests\|site_tests\|config\|deps\|profilers\|packages\)$"); do
ln -sf "${root_path}/${entry}" "${dst}/${base_path}/"
done
done
# replace the important binaries with real copies
for base_path in autotest autotest_client; do
root_path="${SYSROOT}/usr/local/autotest/client/bin/${base_path}"
rm "${dst}/client/bin/${base_path}"
cp -f ${root_path} "${dst}/client/bin/${base_path}"
done
# Selectively pull in deps that are not provided by the current test package
for base_path in config deps profilers; do
for dir in "${SYSROOT}/usr/local/autotest/client/${base_path}"/*; do
if [ -d "${dir}" ] && \
! [ -d "${AUTOTEST_WORKDIR}/client/${base_path}/$(basename ${dir})" ]; then
# directory does not exist, create a symlink
ln -sf "${dir}" "${AUTOTEST_WORKDIR}/client/${base_path}/$(basename ${dir})"
fi
done
done
}
# Echo the names of test directories directly under $1, one per line.
# With a non-empty $2, every directory is listed; otherwise only those
# containing a main <name>/<name>.py file.
print_test_dirs() {
	local root="${1}" list_all="${2}" entry
	pushd "${root}" 1> /dev/null
	for entry in *; do
		[ -d "${entry}" ] || continue
		if [ -n "${list_all}" ] || [ -f "${entry}/${entry}.py" ]; then
			echo "${entry}"
		fi
	done
	popd 1> /dev/null
}
# checks IUSE_TESTS and sees if at least one of these is enabled
# checks IUSE_TESTS and sees if at least one of these is enabled
# Returns 0 when USE=autotest is on and get_test_list is non-empty,
# 1 otherwise.
are_we_used() {
if ! use autotest; then
# unused
return 1
fi
[ -n "$(get_test_list)" ] && return 0
# unused
return 1
}
# Stage the enabled tests, plus the configs/deps/profilers this package
# provides, into AUTOTEST_WORKDIR; wire in the SYSROOT autotest install via
# create_autotest_workdir; create package __init__.py files; and prune
# checked-in binaries that do not match the target ELF machine. No-op when
# no tests are enabled.
autotest_src_prepare() {
are_we_used || return 0
einfo "Preparing tests"
# FIXME: These directories are needed, autotest quietly dies if
# they don't even exist. They may, however, stay empty.
mkdir -p "${AUTOTEST_WORKDIR}"/client/tests
mkdir -p "${AUTOTEST_WORKDIR}"/client/site_tests
mkdir -p "${AUTOTEST_WORKDIR}"/client/config
mkdir -p "${AUTOTEST_WORKDIR}"/client/deps
mkdir -p "${AUTOTEST_WORKDIR}"/client/profilers
TEST_LIST=$(get_test_list)
# Pull in the individual test cases.
for l1 in client server; do
for l2 in site_tests tests; do
# pick up the indicated location of test sources
eval srcdir=${WORKDIR}/${P}/\${AUTOTEST_${l1^^*}_${l2^^*}}
# test does have this directory
for test in ${TEST_LIST}; do
if [ -d "${srcdir}/${test}" ]; then
fast_cp -fpr "${srcdir}/${test}" "${AUTOTEST_WORKDIR}/${l1}/${l2}"/ || die
fi
done
done
done
# Pull in all the deps provided by this package, selectively.
for l2 in config deps profilers; do
# pick up the indicated location of test sources
eval srcdir=${WORKDIR}/${P}/\${AUTOTEST_${l2^^*}}
if [ -d "${srcdir}" ]; then # test does have this directory
pushd "${srcdir}" 1> /dev/null
eval deplist=\${AUTOTEST_${l2^^*}_LIST}
if [ "${deplist}" = "*" ]; then
fast_cp -fpr * "${AUTOTEST_WORKDIR}/client/${l2}"
else
for dir in ${deplist}; do
fast_cp -fpr "${dir}" "${AUTOTEST_WORKDIR}/client/${l2}"/ || die
done
fi
popd 1> /dev/null
fi
done
# FIXME: We'd like if this were not necessary, and autotest supported out-of-tree build
create_autotest_workdir "${AUTOTEST_WORKDIR}"
# Each test directory needs to be visited and have an __init__.py created.
# However, that only applies to the directories which have a main .py file.
pushd "${AUTOTEST_WORKDIR}" > /dev/null || die "AUTOTEST_WORKDIR does not exist?!"
for dir in client/tests client/site_tests; do
pushd "${dir}" > /dev/null || continue
for sub in *; do
[ -f "${sub}/${sub}.py" ] || continue
touch_init_py ${sub}
done
popd > /dev/null
done
popd > /dev/null
# Cleanup checked-in binaries that don't support the target architecture
[[ ${E_MACHINE} == "" ]] && return 0;
rm -fv $( scanelf -RmyBF%a "${AUTOTEST_WORKDIR}" | grep -v -e ^${E_MACHINE} )
}
# Prebuild all enabled client tests inside AUTOTEST_WORKDIR by invoking
# autotest_client --client_test_setup, colorizing its log output. With
# USE=buildcheck a build failure is fatal; otherwise it is only reported.
# No-op (with a warning) when the requested test list is empty.
autotest_src_compile() {
if ! are_we_used; then
ewarn "***************************************************************"
ewarn "* Not building any tests, because the requested list is empty *"
ewarn "***************************************************************"
return 0
fi
einfo "Compiling tests"
pushd "${AUTOTEST_WORKDIR}" 1> /dev/null
setup_cross_toolchain
if use opengles ; then
graphics_backend=OPENGLES
else
graphics_backend=OPENGL
fi
# HACK: Some of the autotests depend on SYSROOT being defined, and die
# a gruesome death if it isn't. But SYSROOT does not need to exist, for
# example on the host, it doesn't. Let's define a compatible variable
# here in case we have none.
export SYSROOT=${SYSROOT:-"/"}
# This only prints the tests that have the associated .py
# (and therefore a setup function)
local prebuild_test_dirs="
client/tests client/site_tests"
TESTS=$(\
for dir in ${prebuild_test_dirs}; do
print_test_dirs "${AUTOTEST_WORKDIR}/${dir}"
done | sort -u)
NR_TESTS=$(echo ${TESTS}|wc -w)
if ! [ "${NR_TESTS}" = "0" ]; then
einfo "Building tests (${NR_TESTS}):"
einfo "${TESTS}"
NORMAL=$(echo -e "\e[0m")
GREEN=$(echo -e "\e[1;32m")
RED=$(echo -e "\e[1;31m")
# Call autotest to prebuild all test cases.
# Parse output through a colorifying sed script
( GRAPHICS_BACKEND="$graphics_backend" LOGNAME=${SUDO_USER} \
client/bin/autotest_client --quiet \
--client_test_setup=$(pythonify_test_list ${TESTS}) \
|| ! use buildcheck || die "Tests failed to build."
) | sed -e "s/\(INFO:root:setup\)/${GREEN}* \1${NORMAL}/" \
-e "s/\(ERROR:root:\[.*\]\)/${RED}\1${NORMAL}/"
else
einfo "No tests to prebuild, skipping"
fi
# Cleanup some temp files after compiling
for mask in '*.[do]' '*.pyc' ${AUTOTEST_FILE_MASK}; do
einfo "Purging ${mask}"
find . -name "${mask}" -delete
done
popd 1> /dev/null
}
# Install the prebuilt test trees wholesale, but only the configs/deps/
# profilers this package actually provides (per AUTOTEST_*_LIST), into
# /usr/local/autotest. No-op when no tests are enabled.
autotest_src_install() {
are_we_used || return 0
einfo "Installing tests"
# Install all test cases, after setup has been called on them.
# We install everything, because nothing else is copied into the
# testcase directories besides what this package provides.
local instdirs="
client/tests
client/site_tests"
for dir in ${instdirs}; do
[ -d "${AUTOTEST_WORKDIR}/${dir}" ] || continue
insinto /usr/local/autotest/$(dirname ${dir})
doins -r "${AUTOTEST_WORKDIR}/${dir}"
done
# Install the deps, configs, profilers.
# Difference from above is, we don't install the whole thing, just
# the stuff provided by this package, by looking at AUTOTEST_*_LIST.
instdirs="
config
deps
profilers"
for dir in ${instdirs}; do
[ -d "${AUTOTEST_WORKDIR}/client/${dir}" ] || continue
insinto /usr/local/autotest/client/${dir}
eval provided=\${AUTOTEST_${dir^^*}_LIST}
# * means provided all, figure out the list from ${S}
if [ "${provided}" = "*" ]; then
if eval pushd "${WORKDIR}/${P}/\${AUTOTEST_${dir^^*}}" &> /dev/null; then
provided=$(ls)
popd 1> /dev/null
else
provided=""
fi
fi
for item in ${provided}; do
doins -r "${AUTOTEST_WORKDIR}/client/${dir}/${item}"
done
done
# TODO: Not all needs to be executable, but it's hard to pick selectively.
# The source repo should already contain stuff with the right permissions.
chmod -R a+x "${D}"/usr/local/autotest/*
}
# Package the freshly-installed client tests (and, unless --all is used,
# the deps/profilers this package provides) into the local autotest
# packages repository via packager.py. No-op when no tests are enabled or
# no autotest installation exists under ${ROOT}.
autotest_pkg_postinst() {
	are_we_used || return 0
	local root_autotest_dir="${ROOT}/usr/local/autotest"
	local path_to_image="${D}/usr/local/autotest"
	# Should only happen when running emerge on a DUT.
	if [ ! -d "${root_autotest_dir}" ]; then
		einfo "Skipping packaging as no autotest installation detected."
		return 0
	fi
	# Gather the artifacts we want autotest to package.
	local test_opt dep_opt prof_opt
	# Only client tests can be packaged.
	local tests_to_package_dirs="client/tests client/site_tests"
	local client_tests=$(\
		for dir in ${tests_to_package_dirs}; do
			print_test_dirs "${path_to_image}/${dir}" yes
		done | sort -u)
	if [ -n "${client_tests}" ] && [ "${client_tests}" != "myfaketest" ]; then
		# Check for test_count. The packager performs poorly when
		# too many arguments are specified vs. --all. This should be fixed in
		# autotest (crosbug.com/28173).
		test_count=$(echo ${client_tests} | wc -w)
		if [ ${test_count} -gt 10 ]; then
			test_opt="--all"
		else
			test_opt="--test=$(pythonify_test_list ${client_tests})"
		fi
	fi
	# BUGFIX: this previously read [ "{test_opt}" != "--all" ] (missing $),
	# which compared the literal string "{test_opt}" and so was always true,
	# adding dep/profiler options even when --all was chosen.
	if [ "${test_opt}" != "--all" ]; then
		if [ -n "${AUTOTEST_DEPS_LIST}" ]; then
			dep_opt="--dep=$(pythonify_test_list ${AUTOTEST_DEPS_LIST})"
		fi
		# For *, we must generate the list of profilers.
		if [ "${AUTOTEST_PROFILERS_LIST}" = "*" ]; then
			AUTOTEST_PROFILERS_LIST=$(\
				print_test_dirs "${path_to_image}/client/profilers" yes | sort -u)
			prof_opt="--profiler=$(pythonify_test_list ${AUTOTEST_PROFILERS_LIST})"
		fi
	fi
	if [ -n "${test_opt}" -o -n "${dep_opt}" -o -n "${prof_opt}" ]; then
		einfo "Running packager with options ${test_opt} ${dep_opt} ${prof_opt}"
		# Serialize concurrent packager runs against the shared packages dir.
		flock "${root_autotest_dir}/packages" \
			-c "python -B ${root_autotest_dir}/utils/packager.py \
				-r ${root_autotest_dir}/packages \
				${test_opt} ${dep_opt} ${prof_opt} upload"
	else
		einfo "Packager not run as nothing was found to package."
	fi
}
if [[ "${CROS_WORKON_PROJECT}" == "chromiumos/third_party/autotest" ]]; then
# Using main autotest repo. Automatically restricting checkout.
# Note that this does not happen if the inherit is done prior to setting
# CROS_WORKON_* variables.
autotest_restrict_workon_subdirs
fi
EXPORT_FUNCTIONS src_compile src_prepare src_install pkg_postinst

View File

@ -0,0 +1,22 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# $Header:
# @ECLASS: binutils-funcs.eclass
# @MAINTAINER:
# Raymes Khoury <raymes@google.com>
# @DESCRIPTION:
# Functions to get the paths to binutils installations for gold and for GNU ld.
inherit toolchain-funcs
# Print the real directory containing the GNU ld binary for the given
# toolchain tuple (argument is forwarded to tc-getLD).
get_binutils_path_ld() {
	local ld_bin
	ld_bin=$(type -p $(tc-getLD ${1}))
	dirname "$(readlink -f "${ld_bin}")"
}
# The gold installation lives next to the GNU ld directory with a "-gold"
# suffix; print that path.
get_binutils_path_gold() {
	printf '%s-gold\n' "$(get_binutils_path_ld ${1})"
}

View File

@ -0,0 +1,17 @@
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
# @ECLASS-VARIABLE: CONFLICT_LIST
# @DESCRIPTION:
# Atoms mentioned in CONFLICT_LIST need to either be unmerged or upgraded
# prior to this package being installed, but we don't want to have an explicit
# dependency on that package. So instead, we do the following:
# 1. When we are installed, ensure that the old version is not installed.
# 2. If old version is installed, ask emerge to consider upgrading it.
# This consideration is listed as PDEPEND so that we don't add an
# explicit dependency on the other package.
for atom in $CONFLICT_LIST; do
DEPEND="$DEPEND !!<=$atom"
RDEPEND="$RDEPEND !!<=$atom"
PDEPEND="$PDEPEND || ( >$atom !!<=$atom )"
done

View File

@ -0,0 +1,47 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Library for handling packages that are part of Auto Update.
#
inherit flag-o-matic
# Some boards started out 32bit (user/kernel) and then migrated to 64bit
# (user/kernel). Since we need to auto-update (AU) from the 32bit to
# 64bit, and the old 32bit kernels can't execte 64bit code, we need to
# always build the AU code as 32bit.
#
# Setup the build env to create 32bit objects.
# Force a 32bit x86 build environment for auto-update code; saves the
# current values so board_teardown_32bit_au_env can restore them.
board_setup_32bit_au_env()
{
[[ $# -eq 0 ]] || die "${FUNCNAME}: takes no arguments"
# Stash current toolchain settings for teardown.
__AU_OLD_ARCH=${ARCH}
__AU_OLD_ABI=${ABI}
__AU_OLD_LIBDIR_x86=${LIBDIR_x86}
export ARCH=x86 ABI=x86 LIBDIR_x86="lib"
__AU_OLD_CHOST=${CHOST}
export CHOST=i686-pc-linux-gnu
__AU_OLD_SYSROOT=${SYSROOT}
export SYSROOT=/usr/${CHOST}
# Keep resolving libs/headers from the original (64bit board) sysroot.
append-ldflags -L"${__AU_OLD_SYSROOT}"/usr/lib
append-cxxflags -isystem "${__AU_OLD_SYSROOT}"/usr/include
}
# undo what we did in the above function
# Restore the environment saved by board_setup_32bit_au_env; dies if the
# setup function was never called.
board_teardown_32bit_au_env()
{
[[ $# -eq 0 ]] || die "${FUNCNAME}: takes no arguments"
[ -z "${__AU_OLD_SYSROOT}" ] && \
die "board_setup_32bit_au_env must be called first"
# Remove the flags added during setup, then restore saved variables.
filter-ldflags -L"${__AU_OLD_SYSROOT}"/usr/lib
filter-flags -isystem "${__AU_OLD_SYSROOT}"/usr/include
export SYSROOT=${__AU_OLD_SYSROOT}
export CHOST=${__AU_OLD_CHOST}
export LIBDIR_x86=${__AU_OLD_LIBDIR_x86}
export ABI=${__AU_OLD_ABI}
export ARCH=${__AU_OLD_ARCH}
}

View File

@ -0,0 +1,148 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Install binary packages for Chromium OS
#
# @ECLASS-VARIABLE: CROS_BINARY_STORE_DIR
# @DESCRIPTION:
# Storage directory for Chrome OS Binaries
: ${CROS_BINARY_STORE_DIR:=${PORTAGE_ACTUAL_DISTDIR:-${DISTDIR}}/cros-binary}
# @ECLASS-VARIABLE: CROS_BINARY_URI
# @DESCRIPTION:
# URI for the binary may be one of:
# http://
# https://
# ssh://
# file:// (file is relative to the files directory)
# TODO: Add "->" support if we get file collisions
: ${CROS_BINARY_URI:=}
# @ECLASS-VARIABLE: CROS_BINARY_SUM
# @DESCRIPTION:
# Optional SHA-1 sum of the file to be fetched
: ${CROS_BINARY_SUM:=}
# @ECLASS-VARIABLE: CROS_BINARY_INSTALL_FLAGS
# @DESCRIPTION:
# Optional Flags to use while installing the binary
: ${CROS_BINARY_INSTALL_FLAGS:=}
# @ECLASS-VARIABLE: CROS_BINARY_LOCAL_URI_BASE
# @DESCRIPTION:
# Optional URI to override CROS_BINARY_URI location. If this variable
# is used the filename from CROS_BINARY_URI will be used, but the path
# to the binary will be changed.
: ${CROS_BINARY_LOCAL_URI_BASE:=}
# Check for EAPI 2+
case "${EAPI:-0}" in
4|3|2) ;;
*) die "unsupported EAPI" ;;
esac
# Verify the cached download against CROS_BINARY_SUM.
# Returns 1 when no checksum is configured (caller treats the cache as
# unusable); otherwise returns sha1sum -c's exit status.
cros-binary_check_file() {
local target="${CROS_BINARY_STORE_DIR}/${CROS_BINARY_URI##*/}"
elog "Checking for cached $target"
if [[ -z "${CROS_BINARY_SUM}" ]]; then
return 1
else
# NOTE(review): sha1sum -c conventionally expects "SUM  filename" with
# two spaces; confirm the single-space form here is accepted.
echo "${CROS_BINARY_SUM} ${target}" |
sha1sum -c --quiet >/dev/null 2>&1
return
fi
}
# Fetch the binary named by CROS_BINARY_URI (or its override location under
# CROS_BINARY_LOCAL_URI_BASE) into CROS_BINARY_STORE_DIR, unless a
# checksum-valid copy is already cached. Supported schemes: http, https,
# ssh (via scp) and file (relative paths resolve against FILESDIR). Dies
# on unsupported schemes or a failed final verification. When no
# CROS_BINARY_SUM is configured, one is computed and reported for the
# ebuild author to pin.
cros-binary_fetch() {
	local uri=${CROS_BINARY_URI}
	if [[ ! -z "${CROS_BINARY_LOCAL_URI_BASE}" ]]; then
		uri="${CROS_BINARY_LOCAL_URI_BASE}/${CROS_BINARY_URI##*/}"
	fi
	local scheme="${uri%%://*}"
	local non_scheme=${uri#*://}
	local netloc=${non_scheme%%/*}
	local server=${netloc%%:*}
	local path=${non_scheme#*/}
	# BUGFIX: ${netloc##*:} expands to the whole netloc when no ":port" is
	# present, which used to leak the hostname into scp's -P option. Parse
	# a port only when one is actually present; default to 22 (ssh only).
	local port=22
	if [[ "${netloc}" == *:* ]]; then
		port=${netloc##*:}
	fi
	local scp_args="-qo BatchMode=yes -oCheckHostIP=no -P ${port}"
	local target="${CROS_BINARY_STORE_DIR}/${uri##*/}"
	addwrite "${CROS_BINARY_STORE_DIR}"
	if [[ ! -d "${CROS_BINARY_STORE_DIR}" ]]; then
		mkdir -p "${CROS_BINARY_STORE_DIR}" ||
			die "Failed to mkdir ${CROS_BINARY_STORE_DIR}"
	fi
	if ! cros-binary_check_file; then
		rm -f "${target}"
		case "${scheme}" in
		http|https)
			wget "${uri}" -O "${target}" -nv -nc ||
				rm -f "${target}"
			;;
		ssh)
			elog "Running: scp ${scp_args} ${server}:/${path} ${target}"
			scp ${scp_args} "${server}:/${path}" "${target}" ||
				rm -f "${target}"
			;;
		file)
			# Absolute file:///path copies directly; relative paths come
			# from the ebuild's files/ directory.
			if [[ "${non_scheme:0:1}" == "/" ]]; then
				cp "${non_scheme}" "${target}" || rm -f "${target}"
			else
				cp "${FILESDIR}/${non_scheme}" "${target}" ||
					rm -f "${target}"
			fi
			;;
		*)
			die "Protocol for '${uri}' is unsupported."
			;;
		esac
		# if no checksum, generate a new one
		if [[ -z "${CROS_BINARY_SUM}" ]]; then
			local sha1=( $(sha1sum "${target}") )
			elog "cros-binary ${target} is not checksummed. Use:"
			elog "CROS_BINARY_SUM=\"${sha1[0]}\""
			CROS_BINARY_SUM="${sha1[0]}"
		fi
	else
		elog "Not downloading, cached copy available in ${target}"
	fi
	cros-binary_check_file || die "Failed to fetch ${uri}."
}
# src_unpack: for binary packages, fetching the archive is the whole step.
cros-binary_src_unpack() {
cros-binary_fetch
}
# Unpack the fetched tarball into the image directory ${D}, preserving
# permissions and passing CROS_BINARY_INSTALL_FLAGS through to tar.
# Dies on unsupported archive extensions or extraction failure.
cros-binary_src_install() {
	local target="${CROS_BINARY_STORE_DIR}/${CROS_BINARY_URI##*/}"
	local extension="${CROS_BINARY_URI##*.}"
	local flags
	# Select the tar decompression flag from the archive extension.
	# (Previously the extension was re-derived inline, leaving this local
	# unused; reuse it instead.)
	case "${extension}" in
	gz|tgz) flags="z";;
	bz2|tbz2) flags="j";;
	*) die "Unsupported binary file format ${extension}"
	esac
	cd "${D}" || die
	tar "${flags}xpf" "${target}" ${CROS_BINARY_INSTALL_FLAGS} || die "Failed to unpack"
}
EXPORT_FUNCTIONS src_unpack src_install

View File

@ -0,0 +1,126 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Library for handling building of ChromiumOS packages
#
#
# This class sets the BOARD environment variable. It is intended to
# be used by ebuild packages that need to have the board information
# for various reasons -- for example, to differentiate various
# hardware attributes at build time; see 'adhd' for an example of
# this.
#
# When set, the BOARD environment variable should conform to the
# syntax used by the CROS utilities.
#
# If an unknown board is encountered, this class deliberately fails
# the build; this provides an easy method of identifying a change to
# the build which might affect inheriting packages.
#
# For example, the ADHD (Google A/V daemon) package relies on the
# BOARD variable to determine which hardware should be monitored at
# runtime. If the BOARD variable is not set, the daemon will not
# monitor any specific hardware; thus, when a new board is added, the
# ADHD project must be updated.
#
[[ ${EAPI} != "4" ]] && die "Only EAPI=4 is supported"
BOARD_USE_PREFIX="board_use_"
ALL_BOARDS=(
amd64-generic
amd64-corei7
amd64-drm
amd64-host
aries
arm-generic
beaglebone
butterfly
chronos
daisy
daisy-drm
daisy_spring
daisy_snow
emeraldlake2
eureka
fb1
haswell
haswell_baskingridge
haswell_wtm1
haswell_wtm2
ironhide
kiev
klang
link
lumpy
panda
parrot
puppy
raspberrypi
stout
stumpy
tegra2
tegra2_aebl
tegra2_arthur
tegra2_asymptote
tegra2_dev-board
tegra2_dev-board-opengl
tegra2_kaen
tegra2_seaboard
tegra2_wario
tegra3-generic
waluigi
cardhu
x32-generic
x86-agz
x86-alex
x86-alex_he
x86-alex_hubble
x86-alex32
x86-alex32_he
x86-dogfood
x86-drm
x86-fruitloop
x86-generic
x86-mario
x86-mario64
x86-pineview
x86-wayland
x86-zgb
x86-zgb_he
x86-zgb32
x86-zgb32_he
)
# Add BOARD_USE_PREFIX to each board in ALL_BOARDS to create IUSE.
# Also add cros_host so that we can inherit this eclass in ebuilds
# that get emerged both in the cros-sdk and for target boards.
# See REQUIRED_USE below.
IUSE="${ALL_BOARDS[@]/#/${BOARD_USE_PREFIX}} cros_host"
# Require one, and only one, of the board_use flags to be set if this eclass
# is used. This effectively makes any ebuild that inherits this eclass require
# a known valid board to be set.
REQUIRED_USE="^^ ( ${IUSE} )"
# Echo the current board, with variant.
# Echo the current board (including variant suffix), determined by which
# single board_use_* USE flag is enabled. Dies if no known board matches.
get_current_board_with_variant()
{
	local candidate
	for candidate in "${ALL_BOARDS[@]}"; do
		use "${BOARD_USE_PREFIX}${candidate}" || continue
		echo "${candidate}"
		return
	done
	die "Unable to determine current board (with variant)."
}
# Echo the current board, without variant.
# Echo the current board with any "_variant" suffix removed, i.e. the
# portion of the board name before the first underscore.
get_current_board_no_variant()
{
	get_current_board_with_variant | cut -d '_' -f 1
}

View File

@ -0,0 +1,92 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
# @ECLASS: cros-coreboot.eclass
# @MAINTAINER:
# The Chromium OS Authors
# @BLURB: Unifies logic for building coreboot images for Chromium OS.
[[ ${EAPI} != "4" ]] && die "Only EAPI=4 is supported"
inherit toolchain-funcs
DESCRIPTION="coreboot x86 firmware"
HOMEPAGE="http://www.coreboot.org"
LICENSE="GPL-2"
SLOT="0"
IUSE="em100-mode"
RDEPEND="!sys-boot/chromeos-coreboot"
DEPEND="sys-power/iasl
sys-apps/coreboot-utils
sys-boot/chromeos-mrc
"
# @ECLASS-VARIABLE: COREBOOT_BOARD
# @DESCRIPTION:
# Coreboot Configuration name.
: ${COREBOOT_BOARD:=}
# @ECLASS-VARIABLE: COREBOOT_BUILD_ROOT
# @DESCRIPTION:
# Build directory root
: ${COREBOOT_BUILD_ROOT:=}
[[ -z ${COREBOOT_BOARD} ]] && die "COREBOOT_BOARD must be set"
[[ -z ${COREBOOT_BUILD_ROOT} ]] && die "COREBOOT_BUILD_ROOT must be set"
# Seed the coreboot build with the board-specific config. Fail loudly if
# the config is missing instead of continuing with a stale or absent
# .config (the original cp was unchecked).
cros-coreboot_pre_src_prepare() {
	cp configs/config.${COREBOOT_BOARD} .config ||
		die "Missing coreboot config for board ${COREBOOT_BOARD}"
}
# Build the coreboot image for ${COREBOOT_BOARD} into
# ${COREBOOT_BUILD_ROOT}, then build the cbmem host utility for the
# target architecture.
cros-coreboot_src_compile() {
	tc-export CC
	local board="${COREBOOT_BOARD}"
	local build_root="${COREBOOT_BUILD_ROOT}"
	# Set KERNELREVISION (really coreboot revision) to the ebuild revision
	# number followed by a dot and the first seven characters of the git
	# hash. The name is confusing but consistent with the coreboot
	# Makefile.
	local sha1v="${VCSID/*-/}"
	export KERNELREVISION=".${PV}.${sha1v:0:7}"
	# Firmware related binaries are compiled with a 32-bit toolchain
	# on 64-bit platforms
	if use amd64 ; then
		export CROSS_COMPILE="i686-pc-linux-gnu-"
		# NOTE(review): CROSS_COMPILE already ends in '-', so this
		# expands to "i686-pc-linux-gnu--gcc" (double dash). Confirm
		# this is intentional or whether the coreboot Makefile
		# overrides CC from CROSS_COMPILE anyway.
		export CC="${CROSS_COMPILE}-gcc"
	else
		export CROSS_COMPILE=${CHOST}-
	fi
	# NOTE(review): elog does not interpret "\n" escapes; this prints the
	# literal sequences as part of the message.
	elog "Toolchain:\n$(sh util/xcompile/xcompile)\n"
	# Normalize the copied board config before the main build.
	emake obj="${build_root}" oldconfig
	emake obj="${build_root}"
	# Modify firmware descriptor if building for the EM100 emulator.
	if use em100-mode; then
		ifdtool --em100 "${build_root}/coreboot.rom" || die
		# ifdtool writes coreboot.rom.new; move it over the original.
		mv "${build_root}/coreboot.rom"{.new,} || die
	fi
	# Build cbmem for the target
	cd util/cbmem
	emake clean
	# cbmem runs on the target, so build it with the target toolchain.
	CROSS_COMPILE="${CHOST}-" emake
}
# Install the cbmem utility, the built coreboot image, and (when the
# board config references one) the VGA option ROM under its CBFS name.
cros-coreboot_src_install() {
	dobin util/cbmem/cbmem
	insinto /firmware
	newins "${COREBOOT_BUILD_ROOT}/coreboot.rom" coreboot.rom
	# Extract the option ROM path (CONFIG_VGA_BIOS_FILE) and its PCI id
	# (CONFIG_VGA_BIOS_ID) from the quoted values in the board config;
	# CBFS expects the ROM to be named "pci<vendor,device>.rom".
	OPROM=$( awk 'BEGIN{FS="\""} /CONFIG_VGA_BIOS_FILE=/ { print $2 }' \
		configs/config.${COREBOOT_BOARD} )
	CBFSOPROM=pci$( awk 'BEGIN{FS="\""} /CONFIG_VGA_BIOS_ID=/ { print $2 }' \
		configs/config.${COREBOOT_BOARD} ).rom
	# NOTE(review): if the config defines no VGA BIOS, OPROM is empty and
	# this newins fails — presumably all boards using this eclass set it;
	# confirm.
	newins ${OPROM} ${CBFSOPROM}
}
EXPORT_FUNCTIONS src_compile src_install pre_src_prepare

View File

@ -0,0 +1,15 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Set -DNDEBUG if the cros-debug USE flag is not defined.
#
inherit flag-o-matic
IUSE="cros-debug"
# Append -DNDEBUG to the compiler flags (disabling assert()) unless the
# cros-debug USE flag is enabled.
cros-debug-add-NDEBUG() {
	if ! use cros-debug; then
		append-flags -DNDEBUG
	fi
}

View File

@ -0,0 +1,371 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Generate shell script containing firmware update bundle.
#
inherit cros-workon cros-binary
# @ECLASS-VARIABLE: CROS_FIRMWARE_BCS_USER_NAME
# @DESCRIPTION: (Optional) Name of user on BCS server
: ${CROS_FIRMWARE_BCS_USER_NAME:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_BCS_OVERLAY_NAME
# @DESCRIPTION: (Optional) Name of the ebuild overlay.
: ${CROS_FIRMWARE_BCS_OVERLAY_NAME:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_MAIN_IMAGE
# @DESCRIPTION: (Optional) Location of system firmware (BIOS) image
: ${CROS_FIRMWARE_MAIN_IMAGE:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_MAIN_RW_IMAGE
# @DESCRIPTION: (Optional) Location of RW system firmware image
: ${CROS_FIRMWARE_MAIN_RW_IMAGE:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_EC_IMAGE
# @DESCRIPTION: (Optional) Location of EC firmware image
: ${CROS_FIRMWARE_EC_IMAGE:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_EC_VERSION
# @DESCRIPTION: (Optional) Version name of EC firmware
: ${CROS_FIRMWARE_EC_VERSION:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_MAIN_SUM
# @DESCRIPTION: (Optional) SHA-1 checksum of system firmware (BIOS) image
: ${CROS_FIRMWARE_MAIN_SUM:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_MAIN_RW_SUM
# @DESCRIPTION: (Optional) SHA-1 checksum of RW system firmware image
: ${CROS_FIRMWARE_MAIN_RW_SUM:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_EC_SUM
# @DESCRIPTION: (Optional) SHA-1 checksum of EC firmware image on BCS
: ${CROS_FIRMWARE_EC_SUM:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_PLATFORM
# @DESCRIPTION: (Optional) Platform name of firmware
: ${CROS_FIRMWARE_PLATFORM:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_SCRIPT
# @DESCRIPTION: (Optional) Entry script file name of updater
: ${CROS_FIRMWARE_SCRIPT:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_UNSTABLE
# @DESCRIPTION: (Optional) Mark firmware as unstable (always RO+RW update)
: ${CROS_FIRMWARE_UNSTABLE:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_FLASHROM_BINARY
# @DESCRIPTION: (Optional) location of custom flashrom tool
: ${CROS_FIRMWARE_FLASHROM_BINARY:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_EXTRA_LIST
# @DESCRIPTION: (Optional) Semi-colon separated list of additional resources
: ${CROS_FIRMWARE_EXTRA_LIST:=}
# @ECLASS-VARIABLE: CROS_FIRMWARE_FORCE_UPDATE
# @DESCRIPTION: (Optional) Always add "force update firmware" tag.
: ${CROS_FIRMWARE_FORCE_UPDATE:=}
# TODO(hungte) Support "local_mainfw" and "local_ecfw".
# $board-overlay/make.conf may contain these flags to always create "firmware
# from source".
IUSE="bootimage cros_ec"
# Some tools (flashrom, iotools, mosys, ...) were bundled in the updater so we
# don't write RDEPEND=$DEPEND. RDEPEND should have an explicit list of what it
# needs to extract and execute the updater.
DEPEND="
coreos-base/vpd
dev-util/shflags
>=sys-apps/flashrom-0.9.3-r36
sys-apps/mosys
"
# Build firmware from source.
DEPEND="$DEPEND
bootimage? ( sys-boot/coreos-bootimage )
cros_ec? ( coreos-base/coreos-ec )
"
# Maintenance note: The factory install shim downloads and executes
# the firmware updater. Consequently, runtime dependencies for the
# updater are also runtime dependencies for the install shim.
#
# The contents of RDEPEND below must also be present in the
# chromeos-base/chromeos-factoryinstall ebuild in PROVIDED_DEPEND.
# If you make any change to the list below, you may need to make a
# matching change in the factory install ebuild.
#
# TODO(hungte) remove gzip/tar if we have busybox
RDEPEND="
app-arch/gzip
app-arch/sharutils
app-arch/tar
sys-apps/util-linux"
# Check for EAPI 2+
case "${EAPI:-0}" in
4|3|2) ;;
*) die "unsupported EAPI" ;;
esac
UPDATE_SCRIPT="coreos-firmwareupdate"
FW_IMAGE_LOCATION=""
FW_RW_IMAGE_LOCATION=""
EC_IMAGE_LOCATION=""
EXTRA_LOCATIONS=""
# Returns true (0) if parameter starts with "bcs://"
# Returns true (0) iff the argument's scheme (the part before "://") is
# "bcs".
_is_on_bcs() {
	local scheme="${1%%://*}"
	[[ "${scheme}" == "bcs" ]]
}
# Returns true (0) if parameter starts with "file://"
# Returns true (0) iff the argument's scheme (the part before "://") is
# "file".
_is_in_files() {
	local scheme="${1%%://*}"
	[[ "${scheme}" == "file" ]]
}
# Fetch a file from the Binary Component Server
# Parameters: URI of file "bcs://filename.tbz2", checksum of file.
# Returns: Nothing
# Fetch a file from the Binary Component Server via cros-binary.
# $1: URI of the file, e.g. "bcs://filename.tbz2"
# $2: expected checksum of the file (may be empty)
_bcs_fetch() {
	local filename="${1##*://}"
	local checksum="$2"
	local bcs_host="git.chromium.org:6222"
	local bcs_user="${CROS_FIRMWARE_BCS_USER_NAME}"
	local bcs_pkgdir="${CATEGORY}/${PN}"
	local bcs_root="$CROS_FIRMWARE_BCS_OVERLAY_NAME"
	# Support both old and new locations for BCS binaries.
	# TODO(dparker@chromium.org): Remove after all binaries are using the
	# new location. crosbug.com/22789
	[ -z "$bcs_root" ] && bcs_root="home/$CROS_FIRMWARE_BCS_USER_NAME"
	URI_BASE="ssh://$bcs_user@$bcs_host/$bcs_root/$bcs_pkgdir"
	# Bug fix: the URI must reference ${filename}; the previous
	# "$(unknown)" was a command substitution that would try to execute a
	# program named "unknown".
	CROS_BINARY_URI="${URI_BASE}/${filename}"
	CROS_BINARY_SUM="${checksum}"
	cros-binary_fetch
}
# Unpack a tbz2 firmware archive to ${S}
# Parameters: Location of archived firmware
# Returns: Location of unpacked firmware as $RETURN_VALUE
# Unpack a firmware archive to ${S}.
# $1: location of the archived firmware
# Returns the location of the unpacked firmware as $RETURN_VALUE.
_src_unpack() {
	local filepath="${1}"
	local filename="$(basename ${filepath})"
	mkdir -p "${S}" || die "Not able to create ${S}"
	cp "${filepath}" "${S}" || die "Can't copy ${filepath} to ${S}"
	cd "${S}" || die "Can't change directory to ${S}"
	# Bug fix: operate on ${filename}; the previous "$(unknown)" was a
	# command substitution that would try to execute a program named
	# "unknown". tar's -a flag auto-detects compression from the name.
	tar -axpf "${filename}" ||
		die "Failed to unpack ${filename}"
	local contents="$(tar -atf "${filename}")"
	if [ "$(echo "$contents" | wc -l)" -gt 1 ]; then
		# Currently we can only serve one file (or directory).
		ewarn "WARNING: package $filename contains multiple files."
		contents="$(echo "$contents" | head -n 1)"
	fi
	RETURN_VALUE="${S}/$contents"
}
# Unpack a tbz2 archive fetched from the BCS to ${S}
# Parameters: URI of file. Example: "bcs://filename.tbz2"
# Returns: Location of unpacked firmware as $RETURN_VALUE
# Unpack an archive previously fetched from the BCS to ${S}.
# $1: URI of the file, e.g. "bcs://filename.tbz2"
# Returns the location of the unpacked firmware as $RETURN_VALUE.
_bcs_src_unpack() {
	local filename="${1##*://}"
	# Bug fix: reference ${filename}; the previous "$(unknown)" was a
	# command substitution that would try to execute a program named
	# "unknown". _src_unpack sets RETURN_VALUE for our caller.
	_src_unpack "${CROS_BINARY_STORE_DIR}/${filename}"
	RETURN_VALUE="${RETURN_VALUE}"
}
# Provides the location of a firmware image given a URI.
# Unpacks the firmware image if necessary.
# Parameters: URI of file.
# Example: "file://filename.ext" or an absolute filepath.
# Returns the absolute filepath of the unpacked firmware as $RETURN_VALUE
# Resolve a firmware image URI to an absolute file path, unpacking the
# image first when it is an archive.
# $1: "file://name" (relative to ${FILESDIR}) or an absolute filepath.
# Returns the absolute filepath of the firmware as $RETURN_VALUE.
_firmware_image_location() {
	local source_uri="$1"
	local image_location
	if _is_in_files "${source_uri}"; then
		image_location="${FILESDIR}/${source_uri#*://}"
	else
		image_location="${source_uri}"
	fi
	[[ -f "${image_location}" ]] || die "File not found: ${image_location}"
	case "${image_location}" in
		*.tbz2 | *.tbz | *.tar.bz2 | *.tgz | *.tar.gz )
			# Archive: unpack it; _src_unpack sets RETURN_VALUE.
			_src_unpack "${image_location}"
			;;
		* )
			# Plain image: hand back the path as-is.
			RETURN_VALUE="${image_location}"
			;;
	esac
}
# Fetch and unpack every configured firmware image (main RO, main RW, EC)
# plus any extra resources, recording the unpacked locations in the
# FW_IMAGE_LOCATION / FW_RW_IMAGE_LOCATION / EC_IMAGE_LOCATION /
# EXTRA_LOCATIONS globals consumed by src_compile.
cros-firmware_src_unpack() {
	cros-workon_src_unpack
	# Backwards compatibility with the older naming convention.
	if [[ -n "${CROS_FIRMWARE_BIOS_ARCHIVE}" ]]; then
		CROS_FIRMWARE_MAIN_IMAGE="bcs://${CROS_FIRMWARE_BIOS_ARCHIVE}"
	fi
	if [[ -n "${CROS_FIRMWARE_EC_ARCHIVE}" ]]; then
		CROS_FIRMWARE_EC_IMAGE="bcs://${CROS_FIRMWARE_EC_ARCHIVE}"
	fi
	# Fetch and unpack the system firmware image
	if [[ -n "${CROS_FIRMWARE_MAIN_IMAGE}" ]]; then
		if _is_on_bcs "${CROS_FIRMWARE_MAIN_IMAGE}"; then
			_bcs_fetch "${CROS_FIRMWARE_MAIN_IMAGE}" \
				"${CROS_FIRMWARE_MAIN_SUM}"
			_bcs_src_unpack "${CROS_FIRMWARE_MAIN_IMAGE}"
			FW_IMAGE_LOCATION="${RETURN_VALUE}"
		else
			_firmware_image_location "${CROS_FIRMWARE_MAIN_IMAGE}"
			FW_IMAGE_LOCATION="${RETURN_VALUE}"
		fi
	fi
	# Fetch and unpack the system RW firmware image
	if [[ -n "${CROS_FIRMWARE_MAIN_RW_IMAGE}" ]]; then
		if _is_on_bcs "${CROS_FIRMWARE_MAIN_RW_IMAGE}"; then
			_bcs_fetch "${CROS_FIRMWARE_MAIN_RW_IMAGE}" \
				"${CROS_FIRMWARE_MAIN_RW_SUM}"
			_bcs_src_unpack "${CROS_FIRMWARE_MAIN_RW_IMAGE}"
			FW_RW_IMAGE_LOCATION="${RETURN_VALUE}"
		else
			_firmware_image_location "${CROS_FIRMWARE_MAIN_RW_IMAGE}"
			FW_RW_IMAGE_LOCATION="${RETURN_VALUE}"
		fi
	fi
	# Fetch and unpack the EC image
	if [[ -n "${CROS_FIRMWARE_EC_IMAGE}" ]]; then
		if _is_on_bcs "${CROS_FIRMWARE_EC_IMAGE}"; then
			_bcs_fetch "${CROS_FIRMWARE_EC_IMAGE}"\
				"${CROS_FIRMWARE_EC_SUM}"
			_bcs_src_unpack "${CROS_FIRMWARE_EC_IMAGE}"
			EC_IMAGE_LOCATION="${RETURN_VALUE}"
		else
			_firmware_image_location "${CROS_FIRMWARE_EC_IMAGE}"
			EC_IMAGE_LOCATION="${RETURN_VALUE}"
		fi
	fi
	# Fetch and unpack BCS resources in CROS_FIRMWARE_EXTRA_LIST
	local extra extra_list
	# For backward compatibility, ':' is still supported if there's no
	# special URL (bcs://, file://).
	local tr_source=';:' tr_target='\n\n'
	if echo "${CROS_FIRMWARE_EXTRA_LIST}" | grep -q '://'; then
		tr_source=';'
		tr_target='\n'
	fi
	# Split the extra list into one entry per line for the loop below.
	extra_list="$(echo "${CROS_FIRMWARE_EXTRA_LIST}" |
		tr "$tr_source" "$tr_target")"
	for extra in $extra_list; do
		if _is_on_bcs "${extra}"; then
			# No checksum is available for extra resources.
			_bcs_fetch "${extra}" ""
			_bcs_src_unpack "${extra}"
			RETURN_VALUE="${RETURN_VALUE}"
		else
			# Non-BCS extras are passed through verbatim.
			RETURN_VALUE="${extra}"
		fi
		EXTRA_LOCATIONS="${EXTRA_LOCATIONS}:${RETURN_VALUE}"
	done
	# Strip the leading ':' accumulated by the loop.
	EXTRA_LOCATIONS="${EXTRA_LOCATIONS#:}"
}
_add_param() {
local prefix="$1"
local value="$2"
if [[ -n "$value" ]]; then
echo "$prefix '$value' "
fi
}
_add_bool_param() {
local prefix="$1"
local value="$2"
if [[ -n "$value" ]]; then
echo "$prefix "
fi
}
# Assemble the self-extracting firmware updater script(s) by invoking
# pack_firmware.sh with arguments derived from the image locations
# gathered in src_unpack. Also builds per-image "local" updaters for
# firmware built from source when USE=bootimage.
cros-firmware_src_compile() {
	local image_cmd="" ext_cmd="" local_image_cmd=""
	# Strip any trailing '/' so paths below join cleanly.
	local root="${ROOT%/}"
	# Prepare images
	image_cmd+="$(_add_param -b "${FW_IMAGE_LOCATION}")"
	image_cmd+="$(_add_param -e "${EC_IMAGE_LOCATION}")"
	image_cmd+="$(_add_param -w "${FW_RW_IMAGE_LOCATION}")"
	image_cmd+="$(_add_param --ec_version "${CROS_FIRMWARE_EC_VERSION}")"
	# Prepare extra commands
	ext_cmd+="$(_add_bool_param --unstable "${CROS_FIRMWARE_UNSTABLE}")"
	ext_cmd+="$(_add_param --extra "${EXTRA_LOCATIONS}")"
	ext_cmd+="$(_add_param --script "${CROS_FIRMWARE_SCRIPT}")"
	ext_cmd+="$(_add_param --platform "${CROS_FIRMWARE_PLATFORM}")"
	ext_cmd+="$(_add_param --flashrom "${CROS_FIRMWARE_FLASHROM_BINARY}")"
	ext_cmd+="$(_add_param --tool_base \
		"$root/firmware/utils:$root/usr/sbin:$root/usr/bin")"
	# Pack firmware update script!
	if [ -z "$image_cmd" ]; then
		# Create an empty update script for the generic case
		# (no need to update)
		einfo "Building empty firmware update script"
		echo -n > ${UPDATE_SCRIPT}
	else
		# create a new script
		einfo "Building ${BOARD} firmware updater: $image_cmd $ext_cmd"
		./pack_firmware.sh $image_cmd $ext_cmd -o $UPDATE_SCRIPT ||
			die "Cannot pack firmware."
	fi
	# Create local updaters
	# NOTE(review): local_image_cmd was already declared (empty) at the
	# top of this function; this redeclaration re-initializes it to "".
	local local_image_cmd="" output_bom output_file
	if use cros_ec; then
		local_image_cmd+="-e $root/firmware/ec.bin "
	fi
	if use bootimage; then
		# One updater per locally built BIOS image; the BOM name is the
		# part of the filename between "image-" and ".bin".
		for fw_file in $root/firmware/image-*.bin; do
			einfo "Updater for local fw - $fw_file"
			output_bom=${fw_file##*/image-}
			output_bom=${output_bom%%.bin}
			output_file=updater-$output_bom.sh
			./pack_firmware.sh -b $fw_file -o $output_file \
				$local_image_cmd $ext_cmd ||
				die "Cannot pack local firmware."
		done
	elif use cros_ec; then
		# TODO(hungte) Deal with a platform that has only EC and no
		# BIOS, which is usually incorrect configuration.
		die "Sorry, platform without local BIOS EC is not supported."
	fi
}
# Install the packed updater script(s), the factory wipe helper, and the
# optional "force update" marker consumed by the installer.
cros-firmware_src_install() {
	# install the main updater program
	dosbin $UPDATE_SCRIPT || die "Failed to install update script."
	# install factory wipe script
	dosbin firmware-factory-wipe
	# install updaters for firmware-from-source archive.
	if use bootimage; then
		exeinto /firmware
		doexe updater-*.sh
	fi
	# The "force_update_firmware" tag file is used by chromeos-installer.
	if [ -n "$CROS_FIRMWARE_FORCE_UPDATE" ]; then
		insinto /root
		# Create an empty tag file in the cwd, then install it.
		touch .force_update_firmware
		doins .force_update_firmware
	fi
}
EXPORT_FUNCTIONS src_unpack src_compile src_install

View File

@ -0,0 +1,18 @@
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
inherit linux-info
# @FUNCTION: create_temp_build_dir
# @DESCRIPTION:
# Creates a local copy of the kernel build tree. This allows the package to
# rebuild host-specific parts of the tree without violating the sandbox.
create_temp_build_dir() {
	# Resolve the kernel tree first; KV_OUT_DIR is set by get_version.
	get_version || die "Failed to find kernel tree"
	# Default destination is ${T}/kernel-build unless the caller passes one.
	local dst_build="${1:-${T}/kernel-build}"
	local src_build
	src_build="$(readlink -e "${KV_OUT_DIR}")"
	cp -pPR "${src_build}" "${dst_build}" ||
		die "Failed to copy kernel tree"
	echo "${dst_build}"
}

View File

@ -0,0 +1,369 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="localhost"
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=m
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_XZ=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_EFI_PARTITION=y
CONFIG_ARCH_EXYNOS=y
CONFIG_S3C_LOWLEVEL_UART_PORT=3
CONFIG_S3C_ADC=y
CONFIG_S3C24XX_PWM=y
CONFIG_ARCH_EXYNOS5=y
CONFIG_MACH_EXYNOS4_DT=y
CONFIG_MACH_EXYNOS5_DT=y
CONFIG_ARM_THUMBEE=y
CONFIG_SMP=y
CONFIG_VMSPLIT_2G=y
CONFIG_NR_CPUS=2
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
# CONFIG_COMPACTION is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_SECCOMP=y
CONFIG_CC_STACKPROTECTOR=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC3,115200 init=/linuxrc mem=256M"
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_PM_RUNTIME=y
CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_WESTWOOD is not set
# CONFIG_TCP_CONG_HTCP is not set
CONFIG_TCP_CONG_LP=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_XFRM_MODE_TRANSPORT=m
CONFIG_INET6_XFRM_MODE_TUNNEL=m
CONFIG_INET6_XFRM_MODE_BEET=m
CONFIG_IPV6_SIT=m
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_NF_CONNTRACK=y
# CONFIG_NF_CONNTRACK_SECMARK is not set
# CONFIG_NF_CONNTRACK_FTP is not set
# CONFIG_NF_CONNTRACK_IRC is not set
# CONFIG_NF_CONNTRACK_SIP is not set
CONFIG_NF_CT_NETLINK=y
# CONFIG_NETFILTER_XT_TARGET_LOG is not set
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_TARGET_ULOG=y
CONFIG_IP_NF_MANGLE=y
CONFIG_NET_SCHED=y
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_HIDP=m
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBTSDIO=m
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_ATH3K=y
CONFIG_BT_HCIUART_LL=y
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
CONFIG_BT_MRVL=m
CONFIG_BT_MRVL_SDIO=m
CONFIG_CFG80211=m
CONFIG_NL80211_TESTMODE=y
CONFIG_CFG80211_DEBUGFS=y
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
CONFIG_MAC80211_LEDS=y
CONFIG_MAC80211_DEBUGFS=y
CONFIG_MAC80211_DEBUG_MENU=y
CONFIG_MAC80211_VERBOSE_DEBUG=y
CONFIG_RFKILL=y
CONFIG_RFKILL_GPIO=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DEBUG_DEVRES=y
CONFIG_CONNECTOR=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_SMC911X=y
CONFIG_SMSC911X=y
CONFIG_PPP=m
CONFIG_PPP_ASYNC=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_NET_SMSC75XX=m
CONFIG_USB_NET_SMSC95XX=m
# CONFIG_USB_NET_NET1080 is not set
CONFIG_USB_NET_MCS7830=m
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
CONFIG_LIBERTAS_THINFIRM=m
CONFIG_LIBERTAS_THINFIRM_USB=m
CONFIG_USB_NET_RNDIS_WLAN=m
CONFIG_MAC80211_HWSIM=m
CONFIG_ATH_COMMON=m
CONFIG_ATH_DEBUG=y
CONFIG_HOSTAP=m
CONFIG_HOSTAP_FIRMWARE=y
CONFIG_HOSTAP_FIRMWARE_NVRAM=y
CONFIG_RT2X00=m
CONFIG_RT2500USB=m
CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
CONFIG_INPUT_FF_MEMLESS=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_MOUSE_PS2 is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_IFORCE=m
CONFIG_JOYSTICK_IFORCE_USB=y
CONFIG_JOYSTICK_XPAD=m
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=m
CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_RAW=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_HW_RANDOM=y
CONFIG_TCG_TPM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_S3C2410=y
CONFIG_I2C_STUB=m
CONFIG_SPI=y
CONFIG_SPI_BITBANG=m
CONFIG_SPI_S3C64XX=y
CONFIG_SPI_SPIDEV=m
CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_BATTERY_SBS=m
CONFIG_CHARGER_GPIO=y
CONFIG_SENSORS_NTC_THERMISTOR=y
CONFIG_SENSORS_S3C=y
CONFIG_SENSORS_S3C_RAW=y
CONFIG_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_MFD_MAX77686=y
CONFIG_MFD_WM8994=y
CONFIG_MFD_TPS65090=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_MAX77686=y
CONFIG_REGULATOR_TPS65090=y
CONFIG_REGULATOR_WM8994=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_VIDEO_FIXED_MINOR_RANGES=y
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_EXYNOS_VIDEO=y
CONFIG_EXYNOS_MIPI_DSI=y
CONFIG_EXYNOS_DP=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_7x14=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_HRTIMER=m
CONFIG_SND_DYNAMIC_MINORS=y
# CONFIG_SND_ARM is not set
# CONFIG_SND_SPI is not set
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_SOC=y
CONFIG_SND_SOC_SAMSUNG=y
CONFIG_HIDRAW=y
CONFIG_HID_APPLE=m
CONFIG_HID_CHERRY=m
CONFIG_HID_LOGITECH=m
CONFIG_HID_LOGITECH_DJ=m
CONFIG_HID_MAGICMOUSE=m
CONFIG_HID_MULTITOUCH=m
CONFIG_HID_PRIMAX=m
CONFIG_HID_SONY=m
CONFIG_HID_WIIMOTE=m
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_S5P=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_EXYNOS=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_SERIAL=y
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_FTDI_SIO=m
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_QUALCOMM=m
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_SAMSUNG_USBPHY=y
CONFIG_MMC=y
CONFIG_MMC_UNSAFE_RESUME=y
CONFIG_MMC_BLOCK_MINORS=16
CONFIG_MMC_SDHCI=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_IDMAC=y
CONFIG_MMC_DW_EXYNOS=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_S3C=y
CONFIG_AUXDISPLAY=y
CONFIG_STAGING=y
CONFIG_SENSORS_ISL29018=m
CONFIG_SENSORS_TSL2563=m
CONFIG_TSL2583=m
CONFIG_IIO=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
CONFIG_VFAT_FS=m
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_HFSPLUS_FS=m
CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_LZO=y
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_PRINTK_TIME=y
CONFIG_DEFAULT_MESSAGE_LOGLEVEL=5
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_FUNCTION_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_STRICT_DEVMEM=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
CONFIG_DEBUG_S3C_UART3=y
CONFIG_EARLY_PRINTK=y
CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_YAMA=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_AUTHENC=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=m
CONFIG_CRC7=m
CONFIG_LIBCRC32C=m
# CONFIG_XZ_DEC_POWERPC is not set
# CONFIG_XZ_DEC_IA64 is not set
# CONFIG_XZ_DEC_SPARC is not set

View File

@ -0,0 +1,450 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=m
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_XZ=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_EFI_PARTITION=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_SCHED_SMT=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
CONFIG_X86_MCE=y
# CONFIG_X86_MCE_AMD is not set
CONFIG_MICROCODE=y
CONFIG_MICROCODE_AMD=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_MMAP_NOEXEC_TAINT=0
CONFIG_LOW_MEM_NOTIFY=y
CONFIG_X86_CHECK_BIOS_CORRUPTION=y
# CONFIG_MTRR_SANITIZER is not set
CONFIG_EFI=y
CONFIG_CC_STACKPROTECTOR=y
CONFIG_HZ_1000=y
# CONFIG_RELOCATABLE is not set
CONFIG_PM_RUNTIME=y
CONFIG_PM_DEBUG=y
CONFIG_PM_TRACE_RTC=y
CONFIG_ACPI_PROCFS=y
CONFIG_ACPI_VIDEO=y
CONFIG_ACPI_DOCK=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_X86_ACPI_CPUFREQ=y
CONFIG_PCI_MMCONFIG=y
CONFIG_PCIEPORTBUS=y
CONFIG_HOTPLUG_PCI_PCIE=y
CONFIG_PCI_MSI=y
CONFIG_PCI_IOAPIC=y
CONFIG_HOTPLUG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_NET_KEY=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_WESTWOOD is not set
# CONFIG_TCP_CONG_HTCP is not set
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_XFRM_MODE_TRANSPORT=m
CONFIG_INET6_XFRM_MODE_TUNNEL=m
CONFIG_INET6_XFRM_MODE_BEET=m
CONFIG_IPV6_SIT=m
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_NF_CONNTRACK=y
# CONFIG_NF_CONNTRACK_SECMARK is not set
# CONFIG_NF_CONNTRACK_FTP is not set
# CONFIG_NF_CONNTRACK_IRC is not set
# CONFIG_NF_CONNTRACK_SIP is not set
CONFIG_NF_CT_NETLINK=y
# CONFIG_NETFILTER_XT_TARGET_LOG is not set
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_TARGET_ULOG=y
# CONFIG_NF_NAT is not set
CONFIG_IP_NF_MANGLE=y
CONFIG_NET_SCHED=y
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_HIDP=m
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
CONFIG_BT_ATH3K=m
CONFIG_CFG80211=m
CONFIG_NL80211_TESTMODE=y
CONFIG_CFG80211_DEBUGFS=y
CONFIG_MAC80211=m
CONFIG_MAC80211_DEBUGFS=y
CONFIG_MAC80211_DEBUG_MENU=y
CONFIG_MAC80211_VERBOSE_DEBUG=y
CONFIG_RFKILL=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DEBUG_DEVRES=y
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
# CONFIG_SATA_PMP is not set
CONFIG_SATA_AHCI=y
CONFIG_ATA_PIIX=y
CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_CHROMEOS=y
CONFIG_NETDEVICES=y
CONFIG_MII=y
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_ALTEON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
CONFIG_TIGON3=m
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
CONFIG_E100=m
CONFIG_E1000=m
CONFIG_E1000E=m
CONFIG_IGBVF=m
CONFIG_JME=m
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
CONFIG_R8169=m
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
CONFIG_PHYLIB=y
CONFIG_PPP=m
CONFIG_PPP_ASYNC=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_NET_DM9601=m
CONFIG_USB_NET_SMSC75XX=m
CONFIG_USB_NET_SMSC95XX=m
CONFIG_USB_NET_MCS7830=m
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
CONFIG_USB_NET_GOBI=m
CONFIG_LIBERTAS_THINFIRM=m
CONFIG_LIBERTAS_THINFIRM_USB=m
CONFIG_USB_NET_RNDIS_WLAN=m
CONFIG_MAC80211_HWSIM=m
CONFIG_ATH_COMMON=m
CONFIG_ATH_DEBUG=y
CONFIG_ATH5K=m
CONFIG_ATH9K=m
CONFIG_ATH9K_DEBUGFS=y
CONFIG_HOSTAP=m
CONFIG_HOSTAP_FIRMWARE=y
CONFIG_HOSTAP_FIRMWARE_NVRAM=y
CONFIG_HOSTAP_PLX=m
CONFIG_HOSTAP_PCI=m
CONFIG_RT2X00=m
CONFIG_RT2800PCI=m
CONFIG_RTL8192CE=m
CONFIG_INPUT_FF_MEMLESS=y
CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_MOUSE_APPLETOUCH=m
CONFIG_MOUSE_CYAPA=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_IFORCE=m
CONFIG_JOYSTICK_IFORCE_USB=y
CONFIG_JOYSTICK_XPAD=m
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=m
CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_CT82C710=m
CONFIG_SERIO_PCIPS2=m
CONFIG_SERIO_RAW=y
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_HW_RANDOM=y
# CONFIG_HW_RANDOM_AMD is not set
# CONFIG_HW_RANDOM_VIA is not set
CONFIG_NVRAM=y
CONFIG_HPET=y
# CONFIG_HPET_MMAP is not set
CONFIG_TCG_TPM=y
CONFIG_TCG_TIS=y
CONFIG_RAMOOPS=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_I801=y
CONFIG_I2C_PIIX4=m
CONFIG_I2C_STUB=m
CONFIG_SPI=y
CONFIG_SPI_BITBANG=m
CONFIG_SPI_SPIDEV=m
CONFIG_GPIOLIB=y
CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_NM10=m
CONFIG_SENSORS_CORETEMP=y
CONFIG_LPC_SCH=m
CONFIG_MEDIA_SUPPORT=m
CONFIG_VIDEO_DEV=m
# CONFIG_IR_NEC_DECODER is not set
# CONFIG_IR_RC5_DECODER is not set
# CONFIG_IR_RC6_DECODER is not set
# CONFIG_IR_JVC_DECODER is not set
# CONFIG_IR_SONY_DECODER is not set
# CONFIG_IR_RC5_SZ_DECODER is not set
# CONFIG_IR_LIRC_CODEC is not set
# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
CONFIG_USB_VIDEO_CLASS=m
# CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV is not set
# CONFIG_USB_GSPCA is not set
# CONFIG_RADIO_ADAPTERS is not set
CONFIG_AGP=y
CONFIG_AGP_INTEL=y
# CONFIG_VGA_ARB is not set
CONFIG_DRM=y
CONFIG_DRM_I915=y
CONFIG_DRM_I915_KMS=y
CONFIG_FB_MODE_HELPERS=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CHROMEOS_KEYBOARD=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_HRTIMER=m
CONFIG_SND_HDA_INTEL=m
CONFIG_SND_HDA_HWDEP=y
CONFIG_SND_HDA_INPUT_JACK=y
CONFIG_SND_HDA_POWER_SAVE=y
CONFIG_SND_HDA_POWER_SAVE_DEFAULT=15
# CONFIG_SND_SPI is not set
CONFIG_SND_USB_AUDIO=m
CONFIG_HIDRAW=y
CONFIG_HID_PID=y
CONFIG_USB_HIDDEV=y
CONFIG_HID_APPLE=m
CONFIG_HID_LOGITECH=m
CONFIG_HID_MAGICMOUSE=m
CONFIG_HID_MICROSOFT=m
CONFIG_HID_MULTITOUCH=m
CONFIG_HID_PRIMAX=m
CONFIG_HID_SONY=m
CONFIG_HID_WIIMOTE=m
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=y
CONFIG_USB_XHCI_HCD=m
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_STORAGE_REALTEK=y
CONFIG_USB_LIBUSUAL=y
CONFIG_USB_SERIAL=y
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_QUALCOMM=m
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_MMC=m
CONFIG_MMC_BLOCK_MINORS=16
CONFIG_MMC_SDHCI=m
CONFIG_MMC_SDHCI_PCI=m
CONFIG_LEDS_CLASS=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
CONFIG_RTC_DRV_CMOS=m
CONFIG_VIRTIO_PCI=m
CONFIG_STAGING=y
CONFIG_IIO=m
CONFIG_SENSORS_ISL29018=m
CONFIG_SENSORS_TSL2563=m
CONFIG_TSL2583=m
CONFIG_ZRAM=m
CONFIG_DELL_WMI=m
CONFIG_HP_WMI=m
CONFIG_INTEL_MENLOW=m
CONFIG_ACPI_WMI=m
CONFIG_ACPI_CHROMEOS=y
CONFIG_CHROMEOS_LAPTOP=y
CONFIG_CHROMEOS_RAMOOPS_RAM_START=0x00f00000
CONFIG_CHROMEOS_RAMOOPS_RAM_SIZE=0x00100000
CONFIG_CHROMEOS_RAMOOPS_RECORD_SIZE=0x00020000
CONFIG_CHROMEOS_RAMOOPS_DUMP_OOPS=0x1
CONFIG_INTEL_IOMMU=y
CONFIG_GOOGLE_FIRMWARE=y
CONFIG_GOOGLE_MEMCONSOLE=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_HFSPLUS_FS=m
CONFIG_PSTORE=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_PRINTK_TIME=y
CONFIG_DEFAULT_MESSAGE_LOGLEVEL=5
# CONFIG_ENABLE_WARN_DEPRECATED is not set
CONFIG_MAGIC_SYSRQ=y
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_LOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_FUNCTION_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_STRICT_DEVMEM=y
CONFIG_EARLY_PRINTK_DBGP=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_IO_DELAY_0XED=y
CONFIG_DEBUG_BOOT_PARAMS=y
CONFIG_OPTIMIZE_INLINING=y
CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_YAMA=y
CONFIG_SECURITY_CHROMIUMOS=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_AUTHENC=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_NI_INTEL=m
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=m
CONFIG_CRC7=m
CONFIG_LIBCRC32C=m
# CONFIG_XZ_DEC_POWERPC is not set
# CONFIG_XZ_DEC_IA64 is not set
# CONFIG_XZ_DEC_ARM is not set
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_SPARC is not set

View File

@ -0,0 +1,464 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=m
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_XZ=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_EFI_PARTITION=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_SCHED_SMT=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
CONFIG_X86_MCE=y
# CONFIG_X86_MCE_AMD is not set
CONFIG_MICROCODE=y
CONFIG_MICROCODE_AMD=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_MMAP_NOEXEC_TAINT=0
CONFIG_LOW_MEM_NOTIFY=y
CONFIG_X86_CHECK_BIOS_CORRUPTION=y
# CONFIG_MTRR_SANITIZER is not set
CONFIG_EFI=y
CONFIG_CC_STACKPROTECTOR=y
CONFIG_HZ_1000=y
# CONFIG_RELOCATABLE is not set
# CONFIG_COMPAT_VDSO is not set
CONFIG_PM_RUNTIME=y
CONFIG_PM_DEBUG=y
CONFIG_PM_TRACE_RTC=y
CONFIG_ACPI_PROCFS=y
CONFIG_ACPI_VIDEO=y
CONFIG_ACPI_DOCK=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_X86_ACPI_CPUFREQ=y
CONFIG_PCI_MMCONFIG=y
CONFIG_PCIEPORTBUS=y
CONFIG_HOTPLUG_PCI_PCIE=y
CONFIG_PCI_MSI=y
CONFIG_PCI_IOAPIC=y
CONFIG_HOTPLUG_PCI=y
CONFIG_IA32_EMULATION=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_NET_KEY=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_WESTWOOD is not set
# CONFIG_TCP_CONG_HTCP is not set
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_XFRM_MODE_TRANSPORT=m
CONFIG_INET6_XFRM_MODE_TUNNEL=m
CONFIG_INET6_XFRM_MODE_BEET=m
CONFIG_IPV6_SIT=m
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
CONFIG_NF_CONNTRACK=y
# CONFIG_NF_CONNTRACK_SECMARK is not set
# CONFIG_NF_CONNTRACK_FTP is not set
# CONFIG_NF_CONNTRACK_IRC is not set
# CONFIG_NF_CONNTRACK_SIP is not set
CONFIG_NF_CT_NETLINK=y
# CONFIG_NETFILTER_XT_TARGET_LOG is not set
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_TARGET_ULOG=y
# CONFIG_NF_NAT is not set
CONFIG_IP_NF_MANGLE=y
CONFIG_NET_SCHED=y
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_HIDP=m
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
CONFIG_BT_ATH3K=m
CONFIG_CFG80211=m
CONFIG_NL80211_TESTMODE=y
CONFIG_CFG80211_DEBUGFS=y
CONFIG_MAC80211=m
CONFIG_MAC80211_DEBUGFS=y
CONFIG_MAC80211_DEBUG_MENU=y
CONFIG_MAC80211_VERBOSE_DEBUG=y
CONFIG_RFKILL=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DEBUG_DEVRES=y
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
# CONFIG_SATA_PMP is not set
CONFIG_SATA_AHCI=y
CONFIG_ATA_PIIX=y
CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_CHROMEOS=y
CONFIG_NETDEVICES=y
CONFIG_MII=y
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_ALTEON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
CONFIG_TIGON3=m
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
CONFIG_E100=m
CONFIG_E1000=m
CONFIG_E1000E=m
CONFIG_IGBVF=m
CONFIG_JME=m
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
CONFIG_R8169=m
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
CONFIG_PHYLIB=y
CONFIG_PPP=m
CONFIG_PPP_ASYNC=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_NET_DM9601=m
CONFIG_USB_NET_SMSC75XX=m
CONFIG_USB_NET_SMSC95XX=m
CONFIG_USB_NET_MCS7830=m
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
CONFIG_USB_HSO=y
CONFIG_USB_NET_GOBI=m
CONFIG_LIBERTAS_THINFIRM=m
CONFIG_LIBERTAS_THINFIRM_USB=m
CONFIG_USB_NET_RNDIS_WLAN=m
CONFIG_MAC80211_HWSIM=m
CONFIG_ATH_COMMON=m
CONFIG_ATH_DEBUG=y
CONFIG_ATH5K=m
CONFIG_ATH9K=m
CONFIG_ATH9K_DEBUGFS=y
CONFIG_HOSTAP=m
CONFIG_HOSTAP_FIRMWARE=y
CONFIG_HOSTAP_FIRMWARE_NVRAM=y
CONFIG_HOSTAP_PLX=m
CONFIG_HOSTAP_PCI=m
CONFIG_IWLWIFI=m
CONFIG_IWL4965=m
CONFIG_IWL3945=m
CONFIG_RT2X00=m
CONFIG_RT2800PCI=m
# CONFIG_RT2800PCI_RT33XX is not set
# CONFIG_RT2800PCI_RT35XX is not set
# CONFIG_RT2800PCI_RT53XX is not set
CONFIG_RTL8192CE=m
# CONFIG_RTLWIFI_DEBUG is not set
CONFIG_INPUT_FF_MEMLESS=y
CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_MOUSE_APPLETOUCH=m
CONFIG_MOUSE_CYAPA=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_IFORCE=m
CONFIG_JOYSTICK_IFORCE_USB=y
CONFIG_JOYSTICK_XPAD=m
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=m
CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_CT82C710=m
CONFIG_SERIO_PCIPS2=m
CONFIG_SERIO_RAW=y
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_HW_RANDOM=y
# CONFIG_HW_RANDOM_AMD is not set
# CONFIG_HW_RANDOM_VIA is not set
CONFIG_NVRAM=y
CONFIG_HPET=y
# CONFIG_HPET_MMAP is not set
CONFIG_TCG_TPM=y
CONFIG_TCG_TIS=y
CONFIG_RAMOOPS=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_I801=y
CONFIG_I2C_PIIX4=m
CONFIG_I2C_STUB=m
CONFIG_SPI=y
CONFIG_SPI_BITBANG=m
CONFIG_SPI_SPIDEV=m
CONFIG_GPIOLIB=y
CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_NM10=m
CONFIG_SENSORS_CORETEMP=y
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y
CONFIG_LPC_SCH=m
CONFIG_MEDIA_SUPPORT=m
CONFIG_VIDEO_DEV=m
# CONFIG_IR_NEC_DECODER is not set
# CONFIG_IR_RC5_DECODER is not set
# CONFIG_IR_RC6_DECODER is not set
# CONFIG_IR_JVC_DECODER is not set
# CONFIG_IR_SONY_DECODER is not set
# CONFIG_IR_RC5_SZ_DECODER is not set
# CONFIG_IR_LIRC_CODEC is not set
# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
CONFIG_USB_VIDEO_CLASS=m
# CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV is not set
# CONFIG_USB_GSPCA is not set
# CONFIG_V4L_PCI_DRIVERS is not set
# CONFIG_RADIO_ADAPTERS is not set
CONFIG_AGP=y
CONFIG_AGP_INTEL=y
# CONFIG_VGA_ARB is not set
CONFIG_DRM=y
CONFIG_DRM_I915=y
CONFIG_DRM_I915_KMS=y
CONFIG_FB_MODE_HELPERS=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CHROMEOS_KEYBOARD=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_HRTIMER=m
CONFIG_SND_HDA_INTEL=m
CONFIG_SND_HDA_HWDEP=y
CONFIG_SND_HDA_INPUT_JACK=y
CONFIG_SND_HDA_POWER_SAVE=y
CONFIG_SND_HDA_POWER_SAVE_DEFAULT=15
# CONFIG_SND_SPI is not set
CONFIG_SND_USB_AUDIO=m
CONFIG_HIDRAW=y
CONFIG_HID_PID=y
CONFIG_USB_HIDDEV=y
CONFIG_HID_APPLE=m
CONFIG_HID_LOGITECH=m
CONFIG_HID_MAGICMOUSE=m
CONFIG_HID_MICROSOFT=m
CONFIG_HID_MULTITOUCH=m
CONFIG_HID_PRIMAX=m
CONFIG_HID_SONY=m
CONFIG_HID_WIIMOTE=m
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=y
CONFIG_USB_XHCI_HCD=m
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_STORAGE_REALTEK=y
CONFIG_USB_LIBUSUAL=y
CONFIG_USB_SERIAL=y
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_QUALCOMM=m
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_MMC=m
CONFIG_MMC_BLOCK_MINORS=16
CONFIG_MMC_SDHCI=m
CONFIG_MMC_SDHCI_PCI=m
CONFIG_LEDS_CLASS=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
CONFIG_RTC_DRV_CMOS=m
CONFIG_VIRTIO_PCI=m
CONFIG_STAGING=y
CONFIG_IIO=m
CONFIG_SENSORS_ISL29018=m
CONFIG_SENSORS_TSL2563=m
CONFIG_TSL2583=m
CONFIG_ZRAM=m
CONFIG_DELL_WMI=m
CONFIG_HP_WMI=m
CONFIG_INTEL_MENLOW=m
CONFIG_ACPI_WMI=m
CONFIG_ACPI_CHROMEOS=y
CONFIG_CHROMEOS_LAPTOP=y
CONFIG_CHROMEOS_RAMOOPS_RAM_START=0x00f00000
CONFIG_CHROMEOS_RAMOOPS_RAM_SIZE=0x00100000
CONFIG_CHROMEOS_RAMOOPS_RECORD_SIZE=0x00020000
CONFIG_CHROMEOS_RAMOOPS_DUMP_OOPS=0x1
CONFIG_GOOGLE_FIRMWARE=y
CONFIG_GOOGLE_MEMCONSOLE=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_HFSPLUS_FS=m
CONFIG_PSTORE=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_PRINTK_TIME=y
CONFIG_DEFAULT_MESSAGE_LOGLEVEL=5
# CONFIG_ENABLE_WARN_DEPRECATED is not set
CONFIG_MAGIC_SYSRQ=y
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_LOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_FUNCTION_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_STRICT_DEVMEM=y
CONFIG_EARLY_PRINTK_DBGP=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_IO_DELAY_0XED=y
CONFIG_DEBUG_BOOT_PARAMS=y
CONFIG_OPTIMIZE_INLINING=y
CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_YAMA=y
CONFIG_SECURITY_CHROMIUMOS=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_AUTHENC=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_NI_INTEL=m
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=m
CONFIG_CRC7=m
CONFIG_LIBCRC32C=m
# CONFIG_XZ_DEC_POWERPC is not set
# CONFIG_XZ_DEC_IA64 is not set
# CONFIG_XZ_DEC_ARM is not set
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_SPARC is not set

View File

@ -0,0 +1 @@
i386_defconfig

View File

@ -0,0 +1,659 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
[[ ${EAPI} != "4" ]] && die "Only EAPI=4 is supported"
inherit binutils-funcs cros-board toolchain-funcs
HOMEPAGE="http://www.chromium.org/"
LICENSE="GPL-2"
SLOT="0"
DEPEND="sys-apps/debianutils
initramfs? ( chromeos-base/chromeos-initramfs )
"
IUSE="-device_tree -kernel_sources"
STRIP_MASK="/usr/lib/debug/boot/vmlinux"
# Build out-of-tree and incremental by default, but allow an ebuild inheriting
# this eclass to explicitly build in-tree.
: ${CROS_WORKON_OUTOFTREE_BUILD:=1}
: ${CROS_WORKON_INCREMENTAL_BUILD:=1}
# Config fragments selected by USE flags
# ...fragments will have the following variables substitutions
# applied later (needs to be done later since these values
# aren't reliable when used in a global context like this):
# %ROOT% => ${ROOT}
# Each name here is both a USE flag (added to IUSE below) and the key for a
# pair of variables ${name}_desc / ${name}_config defined later in this file.
# cros-kernel2_src_configure() appends ${name}_config to the kernel .config
# for every fragment whose USE flag is enabled.
CONFIG_FRAGMENTS=(
	blkdevram
	ca0132
	cifs
	debug
	fbconsole
	gdmwimax
	gobi
	highmem
	i2cdev
	initramfs
	kvm
	nfs
	pcserial
	qmi
	realtekpstor
	samsung_serial
	systemtap
	tpm
	vfat
)
blkdevram_desc="ram block device"
blkdevram_config="
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=16384
"
ca0132_desc="CA0132 ALSA codec"
ca0132_config="
CONFIG_SND_HDA_CODEC_CA0132=y
CONFIG_SND_HDA_DSP_LOADER=y
"
cifs_desc="Samba/CIFS Support"
cifs_config="
CONFIG_CIFS=m
"
debug_desc="debug settings"
debug_config="
CONFIG_DEBUG_INFO=y
"
fbconsole_desc="framebuffer console"
fbconsole_config="
CONFIG_FRAMEBUFFER_CONSOLE=y
"
gdmwimax_desc="GCT GDM72xx WiMAX support"
gdmwimax_config="
CONFIG_WIMAX_GDM72XX=m
CONFIG_WIMAX_GDM72XX_USB=y
CONFIG_WIMAX_GDM72XX_USB_PM=y
"
gobi_desc="Qualcomm Gobi modem driver"
gobi_config="
CONFIG_USB_NET_GOBI=m
"
highmem_desc="highmem"
highmem_config="
CONFIG_HIGHMEM64G=y
"
i2cdev_desc="I2C device interface"
i2cdev_config="
CONFIG_I2C_CHARDEV=y
"
tpm_desc="TPM support"
tpm_config="
CONFIG_TCG_TPM=y
CONFIG_TCG_TIS=y
"
initramfs_desc="initramfs"
initramfs_config="
CONFIG_INITRAMFS_SOURCE=\"%ROOT%/var/lib/misc/initramfs.cpio.xz\"
CONFIG_INITRAMFS_COMPRESSION_XZ=y
"
vfat_desc="vfat"
vfat_config="
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_FAT_FS=y
CONFIG_VFAT_FS=y
"
kvm_desc="KVM"
kvm_config="
CONFIG_HAVE_KVM=y
CONFIG_HAVE_KVM_IRQCHIP=y
CONFIG_HAVE_KVM_EVENTFD=y
CONFIG_KVM_APIC_ARCHITECTURE=y
CONFIG_KVM_MMIO=y
CONFIG_KVM_ASYNC_PF=y
CONFIG_KVM=m
CONFIG_KVM_INTEL=m
# CONFIG_KVM_AMD is not set
# CONFIG_KVM_MMU_AUDIT is not set
CONFIG_VIRTIO=m
CONFIG_VIRTIO_BLK=m
CONFIG_VIRTIO_NET=m
CONFIG_VIRTIO_CONSOLE=m
CONFIG_VIRTIO_RING=m
CONFIG_VIRTIO_PCI=m
"
nfs_desc="NFS"
nfs_config="
CONFIG_USB_NET_AX8817X=y
CONFIG_DNOTIFY=y
CONFIG_DNS_RESOLVER=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
CONFIG_NFSD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_NFS_FS=y
CONFIG_NFS_USE_KERNEL_DNS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_RPCSEC_GSS_KRB5=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=y
CONFIG_USB_USBNET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
"
pcserial_desc="PC serial"
pcserial_config="
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_PCI=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
"
qmi_desc="QMI WWAN driver"
qmi_config="
CONFIG_USB_NET_QMI_WWAN=m
"
samsung_serial_desc="Samsung serialport"
samsung_serial_config="
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
"
realtekpstor_desc="Realtek PCI card reader"
realtekpstor_config="
CONFIG_RTS_PSTOR=m
"
systemtap_desc="systemtap support"
systemtap_config="
CONFIG_KPROBES=y
CONFIG_DEBUG_INFO=y
"
# Add all config fragments as off by default
IUSE="${IUSE} ${CONFIG_FRAGMENTS[@]}"
# If an overlay has eclass overrides, but doesn't actually override this
# eclass, we'll have ECLASSDIR pointing to the active overlay's
# eclass/ dir, but this eclass is still in the main chromiumos tree. So
# add a check to locate the cros-kernel/ regardless of what's going on.
# ${BASH_SOURCE[0]%/*} is the directory containing this eclass file itself.
ECLASSDIR_LOCAL=${BASH_SOURCE[0]%/*}
# Echo the directory holding the cros-kernel/ defconfig data.  Prefer the
# active overlay's eclass dir (ECLASSDIR); fall back to the directory next
# to this eclass file (ECLASSDIR_LOCAL) when the overlay copy is absent.
defconfig_dir() {
	local candidate
	for candidate in "${ECLASSDIR}/cros-kernel" "${ECLASSDIR_LOCAL}/cros-kernel"; do
		[[ -d ${candidate} ]] && break
	done
	echo "${candidate}"
}
# @FUNCTION: kernelversion
# @DESCRIPTION:
# Returns the current compiled kernel version.
# Note: Only valid after src_configure has finished running.
kernelversion() {
	# Ask the kernel build system itself for the version; -s and
	# --no-print-directory keep the output down to just the version string.
	kmake -s --no-print-directory kernelversion
}
# @FUNCTION: install_kernel_sources
# @DESCRIPTION:
# Installs the kernel sources into ${D}/usr/src/${P} and fixes symlinks.
# The package must have already installed a directory under ${D}/lib/modules.
install_kernel_sources() {
	local version=$(kernelversion)
	local dest_modules_dir=lib/modules/${version}
	local dest_source_dir=usr/src/${P}
	local dest_build_dir=${dest_source_dir}/build
	# Fix symlinks in lib/modules: point build/ and source/ at the copies
	# we are about to install under /usr/src (relative links so they keep
	# working inside a chroot/sysroot).
	ln -sfvT "../../../${dest_build_dir}" \
		"${D}/${dest_modules_dir}/build" || die
	ln -sfvT "../../../${dest_source_dir}" \
		"${D}/${dest_modules_dir}/source" || die
	einfo "Installing kernel source tree"
	dodir "${dest_source_dir}"
	local f
	# Copy everything except the build/ symlink from the source checkout.
	for f in "${S}"/*; do
		[[ "$f" == "${S}/build" ]] && continue
		cp -pPR "${f}" "${D}/${dest_source_dir}" ||
			die "Failed to copy kernel source tree"
	done
	dosym "${P}" "/usr/src/linux"
	einfo "Installing kernel build tree"
	dodir "${dest_build_dir}"
	# Only the pieces needed to build external modules are installed from
	# the out-of-tree build dir.
	cp -pPR "$(cros-workon_get_build_dir)"/{.config,.version,Makefile,Module.symvers,include} \
		"${D}/${dest_build_dir}" || die
	# Modify Makefile to use the ROOT environment variable if defined.
	# This path needs to be absolute so that the build directory will
	# still work if copied elsewhere.
	sed -i -e "s@${S}@\$(ROOT)/${dest_source_dir}@" \
		"${D}/${dest_build_dir}/Makefile" || die
}
# Print the path of the generated kernel .config in the out-of-tree
# build directory.
get_build_cfg() {
	local build_dir
	build_dir="$(cros-workon_get_build_dir)"
	echo "${build_dir}/.config"
}
# Echo the architecture name used to select a kernel config.  Non-ARM
# boards use the toolchain's kernel arch; ARM boards are further
# specialized by the SoC family named in CHROMEOS_KERNEL_SPLITCONFIG.
get_build_arch() {
	if [ "${ARCH}" != "arm" ] ; then
		echo $(tc-arch-kernel)
		return
	fi
	case "${CHROMEOS_KERNEL_SPLITCONFIG}" in
	*tegra*)
		echo "tegra"
		;;
	*exynos*)
		echo "exynos5"
		;;
	*)
		echo "arm"
		;;
	esac
}
# @FUNCTION: cros_chkconfig_present
# @USAGE: <option to check config for>
# @DESCRIPTION:
# Returns success of the provided option is present in the build config.
cros_chkconfig_present() {
	# Succeeds iff the option is enabled built-in (=y) or as a module (=m)
	# in the generated kernel config.
	local config=$1
	# Fix: the original declared ${config} but then used the positional $1
	# directly, leaving the local dead; use the named variable.
	grep -q "^CONFIG_${config}=[ym]$" "$(get_build_cfg)"
}
# pkg_setup hook: disable incremental builds when tests are requested,
# then defer to the cros-workon default.
cros-kernel2_pkg_setup() {
	# This is needed for running src_test(). The kernel code will need to
	# be rebuilt with `make check`. If incremental build were enabled,
	# `make check` would have nothing left to build.
	use test && export CROS_WORKON_INCREMENTAL_BUILD=0
	cros-workon_pkg_setup
}
# @FUNCTION: emit_its_script
# @USAGE: <output file> <boot_dir> <dtb_dir> <device trees>
# @DESCRIPTION:
# Emits the its script used to build the u-boot fitImage kernel binary
# that contains the kernel as well as device trees used when booting
# it.
emit_its_script() {
	local iter=1
	local its_out=${1}
	shift
	local boot_dir=${1}
	shift
	local dtb_dir=${1}
	shift
	# File header plus the single kernel image node.
	cat > "${its_out}" <<-EOF || die
	/dts-v1/;

	/ {
		description = "Chrome OS kernel image with one or more FDT blobs";
		#address-cells = <1>;
		images {
			kernel@1 {
				data = /incbin/("${boot_dir}/zImage");
				type = "$(get_kernel_type)";
				arch = "arm";
				os = "linux";
				compression = "none";
				load = <$(get_load_addr)>;
				entry = <$(get_load_addr)>;
			};
	EOF
	# One fdt@N node per device tree blob passed as a positional arg.
	local dtb
	for dtb in "$@" ; do
		cat >> "${its_out}" <<-EOF || die
			fdt@${iter} {
				description = "$(basename ${dtb})";
				data = /incbin/("${dtb_dir}/${dtb}");
				type = "flat_dt";
				arch = "arm";
				compression = "none";
				hash@1 {
					algo = "sha1";
				};
			};
		EOF
		((++iter))
	done
	# Close the images node and open the configurations node.
	# Fix: this stanza used to redirect to ${its_script}, a variable that
	# exists only in the caller's scope; write to our own ${its_out} and
	# die on failure like every other stanza.
	cat >> "${its_out}" <<-EOF || die
		};
		configurations {
			default = "conf@1";
	EOF
	# One conf@N per fdt@N, all sharing kernel@1.
	local i
	for i in $(seq 1 $((iter-1))) ; do
		cat >> "${its_out}" <<-EOF || die
			conf@${i} {
				kernel = "kernel@1";
				fdt = "fdt@${i}";
			};
		EOF
	done
	echo "	};" >> "${its_out}"
	echo "};" >> "${its_out}"
}
# Invoke the kernel's make with the right arch, cross-compiler, and
# out-of-tree build directory.  Extra arguments are passed through.
kmake() {
	# Allow override of kernel arch.
	local kernel_arch=${CHROMEOS_KERNEL_ARCH:-$(tc-arch-kernel)}
	local cross=${CHOST}-
	# Hack for using 64-bit kernel with 32-bit user-space
	if [[ "${ABI:-${ARCH}}" != "amd64" && "${kernel_arch}" == "x86_64" ]]; then
		cross=x86_64-cros-linux-gnu-
	else
		# TODO(raymes): Force GNU ld over gold. There are still some
		# gold issues to iron out. See: 13209.
		tc-export LD CC CXX
		# Prepend the overridden tools to the caller's make arguments.
		set -- \
			LD="$(get_binutils_path_ld)/ld" \
			CC="${CC} -B$(get_binutils_path_ld)" \
			CXX="${CXX} -B$(get_binutils_path_ld)" \
			"$@"
	fi
	cw_emake \
		ARCH=${kernel_arch} \
		LDFLAGS="$(raw-ldflags)" \
		CROSS_COMPILE="${cross}" \
		O="$(cros-workon_get_build_dir)" \
		"$@"
}
# src_prepare hook: no kernel-specific preparation; defer to cros-workon.
cros-kernel2_src_prepare() {
	cros-workon_src_prepare
}
cros-kernel2_src_configure() {
	# Use a single or split kernel config as specified in the board or variant
	# make.conf overlay. Default to the arch specific split config if an
	# overlay or variant does not set either CHROMEOS_KERNEL_CONFIG or
	# CHROMEOS_KERNEL_SPLITCONFIG. CHROMEOS_KERNEL_CONFIG is set relative
	# to the root of the kernel source tree.
	local config
	local cfgarch="$(get_build_arch)"
	if [ -n "${CHROMEOS_KERNEL_CONFIG}" ]; then
		config="${S}/${CHROMEOS_KERNEL_CONFIG}"
	else
		config=${CHROMEOS_KERNEL_SPLITCONFIG:-"chromiumos-${cfgarch}"}
	fi
	elog "Using kernel config: ${config}"
	# Keep a handle on the old .config in case it hasn't changed. This way
	# we can keep the old timestamp which will avoid regenerating stuff that
	# hasn't actually changed.
	local temp_config="${T}/old-kernel-config"
	if [[ -e $(get_build_cfg) ]] ; then
		cp -a "$(get_build_cfg)" "${temp_config}"
	else
		rm -f "${temp_config}"
	fi
	if [ -n "${CHROMEOS_KERNEL_CONFIG}" ]; then
		# Single-file config: copy it in directly.
		cp -f "${config}" "$(get_build_cfg)" || die
	else
		# Split config: assemble via the kernel tree's helper script,
		# falling back to the arch defconfig shipped with this eclass.
		if [ -e chromeos/scripts/prepareconfig ] ; then
			chromeos/scripts/prepareconfig ${config} \
				"$(get_build_cfg)" || die
		else
			config="$(defconfig_dir)/${cfgarch}_defconfig"
			ewarn "Can't prepareconfig, falling back to default " \
				"${config}"
			cp "${config}" "$(get_build_cfg)" || die
		fi
	fi
	# Append the config fragment for each enabled USE flag; ${!msg} and
	# ${!config} are bash indirect expansions of ${fragment}_desc and
	# ${fragment}_config.
	local fragment
	for fragment in ${CONFIG_FRAGMENTS[@]}; do
		use ${fragment} || continue
		local msg="${fragment}_desc"
		local config="${fragment}_config"
		elog " - adding ${!msg} config"
		echo "${!config}" | \
			sed -e "s|%ROOT%|${ROOT}|g" \
			>> "$(get_build_cfg)" || die
	done
	# Use default for any options not explicitly set in splitconfig
	yes "" | kmake oldconfig
	# Restore the old config if it is unchanged.
	if cmp -s "$(get_build_cfg)" "${temp_config}" ; then
		touch -r "${temp_config}" "$(get_build_cfg)"
	fi
}
# @FUNCTION: get_dtb_name
# @USAGE: <dtb_dir>
# @DESCRIPTION:
# Get the name(s) of the device tree binary file(s) to include.
get_dtb_name() {
	local dtb_dir=${1}
	local board_with_variant=$(get_current_board_with_variant)
	# Do a simple mapping for device trees whose names don't match
	# the board_with_variant format; default to just the
	# board_with_variant format.
	case "${board_with_variant}" in
		(tegra2_dev-board)
			echo tegra-harmony.dtb
			;;
		(tegra2_seaboard)
			echo tegra-seaboard.dtb
			;;
		tegra*)
			echo ${board_with_variant}.dtb
			;;
		*)
			# Non-tegra boards: emit every .dtb the build produced.
			local f
			for f in ${dtb_dir}/*.dtb ; do
				basename ${f}
			done
			;;
	esac
}
# All current tegra boards ship with an u-boot that won't allow
# use of kernel_noload. Because of this, keep using the traditional
# kernel type for those. This means kernel_type kernel and regular
# load and entry point addresses.
# Echo the FIT image type for the current board.  Tegra boards ship a
# u-boot that can't use kernel_noload, so they keep the traditional
# "kernel" type; everything else uses "kernel_noload".
get_kernel_type() {
	local board="$(get_current_board_with_variant)"
	if [[ "${board}" == tegra* ]]; then
		echo kernel
	else
		echo kernel_noload
	fi
}
# Echo the load/entry address for the FIT image.  Tegra boards need a
# real load address; kernel_noload boards use 0.
get_load_addr() {
	local board="$(get_current_board_with_variant)"
	if [[ "${board}" == tegra* ]]; then
		echo 0x03000000
	else
		echo 0
	fi
}
cros-kernel2_src_compile() {
	local build_targets=() # use make default target
	if use arm; then
		# ARM needs an explicit uImage target, plus modules when enabled.
		build_targets=(
			"uImage"
			$(cros_chkconfig_present MODULES && echo "modules")
		)
	fi
	local src_dir="$(cros-workon_get_build_dir)/source"
	local kernel_arch=${CHROMEOS_KERNEL_ARCH:-$(tc-arch-kernel)}
	# Intentionally not local: src_test() reads SMATCH_ERROR_FILE and
	# SMATCH_LOG_FILE after this function returns.
	SMATCH_ERROR_FILE="${src_dir}/chromeos/check/smatch_errors.log"
	if use test && [[ -e "${SMATCH_ERROR_FILE}" ]]; then
		# Build with the smatch static checker and capture its output.
		local make_check_cmd="smatch -p=kernel"
		local test_options=(
			CHECK="${make_check_cmd}"
			C=1
		)
		SMATCH_LOG_FILE="$(cros-workon_get_build_dir)/make.log"
		# The path names in the log file are build-dependent. Strip out
		# the part of the path before "kernel/files" and retains what
		# comes after it: the file, line number, and error message.
		kmake -k ${build_targets[@]} "${test_options[@]}" |& \
			tee "${SMATCH_LOG_FILE}"
	else
		kmake -k ${build_targets[@]}
	fi
	if use device_tree; then
		kmake -k dtbs
	fi
}
# src_test hook: compare smatch errors from the build log against the
# whitelist produced with the sources; die on any non-whitelisted error.
cros-kernel2_src_test() {
	[[ -e ${SMATCH_ERROR_FILE} ]] || \
		die "smatch whitelist file ${SMATCH_ERROR_FILE} not found!"
	[[ -e ${SMATCH_LOG_FILE} ]] || \
		die "Log file from src_compile() ${SMATCH_LOG_FILE} not found!"
	# Extract error lines, trimming the build-dependent path prefix so
	# only file, line number, and message remain.
	grep -w error: "${SMATCH_LOG_FILE}" | grep -o "kernel/files/.*" \
		| sed s:"kernel/files/"::g > "${SMATCH_LOG_FILE}.errors"
	local num_errors=$(wc -l < "${SMATCH_LOG_FILE}.errors")
	local num_warnings=$(egrep -wc "warn:|warning:" "${SMATCH_LOG_FILE}")
	einfo "smatch found ${num_errors} errors and ${num_warnings} warnings."
	# Create a version of the error database that doesn't have line numbers,
	# since line numbers will shift as code is added or removed.
	local build_dir="$(cros-workon_get_build_dir)"
	local no_line_numbers_file="${build_dir}/no_line_numbers.log"
	sed -r "s/(:[0-9]+){1,2}//" "${SMATCH_ERROR_FILE}" > \
		"${no_line_numbers_file}"
	# For every smatch error that came up during the build, check if it is
	# in the error database file.
	local num_unknown_errors=0
	local line=""
	while read line; do
		local no_line_num=$(echo "${line}" | \
			sed -r "s/(:[0-9]+){1,2}//")
		if ! fgrep -q "${no_line_num}" "${no_line_numbers_file}"; then
			eerror "Non-whitelisted error found: \"${line}\""
			: $(( ++num_unknown_errors ))
		fi
	done < "${SMATCH_LOG_FILE}.errors"
	[[ ${num_unknown_errors} -eq 0 ]] || \
		die "smatch found ${num_unknown_errors} unknown errors."
}
cros-kernel2_src_install() {
	dodir /boot
	kmake INSTALL_PATH="${D}/boot" install
	if cros_chkconfig_present MODULES; then
		kmake INSTALL_MOD_PATH="${D}" modules_install
	fi
	kmake INSTALL_MOD_PATH="${D}" firmware_install
	local version=$(kernelversion)
	if use arm; then
		local boot_dir="$(cros-workon_get_build_dir)/arch/${ARCH}/boot"
		local kernel_bin="${D}/boot/vmlinuz-${version}"
		local zimage_bin="${D}/boot/zImage-${version}"
		local dtb_dir="${boot_dir}"
		# Newer kernels (after linux-next 12/3/12) put dtbs in the dts
		# dir. Use that if we find no dtbs directly in boot_dir.
		# Note that we try boot_dir first since the newer kernel will
		# actually rm ${boot_dir}/*.dtb so we'll have no stale files.
		if ! ls "${dtb_dir}"/*.dtb &> /dev/null; then
			dtb_dir="${boot_dir}/dts"
		fi
		if use device_tree; then
			# Package kernel + device trees into a u-boot fitImage.
			local its_script="$(cros-workon_get_build_dir)/its_script"
			emit_its_script "${its_script}" "${boot_dir}" \
				"${dtb_dir}" $(get_dtb_name "${dtb_dir}")
			mkimage -f "${its_script}" "${kernel_bin}" || die
		else
			cp -a "${boot_dir}/uImage" "${kernel_bin}" || die
		fi
		cp -a "${boot_dir}/zImage" "${zimage_bin}" || die
		# TODO(vbendeb): remove the below .uimg link creation code
		# after the build scripts have been modified to use the base
		# image name.
		cd $(dirname "${kernel_bin}")
		ln -sf $(basename "${kernel_bin}") vmlinux.uimg || die
		ln -sf $(basename "${zimage_bin}") zImage || die
	fi
	if [ ! -e "${D}/boot/vmlinuz" ]; then
		ln -sf "vmlinuz-${version}" "${D}/boot/vmlinuz" || die
	fi
	# Check the size of kernel image and issue warning when image size is near
	# the limit.
	local kernel_image_size=$(stat -c '%s' -L "${D}"/boot/vmlinuz)
	einfo "Kernel image size is ${kernel_image_size} bytes."
	if [[ ${kernel_image_size} -gt $((8 * 1024 * 1024)) ]]; then
		die "Kernel image is larger than 8 MB."
	elif [[ ${kernel_image_size} -gt $((7 * 1024 * 1024)) ]]; then
		ewarn "Kernel image is larger than 7 MB. Limit is 8 MB."
	fi
	# Install uncompressed kernel for debugging purposes.
	insinto /usr/lib/debug/boot
	doins "$(cros-workon_get_build_dir)/vmlinux"
	if use kernel_sources; then
		install_kernel_sources
	fi
}
EXPORT_FUNCTIONS pkg_setup src_prepare src_configure src_compile src_test src_install

View File

@ -0,0 +1,31 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Eclass for use by ebuilds that need to know the debug serial port.
#
# Guard: this eclass is only written/tested against EAPI 4.
[[ ${EAPI} != "4" ]] && die "Only EAPI=4 is supported"
# Prefix turning a port name into its USE flag (e.g. serial_use_ttyS0).
SERIAL_USE_PREFIX="serial_use_"
# All candidate serial devices; brace expansion yields ttyAMA0..ttyAMA5, etc.
ALL_SERIALPORTS=(
	ttyAMA{0..5}
	ttyO{0..5}
	ttyS{0..5}
	ttySAC{0..5}
)
# Expose one serial_use_* USE flag per candidate port.
IUSE=${ALL_SERIALPORTS[@]/#/${SERIAL_USE_PREFIX}}
# Echo the current serial port name
# Print the name of the serial port selected via USE flags (e.g. "ttyS0").
# Dies if none of the serial_use_* flags from ALL_SERIALPORTS is enabled.
get_serial_name() {
	local port
	for port in "${ALL_SERIALPORTS[@]}"; do
		use "${SERIAL_USE_PREFIX}${port}" || continue
		echo "${port}"
		return
	done
	die "Unable to determine current serial port."
}

View File

@ -0,0 +1,538 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
# @ECLASS: cros-workon.eclass
# @MAINTAINER:
# ChromiumOS Build Team
# @BUGREPORTS:
# Please report bugs via http://crosbug.com/new (with label Area-Build)
# @VCSURL: http://git.chromium.org/gitweb/?p=chromiumos/overlays/chromiumos-overlay.git;a=blob;f=eclass/@ECLASS@
# @BLURB: helper eclass for building ChromiumOS packages from git
# @DESCRIPTION:
# A lot of ChromiumOS packages (src/platform/ and src/third_party/) are
# managed in the same way. You've got a git tree and you want to build
# it. This automates a lot of that common stuff in one place.
# Array variables. All of the following variables can contain multiple items
# with the restriction being that all of them have to have either:
# - the same number of items globally
# - one item as default for all
# - no items as the cros-workon default
# The exception is CROS_WORKON_PROJECT which has to have all items specified.
ARRAY_VARIABLES=( CROS_WORKON_{SUBDIR,REPO,PROJECT,LOCALNAME,DESTDIR,COMMIT,TREE} )
# @ECLASS-VARIABLE: CROS_WORKON_SUBDIR
# @DESCRIPTION:
# Sub-directory which is added to create full source checkout path
: ${CROS_WORKON_SUBDIR:=}
# @ECLASS-VARIABLE: CROS_WORKON_REPO
# @DESCRIPTION:
# Git URL which is prefixed to CROS_WORKON_PROJECT
: ${CROS_WORKON_REPO:=http://git.chromium.org}
# @ECLASS-VARIABLE: CROS_WORKON_PROJECT
# @DESCRIPTION:
# Git project name which is suffixed to CROS_WORKON_REPO
: ${CROS_WORKON_PROJECT:=${PN}}
# @ECLASS-VARIABLE: CROS_WORKON_LOCALNAME
# @DESCRIPTION:
# Temporary local name in third_party
: ${CROS_WORKON_LOCALNAME:=${PN}}
# @ECLASS-VARIABLE: CROS_WORKON_DESTDIR
# @DESCRIPTION:
# Destination directory in ${WORKDIR} for checkout.
# Note that the default is ${S}, but is only referenced in src_unpack for
# ebuilds that would like to override it.
: ${CROS_WORKON_DESTDIR:=}
# @ECLASS-VARIABLE: CROS_WORKON_COMMIT
# @DESCRIPTION:
# Git commit to checkout to
: ${CROS_WORKON_COMMIT:=master}
# @ECLASS-VARIABLE: CROS_WORKON_TREE
# @DESCRIPTION:
# SHA1 of the contents of the repository. This is used for verifying the
# correctness of prebuilts. Unlike the commit hash, this SHA1 is unaffected
# by the history of the repository, or by commit messages.
: ${CROS_WORKON_TREE:=}
# Scalar variables. These variables modify the behaviour of the eclass.
# @ECLASS-VARIABLE: CROS_WORKON_SUBDIRS_TO_COPY
# @DESCRIPTION:
# Make cros-workon operate exclusively with the subtrees given by this array.
# NOTE: This only speeds up local_cp builds. Inplace/local_git builds are unaffected.
# It will also be disabled by using project arrays, rather than a single project.
: ${CROS_WORKON_SUBDIRS_TO_COPY:=/}
# @ECLASS-VARIABLE: CROS_WORKON_SUBDIRS_BLACKLIST
# @DESCRIPTION:
# Array of directories in the source tree to explicitly ignore and not even copy
# them over. This is intended, for example, for blocking infamous bloated and
# generated content that is unwanted during the build.
: ${CROS_WORKON_SUBDIRS_BLACKLIST:=}
# @ECLASS-VARIABLE: CROS_WORKON_SRCROOT
# @DESCRIPTION:
# Directory where chrome third party and platform sources are located (formerly CHROMEOS_ROOT)
: ${CROS_WORKON_SRCROOT:=}
# @ECLASS-VARIABLE: CROS_WORKON_INPLACE
# @DESCRIPTION:
# Build the sources in place. Don't copy them to a temp dir.
: ${CROS_WORKON_INPLACE:=}
# @ECLASS-VARIABLE: CROS_WORKON_USE_VCSID
# @DESCRIPTION:
# Export VCSID into the project
: ${CROS_WORKON_USE_VCSID:=}
# @ECLASS-VARIABLE: CROS_WORKON_GIT_SUFFIX
# @DESCRIPTION:
# The git eclass does not do locking on its repo. That means
# multiple ebuilds that use the same git repo cannot safely be
# emerged at the same time. Until we can get that sorted out,
# allow ebuilds that know they'll conflict to declare a unique
# path for storing the local clone.
: ${CROS_WORKON_GIT_SUFFIX:=}
# @ECLASS-VARIABLE: CROS_WORKON_OUTOFTREE_BUILD
# @DESCRIPTION:
# Do not copy the source tree to $S; instead set $S to the
# source tree and store compiled objects and build state
# in $WORKDIR. The ebuild is responsible for ensuring
# the build output goes to $WORKDIR, e.g. setting
# O=${WORKDIR}/${P}/build/${board} when compiling the kernel.
: ${CROS_WORKON_OUTOFTREE_BUILD:=}
# @ECLASS-VARIABLE: CROS_WORKON_INCREMENTAL_BUILD
# @DESCRIPTION:
# If set to "1", store output objects in a location that is not wiped
# between emerges. If disabled, objects will be written to ${WORKDIR}
# like normal.
: ${CROS_WORKON_INCREMENTAL_BUILD:=}
# Join the tree commits to produce a unique identifier
CROS_WORKON_TREE_COMPOSITE=$(IFS="_"; echo "${CROS_WORKON_TREE[*]}")
# Encode the composite tree hash as a USE flag so prebuilt binaries are
# invalidated whenever the source tree contents change.
IUSE="cros_workon_tree_$CROS_WORKON_TREE_COMPOSITE"
inherit git-2 flag-o-matic toolchain-funcs
# Sanitize all variables, autocomplete where necessary.
# This function possibly modifies all CROS_WORKON_ variables inplace. It also
# provides a global project_count variable which contains the number of
# projects.
# Sanitize all CROS_WORKON_* array variables in place: verify each has
# either exactly ${project_count} items or a single default, and replicate
# the single default across all projects.  Sets the global ${project_count}.
array_vars_autocomplete() {
	# NOTE: This one variable has to have all values explicitly filled in.
	project_count=${#CROS_WORKON_PROJECT[@]}
	# No project_count is really bad.
	[ ${project_count} -eq 0 ] && die "Must have at least one CROS_WORKON_PROJECT"
	# For one project, defaults will suffice.
	[ ${project_count} -eq 1 ] && return
	[[ ${CROS_WORKON_OUTOFTREE_BUILD} == "1" ]] && die "Out of Tree Build not compatible with multi-project ebuilds"
	local count var
	for var in "${ARRAY_VARIABLES[@]}"; do
		# ${var} holds a variable NAME; eval is needed for indirection.
		eval count=\${#${var}\[@\]}
		if [[ ${count} -ne ${project_count} ]] && [[ ${count} -ne 1 ]]; then
			die "${var} has ${count} projects. ${project_count} or one default expected."
		fi
		# Invariably, ${project_count} is at least 2 here. All variables also either
		# have all items or the first serves as default (or isn't needed if
		# empty). By looking at the second item, determine if we need to
		# autocomplete.
		local i
		if [[ ${count} -ne ${project_count} ]]; then
			# Replicate element 0 into slots 1..project_count-1.
			for (( i = 1; i < project_count; ++i )); do
				eval ${var}\[i\]=\${${var}\[0\]}
			done
		fi
		eval einfo "${var}: \${${var}[@]}"
	done
}
# Calculate path where code should be checked out.
# Result passed through global variable "path" to preserve proper array quoting.
# Compute the local checkout path for every project.
# Result is returned via the global array "path" (one entry per project)
# to preserve proper quoting; reads ${project_count} set by
# array_vars_autocomplete.
get_paths() {
	# Determine the root of the source checkout.
	local base
	if [[ -n "${CROS_WORKON_SRCROOT}" ]]; then
		base="${CROS_WORKON_SRCROOT}"
	elif [[ -n "${CHROMEOS_ROOT}" ]]; then
		base="${CHROMEOS_ROOT}"
	else
		# HACK: Figure out the missing legacy path for now
		# this only happens in amd64 chroot with sudo emerge.
		base="/home/${SUDO_USER}/trunk"
	fi
	# chromeos-base packages live in src/platform; everything else in
	# src/third_party.
	case "${CATEGORY}" in
	chromeos-base) base+=/src/platform ;;
	*)             base+=/src/third_party ;;
	esac
	path=()
	local i element
	for (( i = 0; i < project_count; ++i )); do
		element="${base}/${CROS_WORKON_LOCALNAME[i]}"
		if [[ -n "${CROS_WORKON_SUBDIR[i]}" ]]; then
			element+="/${CROS_WORKON_SUBDIR[i]}"
		fi
		path+=( "${element}" )
	done
}
# Copy sources from ${1} (src) into ${2} (dst) with rsync.
# Only the subtrees listed in CROS_WORKON_SUBDIRS_TO_COPY are copied, and
# anything listed in CROS_WORKON_SUBDIRS_BLACKLIST is excluded.
local_copy_cp() {
	local src="${1}"
	local dst="${2}"
	einfo "Copying sources from ${src}"
	# BUGFIX: the eclass variable declared above is
	# CROS_WORKON_SUBDIRS_BLACKLIST; the old spelling
	# CROS_WORKON_SUBDIR_BLACKLIST (no "S") was always unset, so the
	# blacklist silently never took effect.  Guard against the empty
	# default so we don't pass a bare "--exclude=" to rsync.
	local blacklist=()
	if [[ -n "${CROS_WORKON_SUBDIRS_BLACKLIST[*]}" ]]; then
		blacklist=( "${CROS_WORKON_SUBDIRS_BLACKLIST[@]/#/--exclude=}" )
	fi
	local sl
	for sl in "${CROS_WORKON_SUBDIRS_TO_COPY[@]}"; do
		if [[ -d "${src}/${sl}" ]]; then
			mkdir -p "${dst}/${sl}"
			rsync -a "${blacklist[@]}" "${src}/${sl}"/* "${dst}/${sl}" || \
				die "rsync -a ${blacklist[@]} ${src}/${sl}/* ${dst}/${sl}"
		fi
	done
}
# Symlink ${2} (dst) to ${1} (src) so the package builds directly inside
# the source tree.  Warns when the Portage sandbox has not been opened up
# for writes to CROS_WORKON_SRCROOT.
symlink_in_place() {
	local from="${1}"
	local to="${2}"
	einfo "Using experimental inplace build in ${from}."
	# Check whether ":${CROS_WORKON_SRCROOT}:" occurs in SANDBOX_WRITE.
	SBOX_TMP=":${SANDBOX_WRITE}:"
	if [ "${SBOX_TMP/:$CROS_WORKON_SRCROOT://}" == "${SBOX_TMP}" ]; then
		ewarn "For inplace build you need to modify the sandbox"
		ewarn "Set SANDBOX_WRITE=${CROS_WORKON_SRCROOT} in your env."
	fi
	ln -sf "${from}" "${to}"
}
# Materialize the source tree ${1} at ${2} using the configured strategy:
# in-place symlink, out-of-tree (just repoint ${S}), or rsync copy.
local_copy() {
	local from="${1}" dest="${2}"
	if [[ "${CROS_WORKON_INPLACE}" == "1" ]]; then
		# Build directly in the source tree via a symlink.
		symlink_in_place "${from}" "${dest}"
	elif [[ "${CROS_WORKON_OUTOFTREE_BUILD}" == "1" ]]; then
		# Leave the sources where they are and point ${S} at them.
		S="${from}"
	else
		local_copy_cp "${from}" "${dest}"
	fi
}
# Export VCSID as "${PVR}-<id>" where <id> is ${1} (a commit hash or tag).
# When CROS_WORKON_USE_VCSID=1, also plumb it into CFLAGS and MAKEOPTS.
set_vcsid() {
	export VCSID="${PVR}-${1}"
	[ "${CROS_WORKON_USE_VCSID}" = "1" ] || return 0
	append-flags -DVCSID=\\\"${VCSID}\\\"
	MAKEOPTS+=" VCSID=${VCSID}"
}
# Print the SHA1 of HEAD in the git directory given as ${1}.
get_rev() {
	git --git-dir="${1}" rev-parse HEAD
}
# src_unpack phase: obtain the sources for every project, either straight
# from the developer's local checkout (live 9999 builds, or an unmodified
# out-of-tree checkout already at the pinned commit) or by cloning via git
# at CROS_WORKON_COMMIT, falling back to git-2.eclass when the local repo
# is missing or stale.  Exports VCSID via set_vcsid on success.
cros-workon_src_unpack() {
	local fetch_method # local|git
	# Set the default of CROS_WORKON_DESTDIR. This is done here because S is
	# sometimes overridden in ebuilds and we cannot rely on the global state
	# (and therefore ordering of eclass inherits and local ebuild overrides).
	: ${CROS_WORKON_DESTDIR:=${S}}
	# Fix array variables
	array_vars_autocomplete
	if [[ "${PV}" == "9999" ]]; then
		# Live packages
		fetch_method=local
	else
		fetch_method=git
	fi
	# Hack
	# TODO(msb): remove once we've resolved the include path issue
	# http://groups.google.com/a/chromium.org/group/chromium-os-dev/browse_thread/thread/5e85f28f551eeda/3ae57db97ae327ae
	local p i
	for p in "${CROS_WORKON_LOCALNAME[@]/#/${WORKDIR}/}"; do
		ln -s "${S}" "${p}" &> /dev/null
	done
	# Snapshot the (autocompleted) arrays into locals.
	local repo=( "${CROS_WORKON_REPO[@]}" )
	local project=( "${CROS_WORKON_PROJECT[@]}" )
	local destdir=( "${CROS_WORKON_DESTDIR[@]}" )
	get_paths
	# Automatically build out-of-tree for common.mk packages.
	# TODO(vapier): Enable this once all common.mk packages have converted.
	#if [[ -e ${path}/common.mk ]] ; then
	#	: ${CROS_WORKON_OUTOFTREE_BUILD:=1}
	#fi
	# NOTE(review): below, ${path} and ${CROS_WORKON_COMMIT} are arrays used
	# unsubscripted, i.e. only element 0 is examined; out-of-tree builds are
	# single-project only (enforced in array_vars_autocomplete), so this is
	# presumably intentional — confirm.
	if [[ ${fetch_method} == "git" && ${CROS_WORKON_OUTOFTREE_BUILD} == "1" ]] ; then
		# See if the local repo exists, is unmodified, and is checked out to
		# the right rev. This will be the common case, so support it to make
		# builds a bit faster.
		if [[ -d ${path} ]] ; then
			if [[ ${CROS_WORKON_COMMIT} == "$(get_rev "${path}/.git")" ]] ; then
				local changes=$(
					cd "${path}"
					# Needed as `git status` likes to grab a repo lock.
					addpredict "${PWD}"
					# Ignore untracked files as they (should) be ignored by the build too.
					git status --porcelain | grep -v '^[?][?]'
				)
				if [[ -z ${changes} ]] ; then
					fetch_method=local
				else
					# Assume that if the dev has changes, they want it that way.
					: #ewarn "${path} contains changes"
				fi
			else
				ewarn "${path} is not at rev ${CROS_WORKON_COMMIT}"
			fi
		else
			# This will hit minilayout users a lot, and rarely non-minilayout
			# users. So don't bother warning here.
			: #ewarn "${path} does not exist"
		fi
	fi
	if [[ "${fetch_method}" == "git" ]] ; then
		# True iff every project has a local checkout directory.
		all_local() {
			local p
			for p in "${path[@]}"; do
				[[ -d ${p} ]] || return 1
			done
			return 0
		}
		local fetched=0
		if all_local; then
			for (( i = 0; i < project_count; ++i )); do
				# Looks like we already have a local copy of all repositories.
				# Let's use these and checkout ${CROS_WORKON_COMMIT}.
				#  -s: For speed, share objects between ${path} and ${S}.
				#  -n: Don't checkout any files from the repository yet. We'll
				#      checkout the source separately.
				#
				# We don't use git clone to checkout the source because the -b
				# option for clone defaults to HEAD if it can't find the
				# revision you requested. On the other hand, git checkout fails
				# if it can't find the revision you requested, so we use that
				# instead.
				# Destination directory. If we have one project, it's simply
				# ${CROS_WORKON_DESTDIR}. More projects either specify an array or go to
				# ${S}/${project}.
				if [[ "${CROS_WORKON_COMMIT[i]}" == "master" ]]; then
					# Since we don't have a CROS_WORKON_COMMIT revision specified,
					# we don't know what revision the ebuild wants. Let's take the
					# version of the code that the user has checked out.
					#
					# This almost replicates the pre-cros-workon behavior, where
					# the code you had in your source tree was used to build
					# things. One difference here, however, is that only committed
					# changes are included.
					#
					# TODO(davidjames): We should fix the preflight buildbot to
					# specify CROS_WORKON_COMMIT for all ebuilds, and update this
					# code path to fail and explain the problem.
					git clone -s "${path[i]}" "${destdir[i]}" || \
						die "Can't clone ${path[i]}."
					: $(( ++fetched ))
				else
					git clone -sn "${path[i]}" "${destdir[i]}" || \
						die "Can't clone ${path[i]}."
					if ! ( cd ${destdir[i]} && git checkout -q ${CROS_WORKON_COMMIT[i]} ) ; then
						ewarn "Cannot run git checkout ${CROS_WORKON_COMMIT[i]} in ${destdir[i]}."
						ewarn "Is ${path[i]} up to date? Try running repo sync."
						# Wipe the partial clone so the git.eclass fallback starts clean.
						rm -rf "${destdir[i]}/.git"
					else
						: $(( ++fetched ))
					fi
				fi
			done
			if [[ ${fetched} -eq ${project_count} ]]; then
				# TODO: Id of all repos?
				set_vcsid "$(get_rev "${path[0]}/.git")"
				return
			else
				ewarn "Falling back to git.eclass..."
			fi
		fi
		# Since we have no indication of being on a branch, it would
		# default to 'master', and that might not contain our commit, as
		# minilayout can have git checkouts independent of the repo tree.
		# Hack around this by using empty branch. This will cause git fetch to
		# pull all branches instead. Note that the branch has to be a space,
		# rather than empty, for this trick to work.
		EGIT_BRANCH=" "
		for (( i = 0; i < project_count; ++i )); do
			# Configure git-2.eclass inputs for this project, then unpack.
			EGIT_REPO_URI="${repo[i]}/${project[i]}.git"
			EGIT_PROJECT="${project[i]}${CROS_WORKON_GIT_SUFFIX}"
			EGIT_SOURCEDIR="${destdir[i]}"
			EGIT_COMMIT="${CROS_WORKON_COMMIT[i]}"
			# Clones to /var, copies src tree to the /build/<board>/tmp.
			git-2_src_unpack
			# TODO(zbehan): Support multiple projects for vcsid?
		done
		set_vcsid "${CROS_WORKON_COMMIT[0]}"
		return
	fi
	einfo "Using local source dir(s): ${path[*]}"
	# Clone from the git host + repository path specified by
	# CROS_WORKON_REPO + CROS_WORKON_PROJECT. Checkout source from
	# the branch specified by CROS_WORKON_COMMIT into the workspace path.
	# If the repository exists just punt and let it be copied off for build.
	if [[ "${fetch_method}" == "local" && ! -d ${path} ]] ; then
		ewarn "Sources are missing in ${path}"
		ewarn "You need to cros_workon and repo sync your project. For example if you are working on the shill ebuild and repository:"
		ewarn "cros_workon start --board=x86-generic shill"
		ewarn "repo sync shill"
	fi
	einfo "path: ${path[*]}"
	einfo "destdir: ${destdir[*]}"
	# Copy source tree to /build/<board>/tmp for building
	for (( i = 0; i < project_count; ++i )); do
		local_copy "${path[i]}" "${destdir[i]}" || \
			die "Cannot create a local copy"
		# NOTE(review): set_vcsid runs on every iteration but always uses
		# path[0]; it looks like it was meant to run once after the loop —
		# confirm before changing.
		set_vcsid "$(get_rev "${path[0]}/.git")"
	done
}
# Print the directory where build output should go.  Incremental builds
# use a persistent per-package cache directory (with ":${SLOT}" appended
# for non-zero slots); otherwise a throwaway dir under ${WORKDIR} is used.
cros-workon_get_build_dir() {
	local out="${WORKDIR}/build"
	if [[ ${CROS_WORKON_INCREMENTAL_BUILD} == "1" ]]; then
		out="${SYSROOT}/var/cache/portage/${CATEGORY}/${PN}"
		if [[ ${SLOT:-0} != "0" ]]; then
			out+=":${SLOT}"
		fi
	fi
	echo "${out}"
}
# pkg_setup phase: for incremental builds, pre-create the persistent
# output directory, open it to the sandbox, and hand ownership to the
# build user.  No-op otherwise.
cros-workon_pkg_setup() {
	[[ ${CROS_WORKON_INCREMENTAL_BUILD} == "1" ]] || return 0
	local out
	out=$(cros-workon_get_build_dir)
	addwrite "${out}"
	mkdir -p -m 755 "${out}"
	chown ${PORTAGE_USERNAME}:${PORTAGE_GRPNAME} "${out}" "${out%/*}"
}
# src_prepare phase: create the build output dir (incremental dirs were
# already made in pkg_setup) and export OUT for common.mk-based packages,
# honoring any OUT the environment already set.
cros-workon_src_prepare() {
	local builddir
	builddir="$(cros-workon_get_build_dir)"
	if [[ ${CROS_WORKON_INCREMENTAL_BUILD} != "1" ]]; then
		mkdir -p "${builddir}"
	fi
	if [[ -e ${S}/common.mk ]] ; then
		: "${OUT=${builddir}}"
		export OUT
	fi
}
# src_configure phase: for common.mk packages, export the toolchain and
# build knobs the build system expects; otherwise defer to the default
# configure phase.
cros-workon_src_configure() {
	if [[ ! -e ${S}/common.mk ]] ; then
		default
		return
	fi
	# We somewhat overshoot here, but it isn't harmful,
	# and catches all the packages we care about.
	tc-export CC CXX AR RANLIB LD NM PKG_CONFIG
	# Portage takes care of this for us.
	export SPLITDEBUG=0
	if [[ $(type -t cros-debug-add-NDEBUG) == "function" ]] ; then
		# Only run this if we've inherited cros-debug.eclass.
		cros-debug-add-NDEBUG
	fi
	if [[ ${LIBCHROME_VERS:+set} == "set" ]] ; then
		# For packages that use libchromeos, set it up automatically.
		export BASE_VER=${LIBCHROME_VERS}
	fi
}
# emake wrapper for incremental builds: on failure, preserve the
# (possibly corrupt) incremental output directory as "<dir>%failed" for
# post-mortem inspection (buildbots only), then die.
cw_emake() {
	local dir=$(cros-workon_get_build_dir)
	# Clean up a previous build dir if it exists. Use sudo in case some
	# files happened to be owned by root or are otherwise marked a-w.
	# Runs in the background; `wait` below synchronizes when needed.
	sudo rm -rf "${dir}%failed" &
	if ! nonfatal emake "$@" ; then
		# If things failed, move the incremental dir out of the way --
		# we don't know why exactly it failed as it could be due to
		# corruption. Don't throw it away immediately in case the
		# developer wants to poke around.
		# http://crosbug.com/35958
		if [[ ${CROS_WORKON_INCREMENTAL_BUILD} == "1" ]] ; then
			if [[ $(hostname -d) == "golo.chromium.org" ]] ; then
				eerror "The build failed. Output has been retained at:"
				eerror " ${dir}%failed/"
				eerror "It will be cleaned up automatically next emerge."
				wait # wait for the background `rm` above to finish.
				mv "${dir}" "${dir}%failed"
			else
				ewarn "If this failure is due to build-dir corruption, run:"
				ewarn " sudo rm -rf '${dir}'"
			fi
		fi
		die "command: emake $*"
	fi
}
# src_compile phase: common.mk packages build via cw_emake; everything
# else uses the default compile phase.
cros-workon_src_compile() {
	if [[ ! -e ${S}/common.mk ]] ; then
		default
	else
		cw_emake
	fi
}
# src_test phase: common.mk packages run their "tests" target (under
# valgrind when the flag is enabled); everything else uses the default.
cros-workon_src_test() {
	if [[ ! -e ${S}/common.mk ]] ; then
		default
		return
	fi
	emake \
		VALGRIND=$(use_if_iuse valgrind && echo 1) \
		tests
}
# pkg_info phase: print CROS_WORKON_SRCDIR and CROS_WORKON_PROJECT as
# quoted bash-array assignments for consumption by external tooling.
cros-workon_pkg_info() {
	# Helper: print each argument as a double-quoted word on its own line.
	print_quoted_array() { printf '"%s"\n' "$@"; }
	array_vars_autocomplete > /dev/null
	get_paths
	CROS_WORKON_SRCDIR=("${path[@]}")
	local val var
	for var in CROS_WORKON_SRCDIR CROS_WORKON_PROJECT ; do
		# Indirect read of the array named by ${var}.
		eval val=(\"\${${var}\[@\]}\")
		echo ${var}=\($(print_quoted_array "${val[@]}")\)
	done
}
EXPORT_FUNCTIONS pkg_setup src_unpack pkg_info

View File

@ -0,0 +1,566 @@
# Copyright 1999-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/eclass/git-2.eclass,v 1.14 2011/08/22 04:46:31 vapier Exp $
# @ECLASS: git-2.eclass
# @MAINTAINER:
# Donnie Berkholz <dberkholz@gentoo.org>
# @BLURB: Eclass for fetching and unpacking git repositories.
# @DESCRIPTION:
# Eclass for easing maintenance of live ebuilds using git as a remote repository.
# The eclass supports working with git submodules and branching.
# This eclass supports all EAPIs.
EXPORT_FUNCTIONS src_unpack
DEPEND="dev-vcs/git"
# @ECLASS-VARIABLE: EGIT_SOURCEDIR
# @DESCRIPTION:
# This variable specifies destination where the cloned
# data are copied to.
#
# EGIT_SOURCEDIR="${S}"
# @ECLASS-VARIABLE: EGIT_STORE_DIR
# @DESCRIPTION:
# Storage directory for git sources.
#
# EGIT_STORE_DIR="${DISTDIR}/egit-src"
# @ECLASS-VARIABLE: EGIT_HAS_SUBMODULES
# @DEFAULT_UNSET
# @DESCRIPTION:
# If non-empty this variable enables support for git submodules in our
# checkout. Also this makes the checkout to be non-bare for now.
# @ECLASS-VARIABLE: EGIT_OPTIONS
# @DEFAULT_UNSET
# @DESCRIPTION:
# Variable specifying additional options for fetch command.
# @ECLASS-VARIABLE: EGIT_MASTER
# @DESCRIPTION:
# Variable for specifying master branch.
# Useful when upstream doesn't have a master branch or names it differently.
#
# EGIT_MASTER="master"
# @ECLASS-VARIABLE: EGIT_PROJECT
# @DESCRIPTION:
# Variable specifying name for the folder where we check out the git
# repository. Value of this variable should be unique in the
# EGIT_STORE_DIR as otherwise you would override another repository.
#
# EGIT_PROJECT="${EGIT_REPO_URI##*/}"
# @ECLASS-VARIABLE: EGIT_DIR
# @DESCRIPTION:
# Directory where we want to store the git data.
# This variable should not be overriden.
#
# EGIT_DIR="${EGIT_STORE_DIR}/${EGIT_PROJECT}"
# @ECLASS-VARIABLE: EGIT_REPO_URI
# @REQUIRED
# @DEFAULT_UNSET
# @DESCRIPTION:
# URI for the repository
# e.g. http://foo, git://bar
#
# Support multiple values:
# EGIT_REPO_URI="git://a/b.git http://c/d.git"
# @ECLASS-VARIABLE: EVCS_OFFLINE
# @DEFAULT_UNSET
# @DESCRIPTION:
# If non-empty this variable prevents performance of any online
# operations.
# @ECLASS-VARIABLE: EGIT_BRANCH
# @DESCRIPTION:
# Variable containing branch name we want to check out.
# It can be overriden via env using packagename_LIVE_BRANCH
# variable.
#
# EGIT_BRANCH="${EGIT_MASTER}"
# @ECLASS-VARIABLE: EGIT_COMMIT
# @DESCRIPTION:
# Variable containing commit hash/tag we want to check out.
# It can be overriden via env using packagename_LIVE_COMMIT
# variable.
#
# EGIT_COMMIT="${EGIT_BRANCH}"
# @ECLASS-VARIABLE: EGIT_REPACK
# @DEFAULT_UNSET
# @DESCRIPTION:
# If non-empty this variable specifies that repository will be repacked to
# save space. However this can take a REALLY LONG time with VERY big
# repositories.
# @ECLASS-VARIABLE: EGIT_PRUNE
# @DEFAULT_UNSET
# @DESCRIPTION:
# If non-empty this variable enables pruning all loose objects on each fetch.
# This is useful if upstream rewinds and rebases branches often.
# @ECLASS-VARIABLE: EGIT_NONBARE
# @DEFAULT_UNSET
# @DESCRIPTION:
# If non-empty this variable specifies that all checkouts will be done using
# non bare repositories. This is useful if you can't operate with bare
# checkouts for some reason.
# @ECLASS-VARIABLE: EGIT_NOUNPACK
# @DEFAULT_UNSET
# @DESCRIPTION:
# If non-empty this variable bans unpacking of ${A} content into the srcdir.
# Default behaviour is to unpack ${A} content.
# @FUNCTION: git-2_init_variables
# @DESCRIPTION:
# Internal function initializing all git variables.
# We define it in function scope so user can define
# all the variables before and after inherit.
git-2_init_variables() {
	debug-print-function ${FUNCNAME} "$@"
	local x
	# Fill in defaults for anything the ebuild didn't set.
	: ${EGIT_SOURCEDIR="${S}"}
	: ${EGIT_STORE_DIR:="${PORTAGE_ACTUAL_DISTDIR-${DISTDIR}}/egit-src"}
	: ${EGIT_HAS_SUBMODULES:=}
	: ${EGIT_OPTIONS:=}
	: ${EGIT_MASTER:=master}
	# Allow per-package env overrides of the form <pkgname>_LIVE_REPO
	# (with - and + in ${PN} mapped to _); eval performs the indirection.
	eval x="\$${PN//[-+]/_}_LIVE_REPO"
	EGIT_REPO_URI=${x:-${EGIT_REPO_URI}}
	[[ -z ${EGIT_REPO_URI} ]] && die "EGIT_REPO_URI must have some value"
	: ${EVCS_OFFLINE:=}
	# Same override mechanism for branch and commit; using them is
	# unsupported, hence the QA warnings.
	eval x="\$${PN//[-+]/_}_LIVE_BRANCH"
	[[ -n ${x} ]] && ewarn "QA: using \"${PN//[-+]/_}_LIVE_BRANCH\" variable, you won't get any support"
	EGIT_BRANCH=${x:-${EGIT_BRANCH:-${EGIT_MASTER}}}
	eval x="\$${PN//[-+]/_}_LIVE_COMMIT"
	[[ -n ${x} ]] && ewarn "QA: using \"${PN//[-+]/_}_LIVE_COMMIT\" variable, you won't get any support"
	# Commit falls back to the branch name when unset.
	EGIT_COMMIT=${x:-${EGIT_COMMIT:-${EGIT_BRANCH}}}
	: ${EGIT_REPACK:=}
	: ${EGIT_PRUNE:=}
}
# @FUNCTION: git-2_submodules
# @DESCRIPTION:
# Internal function wrapping the submodule initialisation and update.
# Internal: initialise, sync and update git submodules inside EGIT_DIR.
# No-op unless EGIT_HAS_SUBMODULES is set; returns 1 when offline since
# submodule operations need network access.
git-2_submodules() {
	debug-print-function ${FUNCNAME} "$@"
	[[ -n ${EGIT_HAS_SUBMODULES} ]] || return 0
	if [[ -n ${EVCS_OFFLINE} ]]; then
		# for submodules operations we need to be online
		debug-print "${FUNCNAME}: not updating submodules in offline mode"
		return 1
	fi
	debug-print "${FUNCNAME}: working in \"${1}\""
	pushd "${EGIT_DIR}" > /dev/null
	debug-print "${FUNCNAME}: git submodule init"
	git submodule init || die
	debug-print "${FUNCNAME}: git submodule sync"
	git submodule sync || die
	debug-print "${FUNCNAME}: git submodule update"
	git submodule update || die
	popd > /dev/null
}
# @FUNCTION: git-2_branch
# @DESCRIPTION:
# Internal function that changes branch for the repo based on EGIT_COMMIT and
# EGIT_BRANCH variables.
# Internal: check out a working branch in EGIT_SOURCEDIR — a local
# "branch-<branch>" tracking origin/<branch>, or "tree-<commit>" when a
# specific commit was requested via EGIT_COMMIT.  Dies on checkout failure.
git-2_branch() {
	debug-print-function ${FUNCNAME} "$@"
	debug-print "${FUNCNAME}: working in \"${EGIT_SOURCEDIR}\""
	pushd "${EGIT_SOURCEDIR}" > /dev/null
	# Fix: branchname/src were declared local twice (a bare `local
	# branchname src` immediately followed by this declaring assignment);
	# the redundant first declaration has been dropped.
	local branchname=branch-${EGIT_BRANCH} src=origin/${EGIT_BRANCH}
	if [[ ${EGIT_COMMIT} != ${EGIT_BRANCH} ]]; then
		branchname=tree-${EGIT_COMMIT}
		src=${EGIT_COMMIT}
	fi
	debug-print "${FUNCNAME}: git checkout -b ${branchname} ${src}"
	git checkout -b ${branchname} ${src} \
		|| die "${FUNCNAME}: changing the branch failed"
	popd > /dev/null
}
# @FUNCTION: git-2_gc
# @DESCRIPTION:
# Internal function running garbage collector on checked out tree.
# Internal: run `git gc` on the stored repository when repacking or
# pruning was requested via EGIT_REPACK / EGIT_PRUNE; otherwise no-op.
git-2_gc() {
	debug-print-function ${FUNCNAME} "$@"
	local args=""
	pushd "${EGIT_DIR}" > /dev/null
	if [[ -n ${EGIT_REPACK} || -n ${EGIT_PRUNE} ]]; then
		ebegin "Garbage collecting the repository"
		if [[ -n ${EGIT_PRUNE} ]]; then
			args='--prune'
		fi
		debug-print "${FUNCNAME}: git gc ${args}"
		git gc ${args}
		eend $?
	fi
	popd > /dev/null
}
# @FUNCTION: git-2_prepare_storedir
# @DESCRIPTION:
# Internal function preparing directory where we are going to store SCM
# repository.
# Internal: ensure EGIT_STORE_DIR exists and is sandbox-writable, then
# derive and export EGIT_DIR (from EGIT_PROJECT, or failing that from the
# basename of EGIT_REPO_URI) unless the ebuild set it explicitly.
git-2_prepare_storedir() {
	debug-print-function ${FUNCNAME} "$@"
	# initial clone, we have to create master git storage directory and play
	# nicely with sandbox
	if [[ ! -d ${EGIT_STORE_DIR} ]]; then
		debug-print "${FUNCNAME}: Creating git main storage directory"
		addwrite /
		mkdir -p "${EGIT_STORE_DIR}" \
			|| die "${FUNCNAME}: can't mkdir \"${EGIT_STORE_DIR}\""
	fi
	# allow writing into EGIT_STORE_DIR
	addwrite "${EGIT_STORE_DIR}"
	# Strip a trailing slash so the basename of EGIT_REPO_URI is non-empty.
	[[ -z ${EGIT_REPO_URI##*/} ]] && EGIT_REPO_URI="${EGIT_REPO_URI%/}"
	if [[ -z ${EGIT_DIR} ]]; then
		local clone_dir=${EGIT_PROJECT:-${EGIT_REPO_URI##*/}}
		EGIT_DIR=${EGIT_STORE_DIR}/${clone_dir}
	fi
	export EGIT_DIR=${EGIT_DIR}
	debug-print "${FUNCNAME}: Storing the repo into \"${EGIT_DIR}\"."
}
# @FUNCTION: git-2_move_source
# @DESCRIPTION:
# Internal function moving sources from the EGIT_DIR to EGIT_SOURCEDIR dir.
git-2_move_source() {
	debug-print-function ${FUNCNAME} "$@"
	debug-print "${FUNCNAME}: ${MOVE_COMMAND} \"${EGIT_DIR}\" \"${EGIT_SOURCEDIR}\""
	pushd "${EGIT_DIR}" > /dev/null
	mkdir -p "${EGIT_SOURCEDIR}" \
		|| die "${FUNCNAME}: failed to create ${EGIT_SOURCEDIR}"
	# MOVE_COMMAND is set by git-2_migrate_repository: either a shared
	# `git clone` (bare store) or `cp -pPR .` (non-bare store).
	${MOVE_COMMAND} "${EGIT_SOURCEDIR}" \
		|| die "${FUNCNAME}: sync to \"${EGIT_SOURCEDIR}\" failed"
	popd > /dev/null
}
# @FUNCTION: git-2_initial_clone
# @DESCRIPTION:
# Internal function running initial clone on specified repo_uri.
# Internal: clone the repository into EGIT_DIR, trying each URI listed in
# EGIT_REPO_URI until one succeeds.  Records the winning URI in the
# global EGIT_REPO_URI_SELECTED, or dies if every URI failed.
git-2_initial_clone() {
	debug-print-function ${FUNCNAME} "$@"
	local uri
	EGIT_REPO_URI_SELECTED=""
	for uri in ${EGIT_REPO_URI}; do
		debug-print "${FUNCNAME}: git clone ${EGIT_LOCAL_OPTIONS} \"${uri}\" \"${EGIT_DIR}\""
		if git clone ${EGIT_LOCAL_OPTIONS} "${uri}" "${EGIT_DIR}"; then
			# global variable containing the repo_name we will be using
			debug-print "${FUNCNAME}: EGIT_REPO_URI_SELECTED=\"${uri}\""
			EGIT_REPO_URI_SELECTED="${uri}"
			break
		fi
	done
	[[ -n ${EGIT_REPO_URI_SELECTED} ]] \
		|| die "${FUNCNAME}: can't fetch from ${EGIT_REPO_URI}"
}
# @FUNCTION: git-2_update_repo
# @DESCRIPTION:
# Internal function running update command on specified repo_uri.
git-2_update_repo() {
	debug-print-function ${FUNCNAME} "$@"
	# Fix: declare the branch-pruning loop variable `x` local as well;
	# previously it leaked into the caller's environment.
	local repo_uri x
	if [[ -n ${EGIT_LOCAL_NONBARE} ]]; then
		# checkout master branch and drop all other local branches
		git checkout ${EGIT_MASTER} || die "${FUNCNAME}: can't checkout master branch ${EGIT_MASTER}"
		for x in $(git branch | grep -v "* ${EGIT_MASTER}" | tr '\n' ' '); do
			debug-print "${FUNCNAME}: git branch -D ${x}"
			git branch -D ${x} > /dev/null
		done
	fi
	# Try each configured URI until the update command succeeds.
	EGIT_REPO_URI_SELECTED=""
	for repo_uri in ${EGIT_REPO_URI}; do
		# git urls might change, so reset it
		git config remote.origin.url "${repo_uri}"
		debug-print "${EGIT_UPDATE_CMD}"
		${EGIT_UPDATE_CMD} > /dev/null
		if [[ $? -eq 0 ]]; then
			# global variable containing the repo_name we will be using
			debug-print "${FUNCNAME}: EGIT_REPO_URI_SELECTED=\"${repo_uri}\""
			EGIT_REPO_URI_SELECTED="${repo_uri}"
			break
		fi
	done
	if [[ -z ${EGIT_REPO_URI_SELECTED} ]]; then
		die "${FUNCNAME}: can't update from ${EGIT_REPO_URI}"
	fi
}
# @FUNCTION: git-2_fetch
# @DESCRIPTION:
# Internal function fetching repository from EGIT_REPO_URI and storing it in
# specified EGIT_STORE_DIR.
git-2_fetch() {
	debug-print-function ${FUNCNAME} "$@"
	local oldsha cursha repo_type
	[[ -n ${EGIT_LOCAL_NONBARE} ]] && repo_type="non-bare repository" || repo_type="bare repository"
	# Three cases: first-ever clone, offline (use what we have), or
	# online update of an existing store.
	if [[ ! -d ${EGIT_DIR} ]]; then
		git-2_initial_clone
		pushd "${EGIT_DIR}" > /dev/null
		# UPSTREAM_BRANCH is set by git-2_migrate_repository.
		cursha=$(git rev-parse ${UPSTREAM_BRANCH})
		echo "GIT NEW clone -->"
		echo " repository: ${EGIT_REPO_URI_SELECTED}"
		echo " at the commit: ${cursha}"
		popd > /dev/null
	elif [[ -n ${EVCS_OFFLINE} ]]; then
		pushd "${EGIT_DIR}" > /dev/null
		cursha=$(git rev-parse ${UPSTREAM_BRANCH})
		echo "GIT offline update -->"
		echo " repository: $(git config remote.origin.url)"
		echo " at the commit: ${cursha}"
		popd > /dev/null
	else
		pushd "${EGIT_DIR}" > /dev/null
		oldsha=$(git rev-parse ${UPSTREAM_BRANCH})
		git-2_update_repo
		cursha=$(git rev-parse ${UPSTREAM_BRANCH})
		# fetch updates
		echo "GIT update -->"
		echo " repository: ${EGIT_REPO_URI_SELECTED}"
		# write out message based on the revisions
		if [[ "${oldsha}" != "${cursha}" ]]; then
			echo " updating from commit: ${oldsha}"
			echo " to commit: ${cursha}"
		else
			echo " at the commit: ${cursha}"
		fi
		# print nice statistic of what was changed
		git --no-pager diff --stat ${oldsha}..${UPSTREAM_BRANCH}
		popd > /dev/null
	fi
	# export the version the repository is at
	export EGIT_VERSION="${cursha}"
	# log the repo state
	[[ ${EGIT_COMMIT} != ${EGIT_BRANCH} ]] \
		&& echo " commit: ${EGIT_COMMIT}"
	echo " branch: ${EGIT_BRANCH}"
	echo " storage directory: \"${EGIT_DIR}\""
	echo " checkout type: ${repo_type}"
}
# @FUNCTION: git_bootstrap
# @DESCRIPTION:
# Internal function that runs bootstrap command on unpacked source.
git-2_bootstrap() {
	debug-print-function ${FUNCNAME} "$@"
	# @ECLASS-VARIABLE: EGIT_BOOTSTRAP
	# @DESCRIPTION:
	# Command to be executed after checkout and clone of the specified
	# repository.
	# NOTE(review): the original comment here contained a stray fragment
	# ("environment the package will fail if there is no update...") that
	# appears to belong to another variable's documentation.
	if [[ -n ${EGIT_BOOTSTRAP} ]]; then
		pushd "${EGIT_SOURCEDIR}" > /dev/null
		einfo "Starting bootstrap"
		if [[ -f ${EGIT_BOOTSTRAP} ]]; then
			# we have file in the repo which we should execute
			debug-print "${FUNCNAME}: bootstraping with file \"${EGIT_BOOTSTRAP}\""
			if [[ -x ${EGIT_BOOTSTRAP} ]]; then
				eval "./${EGIT_BOOTSTRAP}" \
					|| die "${FUNCNAME}: bootstrap script failed"
			else
				eerror "\"${EGIT_BOOTSTRAP}\" is not executable."
				eerror "Report upstream, or bug ebuild maintainer to remove bootstrap command."
				die "\"${EGIT_BOOTSTRAP}\" is not executable"
			fi
		else
			# we execute some system command
			debug-print "${FUNCNAME}: bootstraping with commands \"${EGIT_BOOTSTRAP}\""
			eval "${EGIT_BOOTSTRAP}" \
				|| die "${FUNCNAME}: bootstrap commands failed"
		fi
		einfo "Bootstrap finished"
		popd > /dev/null
	fi
}
# @FUNCTION: git-2_migrate_repository
# @DESCRIPTION:
# Internal function migrating between bare and normal checkout repository.
# This is based on usage of EGIT_SUBMODULES, at least until they
# start to work with bare checkouts sanely.
# This function also set some global variables that differ between
# bare and non-bare checkout.
git-2_migrate_repository() {
	debug-print-function ${FUNCNAME} "$@"
	local target returnstate
	# first find out if we have submodules
	# (submodule support forces a full, non-bare checkout)
	if [[ -z ${EGIT_HAS_SUBMODULES} ]]; then
		target="bare"
	else
		target="full"
	fi
	# check if user didn't specify that we want non-bare repo
	if [[ -n ${EGIT_NONBARE} ]]; then
		target="full"
		EGIT_LOCAL_NONBARE="true"
	fi
	# test if we already have some repo and if so find out if we have
	# to migrate the data
	if [[ -d ${EGIT_DIR} ]]; then
		if [[ ${target} == bare && -d ${EGIT_DIR}/.git ]]; then
			debug-print "${FUNCNAME}: converting \"${EGIT_DIR}\" to bare copy"
			ebegin "Converting \"${EGIT_DIR}\" from non-bare to bare copy"
			# Promote the .git dir to a standalone bare repo.
			mv "${EGIT_DIR}/.git" "${EGIT_DIR}.bare"
			export GIT_DIR="${EGIT_DIR}.bare"
			git config core.bare true > /dev/null
			returnstate=$?
			unset GIT_DIR
			rm -rf "${EGIT_DIR}"
			mv "${EGIT_DIR}.bare" "${EGIT_DIR}"
			eend ${returnstate}
		fi
		if [[ ${target} == full && ! -d ${EGIT_DIR}/.git ]]; then
			debug-print "${FUNCNAME}: converting \"${EGIT_DIR}\" to non-bare copy"
			ebegin "Converting \"${EGIT_DIR}\" from bare to non-bare copy"
			# Local clone produces a working tree; then swap it in.
			git clone -l "${EGIT_DIR}" "${EGIT_DIR}.nonbare" > /dev/null
			returnstate=$?
			rm -rf "${EGIT_DIR}"
			mv "${EGIT_DIR}.nonbare" "${EGIT_DIR}"
			eend ${returnstate}
		fi
	fi
	# NOTE(review): returnstate is only assigned when a migration actually
	# ran; when unset, bash evaluates [[ "" -ne 0 ]] as false, so this
	# branch is skipped — fragile but apparently relied upon; confirm.
	if [[ ${returnstate} -ne 0 ]]; then
		debug-print "${FUNCNAME}: converting \"${EGIT_DIR}\" failed, removing to start from scratch"
		# migration failed, remove the EGIT_DIR to play it safe
		einfo "Migration failed, removing \"${EGIT_DIR}\" to start from scratch."
		rm -rf "${EGIT_DIR}"
	fi
	# set various options to work with both targets
	# (MOVE_COMMAND / EGIT_UPDATE_CMD / UPSTREAM_BRANCH are consumed by
	# git-2_move_source, git-2_update_repo and git-2_fetch)
	if [[ ${target} == bare ]]; then
		debug-print "${FUNCNAME}: working in bare repository for \"${EGIT_DIR}\""
		EGIT_LOCAL_OPTIONS+="${EGIT_OPTIONS} --bare"
		MOVE_COMMAND="git clone -l -s -n ${EGIT_DIR// /\\ }"
		EGIT_UPDATE_CMD="git fetch -t -f -u origin ${EGIT_BRANCH}:${EGIT_BRANCH}"
		UPSTREAM_BRANCH="${EGIT_BRANCH}"
	else
		debug-print "${FUNCNAME}: working in bare repository for non-bare \"${EGIT_DIR}\""
		MOVE_COMMAND="cp -pPR ."
		EGIT_LOCAL_OPTIONS="${EGIT_OPTIONS}"
		EGIT_UPDATE_CMD="git pull -f -u ${EGIT_OPTIONS}"
		UPSTREAM_BRANCH="origin/${EGIT_BRANCH}"
		EGIT_LOCAL_NONBARE="true"
	fi
}
# @FUNCTION: git-2_cleanup
# @DESCRIPTION:
# Internal function cleaning up all the global variables
# that are not required after the unpack has been done.
git-2_cleanup() {
debug-print-function ${FUNCNAME} "$@"
# Here we can unset only variables that are GLOBAL
# defined by the eclass, BUT NOT subject to change
# by user (like EGIT_PROJECT).
# If ebuild writer polutes his environment it is
# his problem only.
unset EGIT_DIR
unset MOVE_COMMAND
unset EGIT_LOCAL_OPTIONS
unset EGIT_UPDATE_CMD
unset UPSTREAM_BRANCH
unset EGIT_LOCAL_NONBARE
}
# @FUNCTION: git-2_src_unpack
# @DESCRIPTION:
# Default git src_unpack function.
git-2_src_unpack() {
	debug-print-function ${FUNCNAME} "$@"

	# Full unpack pipeline: prepare variables and storage, fetch or
	# update the clone, then populate and finalise ${EGIT_SOURCEDIR}.
	git-2_init_variables
	git-2_prepare_storedir
	git-2_migrate_repository
	git-2_fetch "$@"
	git-2_gc
	git-2_submodules
	git-2_move_source
	git-2_branch
	git-2_bootstrap
	git-2_cleanup

	echo ">>> Unpacked to ${EGIT_SOURCEDIR}"

	# Unless disabled, also unpack any regular SRC_URI distfiles.
	if [[ -z ${EGIT_NOUNPACK} ]]; then
		case ${EAPI:-0} in
			0|1) [[ -n ${A} ]] && unpack ${A} ;;
			*) default_src_unpack ;;
		esac
	fi
}

View File

@ -0,0 +1,479 @@
# Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/eclass/git.eclass,v 1.43 2010/02/24 01:16:35 abcd Exp $
# @ECLASS: git.eclass
# @MAINTAINER:
# Tomas Chvatal <scarabeus@gentoo.org>
# Donnie Berkholz <dberkholz@gentoo.org>
# @BLURB: This eclass provides functions for fetch and unpack git repositories
# @DESCRIPTION:
# The eclass is based on subversion eclass.
# If you use this eclass, the ${S} is ${WORKDIR}/${P}.
# It is necessary to define the EGIT_REPO_URI variable at least.
# @THANKS TO:
# Fernando J. Pereda <ferdy@gentoo.org>
inherit eutils

EGIT="git.eclass"

# We DEPEND on at least a bit recent git version
DEPEND=">=dev-vcs/git-1.6"

EXPORTED_FUNCTIONS="src_unpack"
case "${EAPI:-0}" in
	3|2) EXPORTED_FUNCTIONS="${EXPORTED_FUNCTIONS} src_prepare" ;;
	1|0) ;;
	# BUGFIX: the fallback pattern was ':' which can never match a real
	# EAPI value, so unsupported EAPIs were silently accepted; use '*'
	# so they are flagged via the unsatisfiable DEPEND as intended.
	*) DEPEND="EAPI-UNSUPPORTED" ;;
esac
EXPORT_FUNCTIONS ${EXPORTED_FUNCTIONS}

# define some nice defaults but only if nothing is set already
: ${HOMEPAGE:=http://git-scm.com/}

# @ECLASS-VARIABLE: EGIT_QUIET
# @DESCRIPTION:
# Enables user specified verbosity for the eclass elog information.
# The user just needs to add EGIT_QUIET="ON" to the /etc/make.conf.
: ${EGIT_QUIET:="OFF"}

# @ECLASS-VARIABLE: EGIT_STORE_DIR
# @DESCRIPTION:
# Storage directory for git sources.
# Can be redefined.
[[ -z ${EGIT_STORE_DIR} ]] && EGIT_STORE_DIR="${PORTAGE_ACTUAL_DISTDIR-${DISTDIR}}/git-src"
# @ECLASS-VARIABLE: EGIT_HAS_SUBMODULES
# @DESCRIPTION:
# Set this to "true" to enable the (slower) submodule support.
# This variable should be set before inheriting git.eclass
# (the value is executed as a command below, so it must be true/false).
: ${EGIT_HAS_SUBMODULES:=false}
# @ECLASS-VARIABLE: EGIT_FETCH_CMD
# @DESCRIPTION:
# Command for cloning the repository.
: ${EGIT_FETCH_CMD:="git clone"}
# @ECLASS-VARIABLE: EGIT_UPDATE_CMD
# @DESCRIPTION:
# Git fetch command.  Submodule mode works on a full checkout and pulls;
# the default (bare/mirror) mode only fetches.
if ${EGIT_HAS_SUBMODULES}; then
	EGIT_UPDATE_CMD="git pull -f -u"
else
	EGIT_UPDATE_CMD="git fetch -f -u"
fi
# @ECLASS-VARIABLE: EGIT_DIFFSTAT_CMD
# @DESCRIPTION:
# Git command for diffstat.
EGIT_DIFFSTAT_CMD="git --no-pager diff --stat"
# @ECLASS-VARIABLE: EGIT_OPTIONS
# @DESCRIPTION:
# This variable value is passed to clone and fetch.
: ${EGIT_OPTIONS:=}
# @ECLASS-VARIABLE: EGIT_MASTER
# @DESCRIPTION:
# Variable for specifying master branch.
# Useful when upstream doesn't have a master branch.
: ${EGIT_MASTER:=master}
# @ECLASS-VARIABLE: EGIT_REPO_URI
# @DESCRIPTION:
# URI for the repository
# e.g. http://foo, git://bar
# Supported protocols:
#		http://
#		https://
#		git://
#		git+ssh://
#		rsync://
#		ssh://
# A per-package override may be supplied from the environment via
# ${PN}_LIVE_REPO (with - and + in PN mapped to _).
eval X="\$${PN//[-+]/_}_LIVE_REPO"
if [[ ${X} = "" ]]; then
	EGIT_REPO_URI=${EGIT_REPO_URI:=}
else
	EGIT_REPO_URI="${X}"
fi
# @ECLASS-VARIABLE: EGIT_PROJECT
# @DESCRIPTION:
# Project name of your ebuild.
# Git eclass will check out the git repository like:
# 	${EGIT_STORE_DIR}/${EGIT_PROJECT}/${EGIT_REPO_URI##*/}
# so if you define EGIT_REPO_URI as http://git.collab.net/repo/git or
# http://git.collab.net/repo/git. and PN is subversion-git.
# it will check out like:
# 	${EGIT_STORE_DIR}/subversion
: ${EGIT_PROJECT:=${PN/-git}}
# @ECLASS-VARIABLE: EGIT_BOOTSTRAP
# @DESCRIPTION:
# bootstrap script or command like autogen.sh or etc...
: ${EGIT_BOOTSTRAP:=}
# @ECLASS-VARIABLE: EGIT_OFFLINE
# @DESCRIPTION:
# Set this variable to a non-empty value to disable the automatic updating of
# a GIT source tree. This is intended to be set outside the git source
# tree by users.  Falls back to the generic ESCM_OFFLINE switch.
EGIT_OFFLINE="${EGIT_OFFLINE:-${ESCM_OFFLINE}}"
# @ECLASS-VARIABLE: EGIT_PATCHES
# @DESCRIPTION:
# Similar to PATCHES array from base.eclass
# Only difference is that these patches are applied before bootstrap.
# Please take note that this variable should be a bash array.
# @ECLASS-VARIABLE: EGIT_BRANCH
# @DESCRIPTION:
# git eclass can fetch any branch in git_fetch().
# May be overridden from the environment via ${PN}_LIVE_BRANCH.
eval X="\$${PN//[-+]/_}_LIVE_BRANCH"
if [[ ${X} = "" ]]; then
	EGIT_BRANCH=${EGIT_BRANCH:=master}
else
	EGIT_BRANCH="${X}"
fi
# @ECLASS-VARIABLE: EGIT_COMMIT
# @DESCRIPTION:
# git eclass can checkout any commit.
# May be overridden from the environment via ${PN}_LIVE_COMMIT;
# defaults to the tip of EGIT_BRANCH.
eval X="\$${PN//[-+]/_}_LIVE_COMMIT"
if [[ ${X} = "" ]]; then
	: ${EGIT_COMMIT:=${EGIT_BRANCH}}
else
	EGIT_COMMIT="${X}"
fi
# @ECLASS-VARIABLE: EGIT_REPACK
# @DESCRIPTION:
# git eclass will repack objects to save disk space. However this can take a
# long time with VERY big repositories.
: ${EGIT_REPACK:=false}
# @ECLASS-VARIABLE: EGIT_PRUNE
# @DESCRIPTION:
# git eclass can prune the local clone. This is useful if upstream rewinds and
# rebases branches too often.
: ${EGIT_PRUNE:=false}
# @FUNCTION: git_submodules
# @DESCRIPTION:
# Internal helper: initialise and update git submodules, but only when
# submodule support is enabled via EGIT_HAS_SUBMODULES.
git_submodules() {
	${EGIT_HAS_SUBMODULES} || return 0
	debug-print "git submodule init"
	git submodule init
	debug-print "git submodule update"
	git submodule update
}
# @FUNCTION: git_branch
# @DESCRIPTION:
# Internal function that checks out the requested ref on a fresh local
# branch: origin/${EGIT_BRANCH} by default, or the explicit commit when
# EGIT_COMMIT differs from EGIT_BRANCH.
git_branch() {
	local branchname src
	if [[ ${EGIT_COMMIT} != ${EGIT_BRANCH} ]]; then
		# A specific commit was requested.
		branchname=tree-${EGIT_COMMIT}
		src=${EGIT_COMMIT}
	else
		branchname=branch-${EGIT_BRANCH}
		src=origin/${EGIT_BRANCH}
	fi
	debug-print "git checkout -b ${branchname} ${src}"
	git checkout -b ${branchname} ${src} || \
		die "${EGIT}: Could not run git checkout -b ${branchname} ${src}"
}
# @FUNCTION: git_fetch
# @DESCRIPTION:
# Gets repository from EGIT_REPO_URI and stores it in the specified
# EGIT_STORE_DIR, then populates ${S} from that clone and checks out the
# requested branch/commit.
git_fetch() {
	debug-print-function ${FUNCNAME} "$@"

	local GIT_DIR EGIT_CLONE_DIR oldsha1 cursha1 extra_clone_opts upstream_branch
	# Exporting GIT_DIR lets the git commands below operate on the bare
	# clone without chdir'ing into it (submodule mode uses a full clone
	# and pushd's instead).
	${EGIT_HAS_SUBMODULES} || export GIT_DIR

	# choose if user wants elog or just einfo.
	# NOTE(review): elogcmd is not declared local, so it leaks into the
	# global environment after this function returns.
	if [[ ${EGIT_QUIET} != OFF ]]; then
		elogcmd="einfo"
	else
		elogcmd="elog"
	fi

	# If we have same branch and the tree we can do --depth 1 clone
	# which outputs into really smaller data transfers.
	# Sadly we can't do shallow copy for now because quite a few packages need .git
	# folder.
	#[[ ${EGIT_COMMIT} = ${EGIT_BRANCH} ]] && \
	#	EGIT_FETCH_CMD="${EGIT_FETCH_CMD} --depth 1"

	# EGIT_TREE is the deprecated spelling of EGIT_COMMIT.
	if [[ ! -z ${EGIT_TREE} ]] ; then
		EGIT_COMMIT=${EGIT_TREE}
		ewarn "QA: Usage of deprecated EGIT_TREE variable detected."
		ewarn "QA: Use EGIT_COMMIT variable instead."
	fi

	# EGIT_REPO_URI is empty.
	[[ -z ${EGIT_REPO_URI} ]] && die "${EGIT}: EGIT_REPO_URI is empty."

	# check for the protocol or pull from a local repo.
	# NOTE(review): with -z this whitelist is only consulted when the URI
	# starts with ':'; local paths and all other URIs bypass it -- confirm
	# that is the intended "pull from a local repo" behaviour.
	if [[ -z ${EGIT_REPO_URI%%:*} ]] ; then
		case ${EGIT_REPO_URI%%:*} in
			git*|http|https|rsync|ssh) ;;
			*) die "${EGIT}: protocol for fetch from "${EGIT_REPO_URI%:*}" is not yet implemented in eclass." ;;
		esac
	fi

	# initial clone, we have to create master git storage directory and play
	# nicely with sandbox
	if [[ ! -d ${EGIT_STORE_DIR} ]] ; then
		debug-print "${FUNCNAME}: initial clone. creating git directory"
		addwrite /
		# TODO(ers): Remove this workaround once we figure out how to make
		# sure the directories are owned by the user instead of by root.
		local old_umask="`umask`"
		umask 002
		mkdir -p "${EGIT_STORE_DIR}" \
			|| die "${EGIT}: can't mkdir ${EGIT_STORE_DIR}."
		umask ${old_umask}
		export SANDBOX_WRITE="${SANDBOX_WRITE%%:/}"
	fi

	# -P resolves symlinks so the stored path is canonical.
	cd -P "${EGIT_STORE_DIR}" || die "${EGIT}: can't chdir to ${EGIT_STORE_DIR}"
	EGIT_STORE_DIR=${PWD}

	# allow writing into EGIT_STORE_DIR
	addwrite "${EGIT_STORE_DIR}"
	# strip a trailing slash off the repo URI
	[[ -z ${EGIT_REPO_URI##*/} ]] && EGIT_REPO_URI="${EGIT_REPO_URI%/}"
	EGIT_CLONE_DIR="${EGIT_PROJECT}"

	debug-print "${FUNCNAME}: EGIT_OPTIONS = \"${EGIT_OPTIONS}\""

	GIT_DIR="${EGIT_STORE_DIR}/${EGIT_CLONE_DIR}"
	# we also have to remove all shallow copied repositories
	# and fetch them again
	if [[ -e "${GIT_DIR}/shallow" ]]; then
		rm -rf "${GIT_DIR}"
		einfo "The ${EGIT_CLONE_DIR} was shallow copy. Refetching."
	fi
	# repack from bare copy to normal one
	if ${EGIT_HAS_SUBMODULES} && [[ -d ${GIT_DIR} && ! -d "${GIT_DIR}/.git/" ]]; then
		rm -rf "${GIT_DIR}"
		einfo "The ${EGIT_CLONE_DIR} was bare copy. Refetching."
	fi
	if ! ${EGIT_HAS_SUBMODULES} && [[ -d ${GIT_DIR} && -d ${GIT_DIR}/.git ]]; then
		rm -rf "${GIT_DIR}"
		einfo "The ${EGIT_CLONE_DIR} was not a bare copy. Refetching."
	fi

	if ${EGIT_HAS_SUBMODULES}; then
		upstream_branch=origin/${EGIT_BRANCH}
	else
		upstream_branch=${EGIT_BRANCH}
		# Note: Normally clones are created using --bare, which does not fetch
		# remote refs and only updates master. This is not okay. --mirror
		# changes that.
		extra_clone_opts=--mirror
	fi

	if [[ ! -d ${GIT_DIR} ]] ; then
		# first clone
		${elogcmd} "GIT NEW clone -->"
		${elogcmd} "   repository: ${EGIT_REPO_URI}"

		debug-print "${EGIT_FETCH_CMD} ${extra_clone_opts} ${EGIT_OPTIONS} \"${EGIT_REPO_URI}\" ${GIT_DIR}"

		# TODO(ers): Remove this workaround once we figure out how to make
		# sure the directories are owned by the user instead of by root.
		local old_umask="`umask`"
		umask 002
		${EGIT_FETCH_CMD} ${extra_clone_opts} ${EGIT_OPTIONS} "${EGIT_REPO_URI}" ${GIT_DIR} \
			|| die "${EGIT}: can't fetch from ${EGIT_REPO_URI}."
		umask ${old_umask}

		pushd "${GIT_DIR}" &> /dev/null
		cursha1=$(git rev-parse ${upstream_branch})
		${elogcmd} "   at the commit: ${cursha1}"

		git_submodules
		popd &> /dev/null
	elif [[ -n ${EGIT_OFFLINE} ]] ; then
		# Offline mode: just report the commit we already have.
		pushd "${GIT_DIR}" &> /dev/null
		cursha1=$(git rev-parse ${upstream_branch})
		${elogcmd} "GIT offline update -->"
		${elogcmd} "   repository: ${EGIT_REPO_URI}"
		${elogcmd} "   at the commit: ${cursha1}"
		popd &> /dev/null
	else
		pushd "${GIT_DIR}" &> /dev/null
		# Git urls might change, so unconditionally set it here
		git config remote.origin.url "${EGIT_REPO_URI}"

		# fetch updates
		${elogcmd} "GIT update -->"
		${elogcmd} "   repository: ${EGIT_REPO_URI}"

		oldsha1=$(git rev-parse ${upstream_branch})

		if ${EGIT_HAS_SUBMODULES}; then
			debug-print "${EGIT_UPDATE_CMD} ${EGIT_OPTIONS}"
			# fix branching: go back to master and drop all other local
			# branches before pulling
			git checkout ${EGIT_MASTER}
			for x in $(git branch |grep -v "* ${EGIT_MASTER}" |tr '\n' ' '); do
				git branch -D ${x}
			done
			${EGIT_UPDATE_CMD} ${EGIT_OPTIONS} \
				|| die "${EGIT}: can't update from ${EGIT_REPO_URI}."
		elif [[ "${EGIT_COMMIT}" = "${EGIT_BRANCH}" ]]; then
			# Following a branch tip: fetch just that branch.
			debug-print "${EGIT_UPDATE_CMD} ${EGIT_OPTIONS} origin ${EGIT_BRANCH}:${EGIT_BRANCH}"
			${EGIT_UPDATE_CMD} ${EGIT_OPTIONS} origin ${EGIT_BRANCH}:${EGIT_BRANCH} \
				|| die "${EGIT}: can't update from ${EGIT_REPO_URI}."
		else
			# Pinned to a commit: fetch everything from origin.
			debug-print "${EGIT_UPDATE_CMD} ${EGIT_OPTIONS} origin"
			${EGIT_UPDATE_CMD} ${EGIT_OPTIONS} origin \
				|| die "${EGIT}: can't update from ${EGIT_REPO_URI}."
		fi

		git_submodules
		cursha1=$(git rev-parse ${upstream_branch})

		# write out message based on the revisions
		if [[ ${oldsha1} != ${cursha1} ]]; then
			${elogcmd} "   updating from commit: ${oldsha1}"
			${elogcmd} "   to commit: ${cursha1}"
		else
			${elogcmd} "   at the commit: ${cursha1}"
			# @ECLASS_VARIABLE: LIVE_FAIL_FETCH_IF_REPO_NOT_UPDATED
			# @DESCRIPTION:
			# If this variable is set to TRUE in make.conf or somewhere in
			# environment the package will fail if there is no update, thus in
			# combination with --keep-going it would lead to not updating
			# packages that are up-to-date.
			# TODO: this can lead to issues if more projects/packages use same repo
			[[ ${LIVE_FAIL_FETCH_IF_REPO_NOT_UPDATED} = true ]] && \
				debug-print "${FUNCNAME}: Repository \"${EGIT_REPO_URI}\" is up-to-date. Skipping." && \
				die "${EGIT}: Repository \"${EGIT_REPO_URI}\" is up-to-date. Skipping."
		fi
		${EGIT_DIFFSTAT_CMD} ${oldsha1}..${upstream_branch}
		popd &> /dev/null
	fi

	pushd "${GIT_DIR}" &> /dev/null
	if ${EGIT_REPACK} || ${EGIT_PRUNE} ; then
		ebegin "Garbage collecting the repository"
		git gc $(${EGIT_PRUNE} && echo '--prune')
		eend $?
	fi
	popd &> /dev/null

	# export the git version
	export EGIT_VERSION="${cursha1}"

	# log the repo state
	[[ ${EGIT_COMMIT} != ${EGIT_BRANCH} ]] && elog "   commit: ${EGIT_COMMIT}"
	${elogcmd} "   branch: ${EGIT_BRANCH}"
	${elogcmd} "   storage directory: \"${GIT_DIR}\""

	# Populate ${S}: rsync for full (submodule) clones, a local
	# "git clone" for bare/mirror clones.
	if ${EGIT_HAS_SUBMODULES}; then
		pushd "${GIT_DIR}" &> /dev/null
		debug-print "rsync -rlpgo . \"${S}\""
		time rsync -rlpgo . "${S}"
		popd &> /dev/null
	else
		unset GIT_DIR
		debug-print "git clone -l -s -n \"${EGIT_STORE_DIR}/${EGIT_CLONE_DIR}\" \"${S}\""
		git clone -l -s -n "${EGIT_STORE_DIR}/${EGIT_CLONE_DIR}" "${S}"
	fi

	pushd "${S}" &> /dev/null
	git_branch
	# submodules always require net (thanks to branches changing)
	[[ -n ${EGIT_OFFLINE} ]] || git_submodules
	popd &> /dev/null

	echo ">>> Unpacked to ${S}"
}
# @FUNCTION: git_bootstrap
# @DESCRIPTION:
# Runs the bootstrap script or command held in EGIT_BOOTSTRAP, if any.
# Remember that whatever ends up in EGIT_BOOTSTRAP gets eval'd by bash.
git_bootstrap() {
	debug-print-function ${FUNCNAME} "$@"

	[[ -n ${EGIT_BOOTSTRAP} ]] || return 0

	pushd "${S}" > /dev/null
	einfo "Starting bootstrap"
	if [[ -f ${EGIT_BOOTSTRAP} ]]; then
		# A script shipped inside the repository.
		debug-print "$FUNCNAME: bootstraping with file \"${EGIT_BOOTSTRAP}\""
		if [[ ! -x ${EGIT_BOOTSTRAP} ]]; then
			eerror "\"${EGIT_BOOTSTRAP}\" is not executable."
			eerror "Report upstream, or bug ebuild maintainer to remove bootstrap command."
			die "${EGIT}: \"${EGIT_BOOTSTRAP}\" is not executable."
		fi
		eval "./${EGIT_BOOTSTRAP}" \
			|| die "${EGIT}: bootstrap script failed"
	else
		# Not a file: treat the value as a shell command line.
		debug-print "$FUNCNAME: bootstraping with commands \"${EGIT_BOOTSTRAP}\""
		eval "${EGIT_BOOTSTRAP}" \
			|| die "${EGIT}: bootstrap commands failed."
	fi
	einfo "Bootstrap finished"
	popd > /dev/null
}
# @FUNCTION: git_apply_patches
# @DESCRIPTION:
# Apply patches from the EGIT_PATCHES bash array.
# A plain space-separated string is still accepted for backwards
# compatibility (it is handed to epatch as a single argument); this
# fallback should go away once all ebuilds use a bash array.
git_apply_patches() {
	debug-print-function ${FUNCNAME} "$@"

	pushd "${S}" > /dev/null
	if (( ${#EGIT_PATCHES[@]} > 1 )) ; then
		for i in "${EGIT_PATCHES[@]}"; do
			debug-print "$FUNCNAME: git_autopatch: patching from ${i}"
			epatch "${i}"
		done
	elif [[ -n ${EGIT_PATCHES} ]]; then
		# no need for a loop when only a single value was passed.
		debug-print "$FUNCNAME: git_autopatch: patching from ${EGIT_PATCHES}"
		epatch "${EGIT_PATCHES}"
	fi
	popd > /dev/null
}
# @FUNCTION: git_src_unpack
# @DESCRIPTION:
# src_unpack function; also runs git_src_prepare here when the EAPI does
# not export a separate src_prepare phase (EAPI 0/1).
git_src_unpack() {
	debug-print-function ${FUNCNAME} "$@"

	git_fetch || die "${EGIT}: unknown problem in git_fetch()."
	if ! has src_prepare ${EXPORTED_FUNCTIONS}; then
		git_src_prepare
	fi
}
# @FUNCTION: git_src_prepare
# @DESCRIPTION:
# src_prepare function for git stuff. Patches, bootstrap...
git_src_prepare() {
	debug-print-function ${FUNCNAME} "$@"
	# Patches are applied before any bootstrap command runs.
	git_apply_patches
	git_bootstrap
}

View File

@ -0,0 +1,305 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Install Gobi firmware for Chromium OS
#
# @ECLASS-VARIABLE: GOBI_FIRMWARE_OEM
# @DESCRIPTION:
# OEM name for firmware to install
: ${GOBI_FIRMWARE_OEM:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_VID
# @DESCRIPTION:
# OEM Vendor ID
: ${GOBI_FIRMWARE_VID:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_PID
# @DESCRIPTION:
# OEM Product ID
: ${GOBI_FIRMWARE_PID:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_CARRIERS
# @DESCRIPTION:
# Install firmware for this list of carrier numbers
: ${GOBI_FIRMWARE_CARRIERS:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_ZIP_FILE
# @DESCRIPTION:
# Filename of zip file containing firmware
: ${GOBI_FIRMWARE_ZIP_FILE:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_FLAVOR
# @DESCRIPTION:
# The flavor (gps, xtra) to install
: ${GOBI_FIRMWARE_FLAVOR:="gps"}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_QDL
# @DESCRIPTION:
# Install the qdl program from the firmware zip file
: ${GOBI_FIRMWARE_QDL:="no"}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_DEFAULT_CARRIER
# @DESCRIPTION:
# Default carrier firmware to load if not set on modem
: ${GOBI_FIRMWARE_DEFAULT_CARRIER:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_APPS_DIR
# @DESCRIPTION:
# directory name for the .apps files
: ${GOBI_FIRMWARE_APPS_DIR:=""}
# Numeric carrier identifiers.
# NOTE(review): these constants are not referenced anywhere in the
# visible part of this eclass -- presumably consumed by ebuilds that set
# GOBI_FIRMWARE_CARRIERS; confirm before removing.
GOBI_FIRMWARE_CARRIER_VOD=0
GOBI_FIRMWARE_CARRIER_VZW=1
GOBI_FIRMWARE_CARRIER_ATT=2
GOBI_FIRMWARE_CARRIER_SPRINT=3
GOBI_FIRMWARE_CARRIER_TMO=4
GOBI_FIRMWARE_CARRIER_GEN=6
GOBI_FIRMWARE_CARRIER_TELLFON=7
GOBI_FIRMWARE_CARRIER_TELITAL=8
GOBI_FIRMWARE_CARRIER_ORANGE=9
GOBI_FIRMWARE_CARRIER_DOCO=12
GOBI_FIRMWARE_CARRIER_DELLX=15
GOBI_FIRMWARE_CARRIER_OMH=16
# Check for EAPI 2+
case "${EAPI:-0}" in
	4|3|2) ;;
	*) die "unsupported EAPI" ;;
esac
# Install the udev rule that hands Gobi qcserial ttyUSB devices to the
# qdlservice user/group so QDLService can access them unprivileged.
# Reads GOBI_FIRMWARE_OEM/VID/PID; writes into ${D}/etc/udev/rules.d.
gobi-firmware_install_udev_qcserial_rules() {
	local oem=${GOBI_FIRMWARE_OEM}
	local vid=${GOBI_FIRMWARE_VID}
	local pid=${GOBI_FIRMWARE_PID}
	local file=/etc/udev/rules.d/90-ttyusb-qcserial-${oem}.rules
	# BUGFIX: quote the redirection target so the path cannot be mangled
	# by word splitting or globbing on ${D}.
	cat > "${D}${file}" <<EOF
# 90-ttyusb-qcserial-${oem}.rules
# Sets ownership of Gobi ttyusb devices belonging to qcserial.
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
ACTION!="add", GOTO="ttyusb_qcserial_${oem}_end"
SUBSYSTEM!="tty", GOTO="ttyusb_qcserial_${oem}_end"
KERNEL!="ttyUSB[0-9]*", GOTO="ttyusb_qcserial_${oem}_end"
ATTRS{idVendor}=="${vid}", ATTRS{idProduct}=="${pid}", \
OWNER="qdlservice", GROUP="qdlservice"
LABEL="ttyusb_qcserial_${oem}_end"
EOF
}
# Install the udev rule that emits a gobi_serial_<oem> upstart event when
# a matching Gobi serial device appears; upstart then launches QDLService.
# Reads GOBI_FIRMWARE_OEM/VID/PID; writes into ${D}/etc/udev/rules.d.
gobi-firmware_install_udev_qdlservice_rules() {
	local oem=${GOBI_FIRMWARE_OEM}
	local vid=${GOBI_FIRMWARE_VID}
	local pid=${GOBI_FIRMWARE_PID}
	local file=/etc/udev/rules.d/99-qdlservice-${oem}.rules
	# BUGFIX: quote the redirection target so the path cannot be mangled
	# by word splitting or globbing on ${D}.
	cat > "${D}${file}" <<EOF
# 99-qdlservice-${oem}.rules
# Emits a signal in response to a Gobi serial device appearing. Upstart will run
# QDLService when it sees this signal.
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
ACTION!="add", GOTO="qdlservice_${oem}_end"
SUBSYSTEM!="tty", GOTO="qdlservice_${oem}_end"
KERNEL!="ttyUSB[0-9]*", GOTO="qdlservice_${oem}_end"
ATTRS{idVendor}=="${vid}", ATTRS{idProduct}=="${pid}", \
RUN+="/sbin/initctl emit gobi_serial_${oem} GOBIDEV=/dev/%k"
LABEL="qdlservice_${oem}_end"
EOF
}
# Install both udev rule files: device ownership (qcserial) and the
# QDLService trigger rule.
gobi-firmware_install_udev_rules() {
	# Create the rules directory in ${D} before the helpers write to it.
	dodir /etc/udev/rules.d
	gobi-firmware_install_udev_qcserial_rules
	gobi-firmware_install_udev_qdlservice_rules
}
# Install the upstart job that runs QDLService (with exponential backoff
# and UMA metrics reporting) when the gobi_serial_<oem> event fires.
# Reads GOBI_FIRMWARE_OEM; writes into ${D}/etc/init.
gobi-firmware_install_upstart_scripts() {
	dodir /etc/init
	# BUGFIX: declare 'file' local (it previously leaked as a global) and
	# quote the redirection target.
	local file=/etc/init/qdlservice-${GOBI_FIRMWARE_OEM}.conf
	cat > "${D}${file}" <<EOF
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Starts QDLService if a Gobi ttyusb device appears.
start on gobi_serial_${GOBI_FIRMWARE_OEM}
script
set +e
GOBIQDL="/opt/Qualcomm/QDLService2k/QDLService2k${GOBI_FIRMWARE_OEM}"
ret=1
attempt=0
readonly MAX_ATTEMPTS=10
while [ \$ret -ne 0 -a \$attempt -lt \$MAX_ATTEMPTS ]; do
# Exponential backoff - wait (2^attempt) - 1 seconds
sleep \$(((1 << \$attempt) - 1))
starttime=\$(date +%s%N)
/sbin/minijail0 -u qdlservice -g qdlservice -- "\$GOBIQDL" "\$GOBIDEV"
ret=\$?
endtime=\$(date +%s%N)
logger -t qdlservice "attempt \$attempt: \$ret"
attempt=\$((\$attempt + 1))
if [ \$ret -ne 0 ]; then
logger -t qdlservice "resetting..."
/opt/Qualcomm/bin/powercycle-all-gobis
fi
done
download_time=\$(((\$endtime - \$starttime) / 1000000))
METRICROOT=Network.3G.Gobi.FirmwareDownload
metrics_client \$METRICROOT.Time \$download_time 0 10000 20
metrics_client -e \$METRICROOT.Attempts \$attempt \$MAX_ATTEMPTS
exit \$ret
end script
EOF
}
# Extract (from a pre-extracted tree, a zip, or an RPM inside the zip) and
# install the Gobi 2000 firmware images under ${D}/opt/Qualcomm/Images2k,
# optionally installing the QDL download program as well.
gobi-firmware_install_firmware_files() {
	local oem=${GOBI_FIRMWARE_OEM}
	local install_qdl=${GOBI_FIRMWARE_QDL}
	local apps_dir=${GOBI_FIRMWARE_APPS_DIR}
	# If the apps directory is not specified, then use the carrier
	# directory. The apps directory should be set to UMTS for most
	# UMTS carriers because they share the same firmware
	if [ -z "${apps_dir}" ] ; then
		apps_dir=${GOBI_FIRMWARE_DEFAULT_CARRIER}
	fi
	#
	# installation directories.
	# We could consider installing to more standard locations
	# except that QDLService expects to find files in
	# /opt/Qualcomm.
	#
	local firmware_install_dir=${D}/opt/Qualcomm/Images2k
	local qdl_install_dir=${D}/opt/Qualcomm/QDLService2k
	# NOTE(review): the three log-related locals below are never used in
	# the rest of this function -- presumably leftovers; confirm.
	local log_install_dir=${D}/var/log/
	local oemlog_filename=QDLService2k${oem}.txt
	local log_filename=QDLService2k.txt
	if [ -d Images2k/${oem} ] ; then
		# We already have the firmware extracted, this is easy
		local base_firmware=Images2k/${oem}
		# Do not install qdl it will be build with SDK
		install_qdl="no"
	else
		[ -z "${GOBI_FIRMWARE_ZIP_FILE}" ] && \
			die "Must specify GOBI_FIRMWARE_ZIP_FILE"
		[ ! -r "${GOBI_FIRMWARE_ZIP_FILE}" ] && \
			die "${GOBI_FIRMWARE_ZIP_FILE} is unreadable"
		mkdir -p "${T}/${oem}"
		unzip ${GOBI_FIRMWARE_ZIP_FILE} -d "${T}/${oem}"
		if [ -d "${T}/${oem}/Images2k/${oem}" ] ; then
			local base_firmware="${T}/${oem}/Images2k/${oem}"
			install_qdl=no
		else
			# No plain image tree in the zip: look for an RPM inside it.
			# NOTE(review): rpmfile is not declared local.
			rpmfile=$(find "${T}/${oem}" -name \*.rpm -print)
			[ -z $rpmfile ] &&
				die "Could not find an RPM file in ${GOBI_FIRMWARE_ZIP_FILE}"
			# extract the rpm
			if [ -d ${oem}_rpm ] ; then
				rm -rf ${oem}_rpm
			fi
			mkdir -p ${oem}_rpm
			rpm2tar -O $rpmfile | tar -C ${oem}_rpm -xvf -
			local base_firmware=${oem}_rpm/opt/Qualcomm/Images2k/${oem}
		fi
	fi
	# make directories
	# NOTE(review): udev_rules_install_dir is never defined in this
	# eclass, so it expands to nothing here -- confirm it can be dropped.
	install -d ${firmware_install_dir}/${oem} \
		${qdl_install_dir} ${udev_rules_install_dir}
	# install firmware
	local flavor_firmware=${base_firmware}_${GOBI_FIRMWARE_FLAVOR}
	for carrier in ${GOBI_FIRMWARE_CARRIERS} UMTS ; do
		# copy the base firmware
		cp -af ${base_firmware}/${carrier} ${firmware_install_dir}/${oem}
		if [ -d ${flavor_firmware}/${carrier} ] ; then
			# overlay specific xtra/gps flavor files
			cp -af ${flavor_firmware}/${carrier} ${firmware_install_dir}/${oem}
		fi
	done
	# Copy DID file for this device
	cp ${base_firmware}/*.did ${firmware_install_dir}/${oem}
	# Create a DID file for generic GOBI devices
	did_file=$(ls ${base_firmware}/*.did | head -n1)
	# NOTE(review): -x tests for an *executable* file; -e or -r looks
	# intended here -- confirm before changing.
	if [ ! -x $did_file ] ; then
		# TODO(jglasgow): Move code for 05c6920b to dogfood ebuild
		cp $did_file ${firmware_install_dir}/${oem}/05c6920b.did
	fi
	# Set firmware and directory permissions
	find ${firmware_install_dir}/${oem} -type f -exec chmod 444 {} \;
	find ${firmware_install_dir}/${oem} -type d -exec chmod 555 {} \;
	# install firmware download program, and associated files
	if [ ${install_qdl} == "yes" ] ; then
		local qdl_dir=${oem}_rpm/opt/Qualcomm/QDLService2k
		install -t ${qdl_install_dir} \
			${qdl_dir}/QDLService2k${oem}
		ln -sf /opt/Qualcomm/QDLService2k/QDLService2k${oem} \
			${qdl_install_dir}/QDLService2kGeneric
	fi
	# Ensure the default firmware files exist and create Options2k${oem}.txt
	local image_dir=/opt/Qualcomm/Images2k/${oem}
	local amss_file=${image_dir}/${apps_dir}/amss.mbn
	local apps_file=${image_dir}/${apps_dir}/apps.mbn
	local uqcn_file=${image_dir}/${GOBI_FIRMWARE_DEFAULT_CARRIER}/uqcn.mbn
	for file in $amss_file $apps_file $uqcn_file ; do
		if [ ! -r ${D}${file} ] ; then
			die "Could not find file: $file in ${D}"
		fi
	done
	cat > Options2k${oem}.txt <<EOF
${amss_file}
${apps_file}
${uqcn_file}
EOF
	install -t ${qdl_install_dir} Options2k${oem}.txt
}
# src_install phase: validate the required eclass variables, then install
# udev rules, the upstart job, and the firmware images.
gobi-firmware_src_install() {
	# All of these eclass variables must be provided by the ebuild.
	local var
	for var in GOBI_FIRMWARE_DEFAULT_CARRIER GOBI_FIRMWARE_OEM \
			GOBI_FIRMWARE_VID GOBI_FIRMWARE_PID GOBI_FIRMWARE_CARRIERS ; do
		[ -z "${!var}" ] && die "Must specify ${var}"
	done

	gobi-firmware_install_udev_rules
	gobi-firmware_install_upstart_scripts
	gobi-firmware_install_firmware_files
}

EXPORT_FUNCTIONS src_install

View File

@ -0,0 +1,81 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Install Gobi firmware for Chromium OS
# See:
# https://sites.google.com/a/google.com/chromeos/for-team-members/systems/gobi3kfirmwaretarball
inherit cros-binary
# Gobi 3000 firmware tarballs are fetched via cros-binary in src_unpack.
# @ECLASS-VARIABLE: GOBI_FIRMWARE_VID
# @DESCRIPTION:
# OEM Vendor ID
: ${GOBI_FIRMWARE_VID:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_PID
# @DESCRIPTION:
# OEM Product ID
: ${GOBI_FIRMWARE_PID:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_HASH
# @DESCRIPTION:
# Tarball hash
: ${GOBI_FIRMWARE_HASH:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_CARRIERS
# @DESCRIPTION:
# Install firmware for this list of carrier names.
: ${GOBI_FIRMWARE_CARRIERS:=}
# Check for EAPI 2+
case "${EAPI:-0}" in
	4|3|2) ;;
	*) die "unsupported EAPI" ;;
esac
# Fetch the firmware tarball (<PN>-<vid>:<pid>-<hash>.tar.bz2) via
# cros-binary and copy it into ${S}.
# NOTE(review): URI_BASE is not defined in this eclass -- presumably
# supplied by the consuming ebuild; confirm.
gobi3k-firmware_src_unpack() {
	mkdir -p "${S}"
	local fn="${PN}-${GOBI_FIRMWARE_VID}:${GOBI_FIRMWARE_PID}"
	fn="${fn}-${GOBI_FIRMWARE_HASH}.tar.bz2"
	CROS_BINARY_URI="${URI_BASE}/${fn}"
	cros-binary_src_unpack
	local target="${CROS_BINARY_STORE_DIR}/${fn}"
	# BUGFIX: quote ${target}; the filename contains ':' and the unquoted
	# expansion was subject to word splitting and globbing.
	cp "${target}" "${S}"
}
# Unpack the fetched firmware tarball and install the requested carrier
# directories (plus UMTS) under ${D}/opt/Qualcomm/firmware/<vid>:<pid>,
# with files read-only (444) and directories 555.
gobi3k-firmware_install_firmware_files() {
	local vid=${GOBI_FIRMWARE_VID}
	local pid=${GOBI_FIRMWARE_PID}
	local hash=${GOBI_FIRMWARE_HASH}
	local fwid=${vid}:${pid}
	local firmware_install_dir=${D}/opt/Qualcomm/firmware/${fwid}
	mkdir -p firmware
	# tar rudely interprets x:y as a host:path specifier (!?) and tries to ssh
	# to it on your behalf (!!), so feed the archive through stdin instead.
	tar -C firmware -xvj < "gobi3k-firmware-${fwid}-${hash}.tar.bz2"
	local base_firmware=firmware/${fwid}
	# BUGFIX: dropped the stray ${udev_rules_install_dir} and ${oem}
	# references copied from gobi-firmware.eclass -- neither variable is
	# ever defined in this eclass, so they always expanded empty.
	install -d "${firmware_install_dir}"
	local carrier
	for carrier in ${GOBI_FIRMWARE_CARRIERS} UMTS ; do
		cp -af "${base_firmware}/${carrier}" "${firmware_install_dir}"
	done
	find "${firmware_install_dir}" -type f -exec chmod 444 {} \;
	find "${firmware_install_dir}" -type d -exec chmod 555 {} \;
}
# src_install phase: validate the required eclass variables, then install
# the firmware files.
gobi3k-firmware_src_install() {
	# All of these eclass variables must be provided by the ebuild.
	local var
	for var in GOBI_FIRMWARE_VID GOBI_FIRMWARE_PID GOBI_FIRMWARE_HASH \
			GOBI_FIRMWARE_CARRIERS ; do
		[ -z "${!var}" ] && die "Must specify ${var}"
	done

	gobi3k-firmware_install_firmware_files
}

EXPORT_FUNCTIONS src_unpack src_install

View File

@ -0,0 +1,905 @@
# Copyright 1999-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/eclass/linux-info.eclass,v 1.90 2011/08/22 04:46:32 vapier Exp $
# @ECLASS: linux-info.eclass
# @MAINTAINER:
# kernel-misc@gentoo.org
# @AUTHOR:
# Original author: John Mylchreest <johnm@gentoo.org>
# @BLURB: eclass used for accessing kernel related information
# @DESCRIPTION:
# This eclass is used as a central eclass for accessing kernel
# related information for source or binary already installed.
# It is vital for linux-mod.eclass to function correctly, and is split
# out so that any ebuild behaviour "templates" are abstracted out
# using additional eclasses.
#
# "kernel config" in this file means:
# The .config of the currently installed sources is used as the first
# preference, with a fall-back to bundled config (/proc/config.gz) if available.
# A couple of env vars are available to affect usage of this eclass
# These are as follows:
# @ECLASS-VARIABLE: KERNEL_DIR
# @DESCRIPTION:
# A string containing the directory of the target kernel sources. The default value is
# "/usr/src/linux"
# @ECLASS-VARIABLE: CONFIG_CHECK
# @DESCRIPTION:
# A string containing a list of .config options to check for before
# proceeding with the install.
#
# e.g.: CONFIG_CHECK="MTRR"
#
# You can also check that an option doesn't exist by
# prepending it with an exclamation mark (!).
#
# e.g.: CONFIG_CHECK="!MTRR"
#
# To simply warn about a missing option, prepend a '~'.
# It may be combined with '!'.
#
# In general, most checks should be non-fatal. The only time fatal checks should
# be used is for building kernel modules or cases that a compile will fail
# without the option.
#
# This is to allow usage of binary kernels, and minimal systems without kernel
# sources.
# @ECLASS-VARIABLE: ERROR_<CFG>
# @DESCRIPTION:
# A string containing the error message to display when the check against CONFIG_CHECK
# fails. <CFG> should reference the appropriate option used in CONFIG_CHECK.
#
# e.g.: ERROR_MTRR="MTRR exists in the .config but shouldn't!!"
# @ECLASS-VARIABLE: KBUILD_OUTPUT
# @DESCRIPTION:
# A string passed on commandline, or set from the kernel makefile. It contains the directory
# which is to be used as the kernel object directory.
# There are also a couple of variables which are set by this, and shouldn't be
# set by hand. These are as follows:
# @ECLASS-VARIABLE: KV_FULL
# @DESCRIPTION:
# A read-only variable. It's a string containing the full kernel version. ie: 2.6.9-gentoo-johnm-r1
# @ECLASS-VARIABLE: KV_MAJOR
# @DESCRIPTION:
# A read-only variable. It's an integer containing the kernel major version. ie: 2
# @ECLASS-VARIABLE: KV_MINOR
# @DESCRIPTION:
# A read-only variable. It's an integer containing the kernel minor version. ie: 6
# @ECLASS-VARIABLE: KV_PATCH
# @DESCRIPTION:
# A read-only variable. It's an integer containing the kernel patch version. ie: 9
# @ECLASS-VARIABLE: KV_EXTRA
# @DESCRIPTION:
# A read-only variable. It's a string containing the kernel EXTRAVERSION. ie: -gentoo
# @ECLASS-VARIABLE: KV_LOCAL
# @DESCRIPTION:
# A read-only variable. It's a string containing the kernel LOCALVERSION concatenation. ie: -johnm
# @ECLASS-VARIABLE: KV_DIR
# @DESCRIPTION:
# A read-only variable. It's a string containing the kernel source directory, will be null if
# KERNEL_DIR is invalid.
# @ECLASS-VARIABLE: KV_OUT_DIR
# @DESCRIPTION:
# A read-only variable. It's a string containing the kernel object directory, will be KV_DIR unless
# KBUILD_OUTPUT is used. This should be used for referencing .config.
# And to ensure all the weirdness with crosscompile
inherit toolchain-funcs versionator

EXPORT_FUNCTIONS pkg_setup

DEPEND=""
RDEPEND=""

# Overwritable environment Var's
# ---------------------------------------
# Default to the system sources; ${ROOT} is prepended so cross/ROOT
# installs look inside the target root rather than the build host.
KERNEL_DIR="${KERNEL_DIR:-${ROOT}usr/src/linux}"

# Bug fixes
# fix to bug #75034
# NOTE(review): on ppc/ppc64 this redirects the kernel's gas-capability
# probe output (TOUT) into the ebuild temp dir — presumably to keep the
# probe's scratch file inside ${T}; see the referenced bug.
case ${ARCH} in
ppc) BUILD_FIXES="${BUILD_FIXES} TOUT=${T}/.tmp_gas_check";;
ppc64) BUILD_FIXES="${BUILD_FIXES} TOUT=${T}/.tmp_gas_check";;
esac
# @FUNCTION: set_arch_to_kernel
# @DESCRIPTION:
# Set the env ARCH to match what the kernel expects.
set_arch_to_kernel() { export ARCH=$(tc-arch-kernel); }
# @FUNCTION: set_arch_to_portage
# @DESCRIPTION:
# Set the env ARCH to match what portage expects.
set_arch_to_portage() { export ARCH=$(tc-arch); }
# qeinfo "Message"
# -------------------
# qeinfo is a quiet einfo call when EBUILD_PHASE
# should not have visible output.
# qout <einfo|ewarn|eerror> <message...>
# Emit the message through the given helper, except during phases that
# must stay quiet (depend/clean/preinst), where nothing is printed.
qout() {
	local msgfunc="${1}"
	shift
	local msg="${*}"
	case "${EBUILD_PHASE}" in
		depend|clean|preinst) msg="" ;;
	esac
	[ -n "${msg}" ] && ${msgfunc} "${msg}"
}

# Quiet variants of the standard output helpers.
qeinfo() { qout einfo "${@}" ; }
qewarn() { qout ewarn "${@}" ; }
qeerror() { qout eerror "${@}" ; }
# File Functions
# ---------------------------------------
# @FUNCTION: getfilevar
# @USAGE: variable configfile
# @RETURN: the value of the variable
# @DESCRIPTION:
# It detects the value of the variable defined in the file configfile. This is
# done by including the configfile, and printing the variable with Make.
# It WILL break if your makefile has missing dependencies!
getfilevar() {
	# Extract the value of make variable $1 from makefile-syntax file $2 by
	# actually running make, so computed/recursive values expand correctly.
	local ERROR basefname basedname myARCH="${ARCH}"
	ERROR=0

	[ -z "${1}" ] && ERROR=1
	[ ! -f "${2}" ] && ERROR=1

	if [ "${ERROR}" = 1 ]
	then
		echo -e "\n"
		eerror "getfilevar requires 2 variables, with the second a valid file."
		eerror " getfilevar <VARIABLE> <CONFIGFILE>"
	else
		basefname="$(basename ${2})"
		basedname="$(dirname ${2})"
		# Unset ARCH while make parses a kernel Makefile so the Makefile's
		# own ARCH handling doesn't interfere; restored below.
		unset ARCH

		# Feed make a tiny makefile on stdin: a target that echoes $($1),
		# plus an include of the target file.
		echo -e "e:\\n\\t@echo \$(${1})\\ninclude ${basefname}" | \
			make -C "${basedname}" M="${S}" ${BUILD_FIXES} -s -f - 2>/dev/null

		ARCH=${myARCH}
	fi
}
# @FUNCTION: getfilevar_noexec
# @USAGE: variable configfile
# @RETURN: the value of the variable
# @DESCRIPTION:
# It detects the value of the variable defined in the file configfile.
# This is done with sed matching an expression only. If the variable is defined,
# you will run into problems. See getfilevar for those cases.
getfilevar_noexec() {
	# Extract the value of variable $1 from file $2 with sed only — no make
	# execution, so it is safe on untrusted/corrupt makefiles and also works
	# on gzipped files such as /proc/config.gz (selected via zcat below).
	local ERROR basefname basedname mycat myARCH="${ARCH}"
	ERROR=0
	mycat='cat'

	[ -z "${1}" ] && ERROR=1
	[ ! -f "${2}" ] && ERROR=1
	# A .gz suffix means the file must be streamed through zcat instead.
	[ "${2%.gz}" != "${2}" ] && mycat='zcat'

	if [ "${ERROR}" = 1 ]
	then
		echo -e "\n"
		eerror "getfilevar_noexec requires 2 variables, with the second a valid file."
		eerror " getfilevar_noexec <VARIABLE> <CONFIGFILE>"
	else
		# Match "VAR = value" / "VAR := value" lines, strip everything up to
		# and including '=' plus surrounding whitespace, and print the value.
		${mycat} "${2}" | \
		sed -n \
		-e "/^[[:space:]]*${1}[[:space:]]*:\\?=[[:space:]]*\(.*\)\$/{
			s,^[^=]*[[:space:]]*=[[:space:]]*,,g ;
			s,[[:space:]]*\$,,g ;
			p
		}"
	fi
}
# @ECLASS-VARIABLE: _LINUX_CONFIG_EXISTS_DONE
# @INTERNAL
# @DESCRIPTION:
# This is only set if one of the linux_config_*exists functions has been called.
# We use it for a QA warning that the check for a config has not been performed,
# as linux_chkconfig* in non-legacy mode WILL return an undefined value if no
# config is available at all.
_LINUX_CONFIG_EXISTS_DONE=

# Warn (once per offending call) when a linux_chkconfig_* helper is used
# before any linux_config_*exists check has run — without that check the
# chkconfig result is undefined when no config is available at all.
linux_config_qa_check() {
	local caller="$1"
	[ -n "${_LINUX_CONFIG_EXISTS_DONE}" ] && return 0
	ewarn "QA: You called ${caller} before any linux_config_exists!"
	ewarn "QA: The return value of ${caller} will NOT guaranteed later!"
}
# @FUNCTION: linux_config_src_exists
# @RETURN: true or false
# @DESCRIPTION:
# It returns true if .config exists in a build directory otherwise false
linux_config_src_exists() {
	export _LINUX_CONFIG_EXISTS_DONE=1
	test -s "${KV_OUT_DIR}/.config"
}

# @FUNCTION: linux_config_bin_exists
# @RETURN: true or false
# @DESCRIPTION:
# It returns true if .config exists in /proc, otherwise false
linux_config_bin_exists() {
	export _LINUX_CONFIG_EXISTS_DONE=1
	test -s "/proc/config.gz"
}

# @FUNCTION: linux_config_exists
# @RETURN: true or false
# @DESCRIPTION:
# It returns true if .config exists otherwise false
#
# This function MUST be checked before using any of the linux_chkconfig_*
# functions.
linux_config_exists() {
	# Source tree config takes precedence over the in-kernel /proc copy.
	linux_config_src_exists || linux_config_bin_exists
}
# @FUNCTION: require_configured_kernel
# @DESCRIPTION:
# This function verifies that the current kernel is configured (it checks against the existence of .config)
# otherwise it dies.
require_configured_kernel() {
	# Die unless a non-empty .config is present in KV_OUT_DIR. Note that a
	# bundled /proc/config.gz is deliberately NOT accepted here — building
	# (e.g. modules) needs a configured source tree, not just a config dump.
	if ! linux_config_src_exists; then
		qeerror "Could not find a usable .config in the kernel source directory."
		qeerror "Please ensure that ${KERNEL_DIR} points to a configured set of Linux sources."
		qeerror "If you are using KBUILD_OUTPUT, please set the environment var so that"
		qeerror "it points to the necessary object directory so that it might find .config."
		die "Kernel not configured; no .config found in ${KV_OUT_DIR}"
	fi
}
# @FUNCTION: linux_chkconfig_present
# @USAGE: option
# @RETURN: true or false
# @DESCRIPTION:
# It checks that CONFIG_<option>=y or CONFIG_<option>=m is present in the current kernel .config
# If linux_config_exists returns false, the results of this are UNDEFINED. You
# MUST call linux_config_exists first.
# True when CONFIG_<option> is enabled at all (=y or =m) in the kernel
# config; prefers the source-tree .config over /proc/config.gz.
linux_chkconfig_present() {
	linux_config_qa_check linux_chkconfig_present
	local cfgfile="${KV_OUT_DIR}/.config"
	[ -f "${cfgfile}" ] || cfgfile="/proc/config.gz"
	case "$(getfilevar_noexec CONFIG_${1} "${cfgfile}")" in
		y|m) return 0 ;;
		*)   return 1 ;;
	esac
}
# @FUNCTION: linux_chkconfig_module
# @USAGE: option
# @RETURN: true or false
# @DESCRIPTION:
# It checks that CONFIG_<option>=m is present in the current kernel .config
# If linux_config_exists returns false, the results of this are UNDEFINED. You
# MUST call linux_config_exists first.
# True only when CONFIG_<option>=m (built as a module) in the kernel
# config; prefers the source-tree .config over /proc/config.gz.
linux_chkconfig_module() {
	linux_config_qa_check linux_chkconfig_module
	local cfgfile="${KV_OUT_DIR}/.config"
	[ -f "${cfgfile}" ] || cfgfile="/proc/config.gz"
	case "$(getfilevar_noexec CONFIG_${1} "${cfgfile}")" in
		m) return 0 ;;
		*) return 1 ;;
	esac
}
# @FUNCTION: linux_chkconfig_builtin
# @USAGE: option
# @RETURN: true or false
# @DESCRIPTION:
# It checks that CONFIG_<option>=y is present in the current kernel .config
# If linux_config_exists returns false, the results of this are UNDEFINED. You
# MUST call linux_config_exists first.
# True only when CONFIG_<option>=y (built into the kernel) in the kernel
# config; prefers the source-tree .config over /proc/config.gz.
linux_chkconfig_builtin() {
	linux_config_qa_check linux_chkconfig_builtin
	local cfgfile="${KV_OUT_DIR}/.config"
	[ -f "${cfgfile}" ] || cfgfile="/proc/config.gz"
	case "$(getfilevar_noexec CONFIG_${1} "${cfgfile}")" in
		y) return 0 ;;
		*) return 1 ;;
	esac
}
# @FUNCTION: linux_chkconfig_string
# @USAGE: option
# @RETURN: CONFIG_<option>
# @DESCRIPTION:
# It prints the CONFIG_<option> value of the current kernel .config (it requires a configured kernel).
# If linux_config_exists returns false, the results of this are UNDEFINED. You
# MUST call linux_config_exists first.
# Print the raw value of CONFIG_<option> from the kernel config (empty if
# unset); prefers the source-tree .config over /proc/config.gz.
linux_chkconfig_string() {
	linux_config_qa_check linux_chkconfig_string
	local cfgfile="${KV_OUT_DIR}/.config"
	[ -f "${cfgfile}" ] || cfgfile="/proc/config.gz"
	getfilevar_noexec "CONFIG_${1}" "${cfgfile}"
}
# Versioning Functions
# ---------------------------------------
# @FUNCTION: kernel_is
# @USAGE: [-lt -gt -le -ge -eq] major_number [minor_number patch_number]
# @RETURN: true or false
# @DESCRIPTION:
# It returns true when the current kernel version satisfies the comparison against the passed version.
# -eq is the default comparison.
#
# @CODE
# For Example where KV = 2.6.9
# kernel_is 2 4 returns false
# kernel_is 2 returns true
# kernel_is 2 6 returns true
# kernel_is 2 6 8 returns false
# kernel_is 2 6 9 returns true
# @CODE
# got the gist yet?
kernel_is() {
	# if we haven't determined the version yet, we need to.
	linux-info_get_any_version

	local operator testagainst value x=0 y=0 z=0

	case ${1} in
	  -lt|lt) operator="-lt"; shift;;
	  -gt|gt) operator="-gt"; shift;;
	  -le|le) operator="-le"; shift;;
	  -ge|ge) operator="-ge"; shift;;
	  -eq|eq) operator="-eq"; shift;;
	       *) operator="-eq";;
	esac

	# Each supplied component and the matching KV_* component is left-padded
	# with zeros to three digits and concatenated, so one integer `test`
	# comparison answers the whole version check (e.g. 2.6.9 -> 002006009).
	for x in ${@}; do
		for((y=0; y<$((3 - ${#x})); y++)); do value="${value}0"; done
		value="${value}${x}"
		z=$((${z} + 1))

		case ${z} in
		  1) for((y=0; y<$((3 - ${#KV_MAJOR})); y++)); do testagainst="${testagainst}0"; done;
		     testagainst="${testagainst}${KV_MAJOR}";;
		  2) for((y=0; y<$((3 - ${#KV_MINOR})); y++)); do testagainst="${testagainst}0"; done;
		     testagainst="${testagainst}${KV_MINOR}";;
		  3) for((y=0; y<$((3 - ${#KV_PATCH})); y++)); do testagainst="${testagainst}0"; done;
		     testagainst="${testagainst}${KV_PATCH}";;
		  # More than major/minor/patch cannot be compared this way.
		  *) die "Error in kernel-2_kernel_is(): Too many parameters.";;
		esac
	done

	[ "${testagainst}" ${operator} "${value}" ] && return 0 || return 1
}
# Print the concatenated contents of every localversion* file in the
# directory given as $1, skipping editor backups (names containing '~').
get_localversion() {
	local file result=""
	for file in $(ls ${1}/localversion* 2>/dev/null); do
		case ${file} in
			*~*) continue ;;
		esac
		result="${result}$(<${file})"
	done
	# Drop the first space left over from list building.
	result=${result/ /}
	echo ${result}
}
# Check if the Makefile is valid for direct parsing.
# Check status results:
# - PASS, use 'getfilevar' to extract values
# - FAIL, use 'getfilevar_noexec' to extract values
# The check may fail if:
# - make is not present
# - corruption exists in the kernel makefile
# Decide how to read variables out of ${KERNEL_MAKEFILE}: when running
# make (getfilevar) and plain sed parsing (getfilevar_noexec) disagree on
# VERSION, the makefile cannot be trusted to execute and we fall back to
# the noexec parser.
get_makefile_extract_function() {
	local via_make via_sed
	via_make="$(getfilevar VERSION ${KERNEL_MAKEFILE})"
	via_sed="$(getfilevar_noexec VERSION ${KERNEL_MAKEFILE})"
	if [[ "${via_make}" == "${via_sed}" ]]; then
		echo 'getfilevar'
	else
		echo 'getfilevar_noexec'
	fi
}
# internal variable, so we know to only print the warning once
get_version_warning_done=

# @FUNCTION: get_version
# @DESCRIPTION:
# It gets the version of the kernel inside KERNEL_DIR and populates the KV_FULL variable
# (if KV_FULL is already set it does nothing).
#
# The kernel version variables (KV_MAJOR, KV_MINOR, KV_PATCH, KV_EXTRA and KV_LOCAL) are also set.
#
# The KV_DIR is set using the KERNEL_DIR env var, the KV_DIR_OUT is set using a valid
# KBUILD_OUTPUT (in a decreasing priority list, we look for the env var, makefile var or the
# symlink /lib/modules/${KV_MAJOR}.${KV_MINOR}.${KV_PATCH}${KV_EXTRA}/build).
get_version() {
	local kbuild_output mkfunc tmplocal

	# no need to execute this twice assuming KV_FULL is populated.
	# we can force by unsetting KV_FULL
	[ -n "${KV_FULL}" ] && return 0

	# if we dont know KV_FULL, then we need too.
	# make sure KV_DIR isnt set since we need to work it out via KERNEL_DIR
	unset KV_DIR

	# KV_DIR will contain the full path to the sources directory we should use
	[ -z "${get_version_warning_done}" ] && \
		qeinfo "Determining the location of the kernel source code"
	# A symlinked KERNEL_DIR is resolved first; the plain -d check below
	# then takes precedence when KERNEL_DIR is itself a real directory.
	[ -h "${KERNEL_DIR}" ] && KV_DIR="$(readlink -f ${KERNEL_DIR})"
	[ -d "${KERNEL_DIR}" ] && KV_DIR="${KERNEL_DIR}"

	if [ -z "${KV_DIR}" ]
	then
		if [ -z "${get_version_warning_done}" ]; then
			get_version_warning_done=1
			qeerror "Unable to find kernel sources at ${KERNEL_DIR}"
			#qeinfo "This package requires Linux sources."
			if [ "${KERNEL_DIR}" == "/usr/src/linux" ] ; then
				qeinfo "Please make sure that ${KERNEL_DIR} points at your running kernel, "
				qeinfo "(or the kernel you wish to build against)."
				qeinfo "Alternatively, set the KERNEL_DIR environment variable to the kernel sources location"
			else
				qeinfo "Please ensure that the KERNEL_DIR environment variable points at full Linux sources of the kernel you wish to compile against."
			fi
		fi
		return 1
	fi

	if [ -z "${get_version_warning_done}" ]; then
		qeinfo "Found kernel source directory:"
		qeinfo " ${KV_DIR}"
	fi

	if [ ! -s "${KV_DIR}/Makefile" ]
	then
		if [ -z "${get_version_warning_done}" ]; then
			get_version_warning_done=1
			qeerror "Could not find a Makefile in the kernel source directory."
			qeerror "Please ensure that ${KERNEL_DIR} points to a complete set of Linux sources"
		fi
		return 1
	fi

	# OK so now we know our sources directory, but they might be using
	# KBUILD_OUTPUT, and we need this for .config and localversions-*
	# so we better find it eh?
	# do we pass KBUILD_OUTPUT on the CLI?
	OUTPUT_DIR="${OUTPUT_DIR:-${KBUILD_OUTPUT}}"

	# keep track of it
	KERNEL_MAKEFILE="${KV_DIR}/Makefile"

	# Decide the function used to extract makefile variables.
	mkfunc="$(get_makefile_extract_function "${KERNEL_MAKEFILE}")"

	# And if we didn't pass it, we can take a nosey in the Makefile
	kbuild_output="$(${mkfunc} KBUILD_OUTPUT ${KERNEL_MAKEFILE})"
	if [ -e "${kbuild_output}" ]; then
		OUTPUT_DIR="${OUTPUT_DIR:-${kbuild_output}}"
	fi

	# And contrary to existing functions I feel we shouldn't trust the
	# directory name to find version information as this seems insane.
	# so we parse ${KERNEL_MAKEFILE}
	KV_MAJOR="$(${mkfunc} VERSION ${KERNEL_MAKEFILE})"
	KV_MINOR="$(${mkfunc} PATCHLEVEL ${KERNEL_MAKEFILE})"
	KV_PATCH="$(${mkfunc} SUBLEVEL ${KERNEL_MAKEFILE})"
	KV_EXTRA="$(${mkfunc} EXTRAVERSION ${KERNEL_MAKEFILE})"

	if [ -z "${KV_MAJOR}" -o -z "${KV_MINOR}" -o -z "${KV_PATCH}" ]
	then
		if [ -z "${get_version_warning_done}" ]; then
			get_version_warning_done=1
			qeerror "Could not detect kernel version."
			qeerror "Please ensure that ${KERNEL_DIR} points to a complete set of Linux sources."
		fi
		return 1
	fi

	# and in newer versions we can also pull LOCALVERSION if it is set.
	# but before we do this, we need to find if we use a different object directory.
	# This *WILL* break if the user is using localversions, but we assume it was
	# caught before this if they are.
	OUTPUT_DIR="${OUTPUT_DIR:-${ROOT}/lib/modules/${KV_MAJOR}.${KV_MINOR}.${KV_PATCH}${KV_EXTRA}/build}"

	# Same symlink-then-directory resolution as for KERNEL_DIR above.
	[ -h "${OUTPUT_DIR}" ] && KV_OUT_DIR="$(readlink -f ${OUTPUT_DIR})"
	[ -d "${OUTPUT_DIR}" ] && KV_OUT_DIR="${OUTPUT_DIR}"
	if [ -n "${KV_OUT_DIR}" ];
	then
		qeinfo "Found kernel object directory:"
		qeinfo " ${KV_OUT_DIR}"
	fi
	# and if we STILL have not got it, then we better just set it to KV_DIR
	KV_OUT_DIR="${KV_OUT_DIR:-${KV_DIR}}"

	# Grab the kernel release from the output directory.
	# TODO: we MUST detect kernel.release being out of date, and 'return 1' from
	# this function.
	if [ -s "${KV_OUT_DIR}"/include/config/kernel.release ]; then
		KV_LOCAL=$(<"${KV_OUT_DIR}"/include/config/kernel.release)
	elif [ -s "${KV_OUT_DIR}"/.kernelrelease ]; then
		KV_LOCAL=$(<"${KV_OUT_DIR}"/.kernelrelease)
	else
		KV_LOCAL=
	fi

	# KV_LOCAL currently contains the full release; discard the first bits.
	tmplocal=${KV_LOCAL#${KV_MAJOR}.${KV_MINOR}.${KV_PATCH}${KV_EXTRA}}

	# If the updated local version was not changed, the tree is not prepared.
	# Clear out KV_LOCAL in that case.
	# TODO: this does not detect a change in the localversion part between
	# kernel.release and the value that would be generated.
	if [ "$KV_LOCAL" = "$tmplocal" ]; then
		KV_LOCAL=
	else
		KV_LOCAL=$tmplocal
	fi

	# And we should set KV_FULL to the full expanded version
	KV_FULL="${KV_MAJOR}.${KV_MINOR}.${KV_PATCH}${KV_EXTRA}${KV_LOCAL}"

	qeinfo "Found sources for kernel version:"
	qeinfo " ${KV_FULL}"

	return 0
}
# @FUNCTION: get_running_version
# @DESCRIPTION:
# It gets the version of the current running kernel and the result is the same as get_version() if the
# function can find the sources.
get_running_version() {
	KV_FULL=$(uname -r)

	# Prefer the installed module metadata of the running kernel so that
	# KERNEL_DIR / KBUILD_OUTPUT line up with it; each branch then defers
	# to get_version for the real parsing.
	if [[ -f ${ROOT}/lib/modules/${KV_FULL}/source/Makefile && -f ${ROOT}/lib/modules/${KV_FULL}/build/Makefile ]]; then
		KERNEL_DIR=$(readlink -f ${ROOT}/lib/modules/${KV_FULL}/source)
		KBUILD_OUTPUT=$(readlink -f ${ROOT}/lib/modules/${KV_FULL}/build)
		unset KV_FULL
		get_version
		return $?
	elif [[ -f ${ROOT}/lib/modules/${KV_FULL}/source/Makefile ]]; then
		KERNEL_DIR=$(readlink -f ${ROOT}/lib/modules/${KV_FULL}/source)
		unset KV_FULL
		get_version
		return $?
	elif [[ -f ${ROOT}/lib/modules/${KV_FULL}/build/Makefile ]]; then
		KERNEL_DIR=$(readlink -f ${ROOT}/lib/modules/${KV_FULL}/build)
		unset KV_FULL
		get_version
		return $?
	else
		# No sources installed: derive the components from `uname -r`
		# alone (KV_DIR/KV_OUT_DIR remain unset in this case).
		KV_MAJOR=$(get_version_component_range 1 ${KV_FULL})
		KV_MINOR=$(get_version_component_range 2 ${KV_FULL})
		KV_PATCH=$(get_version_component_range 3 ${KV_FULL})
		# Strip any "-extra" suffix that rode along in the third component.
		KV_PATCH=${KV_PATCH//-*}
		KV_EXTRA="${KV_FULL#${KV_MAJOR}.${KV_MINOR}.${KV_PATCH}}"
	fi
	return 0
}
# This next function is named with the eclass prefix to avoid conflicts with
# some old versionator-like eclass functions.
# @FUNCTION: linux-info_get_any_version
# @DESCRIPTION:
# This attempts to find the version of the sources, and otherwise falls back to
# the version of the running kernel.
# Populate the KV_* variables from the configured sources, falling back to
# the running kernel's version when no usable source tree is found.
linux-info_get_any_version() {
	if ! get_version; then
		ewarn "Unable to calculate Linux Kernel version for build, attempting to use running version"
		get_running_version
	fi
}
# ebuild check functions
# ---------------------------------------
# @FUNCTION: check_kernel_built
# @DESCRIPTION:
# This function verifies that the current kernel sources have been already prepared otherwise it dies.
check_kernel_built() {
	# if we haven't determined the version yet, we need to
	# NOTE(review): require_configured_kernel inspects ${KV_OUT_DIR}/.config,
	# but KV_OUT_DIR is only populated by get_version below — confirm callers
	# have already run get_version (e.g. via pkg_setup) before relying on this.
	require_configured_kernel
	get_version

	# version.h only appears after `make oldconfig`/`modules_prepare`, so it
	# serves as the "tree has been prepared" marker.
	if [ ! -f "${KV_OUT_DIR}/include/linux/version.h" ]
	then
		eerror "These sources have not yet been prepared."
		eerror "We cannot build against an unprepared tree."
		eerror "To resolve this, please type the following:"
		eerror
		eerror "# cd ${KV_DIR}"
		eerror "# make oldconfig"
		eerror "# make modules_prepare"
		eerror
		eerror "Then please try merging this module again."
		die "Kernel sources need compiling first"
	fi
}
# @FUNCTION: check_modules_supported
# @DESCRIPTION:
# This function verifies that the current kernel support modules (it checks CONFIG_MODULES=y) otherwise it dies.
# Die unless the configured kernel has loadable-module support
# (CONFIG_MODULES=y).
check_modules_supported() {
	# A configured, versioned kernel is required before querying .config.
	require_configured_kernel
	get_version

	linux_chkconfig_builtin "MODULES" && return 0

	eerror "These sources do not support loading external modules."
	eerror "to be able to use this module please enable \"Loadable modules support\""
	eerror "in your kernel, recompile and then try merging this module again."
	die "No support for external modules in ${KV_FULL} config"
}
# @FUNCTION: check_extra_config
# @DESCRIPTION:
# It checks the kernel config options specified by CONFIG_CHECK. It dies only when a required config option (i.e.
# the prefix ~ is not used) doesn't satisfy the directive.
# Check the kernel config options listed in CONFIG_CHECK. Dies only when a
# required option (no '~' prefix) is not satisfied; '~'-prefixed options
# merely warn, '!' negates, and '@' marks module-name rework entries.
# Fix: the function saved _LINUX_CONFIG_EXISTS_DONE but restored it via
# `export LINUX_CONFIG_EXISTS_DONE` (missing leading underscore) on all
# three exit paths, so the QA-tracker state was never actually restored.
# Also declares local_error/msg local so the first loop no longer leaks them.
check_extra_config() {
	local config negate die error reworkmodulenames local_error msg
	local soft_errors_count=0 hard_errors_count=0 config_required=0
	# store the value of the QA check, because otherwise we won't catch usages
	# after if check_extra_config is called AND other direct calls are done
	# later.
	local old_LINUX_CONFIG_EXISTS_DONE="${_LINUX_CONFIG_EXISTS_DONE}"

	# if we haven't determined the version yet, we need to
	linux-info_get_any_version

	# Determine if we really need a .config. The only time when we don't need
	# one is when all of the CONFIG_CHECK options are prefixed with "~".
	for config in ${CONFIG_CHECK}
	do
		if [[ "${config:0:1}" != "~" ]]; then
			config_required=1
			break
		fi
	done

	if [[ ${config_required} == 0 ]]; then
		# In the case where we don't require a .config, we can now bail out
		# if the user has no .config as there is nothing to do. Otherwise
		# code later will cause a failure due to missing .config.
		if ! linux_config_exists; then
			ewarn "Unable to check for the following kernel config options due"
			ewarn "to absence of any configured kernel sources or compiled"
			ewarn "config:"
			for config in ${CONFIG_CHECK}; do
				local_error="ERROR_${config#\~}"
				msg="${!local_error}"
				if [[ "x${msg}" == "x" ]]; then
					local_error="WARNING_${config#\~}"
					msg="${!local_error}"
				fi
				ewarn " - ${config#\~}${msg:+ - }${msg}"
			done
			ewarn "You're on your own to make sure they are set if needed."
			# Restore the QA tracker (note the leading underscore).
			export _LINUX_CONFIG_EXISTS_DONE="${old_LINUX_CONFIG_EXISTS_DONE}"
			return 0
		fi
	else
		require_configured_kernel
	fi

	einfo "Checking for suitable kernel configuration options..."

	for config in ${CONFIG_CHECK}
	do
		# if we specify any fatal, ensure we honor them
		die=1
		error=0
		negate=0
		reworkmodulenames=0

		if [[ ${config:0:1} == "~" ]]; then
			die=0
			config=${config:1}
		elif [[ ${config:0:1} == "@" ]]; then
			die=0
			reworkmodulenames=1
			config=${config:1}
		fi
		if [[ ${config:0:1} == "!" ]]; then
			negate=1
			config=${config:1}
		fi

		if [[ ${negate} == 1 ]]; then
			linux_chkconfig_present ${config} && error=2
		elif [[ ${reworkmodulenames} == 1 ]]; then
			local temp_config="${config//*:}" i n
			config="${config//:*}"
			if linux_chkconfig_present ${config}; then
				for i in ${MODULE_NAMES}; do
					n="${i//${temp_config}}"
					[[ -z ${n//\(*} ]] && \
						MODULE_IGNORE="${MODULE_IGNORE} ${temp_config}"
				done
				error=2
			fi
		else
			linux_chkconfig_present ${config} || error=1
		fi

		if [[ ${error} > 0 ]]; then
			local report_func="eerror" local_error
			local_error="ERROR_${config}"
			local_error="${!local_error}"

			if [[ -z "${local_error}" ]]; then
				# using old, deprecated format.
				local_error="${config}_ERROR"
				local_error="${!local_error}"
			fi

			if [[ ${die} == 0 && -z "${local_error}" ]]; then
				#soft errors can be warnings
				local_error="WARNING_${config}"
				local_error="${!local_error}"
				if [[ -n "${local_error}" ]] ; then
					report_func="ewarn"
				fi
			fi

			if [[ -z "${local_error}" ]]; then
				[[ ${error} == 1 ]] \
					&& local_error="is not set when it should be." \
					|| local_error="should not be set. But it is."
				local_error="CONFIG_${config}:\t ${local_error}"
			fi

			if [[ ${die} == 0 ]]; then
				${report_func} " ${local_error}"
				soft_errors_count=$[soft_errors_count + 1]
			else
				${report_func} " ${local_error}"
				hard_errors_count=$[hard_errors_count + 1]
			fi
		fi
	done

	if [[ ${hard_errors_count} > 0 ]]; then
		eerror "Please check to make sure these options are set correctly."
		eerror "Failure to do so may cause unexpected problems."
		eerror "Once you have satisfied these options, please try merging"
		eerror "this package again."
		export _LINUX_CONFIG_EXISTS_DONE="${old_LINUX_CONFIG_EXISTS_DONE}"
		die "Incorrect kernel configuration options"
	elif [[ ${soft_errors_count} > 0 ]]; then
		ewarn "Please check to make sure these options are set correctly."
		ewarn "Failure to do so may cause unexpected problems."
	else
		eend 0
	fi
	export _LINUX_CONFIG_EXISTS_DONE="${old_LINUX_CONFIG_EXISTS_DONE}"
}
# Verify that zlib inflate/deflate support is actually linked into a 2.4
# kernel (dies otherwise). Fixes: linux_chkconfig_builtin prepends CONFIG_
# itself, so the old calls checked CONFIG_CONFIG_ZLIB_* and could never
# succeed; the check's exit status is now captured before eend so the
# subsequent die test no longer reads eend's status; the doubled word in
# the error message is corrected.
check_zlibinflate() {
	# if we haven't determined the version yet, we need to
	require_configured_kernel
	get_version

	# although I restructured this code - I really really really dont support it!

	# bug #27882 - zlib routines are only linked into the kernel
	# if something compiled into the kernel calls them
	#
	# plus, for the cloop module, it appears that there's no way
	# to get cloop.o to include a static zlib if CONFIG_MODVERSIONS
	# is on

	local INFLATE
	local DEFLATE
	local result

	einfo "Determining the usability of ZLIB_INFLATE support in your kernel"

	ebegin "checking ZLIB_INFLATE"
	linux_chkconfig_builtin ZLIB_INFLATE
	result=$?
	eend ${result}
	[ ${result} != 0 ] && die

	ebegin "checking ZLIB_DEFLATE"
	linux_chkconfig_builtin ZLIB_DEFLATE
	result=$?
	eend ${result}
	[ ${result} != 0 ] && die

	local LINENO_START
	local LINENO_END
	local SYMBOLS
	local x

	# Pull the group of config symbols that guard ZLIB_INFLATE out of the
	# 2.4 lib/Config.in between the enclosing 'if [' and the dep line.
	LINENO_END="$(grep -n 'CONFIG_ZLIB_INFLATE y' ${KV_DIR}/lib/Config.in | cut -d : -f 1)"
	LINENO_START="$(head -n $LINENO_END ${KV_DIR}/lib/Config.in | grep -n 'if \[' | tail -n 1 | cut -d : -f 1)"
	(( LINENO_AMOUNT = $LINENO_END - $LINENO_START ))
	(( LINENO_END = $LINENO_END - 1 ))
	SYMBOLS="$(head -n $LINENO_END ${KV_DIR}/lib/Config.in | tail -n $LINENO_AMOUNT | sed -e 's/^.*\(CONFIG_[^\" ]*\).*/\1/g;')"

	# okay, now we have a list of symbols
	# we need to check each one in turn, to see whether it is set or not
	for x in $SYMBOLS ; do
		if [ "${!x}" = "y" ]; then
			# we have a winner!
			einfo "${x} ensures zlib is linked into your kernel - excellent"
			return 0
		fi
	done

	eerror
	eerror "This kernel module requires ZLIB library support."
	eerror "You have enabled zlib support in your kernel, but haven't enabled"
	eerror "any option that will ensure that zlib is linked into your"
	eerror "kernel."
	eerror
	eerror "Please ensure that you enable at least one of these options:"
	eerror
	for x in $SYMBOLS ; do
		eerror " * $x"
	done
	eerror
	eerror "Please remember to recompile and install your kernel, and reboot"
	eerror "into your new kernel before attempting to load this kernel module."
	die "Kernel doesn't include zlib support"
}
################################
# Default pkg_setup
# Also used when inheriting linux-mod to force a get_version call
# @FUNCTION: linux-info_pkg_setup
# @DESCRIPTION:
# Force a get_version() call when inherited from linux-mod.eclass and then check if the kernel is configured
# to support the options specified in CONFIG_CHECK (if not null)
linux-info_pkg_setup() {
	# Determine the kernel version (sources or, failing that, the running
	# kernel) before any checks run.
	linux-info_get_any_version

	# gcc-4 cannot build against 2.4-series kernels; warn loudly (with a
	# pause so it is seen) but do not die.
	if kernel_is 2 4; then
		if [ "$( gcc-major-version )" -eq "4" ] ; then
			echo
			ewarn "Be warned !! >=sys-devel/gcc-4.0.0 isn't supported with"
			ewarn "linux-2.4 (or modules building against a linux-2.4 kernel)!"
			echo
			ewarn "Either switch to another gcc-version (via gcc-config) or use a"
			ewarn "newer kernel that supports gcc-4."
			echo
			ewarn "Also be aware that bugreports about gcc-4 not working"
			ewarn "with linux-2.4 based ebuilds will be closed as INVALID!"
			echo
			epause 10
		fi
	fi

	# Only run the (possibly fatal) config checks when the ebuild asked.
	[ -n "${CONFIG_CHECK}" ] && check_extra_config;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,525 @@
# Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/eclass/subversion.eclass,v 1.67 2009/05/10 20:33:38 arfrever Exp $
# @ECLASS: subversion.eclass
# @MAINTAINER:
# Akinori Hattori <hattya@gentoo.org>
# Bo Ørsted Andresen <zlin@gentoo.org>
# Arfrever Frehtes Taifersar Arahesis <arfrever@gentoo.org>
#
# Original Author: Akinori Hattori <hattya@gentoo.org>
#
# @BLURB: The subversion eclass is written to fetch software sources from subversion repositories
# @DESCRIPTION:
# The subversion eclass provides functions to fetch, patch and bootstrap
# software sources from subversion repositories.
inherit eutils
ESVN="${ECLASS}"
case "${EAPI:-0}" in
0|1)
EXPORT_FUNCTIONS src_unpack pkg_preinst
;;
*)
EXPORT_FUNCTIONS src_unpack src_prepare pkg_preinst
;;
esac
DESCRIPTION="Based on the ${ECLASS} eclass"
SUBVERSION_DEPEND="dev-vcs/subversion[webdav-neon,webdav-serf]
net-misc/rsync"
if [[ -z "${ESVN_DISABLE_DEPENDENCIES}" ]]; then
DEPEND="${SUBVERSION_DEPEND}"
fi
# @ECLASS-VARIABLE: ESVN_STORE_DIR
# @DESCRIPTION:
# subversion sources store directory. Users may override this in /etc/make.conf
if [[ -z ${ESVN_STORE_DIR} ]]; then
ESVN_STORE_DIR="${PORTAGE_ACTUAL_DISTDIR:-${DISTDIR}}/svn-src"
# Pick a directory with the same permissions now and in the future. Note
# that we cannot just use USERNAME because the eventual effective user when
# doing the svn commands may change - PORTAGE_USERNAME has not taken effect
# yet. Further complicating things, if features userpriv is not set,
# PORTAGE_USERNAME is going to be ignored. We assume that if we enable
# userpriv in the future, we will also set PORTAGE_USERNAME to something
# other than "portage".
# TODO: remove this once we are using consistent users and userpriv settings
# for emerge and emerge-${BOARD}.
ESVN_STORE_DIR="${ESVN_STORE_DIR}/${PORTAGE_USERNAME:-portage}"
fi
# @ECLASS-VARIABLE: ESVN_FETCH_CMD
# @DESCRIPTION:
# subversion checkout command
ESVN_FETCH_CMD="svn checkout"
# @ECLASS-VARIABLE: ESVN_UPDATE_CMD
# @DESCRIPTION:
# subversion update command
ESVN_UPDATE_CMD="svn update"
# @ECLASS-VARIABLE: ESVN_SWITCH_CMD
# @DESCRIPTION:
# subversion switch command
ESVN_SWITCH_CMD="svn switch"
# @ECLASS-VARIABLE: ESVN_OPTIONS
# @DESCRIPTION:
# the options passed to checkout or update. If you want a specific revision see
# ESVN_REPO_URI instead of using -rREV.
ESVN_OPTIONS="${ESVN_OPTIONS:-}"
# @ECLASS-VARIABLE: ESVN_REPO_URI
# @DESCRIPTION:
# repository uri
#
# e.g. http://foo/trunk, svn://bar/trunk, svn://bar/branch/foo@1234
#
# supported protocols:
# http://
# https://
# svn://
# svn+ssh://
#
# to peg to a specific revision, append @REV to the repo's uri
ESVN_REPO_URI="${ESVN_REPO_URI:-}"
# @ECLASS-VARIABLE: ESVN_REVISION
# @DESCRIPTION:
# User configurable revision to check out or update to from the repository
#
# Useful for live svn or trunk svn ebuilds allowing the user to peg
# to a specific revision
#
# Note: This should never be set in an ebuild!
ESVN_REVISION="${ESVN_REVISION:-}"
# @ECLASS-VARIABLE: ESVN_USER
# @DESCRIPTION:
# User name
ESVN_USER="${ESVN_USER:-}"
# @ECLASS-VARIABLE: ESVN_PASSWORD
# @DESCRIPTION:
# Password
ESVN_PASSWORD="${ESVN_PASSWORD:-}"
# @ECLASS-VARIABLE: ESVN_PROJECT
# @DESCRIPTION:
# project name of your ebuild (= name space)
#
# subversion eclass will check out the subversion repository like:
#
# ${ESVN_STORE_DIR}/${ESVN_PROJECT}/${ESVN_REPO_URI##*/}
#
# so if you define ESVN_REPO_URI as http://svn.collab.net/repo/svn/trunk or
# http://svn.collab.net/repo/svn/trunk/. and PN is subversion-svn.
# it will check out like:
#
# ${ESVN_STORE_DIR}/subversion/trunk
#
# this is not used in order to declare the name of the upstream project.
# so that you can declare this like:
#
# # jakarta commons-logging
# ESVN_PROJECT=commons/logging
#
# default: ${PN/-svn}.
ESVN_PROJECT="${ESVN_PROJECT:-${PN/-svn}}"
# @ECLASS-VARIABLE: ESVN_BOOTSTRAP
# @DESCRIPTION:
# bootstrap script or command like autogen.sh or etc..
ESVN_BOOTSTRAP="${ESVN_BOOTSTRAP:-}"
# @ECLASS-VARIABLE: ESVN_PATCHES
# @DESCRIPTION:
# subversion eclass can apply patches in subversion_bootstrap().
# you can use regexp in this variable like *.diff or *.patch or etc.
# NOTE: patches will be applied before ESVN_BOOTSTRAP is processed.
#
# Patches are searched both in ${PWD} and ${FILESDIR}, if not found in either
# location, the installation dies.
ESVN_PATCHES="${ESVN_PATCHES:-}"
# @ECLASS-VARIABLE: ESVN_RESTRICT
# @DESCRIPTION:
# this should be a space delimited list of subversion eclass features to
# restrict.
# export)
# don't export the working copy to S.
ESVN_RESTRICT="${ESVN_RESTRICT:-}"
# @ECLASS-VARIABLE: ESVN_DISABLE_DEPENDENCIES
# @DESCRIPTION:
# Set this variable to a non-empty value to disable the automatic inclusion of
# Subversion in dependencies.
ESVN_DISABLE_DEPENDENCIES="${ESVN_DISABLE_DEPENDENCIES:-}"
# @ECLASS-VARIABLE: ESVN_OFFLINE
# @DESCRIPTION:
# Set this variable to a non-empty value to disable the automatic updating of
# an svn source tree. This is intended to be set outside the subversion source
# tree by users.
ESVN_OFFLINE="${ESVN_OFFLINE:-${ESCM_OFFLINE}}"
# @ECLASS-VARIABLE: ESVN_UP_FREQ
# @DESCRIPTION:
# Set the minimum number of hours between svn up'ing in any given svn module. This is particularly
# useful for split KDE ebuilds where we want to ensure that all submodules are compiled for the same
# revision. It should also be kept user overrideable.
ESVN_UP_FREQ="${ESVN_UP_FREQ:=}"
# @ECLASS-VARIABLE: ESCM_LOGDIR
# @DESCRIPTION:
# User configuration variable. If set to a path such as e.g. /var/log/scm any
# package inheriting from subversion.eclass will record svn revision to
# ${CATEGORY}/${PN}.log in that path in pkg_preinst. This is not supposed to be
# set by ebuilds/eclasses. It defaults to empty so users need to opt in.
ESCM_LOGDIR="${ESCM_LOGDIR:=}"
# @FUNCTION: subversion_fetch
# @USAGE: [repo_uri] [destination]
# @DESCRIPTION:
# Wrapper function to fetch sources from subversion via svn checkout or svn update,
# depending on whether there is an existing working copy in ${ESVN_STORE_DIR}.
#
# Can take two optional parameters:
# repo_uri - a repository URI. default is ESVN_REPO_URI.
# destination - a check out path in S.
# Check out (first run) or update (subsequent runs) the sources from the
# subversion repository under ${ESVN_STORE_DIR}, then rsync-export the
# working copy into ${S}/${2} unless "export" is listed in ESVN_RESTRICT.
subversion_fetch() {
	# Split the URI into the plain repository URI and an optional peg
	# revision (URI@REV form).
	local repo_uri="$(subversion__get_repository_uri "${1:-${ESVN_REPO_URI}}")"
	local revision="$(subversion__get_peg_revision "${1:-${ESVN_REPO_URI}}")"
	local S_dest="${2}"
	if [[ -z ${repo_uri} ]]; then
		die "${ESVN}: ESVN_REPO_URI (or specified URI) is empty."
	fi
	# The user-set ESVN_REVISION overrides any peg revision from the URI.
	[[ -n "${ESVN_REVISION}" ]] && revision="${ESVN_REVISION}"
	# check for the protocol
	local protocol="${repo_uri%%:*}"
	case "${protocol}" in
		http|https)
			;;
		svn|svn+ssh)
			;;
		*)
			die "${ESVN}: fetch from '${protocol}' is not yet implemented."
			;;
	esac
	# Open the portage sandbox just enough for svn to work.
	addread "/etc/subversion"
	addwrite "${ESVN_STORE_DIR}"
	# Also make the /var/lib/portage/distfiles/svn-src directory writeable in sandbox
	# so we can create it if necessary.
	addwrite "$(dirname ${ESVN_STORE_DIR})"
	if [[ ! -d ${ESVN_STORE_DIR} ]]; then
		debug-print "${FUNCNAME}: initial checkout. creating subversion directory"
		mkdir -p "${ESVN_STORE_DIR}" || die "${ESVN}: can't mkdir ${ESVN_STORE_DIR}."
	fi
	cd "${ESVN_STORE_DIR}" || die "${ESVN}: can't chdir to ${ESVN_STORE_DIR}"
	local wc_path="$(subversion__get_wc_path "${repo_uri}")"
	# Use a private --config-dir so the build is isolated from the user's
	# ~/.subversion settings.
	local options="${ESVN_OPTIONS} --config-dir ${ESVN_STORE_DIR}/.subversion"
	[[ -n "${revision}" ]] && options="${options} -r ${revision}"
	if [[ "${ESVN_OPTIONS}" = *-r* ]]; then
		ewarn "\${ESVN_OPTIONS} contains -r, this usage is unsupported. Please"
		ewarn "see \${ESVN_REPO_URI}"
	fi
	# svn >= 1.6 would otherwise prompt about password stores; disable them.
	if has_version ">=dev-vcs/subversion-1.6.0"; then
		options="${options} --config-option=config:auth:password-stores="
	fi
	debug-print "${FUNCNAME}: wc_path = \"${wc_path}\""
	debug-print "${FUNCNAME}: ESVN_OPTIONS = \"${ESVN_OPTIONS}\""
	debug-print "${FUNCNAME}: options = \"${options}\""
	if [[ ! -d ${wc_path}/.svn ]]; then
		if [[ -n ${ESVN_OFFLINE} ]]; then
			ewarn "ESVN_OFFLINE cannot be used when there is no existing checkout."
		fi
		# first check out
		einfo "subversion check out start -->"
		einfo " repository: ${repo_uri}${revision:+@}${revision}"
		debug-print "${FUNCNAME}: ${ESVN_FETCH_CMD} ${options} ${repo_uri}"
		mkdir -p "${ESVN_PROJECT}" || die "${ESVN}: can't mkdir ${ESVN_PROJECT}."
		cd "${ESVN_PROJECT}" || die "${ESVN}: can't chdir to ${ESVN_PROJECT}"
		if [[ -n "${ESVN_USER}" ]]; then
			${ESVN_FETCH_CMD} ${options} --username "${ESVN_USER}" --password "${ESVN_PASSWORD}" "${repo_uri}" || die "${ESVN}: can't fetch to ${wc_path} from ${repo_uri}."
		else
			${ESVN_FETCH_CMD} ${options} "${repo_uri}" || die "${ESVN}: can't fetch to ${wc_path} from ${repo_uri}."
		fi
	elif [[ -n ${ESVN_OFFLINE} ]]; then
		# Offline mode: reuse the existing working copy without touching
		# the network; only verify the requested revision is available.
		svn upgrade "${wc_path}" &>/dev/null
		svn cleanup "${wc_path}" &>/dev/null
		subversion_wc_info "${repo_uri}" || die "${ESVN}: unknown problem occurred while accessing working copy."
		if [[ -n ${ESVN_REVISION} && ${ESVN_REVISION} != ${ESVN_WC_REVISION} ]]; then
			die "${ESVN}: You requested off-line updating and revision ${ESVN_REVISION} but only revision ${ESVN_WC_REVISION} is available locally."
		fi
		einfo "Fetching disabled: Using existing repository copy at revision ${ESVN_WC_REVISION}."
	else
		svn upgrade "${wc_path}" &>/dev/null
		svn cleanup "${wc_path}" &>/dev/null
		subversion_wc_info "${repo_uri}" || die "${ESVN}: unknown problem occurred while accessing working copy."
		# Honor ESVN_UP_FREQ: skip the network update when the working
		# copy was refreshed less than ESVN_UP_FREQ hours ago.
		local esvn_up_freq=
		if [[ -n ${ESVN_UP_FREQ} ]]; then
			if [[ -n ${ESVN_UP_FREQ//[[:digit:]]} ]]; then
				die "${ESVN}: ESVN_UP_FREQ must be an integer value corresponding to the minimum number of hours between svn up."
			elif [[ -z $(find "${wc_path}/.svn/entries" -mmin "+$((ESVN_UP_FREQ*60))") ]]; then
				einfo "Fetching disabled since ${ESVN_UP_FREQ} hours has not passed since last update."
				einfo "Using existing repository copy at revision ${ESVN_WC_REVISION}."
				esvn_up_freq=no_update
			fi
		fi
		if [[ -z ${esvn_up_freq} ]]; then
			# If the URI changed, switch the working copy; otherwise
			# just update it in place.
			if [[ ${ESVN_WC_URL} != $(subversion__get_repository_uri "${repo_uri}") ]]; then
				einfo "subversion switch start -->"
				einfo " old repository: ${ESVN_WC_URL}@${ESVN_WC_REVISION}"
				einfo " new repository: ${repo_uri}${revision:+@}${revision}"
				debug-print "${FUNCNAME}: ${ESVN_SWITCH_CMD} ${options} ${repo_uri}"
				cd "${wc_path}" || die "${ESVN}: can't chdir to ${wc_path}"
				if [[ -n "${ESVN_USER}" ]]; then
					${ESVN_SWITCH_CMD} ${options} --username "${ESVN_USER}" --password "${ESVN_PASSWORD}" ${repo_uri} || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
				else
					${ESVN_SWITCH_CMD} ${options} ${repo_uri} || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
				fi
			else
				# update working copy
				einfo "subversion update start -->"
				einfo " repository: ${repo_uri}${revision:+@}${revision}"
				debug-print "${FUNCNAME}: ${ESVN_UPDATE_CMD} ${options}"
				cd "${wc_path}" || die "${ESVN}: can't chdir to ${wc_path}"
				if [[ -n "${ESVN_USER}" ]]; then
					${ESVN_UPDATE_CMD} ${options} --username "${ESVN_USER}" --password "${ESVN_PASSWORD}" || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
				else
					${ESVN_UPDATE_CMD} ${options} || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
				fi
			fi
		fi
	fi
	einfo " working copy: ${wc_path}"
	if ! has "export" ${ESVN_RESTRICT}; then
		cd "${wc_path}" || die "${ESVN}: can't chdir to ${wc_path}"
		local S="${S}/${S_dest}"
		mkdir -p "${S}"
		# export to the ${WORKDIR}
		#* "svn export" has a bug. see http://bugs.gentoo.org/119236
		#* svn export . "${S}" || die "${ESVN}: can't export to ${S}."
		rsync -rlpgo --exclude=".svn/" . "${S}" || die "${ESVN}: can't export to ${S}."
	fi
	echo
}
# @FUNCTION: subversion_bootstrap
# @DESCRIPTION:
# Apply patches in ${ESVN_PATCHES} and run ${ESVN_BOOTSTRAP} if specified.
# Apply the patches listed in ESVN_PATCHES (literal paths or globs resolved
# against ${FILESDIR}) and then run ESVN_BOOTSTRAP, which may be either an
# executable file inside ${S} or an arbitrary shell command.
subversion_bootstrap() {
	# Nothing was exported to ${S}, so there is nothing to patch/bootstrap.
	if has "export" ${ESVN_RESTRICT}; then
		return
	fi
	cd "${S}"
	if [[ -n ${ESVN_PATCHES} ]]; then
		einfo "apply patches -->"
		local patch fpatch
		# ESVN_PATCHES is intentionally unquoted so globs expand here.
		for patch in ${ESVN_PATCHES}; do
			if [[ -f ${patch} ]]; then
				epatch "${patch}"
			else
				# Fall back to matching the pattern under FILESDIR;
				# die if the glob matched nothing that exists.
				for fpatch in ${FILESDIR}/${patch}; do
					if [[ -f ${fpatch} ]]; then
						epatch "${fpatch}"
					else
						die "${ESVN}: ${patch} not found"
					fi
				done
			fi
		done
		echo
	fi
	if [[ -n ${ESVN_BOOTSTRAP} ]]; then
		einfo "begin bootstrap -->"
		# An executable file is run from ${S}; anything else is eval'd
		# as a shell command line.
		if [[ -f ${ESVN_BOOTSTRAP} && -x ${ESVN_BOOTSTRAP} ]]; then
			einfo " bootstrap with a file: ${ESVN_BOOTSTRAP}"
			eval "./${ESVN_BOOTSTRAP}" || die "${ESVN}: can't execute ESVN_BOOTSTRAP."
		else
			einfo " bootstrap with command: ${ESVN_BOOTSTRAP}"
			eval "${ESVN_BOOTSTRAP}" || die "${ESVN}: can't eval ESVN_BOOTSTRAP."
		fi
	fi
}
# @FUNCTION: subversion_src_unpack
# @DESCRIPTION:
# Default src_unpack. Fetch and, in older EAPIs, bootstrap.
subversion_src_unpack() {
	# Always fetch; in EAPI 0/1 there is no src_prepare phase, so the
	# bootstrap step has to happen here as well.
	subversion_fetch || die "${ESVN}: unknown problem occurred in subversion_fetch."
	case "${EAPI:-0}" in
		0|1)
			subversion_bootstrap || die "${ESVN}: unknown problem occurred in subversion_bootstrap."
			;;
	esac
}
# @FUNCTION: subversion_src_prepare
# @DESCRIPTION:
# Default src_prepare. Bootstrap.
subversion_src_prepare() {
	# From EAPI 2 on, patching/bootstrapping belongs in src_prepare.
	subversion_bootstrap || die "${ESVN}: unknown problem occurred in subversion_bootstrap."
}
# @FUNCTION: subversion_wc_info
# @USAGE: [repo_uri]
# @RETURN: ESVN_WC_URL, ESVN_WC_ROOT, ESVN_WC_UUID, ESVN_WC_REVISION and ESVN_WC_PATH
# @DESCRIPTION:
# Get svn info for the specified repo_uri. The default repo_uri is ESVN_REPO_URI.
#
# The working copy information on the specified repository URI are set to
# ESVN_WC_* variables.
subversion_wc_info() {
	# Resolve the working-copy path for the requested (or default) URI.
	local uri wc
	uri="$(subversion__get_repository_uri "${1:-${ESVN_REPO_URI}}")"
	wc="$(subversion__get_wc_path "${uri}")"

	debug-print "${FUNCNAME}: repo_uri = ${uri}"
	debug-print "${FUNCNAME}: wc_path = ${wc}"

	# No working copy yet -> signal failure to the caller.
	[[ -d ${wc} ]] || return 1

	# Publish the working-copy metadata for other eclass functions.
	export ESVN_WC_URL="$(subversion__svn_info "${wc}" "URL")"
	export ESVN_WC_ROOT="$(subversion__svn_info "${wc}" "Repository Root")"
	export ESVN_WC_UUID="$(subversion__svn_info "${wc}" "Repository UUID")"
	export ESVN_WC_REVISION="$(subversion__svn_info "${wc}" "Revision")"
	export ESVN_WC_PATH="${wc}"
}
## -- Private Functions
## -- subversion__svn_info() ------------------------------------------------- #
#
# param $1 - a target.
# param $2 - a key name.
#
subversion__svn_info() {
	# $1 = target path/URI, $2 = key name to extract from `svn info`.
	# LC_ALL=C keeps the field labels in English regardless of locale.
	env LC_ALL=C svn info "${1}" | grep -i "^${2}" | cut -d" " -f2-
}
## -- subversion__get_repository_uri() --------------------------------------- #
#
# param $1 - a repository URI.
subversion__get_repository_uri() {
	local uri="${1}"

	debug-print "${FUNCNAME}: repo_uri = ${uri}"

	[[ -n ${uri} ]] || die "${ESVN}: ESVN_REPO_URI (or specified URI) is empty."

	# Strip a single trailing slash, then any trailing @REV peg revision.
	uri="${uri%/}"
	uri="${uri%@*}"

	echo "${uri}"
}
## -- subversion__get_wc_path() ---------------------------------------------- #
#
# param $1 - a repository URI.
subversion__get_wc_path() {
	# Working copies live at <store>/<project>/<last URI component>.
	local uri
	uri="$(subversion__get_repository_uri "${1}")"

	debug-print "${FUNCNAME}: repo_uri = ${uri}"

	echo "${ESVN_STORE_DIR}/${ESVN_PROJECT}/${uri##*/}"
}
## -- subversion__get_peg_revision() ----------------------------------------- #
#
# param $1 - a repository URI.
subversion__get_peg_revision() {
	# Extract the peg revision (the part after the last '@') from a URI;
	# prints an empty string when the URI has no peg revision.
	local peg_rev=

	debug-print "${FUNCNAME}: repo_uri = ${1}"

	if [[ ${1} == *@* ]]; then
		peg_rev="${1##*@}"
	else
		debug-print "${FUNCNAME}: repo_uri does not have a peg revision."
	fi

	debug-print "${FUNCNAME}: peg_rev = ${peg_rev}"

	echo "${peg_rev}"
}
# @FUNCTION: subversion_pkg_preinst
# @USAGE: [repo_uri]
# @DESCRIPTION:
# Log the svn revision of source code. Doing this in pkg_preinst because we
# want the logs to stick around if packages are uninstalled without messing with
# config protection.
# Record the merged svn revision to ${ROOT}/${ESCM_LOGDIR}/${CATEGORY}/${PN}.log
# (only when the user opted in by setting ESCM_LOGDIR).
subversion_pkg_preinst() {
	local pkgdate=$(date "+%Y%m%d %H:%M:%S")
	# Populate the ESVN_WC_* variables for the log message below.
	subversion_wc_info "${1:-${ESVN_REPO_URI}}"
	if [[ -n ${ESCM_LOGDIR} ]]; then
		local dir="${ROOT}/${ESCM_LOGDIR}/${CATEGORY}"
		if [[ ! -d ${dir} ]]; then
			# NOTE(review): ${PORTDIR_SCM} is not set anywhere in this
			# eclass -- the message likely meant ${ESCM_LOGDIR}; verify.
			mkdir -p "${dir}" || \
				eerror "Failed to create '${dir}' for logging svn revision to '${PORTDIR_SCM}'"
		fi
		local logmessage="svn: ${pkgdate} - ${PF}:${SLOT} was merged at revision ${ESVN_WC_REVISION}"
		# Only append when the directory exists (mkdir above may have failed).
		if [[ -d ${dir} ]]; then
			echo "${logmessage}" >> "${dir}/${PN}.log"
		else
			eerror "Could not log the message '${logmessage}' to '${dir}/${PN}.log'"
		fi
	fi
}

View File

@ -0,0 +1,85 @@
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
EAPI="2"
inherit eutils cros-binary
# Synaptics touchpad generic eclass.
IUSE="is_touchpad ps_touchpad"
RDEPEND="x11-base/xorg-server"
DEPEND="${RDEPEND}"
# @ECLASS-VARIABLE: SYNAPTICS_TOUCHPAD_PN
# @DESCRIPTION: The packagename used as part of the binary tarball filename.
: ${SYNAPTICS_TOUCHPAD_PN:=${PN}}
# Determine the installed X server version from xorg-server.h and set
# CROS_BINARY_URI to the matching Synaptics binary tarball URL.
export_uri() {
	local XORG_VERSION_STRING
	local XORG_VERSION
	local X_VERSION
	# Pull the "#define XORG_VERSION_CURRENT <expr>" line out of the header.
	XORG_VERSION_STRING=$(grep "XORG_VERSION_CURRENT" "$ROOT/usr/include/xorg/xorg-server.h")
	# Strip the leading "#define ... XORG_VERSION_CURRENT" prefix, leaving
	# just the numeric expression.
	XORG_VERSION_STRING=${XORG_VERSION_STRING/#\#define*XORG_VERSION_CURRENT}
	# Evaluate the expression (the header encodes the version arithmetically).
	XORG_VERSION=$(($XORG_VERSION_STRING))
	# Map the encoded value to a major.minor server series; thresholds are
	# the encoded forms of 1.11.0, 1.10.0 and 1.9.3.
	if [ $XORG_VERSION -ge 11100000 ]; then
		X_VERSION=1.11
	elif [ $XORG_VERSION -ge 11000000 ]; then
		X_VERSION=1.10
	elif [ $XORG_VERSION -ge 10903000 ]; then
		X_VERSION=1.9
	else
		X_VERSION=1.7
	fi
	CROS_BINARY_URI="http://commondatastorage.googleapis.com/synaptics/${SYNAPTICS_TOUCHPAD_PN}-xorg-${X_VERSION}-${PV}-${PR}.tar.gz"
}
synaptics-touchpad_src_unpack() {
	# Compute CROS_BINARY_URI for the installed X server version before
	# handing off to the generic binary unpack.
	export_uri
	cros-binary_src_unpack
}
# Install the Synaptics binary payload, optional logging shim, and link the
# config files matching the touchpad type selected via USE flags.
function synaptics-touchpad_src_install() {
	# Currently you must have files/* in each ebuild that inherits
	# from here. These files will go away soon after they are pushed
	# into the synaptics tarball.
	export_uri
	cros-binary_src_install
	# If the tarball unpacked into a single wrapper directory, flatten it
	# so the payload sits directly under ${D}.
	if [ $(ls "${D}" | wc -l) -eq 1 ]; then
		local extra_dir="$(ls "${D}")"
		mv "${D}/${extra_dir}/"* "${D}/"
		rmdir "${D}/${extra_dir}/"
	fi
	exeinto /opt/Synaptics/bin
	doexe "${FILESDIR}/tpcontrol_syncontrol" || die
	# If it exists, install synlogger to log calls to the Synaptics binaries.
	# The original binaries themselves are appended with _bin, and symlinks are
	# created with their original names that point at synlogger.
	if [ -f "${FILESDIR}/synlogger" ]; then
		doexe "${FILESDIR}/synlogger" || die
		local f
		for f in syn{control,detect,reflash} ; do
			mv "${D}"/opt/Synaptics/bin/${f}{,_bin} || die
			dosym synlogger /opt/Synaptics/bin/${f} || die
		done
	fi
	# link the appropriate config files for the type of trackpad
	# (exactly one of is_touchpad / ps_touchpad must be set).
	if use is_touchpad && use ps_touchpad; then
		die "Specify only one type of touchpad"
	elif use is_touchpad; then
		dosym HKLM_Kernel_IS /opt/Synaptics/HKLM_Kernel || die
		dosym HKLM_User_IS /opt/Synaptics/HKLM_User || die
	elif use ps_touchpad; then
		dosym HKLM_Kernel_PS /opt/Synaptics/HKLM_Kernel || die
		dosym HKLM_User_PS /opt/Synaptics/HKLM_User || die
	else
		die "Type of touchpad not specified"
	fi
}

View File

@ -0,0 +1,84 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Install Tegra BCT files for firmware construction.
#
# @ECLASS-VARIABLE: TEGRA_BCT_SDRAM_CONFIG
# @DESCRIPTION:
# SDRAM memory timing configuration file to install
: ${TEGRA_BCT_SDRAM_CONFIG:=}
# @ECLASS-VARIABLE: TEGRA_BCT_FLASH_CONFIG
# @DESCRIPTION:
# Flash memory configuration file to install
: ${TEGRA_BCT_FLASH_CONFIG:=}
# @ECLASS-VARIABLE: TEGRA_BCT_CHIP_FAMILY
# @DESCRIPTION:
# Family of Tegra chip (determines BCT configuration)
: ${TEGRA_BCT_CHIP_FAMILY:=t25}
# Check for EAPI 2+
case "${EAPI:-0}" in
4|3|2) ;;
*) die "unsupported EAPI" ;;
esac
# Validate the eclass configuration and concatenate the flash + SDRAM config
# fragments from FILESDIR into board.cfg for cbootimage.
tegra-bct_src_configure() {
	local sdram_file="${FILESDIR}/${TEGRA_BCT_SDRAM_CONFIG}"
	local flash_file="${FILESDIR}/${TEGRA_BCT_FLASH_CONFIG}"

	# Fail fast when the inheriting ebuild did not set the required knobs.
	if [ -z "${TEGRA_BCT_SDRAM_CONFIG}" ]; then
		die "No SDRAM configuration file selected."
	fi

	if [ -z "${TEGRA_BCT_FLASH_CONFIG}" ]; then
		die "No flash configuration file selected."
	fi

	if [ -z "${TEGRA_BCT_CHIP_FAMILY}" ]; then
		die "No chip family selected."
	fi

	einfo "Using sdram config file: ${sdram_file}"
	einfo "Using flash config file: ${flash_file}"
	einfo "Using chip family : ${TEGRA_BCT_CHIP_FAMILY}"

	# Quote the paths: FILESDIR may contain whitespace, and the previously
	# unquoted expansions would be word-split before reaching cat.
	cat "${flash_file}" > board.cfg ||
		die "Failed to read flash config file."

	cat "${sdram_file}" >> board.cfg ||
		die "Failed to read SDRAM config file."
}
tegra-bct_src_compile() {
	# cbootimage wants the chip family as a "-<family>" flag, e.g. "-t25".
	local family_flag="-${TEGRA_BCT_CHIP_FAMILY}"

	cbootimage -gbct $family_flag board.cfg board.bct ||
		die "Failed to generate BCT."
}
# Install the source config fragments, canonical sdram.cfg/flash.cfg symlinks,
# and the generated board.cfg/board.bct under /firmware/bct.
tegra-bct_src_install() {
	local sdram_file=${FILESDIR}/${TEGRA_BCT_SDRAM_CONFIG}
	local flash_file=${FILESDIR}/${TEGRA_BCT_FLASH_CONFIG}
	insinto /firmware/bct
	doins "${sdram_file}"
	doins "${flash_file}"
	# Provide stable names for consumers; skip the symlink when the file
	# already has the canonical name.
	if [ "$(basename ${sdram_file})" != "sdram.cfg" ]; then
		dosym "$(basename ${sdram_file})" /firmware/bct/sdram.cfg
	fi
	if [ "$(basename ${flash_file})" != "flash.cfg" ]; then
		dosym "$(basename ${flash_file})" /firmware/bct/flash.cfg
	fi
	# Artifacts produced by src_configure/src_compile in ${WORKDIR}.
	doins board.cfg
	doins board.bct
}
EXPORT_FUNCTIONS src_configure src_compile src_install

View File

@ -0,0 +1,50 @@
#!/bin/bash
# Unit tests for the appid eclass's doappid helper: verify that well-formed
# appid GUIDs are accepted and malformed ones are rejected.
source tests-common.sh

inherit appid

# Known-good GUIDs, plus every appid actually used by the board overlays
# (harvested from the doappid calls in their chromeos-bsp ebuilds).
valid_uuids=(
	'{01234567-89AB-CDEF-0123-456789ABCDEF}'
	'{11111111-1111-1111-1111-111111111111}'
	'{DDDDDDDD-DDDD-DDDD-DDDD-DDDDDDDDDDDD}'
	$(grep -hs doappid ../../../../{private-,}overlays/overlay-*/chromeos-base/chromeos-bsp-*/*.ebuild | \
		gawk '{print gensub(/"/, "", "g", $2)}')
)
# Malformed inputs: wrong grouping, stray whitespace, lowercase hex digits,
# and non-hex characters.
invalid_uuids=(
	''
	'01234567-89AB-CDEF-0123-4567-89ABCDEF0123'
	' {01234567-89AB-CDEF-0123-4567-89ABCDEF0123} '
	' {01234567-89AB-CDEF-0123-4567-89ABCDEF0123}'
	'{01234567-89AB-CDEF-0123-4567-89ABCDEF0123} '
	'{01234567-89AB-CDEF-0123-4567-89abcDEF0123}'
	'{GGGGGGGG-GGGG-GGGG-GGGG-GGGG-GGGGGGGGGGGG}'
)

tbegin "no args"
! (doappid) >&/dev/null
tend $?

tbegin "too many args"
! (doappid "${valid_uuids[0]}" 1234) >&/dev/null
tend $?

tbegin "invalid appids"
# Each doappid runs in a subshell so a die() only kills that attempt.
for uuid in "${invalid_uuids[@]}" ; do
	if (doappid "${uuid}") >&/dev/null ; then
		tend 1 "not caught: ${uuid}"
	fi
	rm -rf "${D}"
done
tend $?

tbegin "valid appids"
for uuid in "${valid_uuids[@]}" ; do
	if ! (doappid "${uuid}") ; then
		tend 1 "not accepted: ${uuid}"
	fi
	rm -rf "${D}"
done
tend $?

texit

View File

@ -0,0 +1 @@
source ../../../portage-stable/eclass/tests/tests-common.sh

View File

@ -0,0 +1,782 @@
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/eclass/toolchain-funcs.eclass,v 1.120 2012/12/29 05:08:54 vapier Exp $
# @ECLASS: toolchain-funcs.eclass
# @MAINTAINER:
# Toolchain Ninjas <toolchain@gentoo.org>
# @BLURB: functions to query common info about the toolchain
# @DESCRIPTION:
# The toolchain-funcs aims to provide a complete suite of functions
# for gleaning useful information about the toolchain and to simplify
# ugly things like cross-compiling and multilib. All of this is done
# in such a way that you can rely on the function always returning
# something sane.
if [[ ${___ECLASS_ONCE_TOOLCHAIN_FUNCS} != "recur -_+^+_- spank" ]] ; then
___ECLASS_ONCE_TOOLCHAIN_FUNCS="recur -_+^+_- spank"
inherit multilib binutils-funcs
# tc-getPROG <VAR [search vars]> <default> [tuple]
# Resolve a toolchain program name.
#   $1 = tuple variable to consult (CHOST or CBUILD)
#   $2 = space-separated list of env vars to check, first one is exported
#   $3 = default program name
#   $4 = optional tuple prefix override
# Echoes the resolved program and exports it into the first var of $2.
_tc-getPROG() {
	local tuple=$1
	local v var vars=$2
	local prog=$3

	# If any of the candidate vars is already set, export its value into
	# the primary var and return it unchanged (user override wins).
	var=${vars%% *}
	for v in ${vars} ; do
		if [[ -n ${!v} ]] ; then
			export ${var}="${!v}"
			echo "${!v}"
			return 0
		fi
	done

	# Otherwise look for a tuple-prefixed tool (e.g. x86_64-pc-linux-gnu-gcc)
	# on PATH; fall back to the bare default name when none is found.
	local search=
	[[ -n $4 ]] && search=$(type -p "$4-${prog}")
	[[ -z ${search} && -n ${!tuple} ]] && search=$(type -p "${!tuple}-${prog}")
	[[ -n ${search} ]] && prog=${search##*/}

	# Cache the result in the primary var for subsequent calls.
	export ${var}=${prog}
	echo "${!var}"
}
# Resolve a tool for the build machine: prefer BUILD_$1 / $1_FOR_BUILD /
# HOST$1 overrides, then fall back to the CBUILD tuple prefix.
tc-getBUILD_PROG() { _tc-getPROG CBUILD "BUILD_$1 $1_FOR_BUILD HOST$1" "${@:2}"; }
# Resolve a tool for the host (CHOST) toolchain.
tc-getPROG() { _tc-getPROG CHOST "$@"; }
# @FUNCTION: tc-getAR
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver
tc-getAR() { tc-getPROG AR ar "$@"; }
# @FUNCTION: tc-getAS
# @USAGE: [toolchain prefix]
# @RETURN: name of the assembler
tc-getAS() { tc-getPROG AS as "$@"; }
# @FUNCTION: tc-getCC
# @USAGE: [toolchain prefix]
# @RETURN: name of the C compiler
tc-getCC() { tc-getPROG CC gcc "$@"; }
# @FUNCTION: tc-getCPP
# @USAGE: [toolchain prefix]
# @RETURN: name of the C preprocessor
tc-getCPP() { tc-getPROG CPP cpp "$@"; }
# @FUNCTION: tc-getCXX
# @USAGE: [toolchain prefix]
# @RETURN: name of the C++ compiler
tc-getCXX() { tc-getPROG CXX g++ "$@"; }
# @FUNCTION: tc-getLD
# @USAGE: [toolchain prefix]
# @RETURN: name of the linker
tc-getLD() { tc-getPROG LD ld "$@"; }
# @FUNCTION: tc-getSTRIP
# @USAGE: [toolchain prefix]
# @RETURN: name of the strip program
tc-getSTRIP() { tc-getPROG STRIP strip "$@"; }
# @FUNCTION: tc-getNM
# @USAGE: [toolchain prefix]
# @RETURN: name of the symbol/object thingy
tc-getNM() { tc-getPROG NM nm "$@"; }
# @FUNCTION: tc-getRANLIB
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver indexer
tc-getRANLIB() { tc-getPROG RANLIB ranlib "$@"; }
# @FUNCTION: tc-getOBJCOPY
# @USAGE: [toolchain prefix]
# @RETURN: name of the object copier
tc-getOBJCOPY() { tc-getPROG OBJCOPY objcopy "$@"; }
# @FUNCTION: tc-getF77
# @USAGE: [toolchain prefix]
# @RETURN: name of the Fortran 77 compiler
tc-getF77() { tc-getPROG F77 gfortran "$@"; }
# @FUNCTION: tc-getFC
# @USAGE: [toolchain prefix]
# @RETURN: name of the Fortran 90 compiler
tc-getFC() { tc-getPROG FC gfortran "$@"; }
# @FUNCTION: tc-getGCJ
# @USAGE: [toolchain prefix]
# @RETURN: name of the java compiler
tc-getGCJ() { tc-getPROG GCJ gcj "$@"; }
# @FUNCTION: tc-getPKG_CONFIG
# @USAGE: [toolchain prefix]
# @RETURN: name of the pkg-config tool
tc-getPKG_CONFIG() { tc-getPROG PKG_CONFIG pkg-config "$@"; }
# @FUNCTION: tc-getRC
# @USAGE: [toolchain prefix]
# @RETURN: name of the Windows resource compiler
tc-getRC() { tc-getPROG RC windres "$@"; }
# @FUNCTION: tc-getDLLWRAP
# @USAGE: [toolchain prefix]
# @RETURN: name of the Windows dllwrap utility
tc-getDLLWRAP() { tc-getPROG DLLWRAP dllwrap "$@"; }
# @FUNCTION: tc-getBUILD_AR
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver for building binaries to run on the build machine
tc-getBUILD_AR() { tc-getBUILD_PROG AR ar "$@"; }
# @FUNCTION: tc-getBUILD_AS
# @USAGE: [toolchain prefix]
# @RETURN: name of the assembler for building binaries to run on the build machine
tc-getBUILD_AS() { tc-getBUILD_PROG AS as "$@"; }
# @FUNCTION: tc-getBUILD_CC
# @USAGE: [toolchain prefix]
# @RETURN: name of the C compiler for building binaries to run on the build machine
tc-getBUILD_CC() { tc-getBUILD_PROG CC gcc "$@"; }
# @FUNCTION: tc-getBUILD_CPP
# @USAGE: [toolchain prefix]
# @RETURN: name of the C preprocessor for building binaries to run on the build machine
tc-getBUILD_CPP() { tc-getBUILD_PROG CPP cpp "$@"; }
# @FUNCTION: tc-getBUILD_CXX
# @USAGE: [toolchain prefix]
# @RETURN: name of the C++ compiler for building binaries to run on the build machine
tc-getBUILD_CXX() { tc-getBUILD_PROG CXX g++ "$@"; }
# @FUNCTION: tc-getBUILD_LD
# @USAGE: [toolchain prefix]
# @RETURN: name of the linker for building binaries to run on the build machine
tc-getBUILD_LD() { tc-getBUILD_PROG LD ld "$@"; }
# @FUNCTION: tc-getBUILD_STRIP
# @USAGE: [toolchain prefix]
# @RETURN: name of the strip program for building binaries to run on the build machine
tc-getBUILD_STRIP() { tc-getBUILD_PROG STRIP strip "$@"; }
# @FUNCTION: tc-getBUILD_NM
# @USAGE: [toolchain prefix]
# @RETURN: name of the symbol/object thingy for building binaries to run on the build machine
tc-getBUILD_NM() { tc-getBUILD_PROG NM nm "$@"; }
# @FUNCTION: tc-getBUILD_RANLIB
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver indexer for building binaries to run on the build machine
tc-getBUILD_RANLIB() { tc-getBUILD_PROG RANLIB ranlib "$@"; }
# @FUNCTION: tc-getBUILD_OBJCOPY
# @USAGE: [toolchain prefix]
# @RETURN: name of the object copier for building binaries to run on the build machine
tc-getBUILD_OBJCOPY() { tc-getBUILD_PROG OBJCOPY objcopy "$@"; }
# @FUNCTION: tc-getBUILD_PKG_CONFIG
# @USAGE: [toolchain prefix]
# @RETURN: name of the pkg-config tool for building binaries to run on the build machine
tc-getBUILD_PKG_CONFIG() { tc-getBUILD_PROG PKG_CONFIG pkg-config "$@"; }
# @FUNCTION: tc-export
# @USAGE: <list of toolchain variables>
# @DESCRIPTION:
# Quick way to export a bunch of compiler vars at once.
tc-export() {
	# Evaluate each requested tc-getXX helper once (discarding its echoed
	# value) purely for the exporting side effect.
	local v
	for v in "$@" ; do
		[[ $(type -t tc-get${v}) == "function" ]] || die "tc-export: invalid export variable '${v}'"
		tc-get${v} > /dev/null
	done
}
# @FUNCTION: tc-is-cross-compiler
# @RETURN: Shell true if we are using a cross-compiler, shell false otherwise
# @FUNCTION: tc-is-cross-compiler
# @RETURN: Shell true if we are using a cross-compiler, shell false otherwise
tc-is-cross-compiler() {
	# The bare [[ ]] already yields the desired exit status; the old
	# `return $([[ ... ]])` form forked a subshell just to capture empty
	# output and re-use its status.
	[[ ${CBUILD:-${CHOST}} != ${CHOST} ]]
}
# @FUNCTION: tc-is-softfloat
# @DESCRIPTION:
# See if this toolchain is a softfloat based one.
# @CODE
# The possible return values:
# - only: the target is always softfloat (never had fpu)
# - yes: the target should support softfloat
# - softfp: (arm specific) the target should use hardfloat insns, but softfloat calling convention
# - no: the target doesn't support softfloat
# @CODE
# This allows us to react differently where packages accept
# softfloat flags in the case where support is optional, but
# rejects softfloat flags where the target always lacks an fpu.
tc-is-softfloat() {
	# Classify the target tuple's floating-point ABI; see the @CODE block
	# above for the meaning of only/yes/softfp/no.
	local target=${CTARGET:-${CHOST}}
	case ${target} in
	bfin*|h8300*)
		# These chips never ship an FPU.
		echo "only" ;;
	*)
		# Normalize '_' to '-' so both spellings of the tuple match.
		case ${target//_/-} in
		*-softfloat-*) echo "yes" ;;
		*-softfp-*)    echo "softfp" ;;
		*)             echo "no" ;;
		esac
		;;
	esac
}
# @FUNCTION: tc-is-static-only
# @DESCRIPTION:
# Return shell true if the target does not support shared libs, shell false
# otherwise.
# @FUNCTION: tc-is-static-only
# @DESCRIPTION:
# Return shell true if the target does not support shared libs, shell false
# otherwise.
tc-is-static-only() {
	local host=${CTARGET:-${CHOST}}
	# *MiNT doesn't have shared libraries, only platform so far.
	# The bare [[ ]] supplies the exit status directly; no need for the
	# old `return $([[ ... ]])` subshell fork.
	[[ ${host} == *-mint* ]]
}
# @FUNCTION: tc-export_build_env
# @USAGE: [compiler variables]
# @DESCRIPTION:
# Export common build related compiler settings.
tc-export_build_env() {
	# Export the requested tool variables first.
	tc-export "$@"
	# Fill in conservative defaults for any BUILD_* flag the user left
	# unset or empty, then export the whole set.
	BUILD_CFLAGS=${BUILD_CFLAGS:--O1 -pipe}
	BUILD_CXXFLAGS=${BUILD_CXXFLAGS:--O1 -pipe}
	BUILD_CPPFLAGS=${BUILD_CPPFLAGS:-}
	BUILD_LDFLAGS=${BUILD_LDFLAGS:-}
	export BUILD_CFLAGS BUILD_CXXFLAGS BUILD_CPPFLAGS BUILD_LDFLAGS
}
# @FUNCTION: tc-env_build
# @USAGE: <command> [command args]
# @INTERNAL
# @DESCRIPTION:
# Setup the compile environment to the build tools and then execute the
# specified command. We use tc-getBUILD_XX here so that we work with
# all of the semi-[non-]standard env vars like $BUILD_CC which often
# the target build system does not check.
tc-env_build() {
	# Make sure the BUILD_* flag defaults are in place.
	tc-export_build_env
	# Run the command with the build-machine tools and flags shadowing the
	# target ones for the duration of this single command.
	CFLAGS=${BUILD_CFLAGS} \
	CXXFLAGS=${BUILD_CXXFLAGS} \
	CPPFLAGS=${BUILD_CPPFLAGS} \
	LDFLAGS=${BUILD_LDFLAGS} \
	AR=$(tc-getBUILD_AR) \
	AS=$(tc-getBUILD_AS) \
	CC=$(tc-getBUILD_CC) \
	CPP=$(tc-getBUILD_CPP) \
	CXX=$(tc-getBUILD_CXX) \
	LD=$(tc-getBUILD_LD) \
	NM=$(tc-getBUILD_NM) \
	PKG_CONFIG=$(tc-getBUILD_PKG_CONFIG) \
	RANLIB=$(tc-getBUILD_RANLIB) \
	"$@"
}
# @FUNCTION: econf_build
# @USAGE: [econf flags]
# @DESCRIPTION:
# Sometimes we need to locally build up some tools to run on CBUILD because
# the package has helper utils which are compiled+executed when compiling.
# This won't work when cross-compiling as the CHOST is set to a target which
# we cannot natively execute.
#
# For example, the python package will build up a local python binary using
# a portable build system (configure+make), but then use that binary to run
# local python scripts to build up other components of the overall python.
# We cannot rely on the python binary in $PATH as that often times will be
# a different version, or not even installed in the first place. Instead,
# we compile the code in a different directory to run on CBUILD, and then
# use that binary when compiling the main package to run on CHOST.
#
# For example, with newer EAPIs, you'd do something like:
# @CODE
# src_configure() {
# ECONF_SOURCE=${S}
# if tc-is-cross-compiler ; then
# mkdir "${WORKDIR}"/${CBUILD}
# pushd "${WORKDIR}"/${CBUILD} >/dev/null
# econf_build --disable-some-unused-stuff
# popd >/dev/null
# fi
# ... normal build paths ...
# }
# src_compile() {
# if tc-is-cross-compiler ; then
# pushd "${WORKDIR}"/${CBUILD} >/dev/null
# emake one-or-two-build-tools
# ln/mv build-tools to normal build paths in ${S}/
# popd >/dev/null
# fi
# ... normal build paths ...
# }
# @CODE
econf_build() {
	# Configure for the build machine (CBUILD) using the build toolchain;
	# see tc-env_build for the variable overrides involved.
	tc-env_build econf --build=${CBUILD:-${CHOST}} "$@"
}
# @FUNCTION: tc-has-openmp
# @USAGE: [toolchain prefix]
# @DESCRIPTION:
# See if the toolchain supports OpenMP.
tc-has-openmp() {
	# Try to compile a minimal OpenMP program; the compile's exit status
	# is the answer.
	local base="${T}/test-tc-openmp"
	cat <<-EOF > "${base}.c"
	#include <omp.h>
	int main() {
		int nthreads, tid, ret = 0;
		#pragma omp parallel private(nthreads, tid)
		{
		tid = omp_get_thread_num();
		nthreads = omp_get_num_threads(); ret += tid + nthreads;
		}
		return ret;
	}
	EOF
	$(tc-getCC "$@") -fopenmp "${base}.c" -o "${base}" >&/dev/null
	local ret=$?
	# Clean up the source and any produced binary before returning.
	rm -f "${base}"*
	return ${ret}
}
# @FUNCTION: tc-has-tls
# @USAGE: [-s|-c|-l] [toolchain prefix]
# @DESCRIPTION:
# See if the toolchain supports thread local storage (TLS). Use -s to test the
# compiler, -c to also test the assembler, and -l to also test the C library
# (the default).
tc-has-tls() {
local base="${T}/test-tc-tls"
# Minimal program using a __thread variable; whether it survives each build
# stage (compile / assemble / link) tells us the level of TLS support.
cat <<-EOF > "${base}.c"
int foo(int *i) {
static __thread int j = 0;
return *i ? j : *i;
}
EOF
local flags
# -s: stop after compilation (-S); -c: also run the assembler (-c);
# -l (the default): also link a shared object against the C library.
case $1 in
-s) flags="-S";;
-c) flags="-c";;
-l) ;;
-*) die "Usage: tc-has-tls [-c|-l] [toolchain prefix]";;
esac
# Library test default: build a PIC shared object with no undefined symbols.
: ${flags:=-fPIC -shared -Wl,-z,defs}
# Remaining args (if any) are the toolchain prefix for tc-getCC.
[[ $1 == -* ]] && shift
$(tc-getCC "$@") ${flags} "${base}.c" -o "${base}" >&/dev/null
local ret=$?
rm -f "${base}"*
return ${ret}
}
# Parse information from CBUILD/CHOST/CTARGET rather than
# use external variables from the profile.
#
# $1 - "kern" to emit kernel ARCH names, anything else for portage ARCH names
# $2 - optional target tuple; defaults to CTARGET, then CHOST
tc-ninja_magic_to_arch() {
# Helper: echo $1 when resolving a kernel arch, $2 for a portage arch.
ninj() { [[ ${type} == "kern" ]] && echo $1 || echo $2 ; }
local type=$1
local host=$2
[[ -z ${host} ]] && host=${CTARGET:-${CHOST}}
# Kernel version is needed for arches whose kernel tree name changed over time.
local KV=${KV:-${KV_FULL}}
[[ ${type} == "kern" ]] && [[ -z ${KV} ]] && \
ewarn "QA: Kernel version could not be determined, please inherit kernel-2 or linux-info"
case ${host} in
aarch64*) ninj arm64 arm;;
alpha*) echo alpha;;
arm*) echo arm;;
avr*) ninj avr32 avr;;
bfin*) ninj blackfin bfin;;
cris*) echo cris;;
hppa*) ninj parisc hppa;;
i?86*)
# Starting with linux-2.6.24, the 'x86_64' and 'i386'
# trees have been unified into 'x86'.
# FreeBSD still uses i386
if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -lt $(KV_to_int 2.6.24) || ${host} == *freebsd* ]] ; then
echo i386
else
echo x86
fi
;;
ia64*) echo ia64;;
m68*) echo m68k;;
mips*) echo mips;;
nios2*) echo nios2;;
nios*) echo nios;;
powerpc*)
# Starting with linux-2.6.15, the 'ppc' and 'ppc64' trees
# have been unified into simply 'powerpc', but until 2.6.16,
# ppc32 is still using ARCH="ppc" as default
if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -ge $(KV_to_int 2.6.16) ]] ; then
echo powerpc
elif [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -eq $(KV_to_int 2.6.15) ]] ; then
if [[ ${host} == powerpc64* ]] || [[ ${PROFILE_ARCH} == "ppc64" ]] ; then
echo powerpc
else
echo ppc
fi
elif [[ ${host} == powerpc64* ]] ; then
echo ppc64
elif [[ ${PROFILE_ARCH} == "ppc64" ]] ; then
ninj ppc64 ppc
else
echo ppc
fi
;;
s390*) echo s390;;
sh64*) ninj sh64 sh;;
sh*) echo sh;;
sparc64*) ninj sparc64 sparc;;
sparc*) [[ ${PROFILE_ARCH} == "sparc64" ]] \
&& ninj sparc64 sparc \
|| echo sparc
;;
vax*) echo vax;;
x86_64*freebsd*) echo amd64;;
x86_64*)
# Starting with linux-2.6.24, the 'x86_64' and 'i386'
# trees have been unified into 'x86'.
if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -ge $(KV_to_int 2.6.24) ]] ; then
echo x86
else
ninj x86_64 amd64
fi
;;
# since our usage of tc-arch is largely concerned with
# normalizing inputs for testing ${CTARGET}, let's filter
# other cross targets (mingw and such) into the unknown.
*) echo unknown;;
esac
}
# @FUNCTION: tc-arch-kernel
# @USAGE: [toolchain prefix]
# @RETURN: name of the kernel arch according to the compiler target
tc-arch-kernel() {
# Delegate to the tuple parser in kernel-naming mode (e.g. parisc, arm64).
tc-ninja_magic_to_arch kern "$@"
}
# @FUNCTION: tc-arch
# @USAGE: [toolchain prefix]
# @RETURN: name of the portage arch according to the compiler target
tc-arch() {
# Delegate to the tuple parser in portage-naming mode (e.g. hppa, amd64).
tc-ninja_magic_to_arch portage "$@"
}
# Echo the byte order ("big" or "little") of the given target tuple,
# defaulting to CTARGET then CHOST; echoes "wtf" for unknown CPUs.
tc-endian() {
	# Explicit argument wins, then CTARGET, then CHOST.
	local tuple=${1:-${CTARGET:-${CHOST}}}
	# Only the leading CPU field of the tuple matters.
	tuple=${tuple%%-*}
	case ${tuple} in
		aarch64*be) echo big;;
		aarch64) echo little;;
		alpha*) echo big;;
		arm*b*) echo big;;
		arm*) echo little;;
		cris*) echo little;;
		hppa*) echo big;;
		i?86*) echo little;;
		ia64*) echo little;;
		m68*) echo big;;
		mips*l*) echo little;;
		mips*) echo big;;
		powerpc*) echo big;;
		s390*) echo big;;
		sh*b*) echo big;;
		sh*) echo little;;
		sparc*) echo big;;
		x86_64*) echo little;;
		*) echo wtf;;
	esac
}
# Internal helper: expand the format string in $1 against the compiler's
# version macros.  Remaining args are the toolchain prefix for tc-getCPP.
# Query the preprocessor to improve compatibility across different
# compilers rather than maintaining a --version flag matrix. #335943
_gcc_fullversion() {
	local spec="$1"; shift
	# Let the preprocessor expand the three version macros, then bind the
	# results to the positional parameters $1 $2 $3 for the eval below.
	set -- $($(tc-getCPP "$@") -E -P - <<<"__GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__")
	eval echo "${spec}"
}
# @FUNCTION: gcc-fullversion
# @RETURN: compiler version (major.minor.micro: [3.4.6])
# Any arguments are forwarded to _gcc_fullversion as the toolchain prefix.
gcc-fullversion() {
_gcc_fullversion '$1.$2.$3' "$@"
}
# @FUNCTION: gcc-version
# @RETURN: compiler version (major.minor: [3.4].6)
gcc-version() {
_gcc_fullversion '$1.$2' "$@"
}
# @FUNCTION: gcc-major-version
# @RETURN: major compiler version (major: [3].4.6)
gcc-major-version() {
_gcc_fullversion '$1' "$@"
}
# @FUNCTION: gcc-minor-version
# @RETURN: minor compiler version (minor: 3.[4].6)
gcc-minor-version() {
_gcc_fullversion '$2' "$@"
}
# @FUNCTION: gcc-micro-version
# @RETURN: micro compiler version (micro: 3.4.[6])
gcc-micro-version() {
_gcc_fullversion '$3' "$@"
}
# Returns the installation directory - internal toolchain
# function for use by _gcc-specs-exists (for flag-o-matic).
_gcc-install-dir() {
	# LC_ALL=C keeps the "install:" label parseable regardless of locale.
	local search_dirs
	search_dirs=$(LC_ALL=C $(tc-getCC) -print-search-dirs 2> /dev/null)
	echo "${search_dirs}" | awk '$1=="install:" {print $2}'
}
# Returns true if the indicated specs file exists - internal toolchain
# function for use by flag-o-matic.
#
# $1 - specs file name, relative to gcc's install directory
_gcc-specs-exists() {
[[ -f $(_gcc-install-dir)/$1 ]]
}
# Returns requested gcc specs directive unprocessed - for used by
# gcc-specs-directive()
# Note; later specs normally overwrite earlier ones; however if a later
# spec starts with '+' then it appends.
# gcc -dumpspecs is parsed first, followed by files listed by "gcc -v"
# as "Reading <file>", in order. Strictly speaking, if there's a
# $(gcc_install_dir)/specs, the built-in specs aren't read, however by
# the same token anything from 'gcc -dumpspecs' is overridden by
# the contents of $(gcc_install_dir)/specs so the result is the
# same either way.
#
# $1 - specs directive name (e.g. "link_command", "cc1")
_gcc-specs-directive_raw() {
local cc=$(tc-getCC)
# Collect the spec files gcc actually reads, in the order it reads them.
local specfiles=$(LC_ALL=C ${cc} -v 2>&1 | awk '$1=="Reading" {print $NF}')
# awk state machine: a "*name:" header opens a spec body; a body line
# starting with '+' appends to the previous definition of this directive,
# any other body line replaces it.  The last definition wins (END).
${cc} -dumpspecs 2> /dev/null | cat - ${specfiles} | awk -v directive=$1 \
'BEGIN { pspec=""; spec=""; outside=1 }
$1=="*"directive":" { pspec=spec; spec=""; outside=0; next }
outside || NF==0 || ( substr($1,1,1)=="*" && substr($1,length($1),1)==":" ) { outside=1; next }
spec=="" && substr($0,1,1)=="+" { spec=pspec " " substr($0,2); next }
{ spec=spec $0 }
END { print spec }'
return 0
}
# Return the requested gcc specs directive, with all included
# specs expanded.
# Note, it does not check for inclusion loops, which cause it
# to never finish - but such loops are invalid for gcc and we're
# assuming gcc is operational.
#
# $1 - specs directive name (e.g. "link_command", "cc1")
gcc-specs-directive() {
local directive subdname subdirective
directive="$(_gcc-specs-directive_raw $1)"
# Repeatedly expand "%(name)" inclusions until none remain.
while [[ ${directive} == *%\(*\)* ]]; do
# Extract the name between the first "%(" and its closing ")".
subdname=${directive/*%\(}
subdname=${subdname/\)*}
subdirective="$(_gcc-specs-directive_raw ${subdname})"
# Substitute every occurrence of this inclusion with its body.
directive="${directive//\%(${subdname})/${subdirective}}"
done
echo "${directive}"
return 0
}
# The gcc-specs-* predicates below test whether gcc's built-in specs enable a
# hardening feature by default, by checking for the corresponding
# "{!no<feature>:" guard in the relevant specs directive.
# (These previously used `return $([[ ... ]])`, which only worked because
# `return` with an empty argument propagates the command substitution's exit
# status; the bare [[ ]] test is equivalent and not fragile.)
# Returns true if gcc sets relro
gcc-specs-relro() {
	local directive
	directive=$(gcc-specs-directive link_command)
	[[ "${directive/\{!norelro:}" != "${directive}" ]]
}
# Returns true if gcc sets now
gcc-specs-now() {
	local directive
	directive=$(gcc-specs-directive link_command)
	[[ "${directive/\{!nonow:}" != "${directive}" ]]
}
# Returns true if gcc builds PIEs
gcc-specs-pie() {
	local directive
	directive=$(gcc-specs-directive cc1)
	[[ "${directive/\{!nopie:}" != "${directive}" ]]
}
# Returns true if gcc builds with the stack protector
gcc-specs-ssp() {
	local directive
	directive=$(gcc-specs-directive cc1)
	[[ "${directive/\{!fno-stack-protector:}" != "${directive}" ]]
}
# Returns true if gcc upgrades fstack-protector to fstack-protector-all
gcc-specs-ssp-to-all() {
	local directive
	directive=$(gcc-specs-directive cc1)
	[[ "${directive/\{!fno-stack-protector-all:}" != "${directive}" ]]
}
# Returns true if gcc builds with fno-strict-overflow
gcc-specs-nostrict() {
	local directive
	directive=$(gcc-specs-directive cc1)
	[[ "${directive/\{!fstrict-overflow:}" != "${directive}" ]]
}
# @FUNCTION: gen_usr_ldscript
# @USAGE: [-a] <list of libs to create linker scripts for>
# @DESCRIPTION:
# This function generate linker scripts in /usr/lib for dynamic
# libs in /lib. This is to fix linking problems when you have
# the .so in /lib, and the .a in /usr/lib. What happens is that
# in some cases when linking dynamic, the .a in /usr/lib is used
# instead of the .so in /lib due to gcc/libtool tweaking ld's
# library search path. This causes many builds to fail.
# See bug #4411 for more info.
#
# Note that you should in general use the unversioned name of
# the library (libfoo.so), as ldconfig should usually update it
# correctly to point to the latest version of the library present.
gen_usr_ldscript() {
local lib libdir=$(get_libdir) output_format="" auto=false suffix=$(get_libname)
# Fall back to a locally-computed ED under EAPIs that don't define it.
[[ -z ${ED+set} ]] && local ED=${D%/}${EPREFIX}/
tc-is-static-only && return
# Eventually we'd like to get rid of this func completely #417451
case ${CTARGET:-${CHOST}} in
*-darwin*) ;;
*linux*|*-freebsd*|*-openbsd*|*-netbsd*)
use prefix && return 0 ;;
*) return 0 ;;
esac
# Just make sure it exists
dodir /usr/${libdir}
# With -a ("auto" mode), arguments are bare names and the versioned lib is
# moved from /usr/$(libdir) into /$(libdir) before the script is written.
if [[ $1 == "-a" ]] ; then
auto=true
shift
dodir /${libdir}
fi
# OUTPUT_FORMAT gives hints to the linker as to what binary format
# is referenced ... makes multilib saner
output_format=$($(tc-getCC) ${CFLAGS} ${LDFLAGS} -Wl,--verbose 2>&1 | sed -n 's/^OUTPUT_FORMAT("\([^"]*\)",.*/\1/p')
[[ -n ${output_format} ]] && output_format="OUTPUT_FORMAT ( ${output_format} )"
for lib in "$@" ; do
local tlib
if ${auto} ; then
lib="lib${lib}${suffix}"
else
# Ensure /lib/${lib} exists to avoid dangling scripts/symlinks.
# This especially is for AIX where $(get_libname) can return ".a",
# so /lib/${lib} might be moved to /usr/lib/${lib} (by accident).
[[ -r ${ED}/${libdir}/${lib} ]] || continue
#TODO: better die here?
fi
case ${CTARGET:-${CHOST}} in
*-darwin*)
# Mach-O has no linker scripts: read the library's install_name and
# replace the /usr copy with a symlink instead (see below).
if ${auto} ; then
tlib=$(scanmacho -qF'%S#F' "${ED}"/usr/${libdir}/${lib})
else
tlib=$(scanmacho -qF'%S#F' "${ED}"/${libdir}/${lib})
fi
[[ -z ${tlib} ]] && die "unable to read install_name from ${lib}"
tlib=${tlib##*/}
if ${auto} ; then
mv "${ED}"/usr/${libdir}/${lib%${suffix}}.*${suffix#.} "${ED}"/${libdir}/ || die
# some install_names are funky: they encode a version
if [[ ${tlib} != ${lib%${suffix}}.*${suffix#.} ]] ; then
mv "${ED}"/usr/${libdir}/${tlib%${suffix}}.*${suffix#.} "${ED}"/${libdir}/ || die
fi
rm -f "${ED}"/${libdir}/${lib}
fi
# Mach-O files have an id, which is like a soname, it tells how
# another object linking against this lib should reference it.
# Since we moved the lib from usr/lib into lib this reference is
# wrong. Hence, we update it here. We don't configure with
# libdir=/lib because that messes up libtool files.
# Make sure we don't lose the specific version, so just modify the
# existing install_name
# NOTE(review): "${ED}${libdir}" below (no separating slash) relies on
# ED ending in '/' — true for the local fallback above; confirm for a
# package-manager-provided ED.
if [[ ! -w "${ED}/${libdir}/${tlib}" ]] ; then
chmod u+w "${ED}${libdir}/${tlib}" # needed to write to it
local nowrite=yes
fi
install_name_tool \
-id "${EPREFIX}"/${libdir}/${tlib} \
"${ED}"/${libdir}/${tlib} || die "install_name_tool failed"
[[ -n ${nowrite} ]] && chmod u-w "${ED}${libdir}/${tlib}"
# Now as we don't use GNU binutils and our linker doesn't
# understand linker scripts, just create a symlink.
pushd "${ED}/usr/${libdir}" > /dev/null
ln -snf "../../${libdir}/${tlib}" "${lib}"
popd > /dev/null
;;
*)
# ELF platforms: write a GNU ld script into /usr/$(libdir) that
# redirects the linker to the real library under /$(libdir).
if ${auto} ; then
tlib=$(scanelf -qF'%S#F' "${ED}"/usr/${libdir}/${lib})
[[ -z ${tlib} ]] && die "unable to read SONAME from ${lib}"
mv "${ED}"/usr/${libdir}/${lib}* "${ED}"/${libdir}/ || die
# some SONAMEs are funky: they encode a version before the .so
if [[ ${tlib} != ${lib}* ]] ; then
mv "${ED}"/usr/${libdir}/${tlib}* "${ED}"/${libdir}/ || die
fi
rm -f "${ED}"/${libdir}/${lib}
else
tlib=${lib}
fi
cat > "${ED}/usr/${libdir}/${lib}" <<-END_LDSCRIPT
/* GNU ld script
Since Gentoo has critical dynamic libraries in /lib, and the static versions
in /usr/lib, we need to have a "fake" dynamic lib in /usr/lib, otherwise we
run into linking problems. This "fake" dynamic lib is a linker script that
redirects the linker to the real lib. And yes, this works in the cross-
compiling scenario as the sysroot-ed linker will prepend the real path.
See bug http://bugs.gentoo.org/4411 for more info.
*/
${output_format}
GROUP ( ${EPREFIX}/${libdir}/${tlib} )
END_LDSCRIPT
;;
esac
fperms a+x "/usr/${libdir}/${lib}" || die "could not change perms on ${lib}"
done
}
#
# ChromiumOS extensions below here.
#
# Returns true if gcc builds PIEs
# For ARM, readelf -h | grep Type always has REL instead of EXEC.
# That is why we have to read the flags one by one and check them instead
# of test-compiling a small program.
gcc-pie() {
	# Fix: declare the loop variable local so it doesn't leak into the
	# caller's environment (the original omitted this).
	local flag
	# Walk the cc1 command line in reverse (tac) so the last PIE/PIC flag
	# seen decides the answer — mirroring "last flag wins" semantics.
	for flag in $(echo "void f(){char a[100];}" | \
			${CTARGET}-gcc -v -xc -c -o /dev/null - 2>&1 | \
			grep cc1 | \
			tr " " "\n" | \
			tac)
	do
		if [[ ${flag} == "-fPIE" || ${flag} == "-fPIC" ]]
		then
			return 0
		elif [[ ${flag} == "-fno-PIE" || ${flag} == "-fno-PIC" ]]
		then
			return 1
		fi
	done
	# No PIE/PIC flag on the cc1 line: compiler does not default to PIE.
	return 1
}
# Returns true if gcc builds with the stack protector
gcc-ssp() {
	# Compile a function with a stack array (enough to trigger stack
	# protector instrumentation) and look for a __stack_chk_fail symbol.
	local obj=$(mktemp)
	echo "void f(){char a[100];}" | ${CTARGET}-gcc -xc -c -o "${obj}" -
	local ret=1
	if ${CTARGET}-readelf -sW "${obj}" | grep -q stack_chk_fail ; then
		ret=0
	fi
	# Fix: the original leaked the mktemp file; remove it.  Also replaces
	# the fragile `return $(... grep -q ...)` idiom with an explicit status.
	rm -f "${obj}"
	return ${ret}
}
# Sets up environment variables required to build with Clang
# This should be replaced with a sysroot wrapper ala GCC if/when
# we get serious about building with Clang.
clang-setup-env() {
# No-op unless the "clang" USE flag is enabled.
use clang || return 0
case ${ARCH} in
amd64|x86)
export CC="clang" CXX="clang++"
# Clang has no sysroot wrapper here, so pass the board sysroot and
# the gold linker directory explicitly via compiler flags.
append-flags --sysroot="${SYSROOT}"
append-flags -B$(get_binutils_path_gold)
# Some boards use optimizations (e.g. -mfpmath=sse) that
# clang does not support.
append-flags -Qunused-arguments
;;
*) die "Clang is not yet supported for ${ARCH}"
esac
if use asan; then
# AddressSanitizer; keep frame pointers for usable stack traces.
append-flags -fsanitize=address -fno-omit-frame-pointer
fi
}
fi

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,132 @@
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# $Header: $
#
# useradd.eclass
#
# Adds a mechanism for adding users/groups into alternate roots.
#
# This will likely go away.
#
# Authors:
# Google, inc. <chromium-os-dev@chromium.org>
#
HOMEPAGE="http://www.chromium.org/"
# Before we manipulate users at all, we want to make sure that
# passwd/group/shadow is initialized in the first place. That's
# what baselayout does.
# (baselayout itself is excluded to avoid a self-dependency.)
if [ "${PN}" != "baselayout" ]; then
DEPEND="sys-apps/baselayout"
RDEPEND="sys-apps/baselayout"
fi
# Tests if the user already exists in the passwd file under ${ROOT}.
# Exit status 0 when found, non-zero otherwise.
#
# $1 - Username (e.g. "messagebus")
user_exists() {
	local passwd_file="${ROOT}/etc/passwd"
	grep -q "^$1\:" "${passwd_file}" 2> /dev/null
}
# Tests if the group already exists in the group file under ${ROOT}.
# Exit status 0 when found, non-zero otherwise.
#
# $1 - Groupname (e.g. "messagebus")
group_exists() {
	local group_file="${ROOT}/etc/group"
	grep -q "^$1\:" "${group_file}" 2> /dev/null
}
# Add entry to /etc/passwd under ${ROOT}; skips (with a log message)
# if the user is already present.
#
# $1 - Username (e.g. "messagebus")
# $2 - "*" to indicate not shadowed, "x" to indicate shadowed
# $3 - UID (e.g. 200)
# $4 - GID (e.g. 200)
# $5 - full name (e.g. "")
# $6 - home dir (e.g. "/home/foo" or "/var/run/dbus")
# $7 - shell (e.g. "/bin/sh" or "/bin/false")
add_user() {
	local username="$1"
	if user_exists "${username}"; then
		elog "Skipping add_user of existing user: '${username}'"
		return
	fi
	local entry="${1}:${2}:${3}:${4}:${5}:${6}:${7}"
	echo "${entry}" >> "${ROOT}/etc/passwd"
}
# Remove entry from /etc/passwd under ${ROOT}.
# Exit status is non-zero when the passwd file does not exist.
#
# $1 - Username
remove_user() {
	local passwd_file="${ROOT}/etc/passwd"
	[ -e "${passwd_file}" ] && sed -i -e "/^${1}:.*\$/d" "${passwd_file}"
}
# Add entry to /etc/shadow under ${ROOT} with fixed aging fields
# (lastchg=14500, min=0, max=99999).
#
# $1 - Username
# $2 - Crypted password
add_shadow() {
	local shadow_entry="${1}:${2}:14500:0:99999::::"
	echo "${shadow_entry}" >> "${ROOT}/etc/shadow"
}
# Remove entry from /etc/shadow under ${ROOT}.
# Exit status is non-zero when the shadow file does not exist.
#
# $1 - Username
remove_shadow() {
	local shadow_file="${ROOT}/etc/shadow"
	[ -e "${shadow_file}" ] && sed -i -e "/^${1}:.*\$/d" "${shadow_file}"
}
# Add entry to /etc/group under ${ROOT}; skips (with a log message)
# if the group is already present.
# $1 - Groupname (e.g. "messagebus")
# $2 - GID (e.g. 200)
add_group() {
	if group_exists "$1"; then
		elog "Skipping add_group of existing group: '$1'"
		return
	fi
	local group_entry="${1}:x:${2}:"
	echo "${group_entry}" >> "${ROOT}/etc/group"
}
# Copies user entry from host passwd file if it already exists or else
# creates a new user using add_user.  Copying keeps UIDs consistent with
# the build host.
#
# See add_user for argument list.
copy_or_add_user() {
	# Fix: the original declared `username` but never used it; use it
	# consistently instead of mixing in bare "$1".
	local username="$1"
	if user_exists "${username}"; then
		elog "Skipping copy_or_add_user of existing user '${username}'"
		return
	fi
	local entry=$(grep -e "^${username}\:" /etc/passwd)
	if [ -n "${entry}" ]; then
		elog "Copying existing passwd entry from root: '${entry}'"
		echo "${entry}" >> "${ROOT}/etc/passwd"
	else
		add_user "$@"
	fi
}
# Copies group entry from host group file if it already exists or else
# creates a new group using add_group.  Copying keeps GIDs consistent with
# the build host.
#
# See add_group for argument list.
copy_or_add_group() {
	# Fix: the original declared `groupname` but never used it; use it
	# consistently instead of mixing in bare "$1".
	local groupname="$1"
	if group_exists "${groupname}"; then
		elog "Skipping copy_or_add_group of existing group '${groupname}'"
		return
	fi
	local entry=$(grep -e "^${groupname}\:" /etc/group)
	if [ -n "${entry}" ]; then
		elog "Copying existing group entry from root: '${entry}'"
		echo "${entry}" >> "${ROOT}/etc/group"
	else
		add_group "$@"
	fi
}

View File

@ -47,3 +47,37 @@
=dev-libs/nspr-4.9.2 -* ~arm ~x86 ~amd64
=dev-libs/nss-3.14 -* ~arm ~x86 ~amd64
=app-crypt/nss-3.14 -* ~arm ~x86 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=dev-python/ws4py-0.2.4 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=dev-libs/dbus-c++-0.9.0-r1 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=dev-embedded/smdk-dltool-0.20-r3 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=sys-fs/libfat-0.3a ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=dev-cpp/gflags-2.0 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=sys-apps/dtc-9999 **
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=dev-python/pyusb-1.0.0_alpha3 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=sys-libs/libnih-1.0.3 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=dev-lang/closure-compiler-bin-20120305 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=sys-devel/clang-3.2 ~amd64
#required by coreos-base/cros-devutils-0.0.1-r516, required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=dev-util/shflags-1.0.3 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=net-misc/gsutil-3.21 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=dev-util/perf-3.4 ~amd64
#required by net-misc/gsutil-3.21, required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=dev-python/boto-2.7.0 ~amd64
#required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=sys-devel/smatch-1.57-r1 ~amd64
#required by sys-devel/clang-3.2, required by coreos-base/hard-host-depends-0.0.1-r145, required by coreos-base/hard-host-depends (argument)
=sys-devel/llvm-3.2 ~amd64

View File

@ -21,13 +21,11 @@ media-libs/libmtp -crypt
media-libs/mesa -llvm -gallium -classic
media-sound/alsa-utils -libsamplerate minimal
net-misc/curl ares static-libs
net-wireless/bluez -consolekit
sci-geosciences/gpsd -cxx
# verity and other packages link statically with libuuid.
sys-apps/util-linux -perl static-libs
sys-boot/grub grub_platforms_pc grub_platforms_efi-64 grub_platforms_coreboot static
sys-devel/gettext -git
x11-libs/cairo X
# Build emulation statically so that we can execute it within a chroot and
# still find the shared libraries.
net-misc/dhcpcd -crash
@ -36,7 +34,7 @@ net-misc/dhcpcd -crash
# this comment was here:
# build kvm with X and sdl so we have an option of running it with local
# display without VNC
app-emulation/qemu-kvm static qemu_user_targets_arm qemu_user_targets_i386 qemu_user_targets_x86_64
app-emulation/qemu aio caps curl jpeg ncurses png python seccomp threads uuid vhost-net vnc static qemu_softmmu_targets_arm qemu_softmmu_targets_i386 qemu_softmmu_targets_x86_64
dev-libs/libaio static-libs
cross-armv7a-cros-linux-gnueabi/gcc hardfp
cross-armv6j-cros-linux-gnueabi/gcc hardfp -thumb

View File

@ -1 +1 @@
chromiumos
coreos

View File

@ -21,7 +21,6 @@ dev-python/pyudev pygobject
dev-util/dialog -unicode minimal
dev-util/perf -doc -demangle -tui -ncurses -perl -python
dev-util/perf-next -doc -demangle -tui -ncurses -perl -python
coreos-base/vboot_reference minimal
media-gfx/imagemagick png
media-libs/freeimage png
media-libs/libdvdread -css