maint(eclass/*): Delete a whole pile of old unused eclasses.

This commit is contained in:
Michael Marineau 2014-02-14 13:45:38 -08:00
parent 05b70651dd
commit 9015fb0881
12 changed files with 0 additions and 6668 deletions

View File

@ -1,43 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# $Header: $
# @ECLASS: appid.eclass
# @MAINTAINER:
# ChromiumOS Build Team
# @BUGREPORTS:
# Please report bugs via http://crosbug.com/new (with label Area-Build)
# @VCSURL: http://git.chromium.org/gitweb/?p=chromiumos/overlays/chromiumos-overlay.git;a=blob;f=eclass/@ECLASS@
# @BLURB: Eclass for setting up the omaha appid field in /etc/lsb-release
# @FUNCTION: doappid
# @USAGE: <appid>
# @DESCRIPTION:
# Initializes /etc/lsb-release with the appid. Note that appid is really
# just a UUID in the canonical {8-4-4-4-12} format (all uppercase). e.g.
# {01234567-89AB-CDEF-0123-456789ABCDEF}
# doappid <appid>
# Record the omaha appid in a freshly created ${D}/etc/lsb-release.
# The appid must be a canonical, all-uppercase {8-4-4-4-12} UUID.
doappid() {
	[[ $# -eq 1 && -n $1 ]] || die "Usage: ${FUNCNAME} <appid>"
	local appid=$1

	# The mario appid predates the strict format rules and is grandfathered
	# in, so it skips validation entirely.
	if [[ ${appid} != '{87efface-864d-49a5-9bb3-4b050a7c227a}' ]] ; then
		local uuid_regex='[{][0-9A-F]{8}-([0-9A-F]{4}-){3}[0-9A-F]{12}[}]'
		# Delete a well-formed UUID from the string; anything left over
		# means the input did not match the canonical format.
		local leftover
		leftover=$(echo "${appid}" | LC_ALL=C sed -r "s:${uuid_regex}::")
		if [[ -n ${leftover} ]] ; then
			eerror "Invalid appid: ${appid} -> ${leftover}"
			eerror " - must start with '{' and end with '}'"
			eerror " - must be all upper case"
			eerror " - be a valid UUID (8-4-4-4-12 hex digits)"
			die "invalid appid: ${appid}"
		fi
	fi

	dodir /etc
	local lsb="${D}/etc/lsb-release"
	# Refuse to clobber an existing file -- only one ebuild may set the appid.
	[[ -e ${lsb} ]] && die "${lsb} already exists!"
	echo "CHROMEOS_RELEASE_APPID=${appid}" > "${lsb}" || die "creating ${lsb} failed!"
}

View File

@ -1,92 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
# @ECLASS: cros-coreboot.eclass
# @MAINTAINER:
# The Chromium OS Authors
# @BLURB: Unifies logic for building coreboot images for Chromium OS.
# This eclass only supports EAPI 4 ebuilds.
[[ ${EAPI} != "4" ]] && die "Only EAPI=4 is supported"
inherit toolchain-funcs
DESCRIPTION="coreboot x86 firmware"
HOMEPAGE="http://www.coreboot.org"
LICENSE="GPL-2"
SLOT="0"
IUSE="em100-mode"
# Block the legacy package name that this eclass replaces.
RDEPEND="!sys-boot/chromeos-coreboot"
DEPEND="sys-power/iasl
sys-apps/coreboot-utils
sys-boot/chromeos-mrc
"
# @ECLASS-VARIABLE: COREBOOT_BOARD
# @DESCRIPTION:
# Coreboot Configuration name.
: ${COREBOOT_BOARD:=}
# @ECLASS-VARIABLE: COREBOOT_BUILD_ROOT
# @DESCRIPTION:
# Build directory root
: ${COREBOOT_BUILD_ROOT:=}
# Both variables are mandatory; fail at inherit time rather than mid-build.
[[ -z ${COREBOOT_BOARD} ]] && die "COREBOOT_BOARD must be set"
[[ -z ${COREBOOT_BUILD_ROOT} ]] && die "COREBOOT_BUILD_ROOT must be set"
# Seed the build's .config from the board-specific default configuration.
# The original left the cp unchecked, so a missing/mistyped board config
# silently produced a stale or absent .config and a confusing later failure.
cros-coreboot_pre_src_prepare() {
	cp configs/config.${COREBOOT_BOARD} .config || \
		die "Failed to copy configs/config.${COREBOOT_BOARD}"
}
# Build coreboot.rom (plus the cbmem utility), optionally patching the
# flash descriptor for the EM100 emulator.
cros-coreboot_src_compile() {
	tc-export CC
	local build_root="${COREBOOT_BUILD_ROOT}"

	# Set KERNELREVISION (really coreboot revision) to the ebuild revision
	# number followed by a dot and the first seven characters of the git
	# hash. The name is confusing but consistent with the coreboot
	# Makefile.
	local sha1v="${VCSID/*-/}"
	export KERNELREVISION=".${PV}.${sha1v:0:7}"

	# Firmware related binaries are compiled with a 32-bit toolchain
	# on 64-bit platforms.
	if use amd64 ; then
		export CROSS_COMPILE="i686-pc-linux-gnu-"
		# Fix: CROSS_COMPILE already ends in "-"; the previous
		# "${CROSS_COMPILE}-gcc" produced "i686-pc-linux-gnu--gcc".
		export CC="${CROSS_COMPILE}gcc"
	else
		export CROSS_COMPILE=${CHOST}-
	fi

	elog "Toolchain:\n$(sh util/xcompile/xcompile)\n"
	emake obj="${build_root}" oldconfig
	emake obj="${build_root}"

	# Modify firmware descriptor if building for the EM100 emulator.
	if use em100-mode; then
		ifdtool --em100 "${build_root}/coreboot.rom" || die
		mv "${build_root}/coreboot.rom"{.new,} || die
	fi

	# Build cbmem for the target (guard the cd so emake cannot run in the
	# wrong directory).
	cd util/cbmem || die
	emake clean
	CROSS_COMPILE="${CHOST}-" emake
}
# Install the firmware image, the cbmem tool, and the VGA option ROM
# (renamed to the pciVVVV,DDDD.rom form that cbfstool expects).
cros-coreboot_src_install() {
dobin util/cbmem/cbmem
insinto /firmware
newins "${COREBOOT_BUILD_ROOT}/coreboot.rom" coreboot.rom
# Pull the option-ROM source path out of the board's coreboot config
# (value of CONFIG_VGA_BIOS_FILE, between the double quotes).
OPROM=$( awk 'BEGIN{FS="\""} /CONFIG_VGA_BIOS_FILE=/ { print $2 }' \
configs/config.${COREBOOT_BOARD} )
# Target name is pci<vendor,device>.rom taken from CONFIG_VGA_BIOS_ID.
CBFSOPROM=pci$( awk 'BEGIN{FS="\""} /CONFIG_VGA_BIOS_ID=/ { print $2 }' \
configs/config.${COREBOOT_BOARD} ).rom
# NOTE(review): OPROM/CBFSOPROM are intentionally not local; presumably no
# caller relies on them, but confirm before scoping them.
newins ${OPROM} ${CBFSOPROM}
}
EXPORT_FUNCTIONS src_compile src_install pre_src_prepare

View File

@ -1,31 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Eclass for use by ebuilds that need to know the debug serial port.
#
# This eclass only supports EAPI 4 ebuilds.
[[ ${EAPI} != "4" ]] && die "Only EAPI=4 is supported"
# Each candidate serial port is exposed as a "serial_use_<port>" USE flag.
SERIAL_USE_PREFIX="serial_use_"
# All serial device names a board might use as its debug console.
ALL_SERIALPORTS=(
ttyAMA{0..5}
ttyO{0..5}
ttyS{0..5}
ttySAC{0..5}
)
# Expand to one USE flag per candidate port.
IUSE=${ALL_SERIALPORTS[@]/#/${SERIAL_USE_PREFIX}}
# Echo the name of the debug serial port (e.g. "ttyS0") selected via the
# serial_use_* USE flags; die if no flag is enabled.
get_serial_name() {
	local candidate
	for candidate in "${ALL_SERIALPORTS[@]}"; do
		use ${SERIAL_USE_PREFIX}${candidate} || continue
		echo ${candidate}
		return
	done
	die "Unable to determine current serial port."
}

View File

@ -1,305 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Install Gobi firmware for Chromium OS
#
# @ECLASS-VARIABLE: GOBI_FIRMWARE_OEM
# @DESCRIPTION:
# OEM name for firmware to install
: ${GOBI_FIRMWARE_OEM:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_VID
# @DESCRIPTION:
# OEM Vendor ID
: ${GOBI_FIRMWARE_VID:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_PID
# @DESCRIPTION:
# OEM Product ID
: ${GOBI_FIRMWARE_PID:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_CARRIERS
# @DESCRIPTION:
# Install firmware for this list of carrier numbers
: ${GOBI_FIRMWARE_CARRIERS:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_ZIP_FILE
# @DESCRIPTION:
# Filename of zip file containing firmware
: ${GOBI_FIRMWARE_ZIP_FILE:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_FLAVOR
# @DESCRIPTION:
# The flavor (gps, xtra) to install
: ${GOBI_FIRMWARE_FLAVOR:="gps"}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_QDL
# @DESCRIPTION:
# Install the qdl program from the firmware zip file
: ${GOBI_FIRMWARE_QDL:="no"}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_DEFAULT_CARRIER
# @DESCRIPTION:
# Default carrier firmware to load if not set on modem
: ${GOBI_FIRMWARE_DEFAULT_CARRIER:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_APPS_DIR
# @DESCRIPTION:
# directory name for the .apps files
: ${GOBI_FIRMWARE_APPS_DIR:=""}
# Numeric carrier IDs as used in the Gobi firmware directory layout.
# These match the values in GOBI_FIRMWARE_CARRIERS.
GOBI_FIRMWARE_CARRIER_VOD=0
GOBI_FIRMWARE_CARRIER_VZW=1
GOBI_FIRMWARE_CARRIER_ATT=2
GOBI_FIRMWARE_CARRIER_SPRINT=3
GOBI_FIRMWARE_CARRIER_TMO=4
GOBI_FIRMWARE_CARRIER_GEN=6
GOBI_FIRMWARE_CARRIER_TELLFON=7
GOBI_FIRMWARE_CARRIER_TELITAL=8
GOBI_FIRMWARE_CARRIER_ORANGE=9
GOBI_FIRMWARE_CARRIER_DOCO=12
GOBI_FIRMWARE_CARRIER_DELLX=15
GOBI_FIRMWARE_CARRIER_OMH=16
# Check for EAPI 2+
case "${EAPI:-0}" in
4|3|2) ;;
*) die "unsupported EAPI" ;;
esac
# Write a udev rule that hands ownership of this OEM's Gobi qcserial
# ttyUSB nodes to the qdlservice user, so the (unprivileged) firmware
# downloader can open them. Writes into ${D}; dodir must have created
# /etc/udev/rules.d first.
gobi-firmware_install_udev_qcserial_rules() {
local oem=${GOBI_FIRMWARE_OEM}
local vid=${GOBI_FIRMWARE_VID}
local pid=${GOBI_FIRMWARE_PID}
local file=/etc/udev/rules.d/90-ttyusb-qcserial-${oem}.rules
cat > ${D}${file} <<EOF
# 90-ttyusb-qcserial-${oem}.rules
# Sets ownership of Gobi ttyusb devices belonging to qcserial.
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
ACTION!="add", GOTO="ttyusb_qcserial_${oem}_end"
SUBSYSTEM!="tty", GOTO="ttyusb_qcserial_${oem}_end"
KERNEL!="ttyUSB[0-9]*", GOTO="ttyusb_qcserial_${oem}_end"
ATTRS{idVendor}=="${vid}", ATTRS{idProduct}=="${pid}", \
OWNER="qdlservice", GROUP="qdlservice"
LABEL="ttyusb_qcserial_${oem}_end"
EOF
}
# Write a udev rule that emits a gobi_serial_<oem> upstart event (with
# GOBIDEV set to the device node) whenever a matching Gobi ttyUSB device
# appears; the upstart job installed below reacts to that event.
gobi-firmware_install_udev_qdlservice_rules() {
local oem=${GOBI_FIRMWARE_OEM}
local vid=${GOBI_FIRMWARE_VID}
local pid=${GOBI_FIRMWARE_PID}
local file=/etc/udev/rules.d/99-qdlservice-${oem}.rules
cat > ${D}${file} <<EOF
# 99-qdlservice-${oem}.rules
# Emits a signal in response to a Gobi serial device appearing. Upstart will run
# QDLService when it sees this signal.
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
ACTION!="add", GOTO="qdlservice_${oem}_end"
SUBSYSTEM!="tty", GOTO="qdlservice_${oem}_end"
KERNEL!="ttyUSB[0-9]*", GOTO="qdlservice_${oem}_end"
ATTRS{idVendor}=="${vid}", ATTRS{idProduct}=="${pid}", \
RUN+="/sbin/initctl emit gobi_serial_${oem} GOBIDEV=/dev/%k"
LABEL="qdlservice_${oem}_end"
EOF
}
# Create the udev rules directory in ${D} and install both rule files.
# The dodir must run first; the two helpers write directly into ${D}.
gobi-firmware_install_udev_rules() {
dodir /etc/udev/rules.d
gobi-firmware_install_udev_qcserial_rules
gobi-firmware_install_udev_qdlservice_rules
}
# Install the upstart job that runs the OEM's QDLService firmware
# downloader (under minijail as the qdlservice user) when udev emits
# gobi_serial_<oem>. Retries with exponential backoff, power-cycling the
# modems between failed attempts, and reports timing/attempt metrics.
# NOTE: the heredoc below is unquoted, so ${GOBI_FIRMWARE_OEM} expands now
# while the backslash-escaped \$vars survive into the installed script.
gobi-firmware_install_upstart_scripts() {
dodir /etc/init
file=/etc/init/qdlservice-${GOBI_FIRMWARE_OEM}.conf
cat > ${D}${file} <<EOF
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Starts QDLService if a Gobi ttyusb device appears.
start on gobi_serial_${GOBI_FIRMWARE_OEM}
script
set +e
GOBIQDL="/opt/Qualcomm/QDLService2k/QDLService2k${GOBI_FIRMWARE_OEM}"
ret=1
attempt=0
readonly MAX_ATTEMPTS=10
while [ \$ret -ne 0 -a \$attempt -lt \$MAX_ATTEMPTS ]; do
# Exponential backoff - wait (2^attempt) - 1 seconds
sleep \$(((1 << \$attempt) - 1))
starttime=\$(date +%s%N)
/sbin/minijail0 -u qdlservice -g qdlservice -- "\$GOBIQDL" "\$GOBIDEV"
ret=\$?
endtime=\$(date +%s%N)
logger -t qdlservice "attempt \$attempt: \$ret"
attempt=\$((\$attempt + 1))
if [ \$ret -ne 0 ]; then
logger -t qdlservice "resetting..."
/opt/Qualcomm/bin/powercycle-all-gobis
fi
done
download_time=\$(((\$endtime - \$starttime) / 1000000))
METRICROOT=Network.3G.Gobi.FirmwareDownload
metrics_client \$METRICROOT.Time \$download_time 0 10000 20
metrics_client -e \$METRICROOT.Attempts \$attempt \$MAX_ATTEMPTS
exit \$ret
end script
EOF
}
# Locate the OEM firmware (pre-extracted tree, zip of an Images2k tree, or
# zip containing an RPM), then install the carrier firmware images, DID
# files, the optional QDL downloader, and the Options2k<oem>.txt manifest
# that tells QDLService which images to flash.
gobi-firmware_install_firmware_files() {
	local oem=${GOBI_FIRMWARE_OEM}
	local install_qdl=${GOBI_FIRMWARE_QDL}
	local apps_dir=${GOBI_FIRMWARE_APPS_DIR}

	# If the apps directory is not specified, then use the carrier
	# directory. The apps directory should be set to UMTS for most
	# UMTS carriers because they share the same firmware.
	if [ -z "${apps_dir}" ] ; then
		apps_dir=${GOBI_FIRMWARE_DEFAULT_CARRIER}
	fi

	#
	# Installation directories.
	# We could consider installing to more standard locations
	# except that QDLService expects to find files in
	# /opt/Qualcomm.
	#
	local firmware_install_dir=${D}/opt/Qualcomm/Images2k
	local qdl_install_dir=${D}/opt/Qualcomm/QDLService2k
	local log_install_dir=${D}/var/log/
	local oemlog_filename=QDLService2k${oem}.txt
	local log_filename=QDLService2k.txt

	if [ -d Images2k/${oem} ] ; then
		# We already have the firmware extracted, this is easy.
		local base_firmware=Images2k/${oem}
		# Do not install qdl; it will be built with the SDK.
		install_qdl="no"
	else
		[ -z "${GOBI_FIRMWARE_ZIP_FILE}" ] && \
			die "Must specify GOBI_FIRMWARE_ZIP_FILE"
		[ ! -r "${GOBI_FIRMWARE_ZIP_FILE}" ] && \
			die "${GOBI_FIRMWARE_ZIP_FILE} is unreadable"
		mkdir -p "${T}/${oem}"
		unzip ${GOBI_FIRMWARE_ZIP_FILE} -d "${T}/${oem}"
		if [ -d "${T}/${oem}/Images2k/${oem}" ] ; then
			local base_firmware="${T}/${oem}/Images2k/${oem}"
			install_qdl=no
		else
			# Fix: quote so a multi-file find result fails the -z test
			# cleanly instead of blowing up the [ command.
			rpmfile=$(find "${T}/${oem}" -name \*.rpm -print)
			[ -z "${rpmfile}" ] &&
				die "Could not find an RPM file in ${GOBI_FIRMWARE_ZIP_FILE}"
			# Extract the rpm into a scratch directory.
			if [ -d ${oem}_rpm ] ; then
				rm -rf ${oem}_rpm
			fi
			mkdir -p ${oem}_rpm
			rpm2tar -O $rpmfile | tar -C ${oem}_rpm -xvf -
			local base_firmware=${oem}_rpm/opt/Qualcomm/Images2k/${oem}
		fi
	fi

	# Make the target directories. (Fix: dropped the stray, never-defined
	# ${udev_rules_install_dir} that used to be listed here.)
	install -d ${firmware_install_dir}/${oem} ${qdl_install_dir}

	# Install firmware: copy each carrier's base images, then overlay the
	# gps/xtra flavor-specific files on top when present.
	local flavor_firmware=${base_firmware}_${GOBI_FIRMWARE_FLAVOR}
	for carrier in ${GOBI_FIRMWARE_CARRIERS} UMTS ; do
		# copy the base firmware
		cp -af ${base_firmware}/${carrier} ${firmware_install_dir}/${oem}
		if [ -d ${flavor_firmware}/${carrier} ] ; then
			# overlay specific xtra/gps flavor files
			cp -af ${flavor_firmware}/${carrier} ${firmware_install_dir}/${oem}
		fi
	done

	# Copy DID file for this device.
	cp ${base_firmware}/*.did ${firmware_install_dir}/${oem}

	# Create a DID file for generic GOBI devices.
	did_file=$(ls ${base_firmware}/*.did | head -n1)
	# NOTE(review): '-x' tests for an *executable* file; '-e'/'-f' was
	# probably intended -- confirm before changing, since DID files are
	# installed non-executable and this branch therefore always runs.
	if [ ! -x $did_file ] ; then
		# TODO(jglasgow): Move code for 05c6920b to dogfood ebuild
		cp $did_file ${firmware_install_dir}/${oem}/05c6920b.did
	fi

	# Set firmware and directory permissions (read-only for everyone).
	find ${firmware_install_dir}/${oem} -type f -exec chmod 444 {} \;
	find ${firmware_install_dir}/${oem} -type d -exec chmod 555 {} \;

	# Install firmware download program, and associated files.
	# (Fix: quoted POSIX '=' comparison; the old unquoted '==' broke if
	# install_qdl ever expanded empty.)
	if [ "${install_qdl}" = "yes" ] ; then
		local qdl_dir=${oem}_rpm/opt/Qualcomm/QDLService2k
		install -t ${qdl_install_dir} \
			${qdl_dir}/QDLService2k${oem}
		ln -sf /opt/Qualcomm/QDLService2k/QDLService2k${oem} \
			${qdl_install_dir}/QDLService2kGeneric
	fi

	# Ensure the default firmware files exist and create Options2k${oem}.txt,
	# the manifest QDLService reads to find the images to download.
	local image_dir=/opt/Qualcomm/Images2k/${oem}
	local amss_file=${image_dir}/${apps_dir}/amss.mbn
	local apps_file=${image_dir}/${apps_dir}/apps.mbn
	local uqcn_file=${image_dir}/${GOBI_FIRMWARE_DEFAULT_CARRIER}/uqcn.mbn
	for file in $amss_file $apps_file $uqcn_file ; do
		if [ ! -r ${D}${file} ] ; then
			die "Could not find file: $file in ${D}"
		fi
	done
	cat > Options2k${oem}.txt <<EOF
${amss_file}
${apps_file}
${uqcn_file}
EOF
	install -t ${qdl_install_dir} Options2k${oem}.txt
}
# Exported src_install: validate the mandatory eclass variables, then
# install the udev rules, the upstart job, and the firmware files.
gobi-firmware_src_install() {
	# Fail fast if any required eclass variable is unset or empty.
	local var
	for var in GOBI_FIRMWARE_DEFAULT_CARRIER GOBI_FIRMWARE_OEM \
			GOBI_FIRMWARE_VID GOBI_FIRMWARE_PID GOBI_FIRMWARE_CARRIERS ; do
		[ -n "${!var}" ] || die "Must specify ${var}"
	done
	gobi-firmware_install_udev_rules
	gobi-firmware_install_upstart_scripts
	gobi-firmware_install_firmware_files
}
EXPORT_FUNCTIONS src_install

File diff suppressed because it is too large. (Load the diff to view it.)

View File

@ -1,525 +0,0 @@
# Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/eclass/subversion.eclass,v 1.67 2009/05/10 20:33:38 arfrever Exp $
# @ECLASS: subversion.eclass
# @MAINTAINER:
# Akinori Hattori <hattya@gentoo.org>
# Bo Ørsted Andresen <zlin@gentoo.org>
# Arfrever Frehtes Taifersar Arahesis <arfrever@gentoo.org>
#
# Original Author: Akinori Hattori <hattya@gentoo.org>
#
# @BLURB: The subversion eclass is written to fetch software sources from subversion repositories
# @DESCRIPTION:
# The subversion eclass provides functions to fetch, patch and bootstrap
# software sources from subversion repositories.
inherit eutils
# Tag used as a prefix in all of this eclass's error messages.
ESVN="${ECLASS}"
# EAPI 0/1 have no src_prepare phase, so bootstrap runs from src_unpack there.
case "${EAPI:-0}" in
0|1)
EXPORT_FUNCTIONS src_unpack pkg_preinst
;;
*)
EXPORT_FUNCTIONS src_unpack src_prepare pkg_preinst
;;
esac
DESCRIPTION="Based on the ${ECLASS} eclass"
SUBVERSION_DEPEND="dev-vcs/subversion[webdav-neon,webdav-serf]
net-misc/rsync"
if [[ -z "${ESVN_DISABLE_DEPENDENCIES}" ]]; then
DEPEND="${SUBVERSION_DEPEND}"
fi
# @ECLASS-VARIABLE: ESVN_STORE_DIR
# @DESCRIPTION:
# subversion sources store directory. Users may override this in /etc/make.conf
if [[ -z ${ESVN_STORE_DIR} ]]; then
ESVN_STORE_DIR="${PORTAGE_ACTUAL_DISTDIR:-${DISTDIR}}/svn-src"
# Pick a directory with the same permissions now and in the future. Note
# that we cannot just use USERNAME because the eventual effective user when
# doing the svn commands may change - PORTAGE_USERNAME has not taken effect
# yet. Further complicating things, if features userpriv is not set,
# PORTAGE_USERNAME is going to be ignored. We assume that if we enable
# userpriv in the future, we will also set PORTAGE_USERNAME to something
# other than "portage".
# TODO: remove this once we are using consistent users and userpriv settings
# for emerge and emerge-${BOARD}.
ESVN_STORE_DIR="${ESVN_STORE_DIR}/${PORTAGE_USERNAME:-portage}"
fi
# @ECLASS-VARIABLE: ESVN_FETCH_CMD
# @DESCRIPTION:
# subversion checkout command
ESVN_FETCH_CMD="svn checkout"
# @ECLASS-VARIABLE: ESVN_UPDATE_CMD
# @DESCRIPTION:
# subversion update command
ESVN_UPDATE_CMD="svn update"
# @ECLASS-VARIABLE: ESVN_SWITCH_CMD
# @DESCRIPTION:
# subversion switch command
ESVN_SWITCH_CMD="svn switch"
# @ECLASS-VARIABLE: ESVN_OPTIONS
# @DESCRIPTION:
# the options passed to checkout or update. If you want a specific revision see
# ESVN_REPO_URI instead of using -rREV.
ESVN_OPTIONS="${ESVN_OPTIONS:-}"
# @ECLASS-VARIABLE: ESVN_REPO_URI
# @DESCRIPTION:
# repository uri
#
# e.g. http://foo/trunk, svn://bar/trunk, svn://bar/branch/foo@1234
#
# supported protocols:
# http://
# https://
# svn://
# svn+ssh://
#
# to peg to a specific revision, append @REV to the repo's uri
ESVN_REPO_URI="${ESVN_REPO_URI:-}"
# @ECLASS-VARIABLE: ESVN_REVISION
# @DESCRIPTION:
# User configurable revision checkout or update to from the repository
#
# Useful for live svn or trunk svn ebuilds allowing the user to peg
# to a specific revision
#
# Note: This should never be set in an ebuild!
ESVN_REVISION="${ESVN_REVISION:-}"
# @ECLASS-VARIABLE: ESVN_USER
# @DESCRIPTION:
# User name
ESVN_USER="${ESVN_USER:-}"
# @ECLASS-VARIABLE: ESVN_PASSWORD
# @DESCRIPTION:
# Password
ESVN_PASSWORD="${ESVN_PASSWORD:-}"
# @ECLASS-VARIABLE: ESVN_PROJECT
# @DESCRIPTION:
# project name of your ebuild (= name space)
#
# subversion eclass will check out the subversion repository like:
#
# ${ESVN_STORE_DIR}/${ESVN_PROJECT}/${ESVN_REPO_URI##*/}
#
# so if you define ESVN_REPO_URI as http://svn.collab.net/repo/svn/trunk or
# http://svn.collab.net/repo/svn/trunk/. and PN is subversion-svn.
# it will check out like:
#
# ${ESVN_STORE_DIR}/subversion/trunk
#
# this is not used in order to declare the name of the upstream project.
# so that you can declare this like:
#
# # jakarta commons-loggin
# ESVN_PROJECT=commons/logging
#
# default: ${PN/-svn}.
ESVN_PROJECT="${ESVN_PROJECT:-${PN/-svn}}"
# @ECLASS-VARIABLE: ESVN_BOOTSTRAP
# @DESCRIPTION:
# bootstrap script or command like autogen.sh or etc..
ESVN_BOOTSTRAP="${ESVN_BOOTSTRAP:-}"
# @ECLASS-VARIABLE: ESVN_PATCHES
# @DESCRIPTION:
# subversion eclass can apply patches in subversion_bootstrap().
# you can use regexp in this variable like *.diff or *.patch or etc.
# NOTE: patches will be applied before ESVN_BOOTSTRAP is processed.
#
# Patches are searched both in ${PWD} and ${FILESDIR}, if not found in either
# location, the installation dies.
ESVN_PATCHES="${ESVN_PATCHES:-}"
# @ECLASS-VARIABLE: ESVN_RESTRICT
# @DESCRIPTION:
# this should be a space delimited list of subversion eclass features to
# restrict.
# export)
# don't export the working copy to S.
ESVN_RESTRICT="${ESVN_RESTRICT:-}"
# @ECLASS-VARIABLE: ESVN_DISABLE_DEPENDENCIES
# @DESCRIPTION:
# Set this variable to a non-empty value to disable the automatic inclusion of
# Subversion in dependencies.
ESVN_DISABLE_DEPENDENCIES="${ESVN_DISABLE_DEPENDENCIES:-}"
# @ECLASS-VARIABLE: ESVN_OFFLINE
# @DESCRIPTION:
# Set this variable to a non-empty value to disable the automatic updating of
# an svn source tree. This is intended to be set outside the subversion source
# tree by users.
ESVN_OFFLINE="${ESVN_OFFLINE:-${ESCM_OFFLINE}}"
# @ECLASS-VARIABLE: ESVN_UP_FREQ
# @DESCRIPTION:
# Set the minimum number of hours between svn up'ing in any given svn module. This is particularly
# useful for split KDE ebuilds where we want to ensure that all submodules are compiled for the same
# revision. It should also be kept user overrideable.
ESVN_UP_FREQ="${ESVN_UP_FREQ:=}"
# @ECLASS-VARIABLE: ESCM_LOGDIR
# @DESCRIPTION:
# User configuration variable. If set to a path such as e.g. /var/log/scm any
# package inheriting from subversion.eclass will record svn revision to
# ${CATEGORY}/${PN}.log in that path in pkg_preinst. This is not supposed to be
# set by ebuilds/eclasses. It defaults to empty so users need to opt in.
ESCM_LOGDIR="${ESCM_LOGDIR:=}"
# @FUNCTION: subversion_fetch
# @USAGE: [repo_uri] [destination]
# @DESCRIPTION:
# Wrapper function to fetch sources from subversion via svn checkout or svn update,
# depending on whether there is an existing working copy in ${ESVN_STORE_DIR}.
#
# Can take two optional parameters:
# repo_uri - a repository URI. default is ESVN_REPO_URI.
# destination - a check out path in S.
subversion_fetch() {
	# Resolve the URI and any @REV peg from the argument (or ESVN_REPO_URI).
local repo_uri="$(subversion__get_repository_uri "${1:-${ESVN_REPO_URI}}")"
local revision="$(subversion__get_peg_revision "${1:-${ESVN_REPO_URI}}")"
local S_dest="${2}"
if [[ -z ${repo_uri} ]]; then
die "${ESVN}: ESVN_REPO_URI (or specified URI) is empty."
fi
	# An explicit ESVN_REVISION overrides any peg revision in the URI.
[[ -n "${ESVN_REVISION}" ]] && revision="${ESVN_REVISION}"
# check for the protocol
local protocol="${repo_uri%%:*}"
case "${protocol}" in
http|https)
;;
svn|svn+ssh)
;;
*)
die "${ESVN}: fetch from '${protocol}' is not yet implemented."
;;
esac
	# Widen the sandbox so svn can read its config and write the store.
addread "/etc/subversion"
addwrite "${ESVN_STORE_DIR}"
# Also make the /var/lib/portage/distfiles/svn-src directory writeable in sandbox
# so we can create it if necessary.
addwrite "$(dirname ${ESVN_STORE_DIR})"
if [[ ! -d ${ESVN_STORE_DIR} ]]; then
debug-print "${FUNCNAME}: initial checkout. creating subversion directory"
mkdir -p "${ESVN_STORE_DIR}" || die "${ESVN}: can't mkdir ${ESVN_STORE_DIR}."
fi
cd "${ESVN_STORE_DIR}" || die "${ESVN}: can't chdir to ${ESVN_STORE_DIR}"
local wc_path="$(subversion__get_wc_path "${repo_uri}")"
local options="${ESVN_OPTIONS} --config-dir ${ESVN_STORE_DIR}/.subversion"
[[ -n "${revision}" ]] && options="${options} -r ${revision}"
if [[ "${ESVN_OPTIONS}" = *-r* ]]; then
ewarn "\${ESVN_OPTIONS} contains -r, this usage is unsupported. Please"
ewarn "see \${ESVN_REPO_URI}"
fi
	# svn >= 1.6 would otherwise prompt about password stores; disable them.
if has_version ">=dev-vcs/subversion-1.6.0"; then
options="${options} --config-option=config:auth:password-stores="
fi
debug-print "${FUNCNAME}: wc_path = \"${wc_path}\""
debug-print "${FUNCNAME}: ESVN_OPTIONS = \"${ESVN_OPTIONS}\""
debug-print "${FUNCNAME}: options = \"${options}\""
	# Three cases: no working copy yet (checkout), offline (reuse as-is),
	# or online with an existing copy (switch or update).
if [[ ! -d ${wc_path}/.svn ]]; then
if [[ -n ${ESVN_OFFLINE} ]]; then
ewarn "ESVN_OFFLINE cannot be used when there is no existing checkout."
fi
# first check out
einfo "subversion check out start -->"
einfo " repository: ${repo_uri}${revision:+@}${revision}"
debug-print "${FUNCNAME}: ${ESVN_FETCH_CMD} ${options} ${repo_uri}"
mkdir -p "${ESVN_PROJECT}" || die "${ESVN}: can't mkdir ${ESVN_PROJECT}."
cd "${ESVN_PROJECT}" || die "${ESVN}: can't chdir to ${ESVN_PROJECT}"
if [[ -n "${ESVN_USER}" ]]; then
${ESVN_FETCH_CMD} ${options} --username "${ESVN_USER}" --password "${ESVN_PASSWORD}" "${repo_uri}" || die "${ESVN}: can't fetch to ${wc_path} from ${repo_uri}."
else
${ESVN_FETCH_CMD} ${options} "${repo_uri}" || die "${ESVN}: can't fetch to ${wc_path} from ${repo_uri}."
fi
elif [[ -n ${ESVN_OFFLINE} ]]; then
	# Offline: upgrade/clean the working copy but never touch the network.
svn upgrade "${wc_path}" &>/dev/null
svn cleanup "${wc_path}" &>/dev/null
subversion_wc_info "${repo_uri}" || die "${ESVN}: unknown problem occurred while accessing working copy."
if [[ -n ${ESVN_REVISION} && ${ESVN_REVISION} != ${ESVN_WC_REVISION} ]]; then
die "${ESVN}: You requested off-line updating and revision ${ESVN_REVISION} but only revision ${ESVN_WC_REVISION} is available locally."
fi
einfo "Fetching disabled: Using existing repository copy at revision ${ESVN_WC_REVISION}."
else
svn upgrade "${wc_path}" &>/dev/null
svn cleanup "${wc_path}" &>/dev/null
subversion_wc_info "${repo_uri}" || die "${ESVN}: unknown problem occurred while accessing working copy."
	# Honor ESVN_UP_FREQ: skip the update if the last one is recent enough.
local esvn_up_freq=
if [[ -n ${ESVN_UP_FREQ} ]]; then
if [[ -n ${ESVN_UP_FREQ//[[:digit:]]} ]]; then
die "${ESVN}: ESVN_UP_FREQ must be an integer value corresponding to the minimum number of hours between svn up."
elif [[ -z $(find "${wc_path}/.svn/entries" -mmin "+$((ESVN_UP_FREQ*60))") ]]; then
einfo "Fetching disabled since ${ESVN_UP_FREQ} hours has not passed since last update."
einfo "Using existing repository copy at revision ${ESVN_WC_REVISION}."
esvn_up_freq=no_update
fi
fi
if [[ -z ${esvn_up_freq} ]]; then
	# URL changed -> svn switch; otherwise a plain svn update.
if [[ ${ESVN_WC_URL} != $(subversion__get_repository_uri "${repo_uri}") ]]; then
einfo "subversion switch start -->"
einfo " old repository: ${ESVN_WC_URL}@${ESVN_WC_REVISION}"
einfo " new repository: ${repo_uri}${revision:+@}${revision}"
debug-print "${FUNCNAME}: ${ESVN_SWITCH_CMD} ${options} ${repo_uri}"
cd "${wc_path}" || die "${ESVN}: can't chdir to ${wc_path}"
if [[ -n "${ESVN_USER}" ]]; then
${ESVN_SWITCH_CMD} ${options} --username "${ESVN_USER}" --password "${ESVN_PASSWORD}" ${repo_uri} || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
else
${ESVN_SWITCH_CMD} ${options} ${repo_uri} || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
fi
else
# update working copy
einfo "subversion update start -->"
einfo " repository: ${repo_uri}${revision:+@}${revision}"
debug-print "${FUNCNAME}: ${ESVN_UPDATE_CMD} ${options}"
cd "${wc_path}" || die "${ESVN}: can't chdir to ${wc_path}"
if [[ -n "${ESVN_USER}" ]]; then
${ESVN_UPDATE_CMD} ${options} --username "${ESVN_USER}" --password "${ESVN_PASSWORD}" || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
else
${ESVN_UPDATE_CMD} ${options} || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
fi
fi
fi
fi
einfo " working copy: ${wc_path}"
	# Unless the "export" restriction is set, copy the working copy into S.
if ! has "export" ${ESVN_RESTRICT}; then
cd "${wc_path}" || die "${ESVN}: can't chdir to ${wc_path}"
local S="${S}/${S_dest}"
mkdir -p "${S}"
# export to the ${WORKDIR}
#* "svn export" has a bug. see http://bugs.gentoo.org/119236
#* svn export . "${S}" || die "${ESVN}: can't export to ${S}."
rsync -rlpgo --exclude=".svn/" . "${S}" || die "${ESVN}: can't export to ${S}."
fi
echo
}
# @FUNCTION: subversion_bootstrap
# @DESCRIPTION:
# Apply patches in ${ESVN_PATCHES} and run ${ESVN_BOOTSTRAP} if specified.
subversion_bootstrap() {
	# Nothing was exported into S when the "export" restriction is set.
if has "export" ${ESVN_RESTRICT}; then
return
fi
cd "${S}"
	# Patches are applied first; each entry may be a literal file in ${PWD}
	# or a glob resolved against ${FILESDIR}.
if [[ -n ${ESVN_PATCHES} ]]; then
einfo "apply patches -->"
local patch fpatch
for patch in ${ESVN_PATCHES}; do
if [[ -f ${patch} ]]; then
epatch "${patch}"
else
	# Unquoted expansion on purpose: lets ${patch} glob in FILESDIR.
for fpatch in ${FILESDIR}/${patch}; do
if [[ -f ${fpatch} ]]; then
epatch "${fpatch}"
else
die "${ESVN}: ${patch} not found"
fi
done
fi
done
echo
fi
	# Then run the bootstrap entry: executed directly if it is an
	# executable file, otherwise eval'd as a shell command line.
if [[ -n ${ESVN_BOOTSTRAP} ]]; then
einfo "begin bootstrap -->"
if [[ -f ${ESVN_BOOTSTRAP} && -x ${ESVN_BOOTSTRAP} ]]; then
einfo " bootstrap with a file: ${ESVN_BOOTSTRAP}"
eval "./${ESVN_BOOTSTRAP}" || die "${ESVN}: can't execute ESVN_BOOTSTRAP."
else
einfo " bootstrap with command: ${ESVN_BOOTSTRAP}"
eval "${ESVN_BOOTSTRAP}" || die "${ESVN}: can't eval ESVN_BOOTSTRAP."
fi
fi
}
# @FUNCTION: subversion_src_unpack
# @DESCRIPTION:
# Default src_unpack. Fetch and, in older EAPIs, bootstrap.
# Default src_unpack: fetch the sources; under EAPI 0/1 (which lack a
# src_prepare phase) the bootstrap step also runs here.
subversion_src_unpack() {
	if ! subversion_fetch; then
		die "${ESVN}: unknown problem occurred in subversion_fetch."
	fi
	if has "${EAPI:-0}" 0 1; then
		if ! subversion_bootstrap; then
			die "${ESVN}: unknown problem occurred in subversion_bootstrap."
		fi
	fi
}
# @FUNCTION: subversion_src_prepare
# @DESCRIPTION:
# Default src_prepare. Bootstrap.
# Default src_prepare (EAPI >= 2): apply patches and run the bootstrap step.
subversion_src_prepare() {
	if ! subversion_bootstrap; then
		die "${ESVN}: unknown problem occurred in subversion_bootstrap."
	fi
}
# @FUNCTION: subversion_wc_info
# @USAGE: [repo_uri]
# @RETURN: ESVN_WC_URL, ESVN_WC_ROOT, ESVN_WC_UUID, ESVN_WC_REVISION and ESVN_WC_PATH
# @DESCRIPTION:
# Get svn info for the specified repo_uri. The default repo_uri is ESVN_REPO_URI.
#
# The working copy information on the specified repository URI are set to
# ESVN_WC_* variables.
# Populate the exported ESVN_WC_* variables from `svn info` on the working
# copy for the given URI (default: ESVN_REPO_URI). Returns 1 when the
# working copy directory does not exist yet.
subversion_wc_info() {
	local uri wc
	uri="$(subversion__get_repository_uri "${1:-${ESVN_REPO_URI}}")"
	wc="$(subversion__get_wc_path "${uri}")"
	debug-print "${FUNCNAME}: repo_uri = ${uri}"
	debug-print "${FUNCNAME}: wc_path = ${wc}"
	[[ -d ${wc} ]] || return 1
	export ESVN_WC_URL="$(subversion__svn_info "${wc}" "URL")"
	export ESVN_WC_ROOT="$(subversion__svn_info "${wc}" "Repository Root")"
	export ESVN_WC_UUID="$(subversion__svn_info "${wc}" "Repository UUID")"
	export ESVN_WC_REVISION="$(subversion__svn_info "${wc}" "Revision")"
	export ESVN_WC_PATH="${wc}"
}
## -- Private Functions
## -- subversion__svn_info() ------------------------------------------------- #
#
# param $1 - a target.
# param $2 - a key name.
#
# Print the value of one `svn info` field ($2, matched case-insensitively
# at line start) for the given target ($1).
subversion__svn_info() {
	local path="${1}" field="${2}"
	env LC_ALL=C svn info "${path}" | grep -i "^${field}" | cut -d" " -f2-
}
## -- subversion__get_repository_uri() --------------------------------------- #
#
# param $1 - a repository URI.
# Normalize a repository URI: drop a trailing slash and any @REV peg
# revision suffix, then echo the result. Dies on an empty argument.
subversion__get_repository_uri() {
	local uri="${1}"
	debug-print "${FUNCNAME}: repo_uri = ${uri}"
	[[ -n ${uri} ]] || die "${ESVN}: ESVN_REPO_URI (or specified URI) is empty."
	# delete trailing slash
	uri="${uri%/}"
	# strip the peg revision (everything after the last '@'), if any
	echo "${uri%@*}"
}
## -- subversion__get_wc_path() ---------------------------------------------- #
#
# param $1 - a repository URI.
# Echo the local working-copy path for a repository URI:
# ${ESVN_STORE_DIR}/${ESVN_PROJECT}/<last URI component>.
subversion__get_wc_path() {
	local uri
	uri="$(subversion__get_repository_uri "${1}")"
	debug-print "${FUNCNAME}: repo_uri = ${uri}"
	echo "${ESVN_STORE_DIR}/${ESVN_PROJECT}/${uri##*/}"
}
## -- subversion__get_peg_revision() ----------------------------------------- #
#
# param $1 - a repository URI.
# Echo the peg revision (the part after the last '@') of a repository URI,
# or the empty string when the URI carries no peg revision.
subversion__get_peg_revision() {
	local uri="${1}" peg_rev=
	debug-print "${FUNCNAME}: repo_uri = ${uri}"
	if [[ ${uri} == *@* ]]; then
		peg_rev="${uri##*@}"
	else
		debug-print "${FUNCNAME}: repo_uri does not have a peg revision."
	fi
	debug-print "${FUNCNAME}: peg_rev = ${peg_rev}"
	echo "${peg_rev}"
}
# @FUNCTION: subversion_pkg_preinst
# @USAGE: [repo_uri]
# @DESCRIPTION:
# Log the svn revision of source code. Doing this in pkg_preinst because we
# want the logs to stick around if packages are uninstalled without messing with
# config protection.
# Log the merged svn revision to ${ESCM_LOGDIR}/${CATEGORY}/${PN}.log.
# Runs in pkg_preinst so the log survives package uninstalls without
# involving config protection. No-op unless the user set ESCM_LOGDIR.
subversion_pkg_preinst() {
	local stamp
	stamp=$(date "+%Y%m%d %H:%M:%S")
	subversion_wc_info "${1:-${ESVN_REPO_URI}}"
	[[ -n ${ESCM_LOGDIR} ]] || return 0
	local logdir="${ROOT}/${ESCM_LOGDIR}/${CATEGORY}"
	if [[ ! -d ${logdir} ]]; then
		mkdir -p "${logdir}" || \
			eerror "Failed to create '${logdir}' for logging svn revision to '${PORTDIR_SCM}'"
	fi
	local msg="svn: ${stamp} - ${PF}:${SLOT} was merged at revision ${ESVN_WC_REVISION}"
	if [[ -d ${logdir} ]]; then
		echo "${msg}" >> "${logdir}/${PN}.log"
	else
		eerror "Could not log the message '${msg}' to '${logdir}/${PN}.log'"
	fi
}

View File

@ -1,84 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Install Tegra BCT files for firmware construction.
#
# @ECLASS-VARIABLE: TEGRA_BCT_SDRAM_CONFIG
# @DESCRIPTION:
# SDRAM memory timing configuration file to install
: ${TEGRA_BCT_SDRAM_CONFIG:=}
# @ECLASS-VARIABLE: TEGRA_BCT_FLASH_CONFIG
# @DESCRIPTION:
# Flash memory configuration file to install
: ${TEGRA_BCT_FLASH_CONFIG:=}
# @ECLASS-VARIABLE: TEGRA_BCT_CHIP_FAMILY
# @DESCRIPTION:
# Family of Tegra chip (determines BCT configuration)
: ${TEGRA_BCT_CHIP_FAMILY:=t25}
# Check for EAPI 2+
case "${EAPI:-0}" in
4|3|2) ;;
*) die "unsupported EAPI" ;;
esac
# Validate the eclass configuration and concatenate the flash and SDRAM
# config fragments into board.cfg, the input for cbootimage.
tegra-bct_src_configure() {
	# Validate before deriving paths (the original derived first, checked
	# second -- harmless but backwards).
	if [ -z "${TEGRA_BCT_SDRAM_CONFIG}" ]; then
		die "No SDRAM configuration file selected."
	fi
	if [ -z "${TEGRA_BCT_FLASH_CONFIG}" ]; then
		die "No flash configuration file selected."
	fi
	if [ -z "${TEGRA_BCT_CHIP_FAMILY}" ]; then
		die "No chip family selected."
	fi

	local sdram_file="${FILESDIR}/${TEGRA_BCT_SDRAM_CONFIG}"
	local flash_file="${FILESDIR}/${TEGRA_BCT_FLASH_CONFIG}"

	einfo "Using sdram config file: ${sdram_file}"
	einfo "Using flash config file: ${flash_file}"
	einfo "Using chip family : ${TEGRA_BCT_CHIP_FAMILY}"

	# Flash settings must precede SDRAM settings in board.cfg.
	# Fix: quote the paths so a FILESDIR containing whitespace cannot
	# word-split the cat arguments.
	cat "${flash_file}" > board.cfg ||
		die "Failed to read flash config file."
	cat "${sdram_file}" >> board.cfg ||
		die "Failed to read SDRAM config file."
}
# Run cbootimage over board.cfg to produce the boot configuration table
# (board.bct) for the configured chip family.
tegra-bct_src_compile() {
	local family_flag="-${TEGRA_BCT_CHIP_FAMILY}"
	cbootimage -gbct $family_flag board.cfg board.bct ||
		die "Failed to generate BCT."
}
# Install the config fragments, the combined board.cfg, and the generated
# board.bct into /firmware/bct, adding canonical sdram.cfg/flash.cfg
# symlinks when the source files are named differently.
tegra-bct_src_install() {
local sdram_file=${FILESDIR}/${TEGRA_BCT_SDRAM_CONFIG}
local flash_file=${FILESDIR}/${TEGRA_BCT_FLASH_CONFIG}
insinto /firmware/bct
doins "${sdram_file}"
doins "${flash_file}"
# Provide the canonical names as symlinks, but only when the installed
# file is not already named that way (dosym onto itself would fail).
if [ "$(basename ${sdram_file})" != "sdram.cfg" ]; then
dosym "$(basename ${sdram_file})" /firmware/bct/sdram.cfg
fi
if [ "$(basename ${flash_file})" != "flash.cfg" ]; then
dosym "$(basename ${flash_file})" /firmware/bct/flash.cfg
fi
doins board.cfg
doins board.bct
}
EXPORT_FUNCTIONS src_configure src_compile src_install

View File

@ -1,50 +0,0 @@
#!/bin/bash
# Unit tests for the doappid() helper from appid.eclass.
# tests-common.sh supplies the tbegin/tend/texit harness and a fake
# ebuild environment (D, die, etc.).
source tests-common.sh
inherit appid
# Known-good appids: canonical {8-4-4-4-12} upper-case UUIDs, plus every
# appid actually passed to doappid by board overlay ebuilds.  The grep is
# best-effort (-s): it yields nothing if the overlays are not checked out.
valid_uuids=(
'{01234567-89AB-CDEF-0123-456789ABCDEF}'
'{11111111-1111-1111-1111-111111111111}'
'{DDDDDDDD-DDDD-DDDD-DDDD-DDDDDDDDDDDD}'
$(grep -hs doappid ../../../../{private-,}overlays/overlay-*/chromeos-base/chromeos-bsp-*/*.ebuild | \
gawk '{print gensub(/"/, "", "g", $2)}')
)
# Malformed appids: bad grouping, stray whitespace, lower case, non-hex.
invalid_uuids=(
''
'01234567-89AB-CDEF-0123-4567-89ABCDEF0123'
' {01234567-89AB-CDEF-0123-4567-89ABCDEF0123} '
' {01234567-89AB-CDEF-0123-4567-89ABCDEF0123}'
'{01234567-89AB-CDEF-0123-4567-89ABCDEF0123} '
'{01234567-89AB-CDEF-0123-4567-89abcDEF0123}'
'{GGGGGGGG-GGGG-GGGG-GGGG-GGGG-GGGGGGGGGGGG}'
)
# doappid must die when given no arguments; run in a subshell so the
# die() does not kill the test script itself.
tbegin "no args"
! (doappid) >&/dev/null
tend $?
tbegin "too many args"
! (doappid "${valid_uuids[0]}" 1234) >&/dev/null
tend $?
# NOTE(review): in both loops below the final `tend $?` sees the status
# of the last command in the loop (the rm), not an accumulated result;
# failures inside the loop are reported via the explicit tend calls.
tbegin "invalid appids"
for uuid in "${invalid_uuids[@]}" ; do
if (doappid "${uuid}") >&/dev/null ; then
tend 1 "not caught: ${uuid}"
fi
rm -rf "${D}"
done
tend $?
tbegin "valid appids"
for uuid in "${valid_uuids[@]}" ; do
if ! (doappid "${uuid}") ; then
tend 1 "not accepted: ${uuid}"
fi
rm -rf "${D}"
done
tend $?
texit

View File

@ -1 +0,0 @@
source ../../../portage-stable/eclass/tests/tests-common.sh

View File

@ -1,787 +0,0 @@
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/eclass/toolchain-funcs.eclass,v 1.120 2012/12/29 05:08:54 vapier Exp $
# @ECLASS: toolchain-funcs.eclass
# @MAINTAINER:
# Toolchain Ninjas <toolchain@gentoo.org>
# @BLURB: functions to query common info about the toolchain
# @DESCRIPTION:
# The toolchain-funcs aims to provide a complete suite of functions
# for gleaning useful information about the toolchain and to simplify
# ugly things like cross-compiling and multilib. All of this is done
# in such a way that you can rely on the function always returning
# something sane.
# Inclusion guard: eclasses can be sourced multiple times, so the whole
# file body is wrapped in this if/fi to make re-inclusion a no-op.
if [[ ${___ECLASS_ONCE_TOOLCHAIN_FUNCS} != "recur -_+^+_- spank" ]] ; then
___ECLASS_ONCE_TOOLCHAIN_FUNCS="recur -_+^+_- spank"
inherit multilib binutils-funcs
# tc-getPROG <VAR [search vars]> <default> [tuple]
# $1 - name of the tuple variable to consult (CHOST or CBUILD)
# $2 - space-separated list of user variables to check, in order; the
#      FIRST name in the list is also the variable that gets exported
# $3 - default program name used when no user variable is set
# $4 - optional tuple prefix to search for as "$4-<prog>"
# Echoes the resolved program name and exports it into the first var.
_tc-getPROG() {
local tuple=$1
local v var vars=$2
local prog=$3
# The first listed variable is the canonical one we export into.
var=${vars%% *}
# If the user already set any of the candidate variables, honor it as-is.
for v in ${vars} ; do
if [[ -n ${!v} ]] ; then
export ${var}="${!v}"
echo "${!v}"
return 0
fi
done
# Otherwise look for a tuple-prefixed tool on PATH ($4 first, then the
# value of the tuple variable, e.g. "x86_64-pc-linux-gnu-gcc").
local search=
[[ -n $4 ]] && search=$(type -p "$4-${prog}")
[[ -z ${search} && -n ${!tuple} ]] && search=$(type -p "${!tuple}-${prog}")
[[ -n ${search} ]] && prog=${search##*/}
export ${var}=${prog}
echo "${!var}"
}
# Build-machine variant: checks BUILD_CC, CC_FOR_BUILD, HOSTCC, etc.
tc-getBUILD_PROG() { _tc-getPROG CBUILD "BUILD_$1 $1_FOR_BUILD HOST$1" "${@:2}"; }
tc-getPROG() { _tc-getPROG CHOST "$@"; }
# --- Tool lookups for the target (CHOST) toolchain -------------------
# @FUNCTION: tc-getAR
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver
tc-getAR() { tc-getPROG AR ar "$@"; }
# @FUNCTION: tc-getAS
# @USAGE: [toolchain prefix]
# @RETURN: name of the assembler
tc-getAS() { tc-getPROG AS as "$@"; }
# @FUNCTION: tc-getCC
# @USAGE: [toolchain prefix]
# @RETURN: name of the C compiler
tc-getCC() { tc-getPROG CC gcc "$@"; }
# @FUNCTION: tc-getCPP
# @USAGE: [toolchain prefix]
# @RETURN: name of the C preprocessor
tc-getCPP() { tc-getPROG CPP cpp "$@"; }
# @FUNCTION: tc-getCXX
# @USAGE: [toolchain prefix]
# @RETURN: name of the C++ compiler
tc-getCXX() { tc-getPROG CXX g++ "$@"; }
# @FUNCTION: tc-getLD
# @USAGE: [toolchain prefix]
# @RETURN: name of the linker
tc-getLD() { tc-getPROG LD ld "$@"; }
# @FUNCTION: tc-getSTRIP
# @USAGE: [toolchain prefix]
# @RETURN: name of the strip program
tc-getSTRIP() { tc-getPROG STRIP strip "$@"; }
# @FUNCTION: tc-getNM
# @USAGE: [toolchain prefix]
# @RETURN: name of the symbol/object thingy
tc-getNM() { tc-getPROG NM nm "$@"; }
# @FUNCTION: tc-getRANLIB
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver indexer
tc-getRANLIB() { tc-getPROG RANLIB ranlib "$@"; }
# @FUNCTION: tc-getOBJCOPY
# @USAGE: [toolchain prefix]
# @RETURN: name of the object copier
tc-getOBJCOPY() { tc-getPROG OBJCOPY objcopy "$@"; }
# @FUNCTION: tc-getF77
# @USAGE: [toolchain prefix]
# @RETURN: name of the Fortran 77 compiler
tc-getF77() { tc-getPROG F77 gfortran "$@"; }
# @FUNCTION: tc-getFC
# @USAGE: [toolchain prefix]
# @RETURN: name of the Fortran 90 compiler
tc-getFC() { tc-getPROG FC gfortran "$@"; }
# @FUNCTION: tc-getGCJ
# @USAGE: [toolchain prefix]
# @RETURN: name of the java compiler
tc-getGCJ() { tc-getPROG GCJ gcj "$@"; }
# @FUNCTION: tc-getPKG_CONFIG
# @USAGE: [toolchain prefix]
# @RETURN: name of the pkg-config tool
tc-getPKG_CONFIG() { tc-getPROG PKG_CONFIG pkg-config "$@"; }
# @FUNCTION: tc-getRC
# @USAGE: [toolchain prefix]
# @RETURN: name of the Windows resource compiler
tc-getRC() { tc-getPROG RC windres "$@"; }
# @FUNCTION: tc-getDLLWRAP
# @USAGE: [toolchain prefix]
# @RETURN: name of the Windows dllwrap utility
tc-getDLLWRAP() { tc-getPROG DLLWRAP dllwrap "$@"; }
# --- Tool lookups for the build machine (CBUILD) toolchain -----------
# @FUNCTION: tc-getBUILD_AR
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver for building binaries to run on the build machine
tc-getBUILD_AR() { tc-getBUILD_PROG AR ar "$@"; }
# @FUNCTION: tc-getBUILD_AS
# @USAGE: [toolchain prefix]
# @RETURN: name of the assembler for building binaries to run on the build machine
tc-getBUILD_AS() { tc-getBUILD_PROG AS as "$@"; }
# @FUNCTION: tc-getBUILD_CC
# @USAGE: [toolchain prefix]
# @RETURN: name of the C compiler for building binaries to run on the build machine
tc-getBUILD_CC() { tc-getBUILD_PROG CC gcc "$@"; }
# @FUNCTION: tc-getBUILD_CPP
# @USAGE: [toolchain prefix]
# @RETURN: name of the C preprocessor for building binaries to run on the build machine
tc-getBUILD_CPP() { tc-getBUILD_PROG CPP cpp "$@"; }
# @FUNCTION: tc-getBUILD_CXX
# @USAGE: [toolchain prefix]
# @RETURN: name of the C++ compiler for building binaries to run on the build machine
tc-getBUILD_CXX() { tc-getBUILD_PROG CXX g++ "$@"; }
# @FUNCTION: tc-getBUILD_LD
# @USAGE: [toolchain prefix]
# @RETURN: name of the linker for building binaries to run on the build machine
tc-getBUILD_LD() { tc-getBUILD_PROG LD ld "$@"; }
# @FUNCTION: tc-getBUILD_STRIP
# @USAGE: [toolchain prefix]
# @RETURN: name of the strip program for building binaries to run on the build machine
tc-getBUILD_STRIP() { tc-getBUILD_PROG STRIP strip "$@"; }
# @FUNCTION: tc-getBUILD_NM
# @USAGE: [toolchain prefix]
# @RETURN: name of the symbol/object thingy for building binaries to run on the build machine
tc-getBUILD_NM() { tc-getBUILD_PROG NM nm "$@"; }
# @FUNCTION: tc-getBUILD_RANLIB
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver indexer for building binaries to run on the build machine
tc-getBUILD_RANLIB() { tc-getBUILD_PROG RANLIB ranlib "$@"; }
# @FUNCTION: tc-getBUILD_OBJCOPY
# @USAGE: [toolchain prefix]
# @RETURN: name of the object copier for building binaries to run on the build machine
tc-getBUILD_OBJCOPY() { tc-getBUILD_PROG OBJCOPY objcopy "$@"; }
# @FUNCTION: tc-getBUILD_PKG_CONFIG
# @USAGE: [toolchain prefix]
# @RETURN: name of the pkg-config tool for building binaries to run on the build machine
tc-getBUILD_PKG_CONFIG() { tc-getBUILD_PROG PKG_CONFIG pkg-config "$@"; }
# @FUNCTION: tc-export
# @USAGE: <list of toolchain variables>
# @DESCRIPTION:
# Quick way to export a bunch of compiler vars at once.  Each argument
# X must have a matching tc-getX function; that function resolves and
# exports the variable as a side effect.
tc-export() {
	local v
	for v in "$@" ; do
		if [[ $(type -t tc-get${v}) != "function" ]] ; then
			die "tc-export: invalid export variable '${v}'"
		fi
		# Discard the echoed value; we only want the export side effect.
		tc-get${v} > /dev/null
	done
}
# @FUNCTION: tc-is-cross-compiler
# @RETURN: Shell true if we are using a cross-compiler, shell false otherwise
tc-is-cross-compiler() {
	# Use the test's own exit status directly; the old
	# `return $([[ ... ]])` form forked a subshell just to produce a
	# status (the substitution output is always empty).
	[[ ${CBUILD:-${CHOST}} != ${CHOST} ]]
}
# @FUNCTION: tc-is-softfloat
# @DESCRIPTION:
# See if this toolchain is a softfloat based one.
# @CODE
# The possible return values:
# - only: the target is always softfloat (never had fpu)
# - yes: the target should support softfloat
# - softfp: (arm specific) the target should use hardfloat insns, but softfloat calling convention
# - no: the target doesn't support softfloat
# @CODE
# This allows us to react differently where packages accept
# softfloat flags in the case where support is optional, but
# rejects softfloat flags where the target always lacks an fpu.
tc-is-softfloat() {
	local target=${CTARGET:-${CHOST}}
	case ${target} in
	bfin*|h8300*)
		# These chips never had an FPU at all.
		echo "only"
		;;
	*)
		# Normalize '_' to '-' so both tuple spellings match.
		case ${target//_/-} in
		*-softfloat-*) echo "yes" ;;
		*-softfp-*)    echo "softfp" ;;
		*)             echo "no" ;;
		esac
		;;
	esac
}
# @FUNCTION: tc-is-static-only
# @DESCRIPTION:
# Return shell true if the target does not support shared libs, shell false
# otherwise.
tc-is-static-only() {
	local host=${CTARGET:-${CHOST}}
	# *MiNT doesn't have shared libraries, only platform so far.
	# Plain [[ ]] replaces the old `return $([[ ... ]])` subshell fork.
	[[ ${host} == *-mint* ]]
}
# @FUNCTION: tc-export_build_env
# @USAGE: [compiler variables]
# @DESCRIPTION:
# Export common build related compiler settings.  Any requested tool
# variables are resolved via tc-export, then conservative defaults are
# supplied for the BUILD_* flag variables the user left unset.
tc-export_build_env() {
	tc-export "$@"
	: ${BUILD_CFLAGS:=-O1 -pipe}
	: ${BUILD_CXXFLAGS:=-O1 -pipe}
	: ${BUILD_CPPFLAGS:=}
	: ${BUILD_LDFLAGS:=}
	# Spelled out instead of brace expansion for readability.
	export BUILD_CFLAGS BUILD_CXXFLAGS BUILD_CPPFLAGS BUILD_LDFLAGS
}
# @FUNCTION: tc-env_build
# @USAGE: <command> [command args]
# @INTERNAL
# @DESCRIPTION:
# Setup the compile environment to the build tools and then execute the
# specified command. We use tc-getBUILD_XX here so that we work with
# all of the semi-[non-]standard env vars like $BUILD_CC which often
# the target build system does not check.
tc-env_build() {
tc-export_build_env
# All assignments below are prefixes to the single "$@" invocation:
# they apply only to the executed command, not to the caller's env.
CFLAGS=${BUILD_CFLAGS} \
CXXFLAGS=${BUILD_CXXFLAGS} \
CPPFLAGS=${BUILD_CPPFLAGS} \
LDFLAGS=${BUILD_LDFLAGS} \
AR=$(tc-getBUILD_AR) \
AS=$(tc-getBUILD_AS) \
CC=$(tc-getBUILD_CC) \
CPP=$(tc-getBUILD_CPP) \
CXX=$(tc-getBUILD_CXX) \
LD=$(tc-getBUILD_LD) \
NM=$(tc-getBUILD_NM) \
PKG_CONFIG=$(tc-getBUILD_PKG_CONFIG) \
RANLIB=$(tc-getBUILD_RANLIB) \
"$@"
}
# @FUNCTION: econf_build
# @USAGE: [econf flags]
# @DESCRIPTION:
# Sometimes we need to locally build up some tools to run on CBUILD because
# the package has helper utils which are compiled+executed when compiling.
# This won't work when cross-compiling as the CHOST is set to a target which
# we cannot natively execute.
#
# For example, the python package will build up a local python binary using
# a portable build system (configure+make), but then use that binary to run
# local python scripts to build up other components of the overall python.
# We cannot rely on the python binary in $PATH as that often times will be
# a different version, or not even installed in the first place. Instead,
# we compile the code in a different directory to run on CBUILD, and then
# use that binary when compiling the main package to run on CHOST.
#
# For example, with newer EAPIs, you'd do something like:
# @CODE
# src_configure() {
# ECONF_SOURCE=${S}
# if tc-is-cross-compiler ; then
# mkdir "${WORKDIR}"/${CBUILD}
# pushd "${WORKDIR}"/${CBUILD} >/dev/null
# econf_build --disable-some-unused-stuff
# popd >/dev/null
# fi
# ... normal build paths ...
# }
# src_compile() {
# if tc-is-cross-compiler ; then
# pushd "${WORKDIR}"/${CBUILD} >/dev/null
# emake one-or-two-build-tools
# ln/mv build-tools to normal build paths in ${S}/
# popd >/dev/null
# fi
# ... normal build paths ...
# }
# @CODE
econf_build() {
# Run econf under the build-machine toolchain env set up by tc-env_build.
tc-env_build econf --build=${CBUILD:-${CHOST}} "$@"
}
# @FUNCTION: tc-has-openmp
# @USAGE: [toolchain prefix]
# @DESCRIPTION:
# See if the toolchain supports OpenMP.
# Works by test-compiling a tiny OpenMP program with -fopenmp and
# returning the compiler's exit status.
tc-has-openmp() {
local base="${T}/test-tc-openmp"
cat <<-EOF > "${base}.c"
#include <omp.h>
int main() {
int nthreads, tid, ret = 0;
#pragma omp parallel private(nthreads, tid)
{
tid = omp_get_thread_num();
nthreads = omp_get_num_threads(); ret += tid + nthreads;
}
return ret;
}
EOF
# >&/dev/null is a bashism: discard both stdout and stderr.
$(tc-getCC "$@") -fopenmp "${base}.c" -o "${base}" >&/dev/null
local ret=$?
rm -f "${base}"*
return ${ret}
}
# @FUNCTION: tc-has-tls
# @USAGE: [-s|-c|-l] [toolchain prefix]
# @DESCRIPTION:
# See if the toolchain supports thread local storage (TLS). Use -s to test the
# compiler, -c to also test the assembler, and -l to also test the C library
# (the default).
tc-has-tls() {
local base="${T}/test-tc-tls"
cat <<-EOF > "${base}.c"
int foo(int *i) {
static __thread int j = 0;
return *i ? j : *i;
}
EOF
# -S stops after compilation, -c after assembly; the default (link a
# shared object with -z defs) additionally exercises the C library.
local flags
case $1 in
-s) flags="-S";;
-c) flags="-c";;
-l) ;;
-*) die "Usage: tc-has-tls [-c|-l] [toolchain prefix]";;
esac
: ${flags:=-fPIC -shared -Wl,-z,defs}
# Drop the mode flag so the rest of "$@" is the toolchain prefix.
[[ $1 == -* ]] && shift
$(tc-getCC "$@") ${flags} "${base}.c" -o "${base}" >&/dev/null
local ret=$?
rm -f "${base}"*
return ${ret}
}
# Parse information from CBUILD/CHOST/CTARGET rather than
# use external variables from the profile.
# $1 - "kern" for the kernel ARCH name, anything else for the portage ARCH
# $2 - optional tuple; defaults to CTARGET/CHOST
tc-ninja_magic_to_arch() {
# Helper: pick the kernel name ($1) or the portage name ($2).
ninj() { [[ ${type} == "kern" ]] && echo $1 || echo $2 ; }
local type=$1
local host=$2
[[ -z ${host} ]] && host=${CTARGET:-${CHOST}}
local KV=${KV:-${KV_FULL}}
[[ ${type} == "kern" ]] && [[ -z ${KV} ]] && \
ewarn "QA: Kernel version could not be determined, please inherit kernel-2 or linux-info"
# NOTE: case patterns are order-sensitive (e.g. sparc64* before sparc*).
# KV_to_int is provided elsewhere (linux-info machinery) — not defined here.
case ${host} in
aarch64*) ninj arm64 arm;;
alpha*) echo alpha;;
arm*) echo arm;;
avr*) ninj avr32 avr;;
bfin*) ninj blackfin bfin;;
cris*) echo cris;;
hppa*) ninj parisc hppa;;
i?86*)
# Starting with linux-2.6.24, the 'x86_64' and 'i386'
# trees have been unified into 'x86'.
# FreeBSD still uses i386
if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -lt $(KV_to_int 2.6.24) || ${host} == *freebsd* ]] ; then
echo i386
else
echo x86
fi
;;
ia64*) echo ia64;;
m68*) echo m68k;;
mips*) echo mips;;
nios2*) echo nios2;;
nios*) echo nios;;
powerpc*)
# Starting with linux-2.6.15, the 'ppc' and 'ppc64' trees
# have been unified into simply 'powerpc', but until 2.6.16,
# ppc32 is still using ARCH="ppc" as default
if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -ge $(KV_to_int 2.6.16) ]] ; then
echo powerpc
elif [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -eq $(KV_to_int 2.6.15) ]] ; then
if [[ ${host} == powerpc64* ]] || [[ ${PROFILE_ARCH} == "ppc64" ]] ; then
echo powerpc
else
echo ppc
fi
elif [[ ${host} == powerpc64* ]] ; then
echo ppc64
elif [[ ${PROFILE_ARCH} == "ppc64" ]] ; then
ninj ppc64 ppc
else
echo ppc
fi
;;
s390*) echo s390;;
sh64*) ninj sh64 sh;;
sh*) echo sh;;
sparc64*) ninj sparc64 sparc;;
sparc*) [[ ${PROFILE_ARCH} == "sparc64" ]] \
&& ninj sparc64 sparc \
|| echo sparc
;;
vax*) echo vax;;
x86_64*freebsd*) echo amd64;;
x86_64*)
# Starting with linux-2.6.24, the 'x86_64' and 'i386'
# trees have been unified into 'x86'.
if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -ge $(KV_to_int 2.6.24) ]] ; then
echo x86
else
ninj x86_64 amd64
fi
;;
# since our usage of tc-arch is largely concerned with
# normalizing inputs for testing ${CTARGET}, let's filter
# other cross targets (mingw and such) into the unknown.
*) echo unknown;;
esac
}
# @FUNCTION: tc-arch-kernel
# @USAGE: [toolchain prefix]
# @RETURN: name of the kernel arch according to the compiler target
tc-arch-kernel() {
tc-ninja_magic_to_arch kern "$@"
}
# @FUNCTION: tc-arch
# @USAGE: [toolchain prefix]
# @RETURN: name of the portage arch according to the compiler target
tc-arch() {
tc-ninja_magic_to_arch portage "$@"
}
# tc-endian [tuple]
# Print "big" or "little" for the tuple's CPU endianness ("wtf" when
# the CPU prefix is not recognized).  Defaults to CTARGET/CHOST.
tc-endian() {
	local target=${1:-${CTARGET:-${CHOST}}}
	# Only the CPU portion (text before the first '-') matters.
	target=${target%%-*}
	# Order matters: more specific patterns precede catch-alls
	# (aarch64*be before aarch64, mips*l* before mips*, sh*b* before sh*).
	case ${target} in
	aarch64*be) echo big ;;
	aarch64)    echo little ;;
	alpha*)     echo big ;;
	arm*b*)     echo big ;;
	arm*)       echo little ;;
	cris*)      echo little ;;
	hppa*)      echo big ;;
	i?86*)      echo little ;;
	ia64*)      echo little ;;
	m68*)       echo big ;;
	mips*l*)    echo little ;;
	mips*)      echo big ;;
	powerpc*)   echo big ;;
	s390*)      echo big ;;
	sh*b*)      echo big ;;
	sh*)        echo little ;;
	sparc*)     echo big ;;
	x86_64*)    echo little ;;
	*)          echo wtf ;;
	esac
}
# Internal func. The first argument is the version info to expand.
# Query the preprocessor to improve compatibility across different
# compilers rather than maintaining a --version flag matrix. #335943
# The preprocessor emits three whitespace-separated integers (e.g.
# "4 7 2") which become $1/$2/$3 for the eval'd format string.
_gcc_fullversion() {
	local ver="$1"; shift
	# $(...) instead of legacy backticks; word-splitting of the
	# substitution into positional parameters is intentional.
	set -- $($(tc-getCPP "$@") -E -P - <<<"__GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__")
	eval echo "$ver"
}
# Convenience wrappers around _gcc_fullversion; each just selects which
# of the major/minor/micro fields to print.
# @FUNCTION: gcc-fullversion
# @RETURN: compiler version (major.minor.micro: [3.4.6])
gcc-fullversion() {
_gcc_fullversion '$1.$2.$3' "$@"
}
# @FUNCTION: gcc-version
# @RETURN: compiler version (major.minor: [3.4].6)
gcc-version() {
_gcc_fullversion '$1.$2' "$@"
}
# @FUNCTION: gcc-major-version
# @RETURN: major compiler version (major: [3].4.6)
gcc-major-version() {
_gcc_fullversion '$1' "$@"
}
# @FUNCTION: gcc-minor-version
# @RETURN: minor compiler version (minor: 3.[4].6)
gcc-minor-version() {
_gcc_fullversion '$2' "$@"
}
# @FUNCTION: gcc-micro-version
# @RETURN: micro compiler version (micro: 3.4.[6])
gcc-micro-version() {
_gcc_fullversion '$3' "$@"
}
# Returns the installation directory - internal toolchain
# function for use by _gcc-specs-exists (for flag-o-matic).
# Parses the "install:" line of `gcc -print-search-dirs`; LC_ALL=C keeps
# the output locale-independent for awk.
_gcc-install-dir() {
echo "$(LC_ALL=C $(tc-getCC) -print-search-dirs 2> /dev/null |\
awk '$1=="install:" {print $2}')"
}
# Returns true if the indicated specs file exists - internal toolchain
# function for use by flag-o-matic.
# $1 - specs file name relative to the gcc install dir.
_gcc-specs-exists() {
[[ -f $(_gcc-install-dir)/$1 ]]
}
# Returns requested gcc specs directive unprocessed - for used by
# gcc-specs-directive()
# Note; later specs normally overwrite earlier ones; however if a later
# spec starts with '+' then it appends.
# gcc -dumpspecs is parsed first, followed by files listed by "gcc -v"
# as "Reading <file>", in order. Strictly speaking, if there's a
# $(gcc_install_dir)/specs, the built-in specs aren't read, however by
# the same token anything from 'gcc -dumpspecs' is overridden by
# the contents of $(gcc_install_dir)/specs so the result is the
# same either way.
# $1 - directive name (e.g. "cc1", "link_command")
_gcc-specs-directive_raw() {
local cc=$(tc-getCC)
local specfiles=$(LC_ALL=C ${cc} -v 2>&1 | awk '$1=="Reading" {print $NF}')
# The awk program accumulates the body of the "*<directive>:" section,
# honoring the '+'-prefix append convention described above.
${cc} -dumpspecs 2> /dev/null | cat - ${specfiles} | awk -v directive=$1 \
'BEGIN { pspec=""; spec=""; outside=1 }
$1=="*"directive":" { pspec=spec; spec=""; outside=0; next }
outside || NF==0 || ( substr($1,1,1)=="*" && substr($1,length($1),1)==":" ) { outside=1; next }
spec=="" && substr($0,1,1)=="+" { spec=pspec " " substr($0,2); next }
{ spec=spec $0 }
END { print spec }'
return 0
}
# Return the requested gcc specs directive, with all included
# specs expanded.
# Note, it does not check for inclusion loops, which cause it
# to never finish - but such loops are invalid for gcc and we're
# assuming gcc is operational.
# $1 - directive name; prints the fully-expanded directive text.
gcc-specs-directive() {
local directive subdname subdirective
directive="$(_gcc-specs-directive_raw $1)"
# Repeatedly substitute %(name) inclusions until none remain.
while [[ ${directive} == *%\(*\)* ]]; do
subdname=${directive/*%\(}
subdname=${subdname/\)*}
subdirective="$(_gcc-specs-directive_raw ${subdname})"
directive="${directive//\%(${subdname})/${subdirective}}"
done
echo "${directive}"
return 0
}
# The predicates below all follow the same pattern: fetch a specs
# directive and succeed iff it contains the given "%{!no<feature>:"
# marker (detected by testing whether deleting the marker changes the
# string).  They previously used `return $([[ ... ]])`, which forks a
# subshell solely to produce an exit status; plain [[ ]] is equivalent.

# Returns true if gcc sets relro
gcc-specs-relro() {
	local directive
	directive=$(gcc-specs-directive link_command)
	[[ "${directive/\{!norelro:}" != "${directive}" ]]
}
# Returns true if gcc sets now
gcc-specs-now() {
	local directive
	directive=$(gcc-specs-directive link_command)
	[[ "${directive/\{!nonow:}" != "${directive}" ]]
}
# Returns true if gcc builds PIEs
gcc-specs-pie() {
	local directive
	directive=$(gcc-specs-directive cc1)
	[[ "${directive/\{!nopie:}" != "${directive}" ]]
}
# Returns true if gcc builds with the stack protector
gcc-specs-ssp() {
	local directive
	directive=$(gcc-specs-directive cc1)
	[[ "${directive/\{!fno-stack-protector:}" != "${directive}" ]]
}
# Returns true if gcc upgrades fstack-protector to fstack-protector-all
gcc-specs-ssp-to-all() {
	local directive
	directive=$(gcc-specs-directive cc1)
	[[ "${directive/\{!fno-stack-protector-all:}" != "${directive}" ]]
}
# Returns true if gcc builds with fno-strict-overflow
gcc-specs-nostrict() {
	local directive
	directive=$(gcc-specs-directive cc1)
	[[ "${directive/\{!fstrict-overflow:}" != "${directive}" ]]
}
# @FUNCTION: gen_usr_ldscript
# @USAGE: [-a] <list of libs to create linker scripts for>
# @DESCRIPTION:
# This function generate linker scripts in /usr/lib for dynamic
# libs in /lib. This is to fix linking problems when you have
# the .so in /lib, and the .a in /usr/lib. What happens is that
# in some cases when linking dynamic, the .a in /usr/lib is used
# instead of the .so in /lib due to gcc/libtool tweaking ld's
# library search path. This causes many builds to fail.
# See bug #4411 for more info.
#
# Note that you should in general use the unversioned name of
# the library (libfoo.so), as ldconfig should usually update it
# correctly to point to the latest version of the library present.
#
# With -a ("auto"), each argument is a bare library name and the
# versioned files are moved from /usr/lib into /lib first; without it,
# arguments are full filenames expected to already live in /lib.
gen_usr_ldscript() {
local lib libdir=$(get_libdir) output_format="" auto=false suffix=$(get_libname)
# Fallback for EAPIs without ED — presumably pre-prefix EAPIs; TODO confirm.
[[ -z ${ED+set} ]] && local ED=${D%/}${EPREFIX}/
tc-is-static-only && return
# Eventually we'd like to get rid of this func completely #417451
case ${CTARGET:-${CHOST}} in
*-darwin*) ;;
*linux*|*-freebsd*|*-openbsd*|*-netbsd*)
use prefix && return 0 ;;
*) return 0 ;;
esac
# Just make sure it exists
dodir /usr/${libdir}
if [[ $1 == "-a" ]] ; then
auto=true
shift
dodir /${libdir}
fi
# OUTPUT_FORMAT gives hints to the linker as to what binary format
# is referenced ... makes multilib saner
output_format=$($(tc-getCC) ${CFLAGS} ${LDFLAGS} -Wl,--verbose 2>&1 | sed -n 's/^OUTPUT_FORMAT("\([^"]*\)",.*/\1/p')
[[ -n ${output_format} ]] && output_format="OUTPUT_FORMAT ( ${output_format} )"
for lib in "$@" ; do
local tlib
if ${auto} ; then
lib="lib${lib}${suffix}"
else
# Ensure /lib/${lib} exists to avoid dangling scripts/symlinks.
# This especially is for AIX where $(get_libname) can return ".a",
# so /lib/${lib} might be moved to /usr/lib/${lib} (by accident).
[[ -r ${ED}/${libdir}/${lib} ]] || continue
#TODO: better die here?
fi
case ${CTARGET:-${CHOST}} in
*-darwin*)
# Darwin: no linker scripts; read the install_name, move the
# versioned dylibs, retarget the id, and symlink instead.
if ${auto} ; then
tlib=$(scanmacho -qF'%S#F' "${ED}"/usr/${libdir}/${lib})
else
tlib=$(scanmacho -qF'%S#F' "${ED}"/${libdir}/${lib})
fi
[[ -z ${tlib} ]] && die "unable to read install_name from ${lib}"
tlib=${tlib##*/}
if ${auto} ; then
mv "${ED}"/usr/${libdir}/${lib%${suffix}}.*${suffix#.} "${ED}"/${libdir}/ || die
# some install_names are funky: they encode a version
if [[ ${tlib} != ${lib%${suffix}}.*${suffix#.} ]] ; then
mv "${ED}"/usr/${libdir}/${tlib%${suffix}}.*${suffix#.} "${ED}"/${libdir}/ || die
fi
rm -f "${ED}"/${libdir}/${lib}
fi
# Mach-O files have an id, which is like a soname, it tells how
# another object linking against this lib should reference it.
# Since we moved the lib from usr/lib into lib this reference is
# wrong. Hence, we update it here. We don't configure with
# libdir=/lib because that messes up libtool files.
# Make sure we don't lose the specific version, so just modify the
# existing install_name
if [[ ! -w "${ED}/${libdir}/${tlib}" ]] ; then
chmod u+w "${ED}${libdir}/${tlib}" # needed to write to it
local nowrite=yes
fi
install_name_tool \
-id "${EPREFIX}"/${libdir}/${tlib} \
"${ED}"/${libdir}/${tlib} || die "install_name_tool failed"
[[ -n ${nowrite} ]] && chmod u-w "${ED}${libdir}/${tlib}"
# Now as we don't use GNU binutils and our linker doesn't
# understand linker scripts, just create a symlink.
pushd "${ED}/usr/${libdir}" > /dev/null
ln -snf "../../${libdir}/${tlib}" "${lib}"
popd > /dev/null
;;
*)
# ELF platforms: move the real lib (auto mode), then write a GNU
# ld linker script in /usr/lib pointing at it.
if ${auto} ; then
tlib=$(scanelf -qF'%S#F' "${ED}"/usr/${libdir}/${lib})
[[ -z ${tlib} ]] && die "unable to read SONAME from ${lib}"
mv "${ED}"/usr/${libdir}/${lib}* "${ED}"/${libdir}/ || die
# some SONAMEs are funky: they encode a version before the .so
if [[ ${tlib} != ${lib}* ]] ; then
mv "${ED}"/usr/${libdir}/${tlib}* "${ED}"/${libdir}/ || die
fi
rm -f "${ED}"/${libdir}/${lib}
else
tlib=${lib}
fi
cat > "${ED}/usr/${libdir}/${lib}" <<-END_LDSCRIPT
/* GNU ld script
Since Gentoo has critical dynamic libraries in /lib, and the static versions
in /usr/lib, we need to have a "fake" dynamic lib in /usr/lib, otherwise we
run into linking problems. This "fake" dynamic lib is a linker script that
redirects the linker to the real lib. And yes, this works in the cross-
compiling scenario as the sysroot-ed linker will prepend the real path.
See bug http://bugs.gentoo.org/4411 for more info.
*/
${output_format}
GROUP ( ${EPREFIX}/${libdir}/${tlib} )
END_LDSCRIPT
;;
esac
fperms a+x "/usr/${libdir}/${lib}" || die "could not change perms on ${lib}"
done
}
#
# ChromiumOS extensions below here.
#
# Returns true if gcc builds PIEs
# For ARM, readelf -h | grep Type always has REL instead of EXEC.
# That is why we have to read the flags one by one and check them instead
# of test-compiling a small program.
# Scans the cc1 command line (in reverse, so the last flag wins) for
# -fPIE/-fPIC vs -fno-PIE/-fno-PIC.
gcc-pie() {
	# Fix: 'flag' was not declared local and leaked into the caller's scope.
	local flag
	for flag in $(echo "void f(){char a[100];}" | \
		${CTARGET}-gcc -v -xc -c -o /dev/null - 2>&1 | \
		grep cc1 | \
		tr " " "\n" | \
		tac)
	do
		if [[ ${flag} == "-fPIE" || ${flag} == "-fPIC" ]] ; then
			return 0
		elif [[ ${flag} == "-fno-PIE" || ${flag} == "-fno-PIC" ]] ; then
			return 1
		fi
	done
	return 1
}
# Returns true if gcc builds with the stack protector
# Compiles a throwaway object and looks for a stack_chk_fail symbol.
gcc-ssp() {
	local obj
	obj=$(mktemp)
	echo "void f(){char a[100];}" | ${CTARGET}-gcc -xc -c -o "${obj}" -
	# Capture the grep status directly instead of the old
	# `return $(... | grep -q ...)` subshell form, so we can clean up
	# the temp file (previously leaked) before returning.
	local ret=0
	${CTARGET}-readelf -sW "${obj}" | grep -q stack_chk_fail || ret=$?
	rm -f -- "${obj}"
	return ${ret}
}
# Sets up environment variables required to build with Clang
# This should be replaced with a sysroot wrapper ala GCC if/when
# we get serious about building with Clang.
# Relies on portage/eclass helpers (use, usex, append-flags,
# get_binutils_path_gold) provided by the ebuild environment.
clang-setup-env() {
# No-op unless the 'clang' USE flag is enabled.
use clang || return 0
case ${ARCH} in
amd64|x86)
export CC="clang" CXX="clang++"
local clang_flags=(
--sysroot="${SYSROOT}"
-B$(get_binutils_path_gold)
$(usex x86 -m32 '')
)
append-flags "${clang_flags[@]}"
# Some boards use optimizations (e.g. -mfpmath=sse) that
# clang does not support.
append-flags -Qunused-arguments
;;
*) die "Clang is not yet supported for ${ARCH}"
esac
if use asan; then
append-flags -fsanitize=address -fno-omit-frame-pointer
fi
}
# Closes the ___ECLASS_ONCE_TOOLCHAIN_FUNCS inclusion guard.
fi

View File

@ -1,132 +0,0 @@
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# $Header: $
#
# useradd.eclass
#
# Adds a mechanism for adding users/groups into alternate roots.
#
# This will likely go away.
#
# Authors:
# Google, inc. <chromium-os-dev@chromium.org>
#
HOMEPAGE="http://www.chromium.org/"
# Before we manipulate users at all, we want to make sure that
# passwd/group/shadow is initialized in the first place. That's
# what baselayout does.
# (baselayout itself obviously cannot depend on itself.)
if [ "${PN}" != "baselayout" ]; then
DEPEND="sys-apps/baselayout"
RDEPEND="sys-apps/baselayout"
fi
# Tests if the user already exists in the passwd file.
#
# $1 - Username (e.g. "messagebus")
user_exists() {
	# grep -q replaces the stdout redirect; 2>/dev/null keeps us quiet
	# when ${ROOT}/etc/passwd does not exist yet.  The old pattern used
	# a nonstandard "\:" BRE escape; a plain ":" matches identically.
	grep -q -e "^$1:" "${ROOT}/etc/passwd" 2>/dev/null
}
# Tests if the group already exists in the group file.
#
# $1 - Groupname (e.g. "messagebus")
group_exists() {
	# Same cleanup as user_exists: grep -q, silence a missing file, and
	# drop the nonstandard "\:" escape in favor of a literal ":".
	grep -q -e "^$1:" "${ROOT}/etc/group" 2>/dev/null
}
# Add entry to /etc/passwd
#
# $1 - Username (e.g. "messagebus")
# $2 - "*" to indicate not shadowed, "x" to indicate shadowed
# $3 - UID (e.g. 200)
# $4 - GID (e.g. 200)
# $5 - full name (e.g. "")
# $6 - home dir (e.g. "/home/foo" or "/var/run/dbus")
# $7 - shell (e.g. "/bin/sh" or "/bin/false")
add_user() {
	# Idempotent: a user already present in ${ROOT} is left untouched.
	if user_exists "$1"; then
		elog "Skipping add_user of existing user: '$1'"
		return
	fi
	printf '%s:%s:%s:%s:%s:%s:%s\n' "$1" "$2" "$3" "$4" "$5" "$6" "$7" \
		>> "${ROOT}/etc/passwd"
}
# Remove entry from /etc/passwd
#
# $1 - Username
remove_user() {
	# Quote the sed script: the old unquoted form relied on the shell
	# escape ".\*" to survive globbing/word-splitting.
	[ -e "${ROOT}/etc/passwd" ] && sed -i -e "/^${1}:.*$/d" "${ROOT}/etc/passwd"
}
# Add entry to /etc/shadow
#
# $1 - Username
# $2 - Crypted password
# The remaining fields (lastchg=14500, min=0, max=99999) are fixed.
add_shadow() {
	printf '%s:%s:14500:0:99999::::\n' "$1" "$2" >> "${ROOT}/etc/shadow"
}
# Remove entry from /etc/shadow
#
# $1 - Username
remove_shadow() {
	# Quote the sed script (same fix as remove_user).
	[ -e "${ROOT}/etc/shadow" ] && sed -i -e "/^${1}:.*$/d" "${ROOT}/etc/shadow"
}
# Add entry to /etc/group
# $1 - Groupname (e.g. "messagebus")
# $2 - GID (e.g. 200)
add_group() {
	# Idempotent: skip groups that already exist in ${ROOT}.
	if group_exists "$1"; then
		elog "Skipping add_group of existing group: '$1'"
		return
	fi
	printf '%s:x:%s:\n' "$1" "$2" >> "${ROOT}/etc/group"
}
# Copies user entry from host passwd file if it already exists or else
# creates a new user using add_user.
#
# See add_user for argument list.
# Note: reads the BUILD HOST's /etc/passwd (no ${ROOT} prefix) when
# looking for an entry to copy.
copy_or_add_user() {
# NOTE(review): 'username' is assigned but never used below.
local username="$1"
if user_exists "$1"; then
elog "Skipping copy_or_add_user of existing user '$1'"
return
fi
local entry=$(grep -e "^$1\:" /etc/passwd)
if [ -n "$entry" ]; then
elog "Copying existing passwd entry from root: '$entry'"
echo "$entry" >> "${ROOT}/etc/passwd"
else
add_user "$@"
fi
}
# Copies group entry from host group file if it already exists or else
# creates a new group using add_group.
#
# See add_group for argument list.
# Note: reads the BUILD HOST's /etc/group (no ${ROOT} prefix) when
# looking for an entry to copy.
copy_or_add_group() {
# NOTE(review): 'groupname' is assigned but never used below.
local groupname="$1"
if group_exists "$1"; then
elog "Skipping copy_or_add_group of existing group '$1'"
return
fi
local entry=$(grep -e "^$1\:" /etc/group)
if [ -n "$entry" ]; then
elog "Copying existing group entry from root: '$entry'"
echo "$entry" >> "${ROOT}/etc/group"
else
add_group "$@"
fi
}