Merge pull request #359 from marineam/baselayout

Baselayout
This commit is contained in:
Michael Marineau 2014-02-18 17:18:34 -08:00
commit 3e89e1b4ca
34 changed files with 262 additions and 7537 deletions

View File

@ -1,216 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
EAPI=5
inherit useradd
DESCRIPTION="ChromeOS specific system setup"
HOMEPAGE="http://src.chromium.org/"
SRC_URI=""
LICENSE="GPL-2"
SLOT="0"
KEYWORDS="amd64 arm x86"
IUSE="cros_host"
# We need to make sure timezone-data is merged before us.
# See pkg_setup below as well as http://crosbug.com/27413
# and friends.
DEPEND="sys-apps/baselayout
sys-apps/efunctions
!<sys-libs/timezone-data-2011d
!<=app-admin/sudo-1.8.2
!<sys-apps/mawk-1.3.4
!<app-shells/bash-4.1
!<app-shells/dash-0.5.5
!<net-misc/openssh-5.2_p1-r8
!cros_host? (
sys-libs/timezone-data
)"
RDEPEND="${DEPEND}
sys-apps/systemd
"
# no source directory
S="${WORKDIR}"
# Remove entry from /etc/group
#
# $1 - Group name
remove_group() {
[ -e "${ROOT}/etc/group" ] && sed -i -e /^${1}:.\*$/d "${ROOT}/etc/group"
}
# Adds a "daemon"-type user with no login or shell.
copy_or_add_daemon_user() {
local username="$1"
local uid="$2"
if user_exists "${username}"; then
elog "Removing existing user '$1' for copy_or_add_daemon_user"
remove_user "${username}"
fi
copy_or_add_user "${username}" "*" $uid $uid "" /dev/null /bin/false
if group_exists "${username}"; then
elog "Removing existing group '$1' for copy_or_add_daemon_user"
elog "Any existing group memberships will be lost"
remove_group "${username}"
fi
copy_or_add_group "${username}" $uid
}
# Removes all users from a group in /etc/group.
# No changes if the group does not exist.
remove_all_users_from_group() {
local group="$1"
sed -i "/^${group}:/s/:[^:]*$/:/" "${ROOT}/etc/group"
}
# Removes a list of users from a group in /etc/group.
# No changes if the group does not exist or the user is not in the group.
remove_users_from_group() {
local group="$1"; shift
local username
for username in "$@"; do
sed -i -r "/^${group}:/{s/([,:])${username}(,|$)/\1/; s/,$//}" \
"${ROOT}/etc/group"
done
}
# Adds a list of users to a group in /etc/group.
# No changes if the group does not exist.
add_users_to_group() {
local group="$1"; shift
local username
remove_users_from_group "${group}" "$@"
for username in "$@"; do
sed -i "/^${group}:/{ s/$/,${username}/ ; s/:,/:/ }" "${ROOT}/etc/group"
done
}
pkg_setup() {
if ! use cros_host ; then
# The sys-libs/timezone-data package installs a default /etc/localtime
# file automatically, so scrub that if it's a regular file.
local etc_tz="${ROOT}etc/localtime"
[[ -L ${etc_tz} ]] || rm -f "${etc_tz}"
fi
}
src_install() {
dodir /usr/lib/sysctl.d
insinto /usr/lib/sysctl.d
newins "${FILESDIR}"/sysctl.conf ${PN}.conf
# Add a /srv directory for mounting into later
dodir /srv
keepdir /srv
# target-specific fun
if ! use cros_host ; then
# Make mount work in the way systemd prescribes
dosym /proc/mounts /etc/mtab
# Put resolv.conf in /var/run so root can be read-only
dosym /var/run/resolv.conf /etc/resolv.conf
# Insert a cool motd ;)
insinto /etc
doins "${FILESDIR}"/motd
# Insert empty fstab
doins "${FILESDIR}"/fstab
# Insert a mini vimrc to avoid driving everyone insane
insinto /usr/share/vim
doins "${FILESDIR}"/vimrc
dosym ../../usr/share/vim/vimrc /etc/vim/vimrc
fi
# Add a sudo file for the core user
if [[ -n ${SHARED_USER_NAME} ]] ; then
insinto /etc/sudoers.d
echo "${SHARED_USER_NAME} ALL=(ALL) NOPASSWD: ALL" > 95_core_base
insopts -m 440
doins 95_core_base || die
fi
}
pkg_postinst() {
local x
# We explicitly add all of the users needed in the system here. The
# build of Chromium OS uses a single build chroot environment to build
# for various targets with distinct ${ROOT}. This causes two problems:
# 1. The target rootfs needs to have the same UIDs as the build
# chroot so that chmod operations work.
# 2. The portage tools to add a new user in an ebuild don't work when
# $ROOT != /
# We solve this by having baselayout install in both the build and
# target and pre-create all needed users. In order to support existing
# build roots we copy over the user entries if they already exist.
local system_user="core"
local system_id="1000"
local system_home="/home/${system_user}"
# Add a chronos-access group to give non-chronos users (mostly system
# daemons running as a non-chronos user) group permissions to access
# files/directories owned by chronos.
# local system_access_user="core-access"
# local system_access_id="1001"
local crypted_password='*'
[ -r "${SHARED_USER_PASSWD_FILE}" ] &&
crypted_password=$(cat "${SHARED_USER_PASSWD_FILE}")
remove_user "${system_user}"
add_user "${system_user}" "x" "${system_id}" \
"${system_id}" "system_user" "${system_home}" /bin/bash
remove_shadow "${system_user}"
add_shadow "${system_user}" "${crypted_password}"
copy_or_add_group "${system_user}" "${system_id}"
# copy_or_add_daemon_user "${system_access_user}" "${system_access_id}"
copy_or_add_daemon_user "messagebus" 201 # For dbus
copy_or_add_daemon_user "syslog" 202 # For rsyslog
copy_or_add_daemon_user "ntp" 203
copy_or_add_daemon_user "sshd" 204
# copy_or_add_daemon_user "polkituser" 206 # For policykit
# copy_or_add_daemon_user "tss" 207 # For trousers (TSS/TPM)
# copy_or_add_daemon_user "pkcs11" 208 # For pkcs11 clients
# copy_or_add_daemon_user "qdlservice" 209 # for QDLService
# copy_or_add_daemon_user "cromo" 210 # For cromo (modem manager)
# copy_or_add_daemon_user "cashew" 211 # Deprecated, do not reuse
# copy_or_add_daemon_user "ipsec" 212 # For strongswan/ipsec VPN
# copy_or_add_daemon_user "cros-disks" 213 # For cros-disks
# copy_or_add_daemon_user "tor" 214 # For tor (anonymity service)
# copy_or_add_daemon_user "tcpdump" 215 # For tcpdump --with-user
# copy_or_add_daemon_user "debugd" 216 # For debugd
# copy_or_add_daemon_user "openvpn" 217 # For openvpn
# copy_or_add_daemon_user "bluetooth" 218 # For bluez
# copy_or_add_daemon_user "wpa" 219 # For wpa_supplicant
# copy_or_add_daemon_user "cras" 220 # For cras (audio)
# copy_or_add_daemon_user "gavd" 221 # For gavd (audio) (deprecated)
# copy_or_add_daemon_user "input" 222 # For /dev/input/event access
# copy_or_add_daemon_user "chaps" 223 # For chaps (pkcs11)
copy_or_add_daemon_user "dhcp" 224 # For dhcpcd (DHCP client)
# copy_or_add_daemon_user "tpmd" 225 # For tpmd
# copy_or_add_daemon_user "mtp" 226 # For libmtp
# copy_or_add_daemon_user "proxystate" 227 # For proxy monitoring
# copy_or_add_daemon_user "power" 228 # For powerd
# copy_or_add_daemon_user "watchdog" 229 # For daisydog
# copy_or_add_daemon_user "devbroker" 230 # For permission_broker
# copy_or_add_daemon_user "xorg" 231 # For Xorg
copy_or_add_daemon_user "etcd" 232 # For etcd
copy_or_add_daemon_user "docker" 233 # For docker
copy_or_add_daemon_user "tlsdate" 234 # For tlsdate
copy_or_add_group "systemd-journal" 248 # For journalctl access
copy_or_add_group "dialout" 249 # For udev rules
# copy_or_add_daemon_user "ntfs-3g" 300 # For ntfs-3g prcoess
# copy_or_add_daemon_user "avfs" 301 # For avfs process
# copy_or_add_daemon_user "fuse-exfat" 302 # For exfat-fuse prcoess
# copy_or_add_group "serial" 402
# Give the core user access to some system tools
add_users_to_group "docker" "${system_user}"
add_users_to_group "systemd-journal" "${system_user}"
}

View File

@ -1,14 +0,0 @@
# /etc/fstab: static file system information.
#
# noatime turns off atimes for increased performance (atimes normally aren't
# needed); notail increases performance of ReiserFS (at the expense of storage
# efficiency). It's safe to drop the noatime options if you want and to
# switch between notail / tail freely.
#
# The root filesystem should have a pass number of either 0 or 1.
# All other filesystems should have a pass number of 0 or greater than 1.
#
# See the manpage fstab(5) for more information.
#
# <fs> <mountpoint> <type> <opts> <dump/pass>

View File

@ -1,5 +0,0 @@
______ ____ _____
/ ____/___ ________ / __ \/ ___/
/ / / __ \/ ___/ _ \/ / / /\__ \
/ /___/ /_/ / / / __/ /_/ /___/ /
\____/\____/_/ \___/\____//____/

View File

@ -1,19 +0,0 @@
# sysctl defaults for CoreOS
# Enable IPv4 forwarding to support NAT in containers
net.ipv4.ip_forward = 1
# Enables source route verification
net.ipv4.conf.default.rp_filter = 1
# Enable reverse path filtering on all interfaces
net.ipv4.conf.all.rp_filter = 1
# Set watchdog_thresh
kernel.watchdog_thresh = 5
# When the kernel panics, automatically reboot to preserve dump in ram
kernel.panic = -1
# Reboot on oops as well
kernel.panic_on_oops = 1
# Disable kernel address visibility to non-root users.
kernel.kptr_restrict = 1

View File

@ -1,18 +0,0 @@
" Minimal configuration file for Vim on CoreOS
"
" The vim package is installed with USE=minimal to avoid installing lots of
" extra files but that doesn't mean we like vim acting as if it were vi.
" General settings from Gentoo's default vimrc:
set nocompatible " Use Vim defaults (much better!)
set bs=2 " Allow backspacing over everything in insert mode
set ruler " Show the cursor position all the time
set nomodeline " We don't allow modelines by default
" Read vimrc from the state partition if it exists.
if filereadable("/media/state/etc/vim/vimrc")
source /etc/vim/vimrc
endif
if filereadable("/media/state/etc/vim/vimrc.local")
source /etc/vim/vimrc.local
endif

View File

@ -101,7 +101,6 @@ RDEPEND="${RDEPEND}
app-arch/gzip
app-arch/tar
app-shells/bash
coreos-base/coreos-base
coreos-base/cros_boot_mode
coreos-base/vboot_reference
coreos-base/update_engine

View File

@ -39,7 +39,6 @@ RDEPEND="${RDEPEND}
app-emulation/qemu
app-text/texi2html
coreos-base/google-breakpad
coreos-base/coreos-base
coreos-base/coreos-installer
coreos-base/cros-devutils[cros_host]
coreos-base/cros-factoryutils

View File

@ -17,7 +17,6 @@ DEPEND="
app-admin/sudo
app-arch/pbzip2
app-shells/bash-completion
coreos-base/coreos-base
coreos-base/hard-host-depends
dev-python/setuptools
dev-util/boost-build

View File

@ -1,6 +0,0 @@
PKG_INSTALL_MASK+=" /etc/sysctl.conf"
INSTALL_MASK+=" /etc/sysctl.conf"
# Don't filter out /etc/init.d/functions.sh
PKG_INSTALL_MASK=${PKG_INSTALL_MASK/\/etc\/init.d}
INSTALL_MASK=${INSTALL_MASK/\/etc\/init.d}

View File

@ -1,43 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# $Header: $
# @ECLASS: appid.eclass
# @MAINTAINER:
# ChromiumOS Build Team
# @BUGREPORTS:
# Please report bugs via http://crosbug.com/new (with label Area-Build)
# @VCSURL: http://git.chromium.org/gitweb/?p=chromiumos/overlays/chromiumos-overlay.git;a=blob;f=eclass/@ECLASS@
# @BLURB: Eclass for setting up the omaha appid field in /etc/lsb-release
# @FUNCTION: doappid
# @USAGE: <appid>
# @DESCRIPTION:
# Initializes /etc/lsb-release with the appid. Note that appid is really
# just a UUID in the canonical {8-4-4-4-12} format (all uppercase). e.g.
# {01234567-89AB-CDEF-0123-456789ABCDEF}
doappid() {
[[ $# -eq 1 && -n $1 ]] || die "Usage: ${FUNCNAME} <appid>"
local appid=$1
# Validate the UUID is formatted correctly. Except for mario --
# it was created before we had strict rules, and so it violates them :(.
if [[ ${appid} != '{87efface-864d-49a5-9bb3-4b050a7c227a}' ]] ; then
local uuid_regex='[{][0-9A-F]{8}-([0-9A-F]{4}-){3}[0-9A-F]{12}[}]'
local filtered_appid=$(echo "${appid}" | LC_ALL=C sed -r "s:${uuid_regex}::")
if [[ -n ${filtered_appid} ]] ; then
eerror "Invalid appid: ${appid} -> ${filtered_appid}"
eerror " - must start with '{' and end with '}'"
eerror " - must be all upper case"
eerror " - be a valid UUID (8-4-4-4-12 hex digits)"
die "invalid appid: ${appid}"
fi
fi
dodir /etc
local lsb="${D}/etc/lsb-release"
[[ -e ${lsb} ]] && die "${lsb} already exists!"
echo "CHROMEOS_RELEASE_APPID=${appid}" > "${lsb}" || die "creating ${lsb} failed!"
}
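# For reference, a board ebuild would typically call doappid from src_install;
# the appid below is one of the placeholder UUIDs used by the eclass tests,
# not a real product appid.
# @CODE
# src_install() {
#   doappid "{01234567-89AB-CDEF-0123-456789ABCDEF}"
# }
# @CODE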

View File

@ -1,92 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
# @ECLASS: cros-coreboot.eclass
# @MAINTAINER:
# The Chromium OS Authors
# @BLURB: Unifies logic for building coreboot images for Chromium OS.
[[ ${EAPI} != "4" ]] && die "Only EAPI=4 is supported"
inherit toolchain-funcs
DESCRIPTION="coreboot x86 firmware"
HOMEPAGE="http://www.coreboot.org"
LICENSE="GPL-2"
SLOT="0"
IUSE="em100-mode"
RDEPEND="!sys-boot/chromeos-coreboot"
DEPEND="sys-power/iasl
sys-apps/coreboot-utils
sys-boot/chromeos-mrc
"
# @ECLASS-VARIABLE: COREBOOT_BOARD
# @DESCRIPTION:
# Coreboot Configuration name.
: ${COREBOOT_BOARD:=}
# @ECLASS-VARIABLE: COREBOOT_BUILD_ROOT
# @DESCRIPTION:
# Build directory root
: ${COREBOOT_BUILD_ROOT:=}
[[ -z ${COREBOOT_BOARD} ]] && die "COREBOOT_BOARD must be set"
[[ -z ${COREBOOT_BUILD_ROOT} ]] && die "COREBOOT_BUILD_ROOT must be set"
cros-coreboot_pre_src_prepare() {
cp configs/config.${COREBOOT_BOARD} .config
}
cros-coreboot_src_compile() {
tc-export CC
local board="${COREBOOT_BOARD}"
local build_root="${COREBOOT_BUILD_ROOT}"
# Set KERNELREVISION (really coreboot revision) to the ebuild revision
# number followed by a dot and the first seven characters of the git
# hash. The name is confusing but consistent with the coreboot
# Makefile.
local sha1v="${VCSID/*-/}"
export KERNELREVISION=".${PV}.${sha1v:0:7}"
# Firmware related binaries are compiled with a 32-bit toolchain
# on 64-bit platforms
if use amd64 ; then
export CROSS_COMPILE="i686-pc-linux-gnu-"
export CC="${CROSS_COMPILE}-gcc"
else
export CROSS_COMPILE=${CHOST}-
fi
elog "Toolchain:\n$(sh util/xcompile/xcompile)\n"
emake obj="${build_root}" oldconfig
emake obj="${build_root}"
# Modify firmware descriptor if building for the EM100 emulator.
if use em100-mode; then
ifdtool --em100 "${build_root}/coreboot.rom" || die
mv "${build_root}/coreboot.rom"{.new,} || die
fi
# Build cbmem for the target
cd util/cbmem
emake clean
CROSS_COMPILE="${CHOST}-" emake
}
cros-coreboot_src_install() {
dobin util/cbmem/cbmem
insinto /firmware
newins "${COREBOOT_BUILD_ROOT}/coreboot.rom" coreboot.rom
OPROM=$( awk 'BEGIN{FS="\""} /CONFIG_VGA_BIOS_FILE=/ { print $2 }' \
configs/config.${COREBOOT_BOARD} )
CBFSOPROM=pci$( awk 'BEGIN{FS="\""} /CONFIG_VGA_BIOS_ID=/ { print $2 }' \
configs/config.${COREBOOT_BOARD} ).rom
newins ${OPROM} ${CBFSOPROM}
}
EXPORT_FUNCTIONS src_compile src_install pre_src_prepare
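# Rough consumer sketch (board name and build directory are hypothetical);
# both variables must be set before the inherit since the eclass dies at
# source time if either is empty.
# @CODE
# EAPI=4
# COREBOOT_BOARD="example_board"
# COREBOOT_BUILD_ROOT="${WORKDIR}/coreboot_build"
# inherit cros-coreboot
# @CODE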

View File

@ -1,31 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Eclass for use by ebuilds that need to know the debug serial port.
#
[[ ${EAPI} != "4" ]] && die "Only EAPI=4 is supported"
SERIAL_USE_PREFIX="serial_use_"
ALL_SERIALPORTS=(
ttyAMA{0..5}
ttyO{0..5}
ttyS{0..5}
ttySAC{0..5}
)
IUSE=${ALL_SERIALPORTS[@]/#/${SERIAL_USE_PREFIX}}
# Echo the current serial port name
get_serial_name() {
local item
for item in "${ALL_SERIALPORTS[@]}"; do
if use ${SERIAL_USE_PREFIX}${item}; then
echo ${item}
return
fi
done
die "Unable to determine current serial port."
}
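# Illustrative usage: the selected port name can feed a console= kernel
# argument or similar; the einfo line below is just an example.
# @CODE
# local tty="$(get_serial_name)"
# einfo "Using debug serial port: ${tty}"
# @CODE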

View File

@ -0,0 +1,93 @@
# Copyright 2014 The CoreOS Authors
# Distributed under the terms of the GNU General Public License v2
# @ECLASS: cros-tmpfiles.eclass
# @AUTHOR: marineam
# @BLURB: A basic systemd-tmpfiles --create implementation for ebuilds.
# @DESCRIPTION:
# Any location that is outside of /usr must be initialized during the build
# and (re)created during boot if it is missing. To avoid duplicating
# definitions of these directories/symlinks in ebuilds and tmpfiles configs,
# packages can instead install only a tmpfiles config and use this eclass to
# create the paths in an ebuild-friendly way.
#
# Note: in the future if we add a --root option to systemd-tmpfiles we can
# switch to calling that instead of using this simplified implementation.
# Enforce use of recent EAPIs for the sake of consistency/sanity
case "${EAPI:-0}" in
0|1|2|3)
die "Unsupported EAPI=${EAPI:-0} (too old) for ${ECLASS}"
;;
4|5)
;;
*)
die "Unsupported EAPI=${EAPI} (unknown) for ${ECLASS}"
;;
esac
# Per-rule-type helpers; default values ('-') are substituted inline since
# bash doesn't have a slick syntax for defaulting anything other than blank
# vs. non-blank variables. They read the t* variables set by _tmpfiles_do_create.
_tmpfiles_do_file() {
[[ ${tmode} == - ]] && tmode=0644
if [[ "${ttype}" == F ]]; then
rm -rf "${ED}/${tpath}"
elif [[ -e "${ED}/${tpath}" ]]; then
return 0
fi
if [[ "${targ}" != - ]]; then
echo "${targ}" > "${ED}/${tpath}" || return 1
else
echo -n > "${ED}/${tpath}" || return 1
fi
chmod "${tmode}" "${ED}/${tpath}" || return 1
chown "${tuid}:${tgid}" "${ED}/${tpath}" || return 1
}
_tmpfiles_do_dir() {
[[ ${tmode} == - ]] && tmode=0755
if [[ "${ttype}" == d && -e "${ED}/${tpath}" ]]; then
return 0
else
rm -rf "${ED}/${tpath}"
fi
mkdir -m "${tmode}" "${ED}/${tpath}" || return 1
chown "${tuid}:${tgid}" "${ED}/${tpath}" || return 1
}
_tmpfiles_do_link() {
if [[ -e "${ED}/${tpath}" || -h "${ED}/${tpath}" ]]; then
return 0
fi
ln -s "${targ}" "${ED}/${tpath}" || return 1
}
_tmpfiles_do_create() {
local ttype tpath tmode tuid tgid tage targ trule
while read ttype tpath tmode tuid tgid tage targ; do
trule="$ttype $tpath $tmode $tuid $tgid $tage $targ"
[[ "${tuid}" == - ]] && tuid=root
[[ "${tgid}" == - ]] && tgid=root
case "${ttype}" in
f|F) _tmpfiles_do_file;;
d|D) _tmpfiles_do_dir;;
L) _tmpfiles_do_link;;
*) ewarn "Skipping tmpfiles rule: ${trule}";;
esac
if [[ $? -ne 0 ]]; then
eerror "Bad tmpfiles rule: ${trule}"
return 1
fi
done
}
tmpfiles_create() {
if [[ $# -eq 0 ]]; then
set -- "${ED}"/usr/lib*/tmpfiles.d/*.conf
fi
local conf
for conf in "$@"; do
_tmpfiles_do_create < "${conf}" || die "Bad tmpfiles config: ${conf}"
done
}
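# Illustrative usage (config name and rule below are hypothetical): install a
# tmpfiles config and let the eclass pre-create the paths under ${ED}.
# @CODE
# src_install() {
#   insinto /usr/lib/tmpfiles.d
#   doins "${FILESDIR}/example.conf"   # e.g. "d /var/lib/example 0750 root root -"
#   tmpfiles_create
# }
# @CODE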

View File

@ -1,305 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Install Gobi firmware for Chromium OS
#
# @ECLASS-VARIABLE: GOBI_FIRMWARE_OEM
# @DESCRIPTION:
# OEM name for firmware to install
: ${GOBI_FIRMWARE_OEM:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_VID
# @DESCRIPTION:
# OEM Vendor ID
: ${GOBI_FIRMWARE_VID:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_PID
# @DESCRIPTION:
# OEM Product ID
: ${GOBI_FIRMWARE_PID:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_CARRIERS
# @DESCRIPTION:
# Install firmware for this list of carrier numbers
: ${GOBI_FIRMWARE_CARRIERS:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_ZIP_FILE
# @DESCRIPTION:
# Filename of zip file containing firmware
: ${GOBI_FIRMWARE_ZIP_FILE:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_FLAVOR
# @DESCRIPTION:
# The flavor (gps, xtra) to install
: ${GOBI_FIRMWARE_FLAVOR:="gps"}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_QDL
# @DESCRIPTION:
# Install the qdl program from the firmware zip file
: ${GOBI_FIRMWARE_QDL:="no"}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_DEFAULT_CARRIER
# @DESCRIPTION:
# Default carrier firmware to load if not set on modem
: ${GOBI_FIRMWARE_DEFAULT_CARRIER:=}
# @ECLASS-VARIABLE: GOBI_FIRMWARE_APPS_DIR
# @DESCRIPTION:
# directory name for the .apps files
: ${GOBI_FIRMWARE_APPS_DIR:=""}
GOBI_FIRMWARE_CARRIER_VOD=0
GOBI_FIRMWARE_CARRIER_VZW=1
GOBI_FIRMWARE_CARRIER_ATT=2
GOBI_FIRMWARE_CARRIER_SPRINT=3
GOBI_FIRMWARE_CARRIER_TMO=4
GOBI_FIRMWARE_CARRIER_GEN=6
GOBI_FIRMWARE_CARRIER_TELLFON=7
GOBI_FIRMWARE_CARRIER_TELITAL=8
GOBI_FIRMWARE_CARRIER_ORANGE=9
GOBI_FIRMWARE_CARRIER_DOCO=12
GOBI_FIRMWARE_CARRIER_DELLX=15
GOBI_FIRMWARE_CARRIER_OMH=16
# Check for EAPI 2+
case "${EAPI:-0}" in
4|3|2) ;;
*) die "unsupported EAPI" ;;
esac
gobi-firmware_install_udev_qcserial_rules() {
local oem=${GOBI_FIRMWARE_OEM}
local vid=${GOBI_FIRMWARE_VID}
local pid=${GOBI_FIRMWARE_PID}
local file=/etc/udev/rules.d/90-ttyusb-qcserial-${oem}.rules
cat > ${D}${file} <<EOF
# 90-ttyusb-qcserial-${oem}.rules
# Sets ownership of Gobi ttyusb devices belonging to qcserial.
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
ACTION!="add", GOTO="ttyusb_qcserial_${oem}_end"
SUBSYSTEM!="tty", GOTO="ttyusb_qcserial_${oem}_end"
KERNEL!="ttyUSB[0-9]*", GOTO="ttyusb_qcserial_${oem}_end"
ATTRS{idVendor}=="${vid}", ATTRS{idProduct}=="${pid}", \
OWNER="qdlservice", GROUP="qdlservice"
LABEL="ttyusb_qcserial_${oem}_end"
EOF
}
gobi-firmware_install_udev_qdlservice_rules() {
local oem=${GOBI_FIRMWARE_OEM}
local vid=${GOBI_FIRMWARE_VID}
local pid=${GOBI_FIRMWARE_PID}
local file=/etc/udev/rules.d/99-qdlservice-${oem}.rules
cat > ${D}${file} <<EOF
# 99-qdlservice-${oem}.rules
# Emits a signal in response to a Gobi serial device appearing. Upstart will run
# QDLService when it sees this signal.
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
ACTION!="add", GOTO="qdlservice_${oem}_end"
SUBSYSTEM!="tty", GOTO="qdlservice_${oem}_end"
KERNEL!="ttyUSB[0-9]*", GOTO="qdlservice_${oem}_end"
ATTRS{idVendor}=="${vid}", ATTRS{idProduct}=="${pid}", \
RUN+="/sbin/initctl emit gobi_serial_${oem} GOBIDEV=/dev/%k"
LABEL="qdlservice_${oem}_end"
EOF
}
gobi-firmware_install_udev_rules() {
dodir /etc/udev/rules.d
gobi-firmware_install_udev_qcserial_rules
gobi-firmware_install_udev_qdlservice_rules
}
gobi-firmware_install_upstart_scripts() {
dodir /etc/init
file=/etc/init/qdlservice-${GOBI_FIRMWARE_OEM}.conf
cat > ${D}${file} <<EOF
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Starts QDLService if a Gobi ttyusb device appears.
start on gobi_serial_${GOBI_FIRMWARE_OEM}
script
set +e
GOBIQDL="/opt/Qualcomm/QDLService2k/QDLService2k${GOBI_FIRMWARE_OEM}"
ret=1
attempt=0
readonly MAX_ATTEMPTS=10
while [ \$ret -ne 0 -a \$attempt -lt \$MAX_ATTEMPTS ]; do
# Exponential backoff - wait (2^attempt) - 1 seconds
sleep \$(((1 << \$attempt) - 1))
starttime=\$(date +%s%N)
/sbin/minijail0 -u qdlservice -g qdlservice -- "\$GOBIQDL" "\$GOBIDEV"
ret=\$?
endtime=\$(date +%s%N)
logger -t qdlservice "attempt \$attempt: \$ret"
attempt=\$((\$attempt + 1))
if [ \$ret -ne 0 ]; then
logger -t qdlservice "resetting..."
/opt/Qualcomm/bin/powercycle-all-gobis
fi
done
download_time=\$(((\$endtime - \$starttime) / 1000000))
METRICROOT=Network.3G.Gobi.FirmwareDownload
metrics_client \$METRICROOT.Time \$download_time 0 10000 20
metrics_client -e \$METRICROOT.Attempts \$attempt \$MAX_ATTEMPTS
exit \$ret
end script
EOF
}
gobi-firmware_install_firmware_files() {
local oem=${GOBI_FIRMWARE_OEM}
local install_qdl=${GOBI_FIRMWARE_QDL}
local apps_dir=${GOBI_FIRMWARE_APPS_DIR}
# If the apps directory is not specified, then use the carrier
# directory. The apps directory should be set to UMTS for most
# UMTS carriers because they share the same firmware.
if [ -z "${apps_dir}" ] ; then
apps_dir=${GOBI_FIRMWARE_DEFAULT_CARRIER}
fi
#
# installation directories.
# We could consider installing to more standard locations
# except that QDLService expects to find files in
# /opt/Qualcomm.
#
local firmware_install_dir=${D}/opt/Qualcomm/Images2k
local qdl_install_dir=${D}/opt/Qualcomm/QDLService2k
local log_install_dir=${D}/var/log/
local oemlog_filename=QDLService2k${oem}.txt
local log_filename=QDLService2k.txt
if [ -d Images2k/${oem} ] ; then
# We already have the firmware extracted, this is easy
local base_firmware=Images2k/${oem}
# Do not install qdl; it will be built with the SDK
install_qdl="no"
else
[ -z "${GOBI_FIRMWARE_ZIP_FILE}" ] && \
die "Must specify GOBI_FIRMWARE_ZIP_FILE"
[ ! -r "${GOBI_FIRMWARE_ZIP_FILE}" ] && \
die "${GOBI_FIRMWARE_ZIP_FILE} is unreadable"
mkdir -p "${T}/${oem}"
unzip ${GOBI_FIRMWARE_ZIP_FILE} -d "${T}/${oem}"
if [ -d "${T}/${oem}/Images2k/${oem}" ] ; then
local base_firmware="${T}/${oem}/Images2k/${oem}"
install_qdl=no
else
rpmfile=$(find "${T}/${oem}" -name \*.rpm -print)
[ -z $rpmfile ] &&
die "Could not find an RPM file in ${GOBI_FIRMWARE_ZIP_FILE}"
# extract the rpm
if [ -d ${oem}_rpm ] ; then
rm -rf ${oem}_rpm
fi
mkdir -p ${oem}_rpm
rpm2tar -O $rpmfile | tar -C ${oem}_rpm -xvf -
local base_firmware=${oem}_rpm/opt/Qualcomm/Images2k/${oem}
fi
fi
# make directories
install -d ${firmware_install_dir}/${oem} \
${qdl_install_dir} ${udev_rules_install_dir}
# install firmware
local flavor_firmware=${base_firmware}_${GOBI_FIRMWARE_FLAVOR}
for carrier in ${GOBI_FIRMWARE_CARRIERS} UMTS ; do
# copy the base firmware
cp -af ${base_firmware}/${carrier} ${firmware_install_dir}/${oem}
if [ -d ${flavor_firmware}/${carrier} ] ; then
# overlay specific xtra/gps flavor files
cp -af ${flavor_firmware}/${carrier} ${firmware_install_dir}/${oem}
fi
done
# Copy DID file for this device
cp ${base_firmware}/*.did ${firmware_install_dir}/${oem}
# Create a DID file for generic GOBI devices
did_file=$(ls ${base_firmware}/*.did | head -n1)
if [ ! -x $did_file ] ; then
# TODO(jglasgow): Move code for 05c6920b to dogfood ebuild
cp $did_file ${firmware_install_dir}/${oem}/05c6920b.did
fi
# Set firmware and directory permissions
find ${firmware_install_dir}/${oem} -type f -exec chmod 444 {} \;
find ${firmware_install_dir}/${oem} -type d -exec chmod 555 {} \;
# install firmware download program, and associated files
if [ ${install_qdl} == "yes" ] ; then
local qdl_dir=${oem}_rpm/opt/Qualcomm/QDLService2k
install -t ${qdl_install_dir} \
${qdl_dir}/QDLService2k${oem}
ln -sf /opt/Qualcomm/QDLService2k/QDLService2k${oem} \
${qdl_install_dir}/QDLService2kGeneric
fi
# Ensure the default firmware files exists and create Options${oem}.txt
local image_dir=/opt/Qualcomm/Images2k/${oem}
local amss_file=${image_dir}/${apps_dir}/amss.mbn
local apps_file=${image_dir}/${apps_dir}/apps.mbn
local uqcn_file=${image_dir}/${GOBI_FIRMWARE_DEFAULT_CARRIER}/uqcn.mbn
for file in $amss_file $apps_file $uqcn_file ; do
if [ ! -r ${D}${file} ] ; then
die "Could not find file: $file in ${D}"
fi
done
cat > Options2k${oem}.txt <<EOF
${amss_file}
${apps_file}
${uqcn_file}
EOF
install -t ${qdl_install_dir} Options2k${oem}.txt
}
gobi-firmware_src_install() {
# Verify that eclass variables are set
[ -z "${GOBI_FIRMWARE_DEFAULT_CARRIER}" ] && \
die "Must specify GOBI_FIRMWARE_DEFAULT_CARRIER"
[ -z "${GOBI_FIRMWARE_OEM}" ] && \
die "Must specify GOBI_FIRMWARE_OEM"
[ -z "${GOBI_FIRMWARE_VID}" ] && \
die "Must specify GOBI_FIRMWARE_VID"
[ -z "${GOBI_FIRMWARE_PID}" ] && \
die "Must specify GOBI_FIRMWARE_PID"
[ -z "${GOBI_FIRMWARE_CARRIERS}" ] &&
die "Must specify GOBI_FIRMWARE_CARRIERS"
gobi-firmware_install_udev_rules
gobi-firmware_install_upstart_scripts
gobi-firmware_install_firmware_files
}
EXPORT_FUNCTIONS src_install
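# Rough consumer sketch; the OEM name, USB IDs, carrier numbers, and zip file
# below are placeholders, not values from any real firmware ebuild.
# @CODE
# GOBI_FIRMWARE_OEM="EXAMPLEOEM"
# GOBI_FIRMWARE_VID="05c6"
# GOBI_FIRMWARE_PID="920b"
# GOBI_FIRMWARE_CARRIERS="1 2 3"
# GOBI_FIRMWARE_DEFAULT_CARRIER="2"
# GOBI_FIRMWARE_ZIP_FILE="${DISTDIR}/example-gobi-firmware.zip"
# inherit gobi-firmware
# @CODE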

File diff suppressed because it is too large

View File

@ -1,525 +0,0 @@
# Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/eclass/subversion.eclass,v 1.67 2009/05/10 20:33:38 arfrever Exp $
# @ECLASS: subversion.eclass
# @MAINTAINER:
# Akinori Hattori <hattya@gentoo.org>
# Bo Ørsted Andresen <zlin@gentoo.org>
# Arfrever Frehtes Taifersar Arahesis <arfrever@gentoo.org>
#
# Original Author: Akinori Hattori <hattya@gentoo.org>
#
# @BLURB: The subversion eclass is written to fetch software sources from subversion repositories
# @DESCRIPTION:
# The subversion eclass provides functions to fetch, patch and bootstrap
# software sources from subversion repositories.
inherit eutils
ESVN="${ECLASS}"
case "${EAPI:-0}" in
0|1)
EXPORT_FUNCTIONS src_unpack pkg_preinst
;;
*)
EXPORT_FUNCTIONS src_unpack src_prepare pkg_preinst
;;
esac
DESCRIPTION="Based on the ${ECLASS} eclass"
SUBVERSION_DEPEND="dev-vcs/subversion[webdav-neon,webdav-serf]
net-misc/rsync"
if [[ -z "${ESVN_DISABLE_DEPENDENCIES}" ]]; then
DEPEND="${SUBVERSION_DEPEND}"
fi
# @ECLASS-VARIABLE: ESVN_STORE_DIR
# @DESCRIPTION:
# subversion sources store directory. Users may override this in /etc/make.conf
if [[ -z ${ESVN_STORE_DIR} ]]; then
ESVN_STORE_DIR="${PORTAGE_ACTUAL_DISTDIR:-${DISTDIR}}/svn-src"
# Pick a directory with the same permissions now and in the future. Note
# that we cannot just use USERNAME because the eventual effective user when
# doing the svn commands may change - PORTAGE_USERNAME has not taken effect
# yet. Further complicating things, if features userpriv is not set,
# PORTAGE_USERNAME is going to be ignored. We assume that if we enable
# userpriv in the future, we will also set PORTAGE_USERNAME to something
# other than "portage".
# TODO: remove this once we are using consistent users and userpriv settings
# for emerge and emerge-${BOARD}.
ESVN_STORE_DIR="${ESVN_STORE_DIR}/${PORTAGE_USERNAME:-portage}"
fi
# @ECLASS-VARIABLE: ESVN_FETCH_CMD
# @DESCRIPTION:
# subversion checkout command
ESVN_FETCH_CMD="svn checkout"
# @ECLASS-VARIABLE: ESVN_UPDATE_CMD
# @DESCRIPTION:
# subversion update command
ESVN_UPDATE_CMD="svn update"
# @ECLASS-VARIABLE: ESVN_SWITCH_CMD
# @DESCRIPTION:
# subversion switch command
ESVN_SWITCH_CMD="svn switch"
# @ECLASS-VARIABLE: ESVN_OPTIONS
# @DESCRIPTION:
# the options passed to checkout or update. If you want a specific revision see
# ESVN_REPO_URI instead of using -rREV.
ESVN_OPTIONS="${ESVN_OPTIONS:-}"
# @ECLASS-VARIABLE: ESVN_REPO_URI
# @DESCRIPTION:
# repository uri
#
# e.g. http://foo/trunk, svn://bar/trunk, svn://bar/branch/foo@1234
#
# supported protocols:
# http://
# https://
# svn://
# svn+ssh://
#
# to peg to a specific revision, append @REV to the repo's uri
ESVN_REPO_URI="${ESVN_REPO_URI:-}"
# @ECLASS-VARIABLE: ESVN_REVISION
# @DESCRIPTION:
# User configurable revision checkout or update to from the repository
#
# Useful for live svn or trunk svn ebuilds allowing the user to peg
# to a specific revision
#
# Note: This should never be set in an ebuild!
ESVN_REVISION="${ESVN_REVISION:-}"
# @ECLASS-VARIABLE: ESVN_USER
# @DESCRIPTION:
# User name
ESVN_USER="${ESVN_USER:-}"
# @ECLASS-VARIABLE: ESVN_PASSWORD
# @DESCRIPTION:
# Password
ESVN_PASSWORD="${ESVN_PASSWORD:-}"
# @ECLASS-VARIABLE: ESVN_PROJECT
# @DESCRIPTION:
# project name of your ebuild (= name space)
#
# subversion eclass will check out the subversion repository like:
#
# ${ESVN_STORE_DIR}/${ESVN_PROJECT}/${ESVN_REPO_URI##*/}
#
# so if you define ESVN_REPO_URI as http://svn.collab.net/repo/svn/trunk or
# http://svn.collab.net/repo/svn/trunk/, and PN is subversion-svn,
# it will check out like:
#
# ${ESVN_STORE_DIR}/subversion/trunk
#
# this variable need not match the upstream project name exactly,
# so you can also declare it like:
#
# # jakarta commons-logging
# ESVN_PROJECT=commons/logging
#
# default: ${PN/-svn}.
ESVN_PROJECT="${ESVN_PROJECT:-${PN/-svn}}"
# @ECLASS-VARIABLE: ESVN_BOOTSTRAP
# @DESCRIPTION:
# bootstrap script or command, e.g. autogen.sh
ESVN_BOOTSTRAP="${ESVN_BOOTSTRAP:-}"
# @ECLASS-VARIABLE: ESVN_PATCHES
# @DESCRIPTION:
# subversion eclass can apply patches in subversion_bootstrap().
# you can use shell wildcards in this variable, e.g. *.diff or *.patch.
# NOTE: patches will be applied before ESVN_BOOTSTRAP is processed.
#
# Patches are searched both in ${PWD} and ${FILESDIR}, if not found in either
# location, the installation dies.
ESVN_PATCHES="${ESVN_PATCHES:-}"
# @ECLASS-VARIABLE: ESVN_RESTRICT
# @DESCRIPTION:
# this should be a space delimited list of subversion eclass features to
# restrict.
# export)
# don't export the working copy to S.
ESVN_RESTRICT="${ESVN_RESTRICT:-}"
# @ECLASS-VARIABLE: ESVN_DISABLE_DEPENDENCIES
# @DESCRIPTION:
# Set this variable to a non-empty value to disable the automatic inclusion of
# Subversion in dependencies.
ESVN_DISABLE_DEPENDENCIES="${ESVN_DISABLE_DEPENDENCIES:-}"
# @ECLASS-VARIABLE: ESVN_OFFLINE
# @DESCRIPTION:
# Set this variable to a non-empty value to disable the automatic updating of
# an svn source tree. This is intended to be set outside the subversion source
# tree by users.
ESVN_OFFLINE="${ESVN_OFFLINE:-${ESCM_OFFLINE}}"
# @ECLASS-VARIABLE: ESVN_UP_FREQ
# @DESCRIPTION:
# Set the minimum number of hours between svn up'ing in any given svn module. This is particularly
# useful for split KDE ebuilds where we want to ensure that all submodules are compiled for the same
# revision. It should also be kept user overrideable.
ESVN_UP_FREQ="${ESVN_UP_FREQ:=}"
# @ECLASS-VARIABLE: ESCM_LOGDIR
# @DESCRIPTION:
# User configuration variable. If set to a path such as e.g. /var/log/scm any
# package inheriting from subversion.eclass will record svn revision to
# ${CATEGORY}/${PN}.log in that path in pkg_preinst. This is not supposed to be
# set by ebuilds/eclasses. It defaults to empty so users need to opt in.
ESCM_LOGDIR="${ESCM_LOGDIR:=}"
# @FUNCTION: subversion_fetch
# @USAGE: [repo_uri] [destination]
# @DESCRIPTION:
# Wrapper function to fetch sources from subversion via svn checkout or svn update,
# depending on whether there is an existing working copy in ${ESVN_STORE_DIR}.
#
# Can take two optional parameters:
# repo_uri - a repository URI. default is ESVN_REPO_URI.
# destination - a check out path in S.
subversion_fetch() {
local repo_uri="$(subversion__get_repository_uri "${1:-${ESVN_REPO_URI}}")"
local revision="$(subversion__get_peg_revision "${1:-${ESVN_REPO_URI}}")"
local S_dest="${2}"
if [[ -z ${repo_uri} ]]; then
die "${ESVN}: ESVN_REPO_URI (or specified URI) is empty."
fi
[[ -n "${ESVN_REVISION}" ]] && revision="${ESVN_REVISION}"
# check for the protocol
local protocol="${repo_uri%%:*}"
case "${protocol}" in
http|https)
;;
svn|svn+ssh)
;;
*)
die "${ESVN}: fetch from '${protocol}' is not yet implemented."
;;
esac
addread "/etc/subversion"
addwrite "${ESVN_STORE_DIR}"
# Also make the /var/lib/portage/distfiles/svn-src directory writeable in sandbox
# so we can create it if necessary.
addwrite "$(dirname ${ESVN_STORE_DIR})"
if [[ ! -d ${ESVN_STORE_DIR} ]]; then
debug-print "${FUNCNAME}: initial checkout. creating subversion directory"
mkdir -p "${ESVN_STORE_DIR}" || die "${ESVN}: can't mkdir ${ESVN_STORE_DIR}."
fi
cd "${ESVN_STORE_DIR}" || die "${ESVN}: can't chdir to ${ESVN_STORE_DIR}"
local wc_path="$(subversion__get_wc_path "${repo_uri}")"
local options="${ESVN_OPTIONS} --config-dir ${ESVN_STORE_DIR}/.subversion"
[[ -n "${revision}" ]] && options="${options} -r ${revision}"
if [[ "${ESVN_OPTIONS}" = *-r* ]]; then
ewarn "\${ESVN_OPTIONS} contains -r, this usage is unsupported. Please"
ewarn "see \${ESVN_REPO_URI}"
fi
if has_version ">=dev-vcs/subversion-1.6.0"; then
options="${options} --config-option=config:auth:password-stores="
fi
debug-print "${FUNCNAME}: wc_path = \"${wc_path}\""
debug-print "${FUNCNAME}: ESVN_OPTIONS = \"${ESVN_OPTIONS}\""
debug-print "${FUNCNAME}: options = \"${options}\""
if [[ ! -d ${wc_path}/.svn ]]; then
if [[ -n ${ESVN_OFFLINE} ]]; then
ewarn "ESVN_OFFLINE cannot be used when there is no existing checkout."
fi
# first check out
einfo "subversion check out start -->"
einfo " repository: ${repo_uri}${revision:+@}${revision}"
debug-print "${FUNCNAME}: ${ESVN_FETCH_CMD} ${options} ${repo_uri}"
mkdir -p "${ESVN_PROJECT}" || die "${ESVN}: can't mkdir ${ESVN_PROJECT}."
cd "${ESVN_PROJECT}" || die "${ESVN}: can't chdir to ${ESVN_PROJECT}"
if [[ -n "${ESVN_USER}" ]]; then
${ESVN_FETCH_CMD} ${options} --username "${ESVN_USER}" --password "${ESVN_PASSWORD}" "${repo_uri}" || die "${ESVN}: can't fetch to ${wc_path} from ${repo_uri}."
else
${ESVN_FETCH_CMD} ${options} "${repo_uri}" || die "${ESVN}: can't fetch to ${wc_path} from ${repo_uri}."
fi
elif [[ -n ${ESVN_OFFLINE} ]]; then
svn upgrade "${wc_path}" &>/dev/null
svn cleanup "${wc_path}" &>/dev/null
subversion_wc_info "${repo_uri}" || die "${ESVN}: unknown problem occurred while accessing working copy."
if [[ -n ${ESVN_REVISION} && ${ESVN_REVISION} != ${ESVN_WC_REVISION} ]]; then
die "${ESVN}: You requested off-line updating and revision ${ESVN_REVISION} but only revision ${ESVN_WC_REVISION} is available locally."
fi
einfo "Fetching disabled: Using existing repository copy at revision ${ESVN_WC_REVISION}."
else
svn upgrade "${wc_path}" &>/dev/null
svn cleanup "${wc_path}" &>/dev/null
subversion_wc_info "${repo_uri}" || die "${ESVN}: unknown problem occurred while accessing working copy."
local esvn_up_freq=
if [[ -n ${ESVN_UP_FREQ} ]]; then
if [[ -n ${ESVN_UP_FREQ//[[:digit:]]} ]]; then
die "${ESVN}: ESVN_UP_FREQ must be an integer value corresponding to the minimum number of hours between svn up."
elif [[ -z $(find "${wc_path}/.svn/entries" -mmin "+$((ESVN_UP_FREQ*60))") ]]; then
einfo "Fetching disabled since ${ESVN_UP_FREQ} hours has not passed since last update."
einfo "Using existing repository copy at revision ${ESVN_WC_REVISION}."
esvn_up_freq=no_update
fi
fi
if [[ -z ${esvn_up_freq} ]]; then
if [[ ${ESVN_WC_URL} != $(subversion__get_repository_uri "${repo_uri}") ]]; then
einfo "subversion switch start -->"
einfo " old repository: ${ESVN_WC_URL}@${ESVN_WC_REVISION}"
einfo " new repository: ${repo_uri}${revision:+@}${revision}"
debug-print "${FUNCNAME}: ${ESVN_SWITCH_CMD} ${options} ${repo_uri}"
cd "${wc_path}" || die "${ESVN}: can't chdir to ${wc_path}"
if [[ -n "${ESVN_USER}" ]]; then
${ESVN_SWITCH_CMD} ${options} --username "${ESVN_USER}" --password "${ESVN_PASSWORD}" ${repo_uri} || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
else
${ESVN_SWITCH_CMD} ${options} ${repo_uri} || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
fi
else
# update working copy
einfo "subversion update start -->"
einfo " repository: ${repo_uri}${revision:+@}${revision}"
debug-print "${FUNCNAME}: ${ESVN_UPDATE_CMD} ${options}"
cd "${wc_path}" || die "${ESVN}: can't chdir to ${wc_path}"
if [[ -n "${ESVN_USER}" ]]; then
${ESVN_UPDATE_CMD} ${options} --username "${ESVN_USER}" --password "${ESVN_PASSWORD}" || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
else
${ESVN_UPDATE_CMD} ${options} || die "${ESVN}: can't update ${wc_path} from ${repo_uri}."
fi
fi
fi
fi
einfo " working copy: ${wc_path}"
if ! has "export" ${ESVN_RESTRICT}; then
cd "${wc_path}" || die "${ESVN}: can't chdir to ${wc_path}"
local S="${S}/${S_dest}"
mkdir -p "${S}"
# export to the ${WORKDIR}
#* "svn export" has a bug. see http://bugs.gentoo.org/119236
#* svn export . "${S}" || die "${ESVN}: can't export to ${S}."
rsync -rlpgo --exclude=".svn/" . "${S}" || die "${ESVN}: can't export to ${S}."
fi
echo
}
# @FUNCTION: subversion_bootstrap
# @DESCRIPTION:
# Apply patches in ${ESVN_PATCHES} and run ${ESVN_BOOTSTRAP} if specified.
subversion_bootstrap() {
if has "export" ${ESVN_RESTRICT}; then
return
fi
cd "${S}"
if [[ -n ${ESVN_PATCHES} ]]; then
einfo "apply patches -->"
local patch fpatch
for patch in ${ESVN_PATCHES}; do
if [[ -f ${patch} ]]; then
epatch "${patch}"
else
for fpatch in ${FILESDIR}/${patch}; do
if [[ -f ${fpatch} ]]; then
epatch "${fpatch}"
else
die "${ESVN}: ${patch} not found"
fi
done
fi
done
echo
fi
if [[ -n ${ESVN_BOOTSTRAP} ]]; then
einfo "begin bootstrap -->"
if [[ -f ${ESVN_BOOTSTRAP} && -x ${ESVN_BOOTSTRAP} ]]; then
einfo " bootstrap with a file: ${ESVN_BOOTSTRAP}"
eval "./${ESVN_BOOTSTRAP}" || die "${ESVN}: can't execute ESVN_BOOTSTRAP."
else
einfo " bootstrap with command: ${ESVN_BOOTSTRAP}"
eval "${ESVN_BOOTSTRAP}" || die "${ESVN}: can't eval ESVN_BOOTSTRAP."
fi
fi
}
# @FUNCTION: subversion_src_unpack
# @DESCRIPTION:
# Default src_unpack. Fetch and, in older EAPIs, bootstrap.
subversion_src_unpack() {
subversion_fetch || die "${ESVN}: unknown problem occurred in subversion_fetch."
if has "${EAPI:-0}" 0 1; then
subversion_bootstrap || die "${ESVN}: unknown problem occurred in subversion_bootstrap."
fi
}
# @FUNCTION: subversion_src_prepare
# @DESCRIPTION:
# Default src_prepare. Bootstrap.
subversion_src_prepare() {
subversion_bootstrap || die "${ESVN}: unknown problem occurred in subversion_bootstrap."
}
# @FUNCTION: subversion_wc_info
# @USAGE: [repo_uri]
# @RETURN: ESVN_WC_URL, ESVN_WC_ROOT, ESVN_WC_UUID, ESVN_WC_REVISION and ESVN_WC_PATH
# @DESCRIPTION:
# Get svn info for the specified repo_uri. The default repo_uri is ESVN_REPO_URI.
#
# The working copy information on the specified repository URI are set to
# ESVN_WC_* variables.
subversion_wc_info() {
local repo_uri="$(subversion__get_repository_uri "${1:-${ESVN_REPO_URI}}")"
local wc_path="$(subversion__get_wc_path "${repo_uri}")"
debug-print "${FUNCNAME}: repo_uri = ${repo_uri}"
debug-print "${FUNCNAME}: wc_path = ${wc_path}"
if [[ ! -d ${wc_path} ]]; then
return 1
fi
export ESVN_WC_URL="$(subversion__svn_info "${wc_path}" "URL")"
export ESVN_WC_ROOT="$(subversion__svn_info "${wc_path}" "Repository Root")"
export ESVN_WC_UUID="$(subversion__svn_info "${wc_path}" "Repository UUID")"
export ESVN_WC_REVISION="$(subversion__svn_info "${wc_path}" "Revision")"
export ESVN_WC_PATH="${wc_path}"
}
## -- Private Functions
## -- subversion__svn_info() ------------------------------------------------- #
#
# param $1 - a target.
# param $2 - a key name.
#
subversion__svn_info() {
local target="${1}"
local key="${2}"
env LC_ALL=C svn info "${target}" | grep -i "^${key}" | cut -d" " -f2-
}
## -- subversion__get_repository_uri() --------------------------------------- #
#
# param $1 - a repository URI.
subversion__get_repository_uri() {
local repo_uri="${1}"
debug-print "${FUNCNAME}: repo_uri = ${repo_uri}"
if [[ -z ${repo_uri} ]]; then
die "${ESVN}: ESVN_REPO_URI (or specified URI) is empty."
fi
# delete trailing slash
if [[ -z ${repo_uri##*/} ]]; then
repo_uri="${repo_uri%/}"
fi
repo_uri="${repo_uri%@*}"
echo "${repo_uri}"
}
## -- subversion__get_wc_path() ---------------------------------------------- #
#
# param $1 - a repository URI.
subversion__get_wc_path() {
local repo_uri="$(subversion__get_repository_uri "${1}")"
debug-print "${FUNCNAME}: repo_uri = ${repo_uri}"
echo "${ESVN_STORE_DIR}/${ESVN_PROJECT}/${repo_uri##*/}"
}
## -- subversion__get_peg_revision() ----------------------------------------- #
#
# param $1 - a repository URI.
subversion__get_peg_revision() {
local repo_uri="${1}"
debug-print "${FUNCNAME}: repo_uri = ${repo_uri}"
# repo_uri has peg revision ?
if [[ ${repo_uri} != *@* ]]; then
debug-print "${FUNCNAME}: repo_uri does not have a peg revision."
fi
local peg_rev=
[[ ${repo_uri} = *@* ]] && peg_rev="${repo_uri##*@}"
debug-print "${FUNCNAME}: peg_rev = ${peg_rev}"
echo "${peg_rev}"
}
# @FUNCTION: subversion_pkg_preinst
# @USAGE: [repo_uri]
# @DESCRIPTION:
# Log the svn revision of source code. Doing this in pkg_preinst because we
# want the logs to stick around if packages are uninstalled without messing with
# config protection.
subversion_pkg_preinst() {
local pkgdate=$(date "+%Y%m%d %H:%M:%S")
subversion_wc_info "${1:-${ESVN_REPO_URI}}"
if [[ -n ${ESCM_LOGDIR} ]]; then
local dir="${ROOT}/${ESCM_LOGDIR}/${CATEGORY}"
if [[ ! -d ${dir} ]]; then
mkdir -p "${dir}" || \
eerror "Failed to create '${dir}' for logging svn revision to '${PORTDIR_SCM}'"
fi
local logmessage="svn: ${pkgdate} - ${PF}:${SLOT} was merged at revision ${ESVN_WC_REVISION}"
if [[ -d ${dir} ]]; then
echo "${logmessage}" >> "${dir}/${PN}.log"
else
eerror "Could not log the message '${logmessage}' to '${dir}/${PN}.log'"
fi
fi
}
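# Minimal consumer sketch; the URI is the example documented above for
# ESVN_PROJECT, not a real package repository.
# @CODE
# EAPI=4
# ESVN_REPO_URI="http://svn.collab.net/repo/svn/trunk"
# inherit subversion
# # src_unpack/src_prepare come from the eclass via EXPORT_FUNCTIONS.
# @CODE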

View File

@ -1,84 +0,0 @@
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
#
# Original Author: The Chromium OS Authors <chromium-os-dev@chromium.org>
# Purpose: Install Tegra BCT files for firmware construction.
#
# @ECLASS-VARIABLE: TEGRA_BCT_SDRAM_CONFIG
# @DESCRIPTION:
# SDRAM memory timing configuration file to install
: ${TEGRA_BCT_SDRAM_CONFIG:=}
# @ECLASS-VARIABLE: TEGRA_BCT_FLASH_CONFIG
# @DESCRIPTION:
# Flash memory configuration file to install
: ${TEGRA_BCT_FLASH_CONFIG:=}
# @ECLASS-VARIABLE: TEGRA_BCT_CHIP_FAMILY
# @DESCRIPTION:
# Family of Tegra chip (determines BCT configuration)
: ${TEGRA_BCT_CHIP_FAMILY:=t25}
# Check for EAPI 2+
case "${EAPI:-0}" in
4|3|2) ;;
*) die "unsupported EAPI" ;;
esac
tegra-bct_src_configure() {
local sdram_file=${FILESDIR}/${TEGRA_BCT_SDRAM_CONFIG}
local flash_file=${FILESDIR}/${TEGRA_BCT_FLASH_CONFIG}
if [ -z "${TEGRA_BCT_SDRAM_CONFIG}" ]; then
die "No SDRAM configuration file selected."
fi
if [ -z "${TEGRA_BCT_FLASH_CONFIG}" ]; then
die "No flash configuration file selected."
fi
if [ -z "${TEGRA_BCT_CHIP_FAMILY}" ]; then
die "No chip family selected."
fi
einfo "Using sdram config file: ${sdram_file}"
einfo "Using flash config file: ${flash_file}"
einfo "Using chip family : ${TEGRA_BCT_CHIP_FAMILY}"
cat ${flash_file} > board.cfg ||
die "Failed to read flash config file."
cat ${sdram_file} >> board.cfg ||
die "Failed to read SDRAM config file."
}
tegra-bct_src_compile() {
local chip_family="-${TEGRA_BCT_CHIP_FAMILY}"
cbootimage -gbct $chip_family board.cfg board.bct ||
die "Failed to generate BCT."
}
tegra-bct_src_install() {
local sdram_file=${FILESDIR}/${TEGRA_BCT_SDRAM_CONFIG}
local flash_file=${FILESDIR}/${TEGRA_BCT_FLASH_CONFIG}
insinto /firmware/bct
doins "${sdram_file}"
doins "${flash_file}"
if [ "$(basename ${sdram_file})" != "sdram.cfg" ]; then
dosym "$(basename ${sdram_file})" /firmware/bct/sdram.cfg
fi
if [ "$(basename ${flash_file})" != "flash.cfg" ]; then
dosym "$(basename ${flash_file})" /firmware/bct/flash.cfg
fi
doins board.cfg
doins board.bct
}
EXPORT_FUNCTIONS src_configure src_compile src_install
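# Rough consumer sketch; the config file names are hypothetical and must
# exist in the ebuild's FILESDIR.
# @CODE
# TEGRA_BCT_SDRAM_CONFIG="example-sdram.cfg"
# TEGRA_BCT_FLASH_CONFIG="example-flash.cfg"
# TEGRA_BCT_CHIP_FAMILY="t25"
# inherit tegra-bct
# @CODE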

View File

@ -1,50 +0,0 @@
#!/bin/bash
source tests-common.sh
inherit appid
valid_uuids=(
'{01234567-89AB-CDEF-0123-456789ABCDEF}'
'{11111111-1111-1111-1111-111111111111}'
'{DDDDDDDD-DDDD-DDDD-DDDD-DDDDDDDDDDDD}'
$(grep -hs doappid ../../../../{private-,}overlays/overlay-*/chromeos-base/chromeos-bsp-*/*.ebuild | \
gawk '{print gensub(/"/, "", "g", $2)}')
)
invalid_uuids=(
''
'01234567-89AB-CDEF-0123-4567-89ABCDEF0123'
' {01234567-89AB-CDEF-0123-4567-89ABCDEF0123} '
' {01234567-89AB-CDEF-0123-4567-89ABCDEF0123}'
'{01234567-89AB-CDEF-0123-4567-89ABCDEF0123} '
'{01234567-89AB-CDEF-0123-4567-89abcDEF0123}'
'{GGGGGGGG-GGGG-GGGG-GGGG-GGGG-GGGGGGGGGGGG}'
)
tbegin "no args"
! (doappid) >&/dev/null
tend $?
tbegin "too many args"
! (doappid "${valid_uuids[0]}" 1234) >&/dev/null
tend $?
tbegin "invalid appids"
for uuid in "${invalid_uuids[@]}" ; do
if (doappid "${uuid}") >&/dev/null ; then
tend 1 "not caught: ${uuid}"
fi
rm -rf "${D}"
done
tend $?
tbegin "valid appids"
for uuid in "${valid_uuids[@]}" ; do
if ! (doappid "${uuid}") ; then
tend 1 "not accepted: ${uuid}"
fi
rm -rf "${D}"
done
tend $?
texit

View File

@ -1 +0,0 @@
source ../../../portage-stable/eclass/tests/tests-common.sh

View File

@ -1,787 +0,0 @@
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/eclass/toolchain-funcs.eclass,v 1.120 2012/12/29 05:08:54 vapier Exp $
# @ECLASS: toolchain-funcs.eclass
# @MAINTAINER:
# Toolchain Ninjas <toolchain@gentoo.org>
# @BLURB: functions to query common info about the toolchain
# @DESCRIPTION:
# The toolchain-funcs aims to provide a complete suite of functions
# for gleaning useful information about the toolchain and to simplify
# ugly things like cross-compiling and multilib. All of this is done
# in such a way that you can rely on the function always returning
# something sane.
if [[ ${___ECLASS_ONCE_TOOLCHAIN_FUNCS} != "recur -_+^+_- spank" ]] ; then
___ECLASS_ONCE_TOOLCHAIN_FUNCS="recur -_+^+_- spank"
inherit multilib binutils-funcs
# tc-getPROG <VAR [search vars]> <default> [tuple]
_tc-getPROG() {
local tuple=$1
local v var vars=$2
local prog=$3
var=${vars%% *}
for v in ${vars} ; do
if [[ -n ${!v} ]] ; then
export ${var}="${!v}"
echo "${!v}"
return 0
fi
done
local search=
[[ -n $4 ]] && search=$(type -p "$4-${prog}")
[[ -z ${search} && -n ${!tuple} ]] && search=$(type -p "${!tuple}-${prog}")
[[ -n ${search} ]] && prog=${search##*/}
export ${var}=${prog}
echo "${!var}"
}
tc-getBUILD_PROG() { _tc-getPROG CBUILD "BUILD_$1 $1_FOR_BUILD HOST$1" "${@:2}"; }
tc-getPROG() { _tc-getPROG CHOST "$@"; }
# @FUNCTION: tc-getAR
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver
tc-getAR() { tc-getPROG AR ar "$@"; }
# @FUNCTION: tc-getAS
# @USAGE: [toolchain prefix]
# @RETURN: name of the assembler
tc-getAS() { tc-getPROG AS as "$@"; }
# @FUNCTION: tc-getCC
# @USAGE: [toolchain prefix]
# @RETURN: name of the C compiler
tc-getCC() { tc-getPROG CC gcc "$@"; }
# @FUNCTION: tc-getCPP
# @USAGE: [toolchain prefix]
# @RETURN: name of the C preprocessor
tc-getCPP() { tc-getPROG CPP cpp "$@"; }
# @FUNCTION: tc-getCXX
# @USAGE: [toolchain prefix]
# @RETURN: name of the C++ compiler
tc-getCXX() { tc-getPROG CXX g++ "$@"; }
# @FUNCTION: tc-getLD
# @USAGE: [toolchain prefix]
# @RETURN: name of the linker
tc-getLD() { tc-getPROG LD ld "$@"; }
# @FUNCTION: tc-getSTRIP
# @USAGE: [toolchain prefix]
# @RETURN: name of the strip program
tc-getSTRIP() { tc-getPROG STRIP strip "$@"; }
# @FUNCTION: tc-getNM
# @USAGE: [toolchain prefix]
# @RETURN: name of the symbol/object thingy
tc-getNM() { tc-getPROG NM nm "$@"; }
# @FUNCTION: tc-getRANLIB
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver indexer
tc-getRANLIB() { tc-getPROG RANLIB ranlib "$@"; }
# @FUNCTION: tc-getOBJCOPY
# @USAGE: [toolchain prefix]
# @RETURN: name of the object copier
tc-getOBJCOPY() { tc-getPROG OBJCOPY objcopy "$@"; }
# @FUNCTION: tc-getF77
# @USAGE: [toolchain prefix]
# @RETURN: name of the Fortran 77 compiler
tc-getF77() { tc-getPROG F77 gfortran "$@"; }
# @FUNCTION: tc-getFC
# @USAGE: [toolchain prefix]
# @RETURN: name of the Fortran 90 compiler
tc-getFC() { tc-getPROG FC gfortran "$@"; }
# @FUNCTION: tc-getGCJ
# @USAGE: [toolchain prefix]
# @RETURN: name of the java compiler
tc-getGCJ() { tc-getPROG GCJ gcj "$@"; }
# @FUNCTION: tc-getPKG_CONFIG
# @USAGE: [toolchain prefix]
# @RETURN: name of the pkg-config tool
tc-getPKG_CONFIG() { tc-getPROG PKG_CONFIG pkg-config "$@"; }
# @FUNCTION: tc-getRC
# @USAGE: [toolchain prefix]
# @RETURN: name of the Windows resource compiler
tc-getRC() { tc-getPROG RC windres "$@"; }
# @FUNCTION: tc-getDLLWRAP
# @USAGE: [toolchain prefix]
# @RETURN: name of the Windows dllwrap utility
tc-getDLLWRAP() { tc-getPROG DLLWRAP dllwrap "$@"; }
# @FUNCTION: tc-getBUILD_AR
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver for building binaries to run on the build machine
tc-getBUILD_AR() { tc-getBUILD_PROG AR ar "$@"; }
# @FUNCTION: tc-getBUILD_AS
# @USAGE: [toolchain prefix]
# @RETURN: name of the assembler for building binaries to run on the build machine
tc-getBUILD_AS() { tc-getBUILD_PROG AS as "$@"; }
# @FUNCTION: tc-getBUILD_CC
# @USAGE: [toolchain prefix]
# @RETURN: name of the C compiler for building binaries to run on the build machine
tc-getBUILD_CC() { tc-getBUILD_PROG CC gcc "$@"; }
# @FUNCTION: tc-getBUILD_CPP
# @USAGE: [toolchain prefix]
# @RETURN: name of the C preprocessor for building binaries to run on the build machine
tc-getBUILD_CPP() { tc-getBUILD_PROG CPP cpp "$@"; }
# @FUNCTION: tc-getBUILD_CXX
# @USAGE: [toolchain prefix]
# @RETURN: name of the C++ compiler for building binaries to run on the build machine
tc-getBUILD_CXX() { tc-getBUILD_PROG CXX g++ "$@"; }
# @FUNCTION: tc-getBUILD_LD
# @USAGE: [toolchain prefix]
# @RETURN: name of the linker for building binaries to run on the build machine
tc-getBUILD_LD() { tc-getBUILD_PROG LD ld "$@"; }
# @FUNCTION: tc-getBUILD_STRIP
# @USAGE: [toolchain prefix]
# @RETURN: name of the strip program for building binaries to run on the build machine
tc-getBUILD_STRIP() { tc-getBUILD_PROG STRIP strip "$@"; }
# @FUNCTION: tc-getBUILD_NM
# @USAGE: [toolchain prefix]
# @RETURN: name of the symbol/object thingy for building binaries to run on the build machine
tc-getBUILD_NM() { tc-getBUILD_PROG NM nm "$@"; }
# @FUNCTION: tc-getBUILD_RANLIB
# @USAGE: [toolchain prefix]
# @RETURN: name of the archiver indexer for building binaries to run on the build machine
tc-getBUILD_RANLIB() { tc-getBUILD_PROG RANLIB ranlib "$@"; }
# @FUNCTION: tc-getBUILD_OBJCOPY
# @USAGE: [toolchain prefix]
# @RETURN: name of the object copier for building binaries to run on the build machine
tc-getBUILD_OBJCOPY() { tc-getBUILD_PROG OBJCOPY objcopy "$@"; }
# @FUNCTION: tc-getBUILD_PKG_CONFIG
# @USAGE: [toolchain prefix]
# @RETURN: name of the pkg-config tool for building binaries to run on the build machine
tc-getBUILD_PKG_CONFIG() { tc-getBUILD_PROG PKG_CONFIG pkg-config "$@"; }
# @FUNCTION: tc-export
# @USAGE: <list of toolchain variables>
# @DESCRIPTION:
# Quick way to export a bunch of compiler vars at once.
tc-export() {
local var
for var in "$@" ; do
[[ $(type -t tc-get${var}) != "function" ]] && die "tc-export: invalid export variable '${var}'"
eval tc-get${var} > /dev/null
done
}
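# Example (the Makefile variable names passed to emake are illustrative,
# not part of this eclass):
# @CODE
# src_compile() {
#   tc-export CC CXX PKG_CONFIG
#   emake CC="${CC}" CXX="${CXX}"
# }
# @CODE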
# @FUNCTION: tc-is-cross-compiler
# @RETURN: Shell true if we are using a cross-compiler, shell false otherwise
tc-is-cross-compiler() {
return $([[ ${CBUILD:-${CHOST}} != ${CHOST} ]])
}
# @FUNCTION: tc-is-softfloat
# @DESCRIPTION:
# See if this toolchain is a softfloat based one.
# @CODE
# The possible return values:
# - only: the target is always softfloat (never had fpu)
# - yes: the target should support softfloat
# - softfp: (arm specific) the target should use hardfloat insns, but softfloat calling convention
# - no: the target doesn't support softfloat
# @CODE
# This allows us to react differently where packages accept
# softfloat flags in the case where support is optional, but
# rejects softfloat flags where the target always lacks an fpu.
tc-is-softfloat() {
local CTARGET=${CTARGET:-${CHOST}}
case ${CTARGET} in
bfin*|h8300*)
echo "only" ;;
*)
if [[ ${CTARGET//_/-} == *-softfloat-* ]] ; then
echo "yes"
elif [[ ${CTARGET//_/-} == *-softfp-* ]] ; then
echo "softfp"
else
echo "no"
fi
;;
esac
}
# @FUNCTION: tc-is-static-only
# @DESCRIPTION:
# Return shell true if the target does not support shared libs, shell false
# otherwise.
tc-is-static-only() {
local host=${CTARGET:-${CHOST}}
# *MiNT doesn't have shared libraries, only platform so far
return $([[ ${host} == *-mint* ]])
}
# @FUNCTION: tc-export_build_env
# @USAGE: [compiler variables]
# @DESCRIPTION:
# Export common build related compiler settings.
tc-export_build_env() {
tc-export "$@"
: ${BUILD_CFLAGS:=-O1 -pipe}
: ${BUILD_CXXFLAGS:=-O1 -pipe}
: ${BUILD_CPPFLAGS:=}
: ${BUILD_LDFLAGS:=}
export BUILD_{C,CXX,CPP,LD}FLAGS
}
# @FUNCTION: tc-env_build
# @USAGE: <command> [command args]
# @INTERNAL
# @DESCRIPTION:
# Set up the compile environment for the build tools and then execute the
# specified command. We use tc-getBUILD_XX here so that we work with
# all of the semi-[non-]standard env vars like $BUILD_CC, which the target
# build system often does not check.
tc-env_build() {
tc-export_build_env
CFLAGS=${BUILD_CFLAGS} \
CXXFLAGS=${BUILD_CXXFLAGS} \
CPPFLAGS=${BUILD_CPPFLAGS} \
LDFLAGS=${BUILD_LDFLAGS} \
AR=$(tc-getBUILD_AR) \
AS=$(tc-getBUILD_AS) \
CC=$(tc-getBUILD_CC) \
CPP=$(tc-getBUILD_CPP) \
CXX=$(tc-getBUILD_CXX) \
LD=$(tc-getBUILD_LD) \
NM=$(tc-getBUILD_NM) \
PKG_CONFIG=$(tc-getBUILD_PKG_CONFIG) \
RANLIB=$(tc-getBUILD_RANLIB) \
"$@"
}
# @FUNCTION: econf_build
# @USAGE: [econf flags]
# @DESCRIPTION:
# Sometimes we need to locally build up some tools to run on CBUILD because
# the package has helper utils which are compiled+executed when compiling.
# This won't work when cross-compiling as the CHOST is set to a target which
# we cannot natively execute.
#
# For example, the python package will build up a local python binary using
# a portable build system (configure+make), but then use that binary to run
# local python scripts to build up other components of the overall python.
# We cannot rely on the python binary in $PATH as that often times will be
# a different version, or not even installed in the first place. Instead,
# we compile the code in a different directory to run on CBUILD, and then
# use that binary when compiling the main package to run on CHOST.
#
# For example, with newer EAPIs, you'd do something like:
# @CODE
# src_configure() {
# ECONF_SOURCE=${S}
# if tc-is-cross-compiler ; then
# mkdir "${WORKDIR}"/${CBUILD}
# pushd "${WORKDIR}"/${CBUILD} >/dev/null
# econf_build --disable-some-unused-stuff
# popd >/dev/null
# fi
# ... normal build paths ...
# }
# src_compile() {
# if tc-is-cross-compiler ; then
# pushd "${WORKDIR}"/${CBUILD} >/dev/null
# emake one-or-two-build-tools
# ln/mv build-tools to normal build paths in ${S}/
# popd >/dev/null
# fi
# ... normal build paths ...
# }
# @CODE
econf_build() {
tc-env_build econf --build=${CBUILD:-${CHOST}} "$@"
}
# @FUNCTION: tc-has-openmp
# @USAGE: [toolchain prefix]
# @DESCRIPTION:
# See if the toolchain supports OpenMP.
tc-has-openmp() {
local base="${T}/test-tc-openmp"
cat <<-EOF > "${base}.c"
#include <omp.h>
int main() {
int nthreads, tid, ret = 0;
#pragma omp parallel private(nthreads, tid)
{
tid = omp_get_thread_num();
nthreads = omp_get_num_threads(); ret += tid + nthreads;
}
return ret;
}
EOF
$(tc-getCC "$@") -fopenmp "${base}.c" -o "${base}" >&/dev/null
local ret=$?
rm -f "${base}"*
return ${ret}
}
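# A hedged usage sketch for tc-has-openmp (hypothetical caller, not part of
# this eclass):
# @CODE
# pkg_pretend() {
# if use openmp && ! tc-has-openmp ; then
# die "Please switch to an OpenMP capable compiler"
# fi
# }
# @CODE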
# @FUNCTION: tc-has-tls
# @USAGE: [-s|-c|-l] [toolchain prefix]
# @DESCRIPTION:
# See if the toolchain supports thread local storage (TLS). Use -s to test the
# compiler, -c to also test the assembler, and -l to also test the C library
# (the default).
tc-has-tls() {
local base="${T}/test-tc-tls"
cat <<-EOF > "${base}.c"
int foo(int *i) {
static __thread int j = 0;
return *i ? j : *i;
}
EOF
local flags
case $1 in
-s) flags="-S";;
-c) flags="-c";;
-l) ;;
-*) die "Usage: tc-has-tls [-s|-c|-l] [toolchain prefix]";;
esac
: ${flags:=-fPIC -shared -Wl,-z,defs}
[[ $1 == -* ]] && shift
$(tc-getCC "$@") ${flags} "${base}.c" -o "${base}" >&/dev/null
local ret=$?
rm -f "${base}"*
return ${ret}
}
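# Usage sketch for tc-has-tls; the --disable-tls switch is an assumption of
# this example, not something every configure script provides:
# @CODE
# src_configure() {
# local myconf=()
# tc-has-tls -l || myconf+=( --disable-tls )
# econf "${myconf[@]}"
# }
# @CODE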
# Parse information from CBUILD/CHOST/CTARGET rather than
# use external variables from the profile.
tc-ninja_magic_to_arch() {
ninj() { [[ ${type} == "kern" ]] && echo $1 || echo $2 ; }
local type=$1
local host=$2
[[ -z ${host} ]] && host=${CTARGET:-${CHOST}}
local KV=${KV:-${KV_FULL}}
[[ ${type} == "kern" ]] && [[ -z ${KV} ]] && \
ewarn "QA: Kernel version could not be determined, please inherit kernel-2 or linux-info"
case ${host} in
aarch64*) ninj arm64 arm;;
alpha*) echo alpha;;
arm*) echo arm;;
avr*) ninj avr32 avr;;
bfin*) ninj blackfin bfin;;
cris*) echo cris;;
hppa*) ninj parisc hppa;;
i?86*)
# Starting with linux-2.6.24, the 'x86_64' and 'i386'
# trees have been unified into 'x86'.
# FreeBSD still uses i386
if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -lt $(KV_to_int 2.6.24) || ${host} == *freebsd* ]] ; then
echo i386
else
echo x86
fi
;;
ia64*) echo ia64;;
m68*) echo m68k;;
mips*) echo mips;;
nios2*) echo nios2;;
nios*) echo nios;;
powerpc*)
# Starting with linux-2.6.15, the 'ppc' and 'ppc64' trees
# have been unified into simply 'powerpc', but until 2.6.16,
# ppc32 is still using ARCH="ppc" as default
if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -ge $(KV_to_int 2.6.16) ]] ; then
echo powerpc
elif [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -eq $(KV_to_int 2.6.15) ]] ; then
if [[ ${host} == powerpc64* ]] || [[ ${PROFILE_ARCH} == "ppc64" ]] ; then
echo powerpc
else
echo ppc
fi
elif [[ ${host} == powerpc64* ]] ; then
echo ppc64
elif [[ ${PROFILE_ARCH} == "ppc64" ]] ; then
ninj ppc64 ppc
else
echo ppc
fi
;;
s390*) echo s390;;
sh64*) ninj sh64 sh;;
sh*) echo sh;;
sparc64*) ninj sparc64 sparc;;
sparc*) [[ ${PROFILE_ARCH} == "sparc64" ]] \
&& ninj sparc64 sparc \
|| echo sparc
;;
vax*) echo vax;;
x86_64*freebsd*) echo amd64;;
x86_64*)
# Starting with linux-2.6.24, the 'x86_64' and 'i386'
# trees have been unified into 'x86'.
if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -ge $(KV_to_int 2.6.24) ]] ; then
echo x86
else
ninj x86_64 amd64
fi
;;
# since our usage of tc-arch is largely concerned with
# normalizing inputs for testing ${CTARGET}, let's filter
# other cross targets (mingw and such) into the unknown.
*) echo unknown;;
esac
}
# @FUNCTION: tc-arch-kernel
# @USAGE: [toolchain prefix]
# @RETURN: name of the kernel arch according to the compiler target
tc-arch-kernel() {
tc-ninja_magic_to_arch kern "$@"
}
# @FUNCTION: tc-arch
# @USAGE: [toolchain prefix]
# @RETURN: name of the portage arch according to the compiler target
tc-arch() {
tc-ninja_magic_to_arch portage "$@"
}
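# Rough illustration of the mappings these two helpers produce (sketch;
# the kernel value assumes a >=2.6.24 kernel is detected):
# @CODE
# # CHOST=x86_64-pc-linux-gnu
# tc-arch # -> amd64
# tc-arch-kernel # -> x86
# # CHOST=armv7a-hardfloat-linux-gnueabi
# tc-arch # -> arm
# tc-arch-kernel # -> arm
# @CODE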
tc-endian() {
local host=$1
[[ -z ${host} ]] && host=${CTARGET:-${CHOST}}
host=${host%%-*}
case ${host} in
aarch64*be) echo big;;
aarch64) echo little;;
alpha*) echo big;;
arm*b*) echo big;;
arm*) echo little;;
cris*) echo little;;
hppa*) echo big;;
i?86*) echo little;;
ia64*) echo little;;
m68*) echo big;;
mips*l*) echo little;;
mips*) echo big;;
powerpc*) echo big;;
s390*) echo big;;
sh*b*) echo big;;
sh*) echo little;;
sparc*) echo big;;
x86_64*) echo little;;
*) echo wtf;;
esac
}
# Internal func. The first argument is the version info to expand.
# Query the preprocessor to improve compatibility across different
# compilers rather than maintaining a --version flag matrix. #335943
_gcc_fullversion() {
local ver="$1"; shift
set -- `$(tc-getCPP "$@") -E -P - <<<"__GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__"`
eval echo "$ver"
}
# @FUNCTION: gcc-fullversion
# @RETURN: compiler version (major.minor.micro: [3.4.6])
gcc-fullversion() {
_gcc_fullversion '$1.$2.$3' "$@"
}
# @FUNCTION: gcc-version
# @RETURN: compiler version (major.minor: [3.4].6)
gcc-version() {
_gcc_fullversion '$1.$2' "$@"
}
# @FUNCTION: gcc-major-version
# @RETURN: major compiler version (major: [3].4.6)
gcc-major-version() {
_gcc_fullversion '$1' "$@"
}
# @FUNCTION: gcc-minor-version
# @RETURN: minor compiler version (minor: 3.[4].6)
gcc-minor-version() {
_gcc_fullversion '$2' "$@"
}
# @FUNCTION: gcc-micro-version
# @RETURN: micro compiler version (micro: 3.4.[6])
gcc-micro-version() {
_gcc_fullversion '$3' "$@"
}
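# For a hypothetical gcc 4.7.3 toolchain the helpers above return:
# @CODE
# gcc-fullversion # -> 4.7.3
# gcc-version # -> 4.7
# gcc-major-version # -> 4
# gcc-minor-version # -> 7
# gcc-micro-version # -> 3
# @CODE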
# Returns the installation directory - internal toolchain
# function for use by _gcc-specs-exists (for flag-o-matic).
_gcc-install-dir() {
echo "$(LC_ALL=C $(tc-getCC) -print-search-dirs 2> /dev/null |\
awk '$1=="install:" {print $2}')"
}
# Returns true if the indicated specs file exists - internal toolchain
# function for use by flag-o-matic.
_gcc-specs-exists() {
[[ -f $(_gcc-install-dir)/$1 ]]
}
# Returns the requested gcc specs directive unprocessed - for use by
# gcc-specs-directive()
# Note: later specs normally overwrite earlier ones; however if a later
# spec starts with '+' then it appends.
# gcc -dumpspecs is parsed first, followed by files listed by "gcc -v"
# as "Reading <file>", in order. Strictly speaking, if there's a
# $(gcc_install_dir)/specs, the built-in specs aren't read, however by
# the same token anything from 'gcc -dumpspecs' is overridden by
# the contents of $(gcc_install_dir)/specs so the result is the
# same either way.
_gcc-specs-directive_raw() {
local cc=$(tc-getCC)
local specfiles=$(LC_ALL=C ${cc} -v 2>&1 | awk '$1=="Reading" {print $NF}')
${cc} -dumpspecs 2> /dev/null | cat - ${specfiles} | awk -v directive=$1 \
'BEGIN { pspec=""; spec=""; outside=1 }
$1=="*"directive":" { pspec=spec; spec=""; outside=0; next }
outside || NF==0 || ( substr($1,1,1)=="*" && substr($1,length($1),1)==":" ) { outside=1; next }
spec=="" && substr($0,1,1)=="+" { spec=pspec " " substr($0,2); next }
{ spec=spec $0 }
END { print spec }'
return 0
}
# Return the requested gcc specs directive, with all included
# specs expanded.
# Note: it does not check for inclusion loops, which would cause it
# to never finish - but such loops are invalid for gcc and we're
# assuming gcc is operational.
gcc-specs-directive() {
local directive subdname subdirective
directive="$(_gcc-specs-directive_raw $1)"
while [[ ${directive} == *%\(*\)* ]]; do
subdname=${directive/*%\(}
subdname=${subdname/\)*}
subdirective="$(_gcc-specs-directive_raw ${subdname})"
directive="${directive//\%(${subdname})/${subdirective}}"
done
echo "${directive}"
return 0
}
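# The hardened checks below boil down to queries of this form (sketch only;
# the actual output depends on the installed gcc specs):
# @CODE
# directive=$(gcc-specs-directive cc1)
# [[ ${directive} == *"{!fno-stack-protector:"* ]] && echo "SSP on by default"
# @CODE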
# Returns true if gcc sets relro
gcc-specs-relro() {
local directive
directive=$(gcc-specs-directive link_command)
[[ "${directive/\{!norelro:}" != "${directive}" ]]
}
# Returns true if gcc sets now
gcc-specs-now() {
local directive
directive=$(gcc-specs-directive link_command)
[[ "${directive/\{!nonow:}" != "${directive}" ]]
}
# Returns true if gcc builds PIEs
gcc-specs-pie() {
local directive
directive=$(gcc-specs-directive cc1)
[[ "${directive/\{!nopie:}" != "${directive}" ]]
}
# Returns true if gcc builds with the stack protector
gcc-specs-ssp() {
local directive
directive=$(gcc-specs-directive cc1)
[[ "${directive/\{!fno-stack-protector:}" != "${directive}" ]]
}
# Returns true if gcc upgrades fstack-protector to fstack-protector-all
gcc-specs-ssp-to-all() {
local directive
directive=$(gcc-specs-directive cc1)
[[ "${directive/\{!fno-stack-protector-all:}" != "${directive}" ]]
}
# Returns true if gcc builds with fno-strict-overflow
gcc-specs-nostrict() {
local directive
directive=$(gcc-specs-directive cc1)
[[ "${directive/\{!fstrict-overflow:}" != "${directive}" ]]
}
# @FUNCTION: gen_usr_ldscript
# @USAGE: [-a] <list of libs to create linker scripts for>
# @DESCRIPTION:
# This function generates linker scripts in /usr/lib for dynamic
# libs in /lib. This is to fix linking problems when you have
# the .so in /lib, and the .a in /usr/lib. What happens is that
# in some cases when linking dynamically, the .a in /usr/lib is used
# instead of the .so in /lib due to gcc/libtool tweaking ld's
# library search path. This causes many builds to fail.
# See bug #4411 for more info.
#
# Note that you should in general use the unversioned name of
# the library (libfoo.so), as ldconfig should usually update it
# correctly to point to the latest version of the library present.
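# Typical usage from a library ebuild's src_install() (illustrative; "foo"
# is a placeholder library name):
# @CODE
# src_install() {
# default
# gen_usr_ldscript -a foo # moves libfoo.so to /lib, leaves a script in /usr/lib
# }
# @CODE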
gen_usr_ldscript() {
local lib libdir=$(get_libdir) output_format="" auto=false suffix=$(get_libname)
[[ -z ${ED+set} ]] && local ED=${D%/}${EPREFIX}/
tc-is-static-only && return
# Eventually we'd like to get rid of this func completely #417451
case ${CTARGET:-${CHOST}} in
*-darwin*) ;;
*linux*|*-freebsd*|*-openbsd*|*-netbsd*)
use prefix && return 0 ;;
*) return 0 ;;
esac
# Just make sure it exists
dodir /usr/${libdir}
if [[ $1 == "-a" ]] ; then
auto=true
shift
dodir /${libdir}
fi
# OUTPUT_FORMAT gives hints to the linker as to what binary format
# is referenced ... makes multilib saner
output_format=$($(tc-getCC) ${CFLAGS} ${LDFLAGS} -Wl,--verbose 2>&1 | sed -n 's/^OUTPUT_FORMAT("\([^"]*\)",.*/\1/p')
[[ -n ${output_format} ]] && output_format="OUTPUT_FORMAT ( ${output_format} )"
for lib in "$@" ; do
local tlib
if ${auto} ; then
lib="lib${lib}${suffix}"
else
# Ensure /lib/${lib} exists to avoid dangling scripts/symlinks.
# This is especially important for AIX, where $(get_libname) can return ".a",
# so /lib/${lib} might be moved to /usr/lib/${lib} (by accident).
[[ -r ${ED}/${libdir}/${lib} ]] || continue
#TODO: better die here?
fi
case ${CTARGET:-${CHOST}} in
*-darwin*)
if ${auto} ; then
tlib=$(scanmacho -qF'%S#F' "${ED}"/usr/${libdir}/${lib})
else
tlib=$(scanmacho -qF'%S#F' "${ED}"/${libdir}/${lib})
fi
[[ -z ${tlib} ]] && die "unable to read install_name from ${lib}"
tlib=${tlib##*/}
if ${auto} ; then
mv "${ED}"/usr/${libdir}/${lib%${suffix}}.*${suffix#.} "${ED}"/${libdir}/ || die
# some install_names are funky: they encode a version
if [[ ${tlib} != ${lib%${suffix}}.*${suffix#.} ]] ; then
mv "${ED}"/usr/${libdir}/${tlib%${suffix}}.*${suffix#.} "${ED}"/${libdir}/ || die
fi
rm -f "${ED}"/${libdir}/${lib}
fi
# Mach-O files have an id, which is like a soname, it tells how
# another object linking against this lib should reference it.
# Since we moved the lib from usr/lib into lib this reference is
# wrong. Hence, we update it here. We don't configure with
# libdir=/lib because that messes up libtool files.
# Make sure we don't lose the specific version, so just modify the
# existing install_name
if [[ ! -w "${ED}/${libdir}/${tlib}" ]] ; then
chmod u+w "${ED}${libdir}/${tlib}" # needed to write to it
local nowrite=yes
fi
install_name_tool \
-id "${EPREFIX}"/${libdir}/${tlib} \
"${ED}"/${libdir}/${tlib} || die "install_name_tool failed"
[[ -n ${nowrite} ]] && chmod u-w "${ED}${libdir}/${tlib}"
# Now as we don't use GNU binutils and our linker doesn't
# understand linker scripts, just create a symlink.
pushd "${ED}/usr/${libdir}" > /dev/null
ln -snf "../../${libdir}/${tlib}" "${lib}"
popd > /dev/null
;;
*)
if ${auto} ; then
tlib=$(scanelf -qF'%S#F' "${ED}"/usr/${libdir}/${lib})
[[ -z ${tlib} ]] && die "unable to read SONAME from ${lib}"
mv "${ED}"/usr/${libdir}/${lib}* "${ED}"/${libdir}/ || die
# some SONAMEs are funky: they encode a version before the .so
if [[ ${tlib} != ${lib}* ]] ; then
mv "${ED}"/usr/${libdir}/${tlib}* "${ED}"/${libdir}/ || die
fi
rm -f "${ED}"/${libdir}/${lib}
else
tlib=${lib}
fi
cat > "${ED}/usr/${libdir}/${lib}" <<-END_LDSCRIPT
/* GNU ld script
Since Gentoo has critical dynamic libraries in /lib, and the static versions
in /usr/lib, we need to have a "fake" dynamic lib in /usr/lib, otherwise we
run into linking problems. This "fake" dynamic lib is a linker script that
redirects the linker to the real lib. And yes, this works in the cross-
compiling scenario as the sysroot-ed linker will prepend the real path.
See bug http://bugs.gentoo.org/4411 for more info.
*/
${output_format}
GROUP ( ${EPREFIX}/${libdir}/${tlib} )
END_LDSCRIPT
;;
esac
fperms a+x "/usr/${libdir}/${lib}" || die "could not change perms on ${lib}"
done
}
#
# ChromiumOS extensions below here.
#
# Returns true if gcc builds PIEs
# For ARM, readelf -h | grep Type always has REL instead of EXEC.
# That is why we have to read the flags one by one and check them instead
# of test-compiling a small program.
gcc-pie() {
local flag
for flag in $(echo "void f(){char a[100];}" | \
${CTARGET}-gcc -v -xc -c -o /dev/null - 2>&1 | \
grep cc1 | \
tr " " "\n" | \
tac)
do
if [[ $flag == "-fPIE" || $flag == "-fPIC" ]]
then
return 0
elif [[ $flag == "-fno-PIE" || $flag == "-fno-PIC" ]]
then
return 1
fi
done
return 1
}
# Returns true if gcc builds with the stack protector
gcc-ssp() {
local obj=$(mktemp)
echo "void f(){char a[100];}" | ${CTARGET}-gcc -xc -c -o "${obj}" -
${CTARGET}-readelf -sW "${obj}" | grep -q stack_chk_fail
local ret=$?
rm -f "${obj}"
return ${ret}
}
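# Hypothetical check an overlay might build on the two helpers above:
# @CODE
# if gcc-pie && gcc-ssp ; then
# einfo "${CTARGET} defaults to PIE + SSP"
# fi
# @CODE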
# Sets up environment variables required to build with Clang
# This should be replaced with a sysroot wrapper ala GCC if/when
# we get serious about building with Clang.
clang-setup-env() {
use clang || return 0
case ${ARCH} in
amd64|x86)
export CC="clang" CXX="clang++"
local clang_flags=(
--sysroot="${SYSROOT}"
-B$(get_binutils_path_gold)
$(usex x86 -m32 '')
)
append-flags "${clang_flags[@]}"
# Some boards use optimizations (e.g. -mfpmath=sse) that
# clang does not support.
append-flags -Qunused-arguments
;;
*) die "Clang is not yet supported for ${ARCH}"
esac
if use asan; then
append-flags -fsanitize=address -fno-omit-frame-pointer
fi
}
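# A sketch of how an ebuild would opt in (assumes the clang USE flag wiring
# above; not a prescribed interface):
# @CODE
# src_configure() {
# clang-setup-env
# econf
# }
# @CODE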
fi


@ -1,132 +0,0 @@
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# $Header: $
#
# useradd.eclass
#
# Adds a mechanism for adding users/groups into alternate roots.
#
# This will likely go away.
#
# Authors:
# Google, inc. <chromium-os-dev@chromium.org>
#
HOMEPAGE="http://www.chromium.org/"
# Before we manipulate users at all, we want to make sure that
# passwd/group/shadow is initialized in the first place. That's
# what baselayout does.
if [ "${PN}" != "baselayout" ]; then
DEPEND="sys-apps/baselayout"
RDEPEND="sys-apps/baselayout"
fi
# Tests if the user already exists in the passwd file.
#
# $1 - Username (e.g. "messagebus")
user_exists() {
grep -e "^$1\:" "${ROOT}/etc/passwd" > /dev/null 2>&1
}
# Tests if the group already exists in the group file.
#
# $1 - Groupname (e.g. "messagebus")
group_exists() {
grep -e "^$1\:" "${ROOT}/etc/group" > /dev/null 2>&1
}
# Add entry to /etc/passwd
#
# $1 - Username (e.g. "messagebus")
# $2 - "*" to indicate not shadowed, "x" to indicate shadowed
# $3 - UID (e.g. 200)
# $4 - GID (e.g. 200)
# $5 - full name (e.g. "")
# $6 - home dir (e.g. "/home/foo" or "/var/run/dbus")
# $7 - shell (e.g. "/bin/sh" or "/bin/false")
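#
# Example (illustrative values only):
#   add_user messagebus '*' 200 200 "" /dev/null /bin/false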
add_user() {
if user_exists "$1"; then
elog "Skipping add_user of existing user: '$1'"
return
fi
echo "${1}:${2}:${3}:${4}:${5}:${6}:${7}" >> "${ROOT}/etc/passwd"
}
# Remove entry from /etc/passwd
#
# $1 - Username
remove_user() {
[ -e "${ROOT}/etc/passwd" ] && sed -i -e /^${1}:.\*$/d "${ROOT}/etc/passwd"
}
# Add entry to /etc/shadow
#
# $1 - Username
# $2 - Crypted password
add_shadow() {
echo "${1}:${2}:14500:0:99999::::" >> "${ROOT}/etc/shadow"
}
# Remove entry from /etc/shadow
#
# $1 - Username
remove_shadow() {
[ -e "${ROOT}/etc/shadow" ] && sed -i -e /^${1}:.\*$/d "${ROOT}/etc/shadow"
}
# Add entry to /etc/group
# $1 - Groupname (e.g. "messagebus")
# $2 - GID (e.g. 200)
add_group() {
if group_exists "$1"; then
elog "Skipping add_group of existing group: '$1'"
return
fi
echo "${1}:x:${2}:" >> "${ROOT}/etc/group"
}
# Copies user entry from host passwd file if it already exists or else
# creates a new user using add_user.
#
# See add_user for argument list.
copy_or_add_user() {
local username="$1"
if user_exists "$1"; then
elog "Skipping copy_or_add_user of existing user '$1'"
return
fi
local entry=$(grep -e "^$1\:" /etc/passwd)
if [ -n "$entry" ]; then
elog "Copying existing passwd entry from root: '$entry'"
echo "$entry" >> "${ROOT}/etc/passwd"
else
add_user "$@"
fi
}
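# Example: reuse the host's "messagebus" entry under ${ROOT} if present,
# otherwise create it with these (illustrative) values:
#   copy_or_add_user messagebus '*' 200 200 "" /dev/null /bin/false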
# Copies group entry from host group file if it already exists or else
# creates a new group using add_group.
#
# See add_group for argument list.
copy_or_add_group() {
local groupname="$1"
if group_exists "$1"; then
elog "Skipping copy_or_add_group of existing group '$1'"
return
fi
local entry=$(grep -e "^$1\:" /etc/group)
if [ -n "$entry" ]; then
elog "Copying existing group entry from root: '$entry'"
echo "$entry" >> "${ROOT}/etc/group"
else
add_group "$@"
fi
}


@ -1 +0,0 @@
DIST baselayout-2.2.tar.bz2 40744 SHA256 11d4a223b06da545c3e59e07c9195570f334b5b1be05d995df0ebc8ea2203e98 SHA512 a5199c42e835d9f2683cc94f3c4c47ecdc392316c24e0932845736e2e90479b0c5c8ad72ead8e0537f097405b7d7548d00b87b7ff8c9e3651486e3c5c0970b36 WHIRLPOOL 60cc4f7f76c5a45c15303e526decffb3bad2b50ac659b1dd072d2ed4b0eb0b31929a1a733ddb03a31ee5882b889a4efb87206f63ffaa2b11e26d36afd0933a95


@ -1,241 +0,0 @@
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/sys-apps/baselayout/baselayout-2.2.ebuild,v 1.16 2013/07/27 22:56:21 williamh Exp $
inherit eutils multilib
DESCRIPTION="Filesystem baselayout and init scripts"
HOMEPAGE="http://www.gentoo.org/"
SRC_URI="mirror://gentoo/${P}.tar.bz2
http://dev.gentoo.org/~vapier/dist/${P}.tar.bz2"
LICENSE="GPL-2"
SLOT="0"
KEYWORDS="alpha amd64 arm hppa ia64 m68k ~mips ppc ppc64 s390 sh sparc x86 ~amd64-fbsd ~sparc-fbsd ~x86-fbsd"
IUSE="build cros_host kernel_linux"
RDEPEND="cros_host? ( !coreos-base/coreos-init )"
pkg_setup() {
multilib_layout
}
# Create our multilib dirs - the Makefile has no knowledge of this
multilib_warn() {
local syms=$1 dirs=$2 def_libdir=$3
[ -z "${syms}${dirs}" ] && return
ewarn "Your system profile has SYMLINK_LIB=${SYMLINK_LIB}, so that means"
if [ -z "${syms}" ] ; then
ewarn "you need to have these paths as symlinks to ${def_libdir}:"
ewarn "$1"
fi
}
multilib_layout() {
local libdir libdirs=$(get_all_libdirs) def_libdir=$(get_abi_LIBDIR $DEFAULT_ABI)
: ${libdirs:=lib} # it isn't that we don't trust multilib.eclass...
[ -z "${def_libdir}" ] && die "your DEFAULT_ABI=$DEFAULT_ABI appears to be invalid"
# figure out which paths should be symlinks and which should be directories
local dirs syms exp d
for libdir in ${libdirs} ; do
exp=( {,usr/,usr/local/}${libdir} )
for d in "${exp[@]/#/${ROOT}}" ; do
# most things should be dirs
if [ "${SYMLINK_LIB}" = "yes" ] && [ "${libdir}" = "lib" ] ; then
[ ! -h "${d}" ] && [ -e "${d}" ] && dirs+=" ${d}"
else
[ -h "${d}" ] && syms+=" ${d}"
fi
done
done
if [ -n "${syms}${dirs}" ] ; then
ewarn "Your system profile has SYMLINK_LIB=${SYMLINK_LIB:-no}, so that means you need to"
ewarn "have these paths configured as follows:"
[ -n "${dirs}" ] && ewarn "symlinks to '${def_libdir}':${dirs}"
[ -n "${syms}" ] && ewarn "directories:${syms}"
ewarn "The ebuild will attempt to fix these, but only for trivial conversions."
ewarn "If things fail, you will need to manually create/move the directories."
echo
fi
# setup symlinks and dirs where we expect them to be; do not migrate
# data ... just fall over in that case.
local prefix
for prefix in "${ROOT}"{,usr/,usr/local/} ; do
if [ "${SYMLINK_LIB}" = yes ] ; then
# we need to make sure "lib" points to the native libdir
if [ -h "${prefix}lib" ] ; then
# it's already a symlink! assume it's pointing to right place ...
continue
elif [ -d "${prefix}lib" ] ; then
# "lib" is a dir, so need to convert to a symlink
ewarn "Converting ${prefix}lib from a dir to a symlink"
rm -f "${prefix}lib"/.keep
if rmdir "${prefix}lib" 2>/dev/null ; then
ln -s ${def_libdir} "${prefix}lib" || die
else
die "non-empty dir found where we needed a symlink: ${prefix}lib"
fi
else
# nothing exists, so just set it up sanely
ewarn "Initializing ${prefix}lib as a symlink"
mkdir -p "${prefix}" || die
rm -f "${prefix}lib" || die
ln -s ${def_libdir} "${prefix}lib" || die
mkdir -p "${prefix}${def_libdir}" #423571
fi
else
# we need to make sure "lib" is a dir
if [ -h "${prefix}lib" ] ; then
# "lib" is a symlink, so need to convert to a dir
ewarn "Converting ${prefix}lib from a symlink to a dir"
rm -f "${prefix}lib" || die
if [ -d "${prefix}lib32" ] ; then
ewarn "Migrating ${prefix}lib32 to ${prefix}lib"
mv "${prefix}lib32" "${prefix}lib" || die
else
mkdir -p "${prefix}lib" || die
fi
elif [ -d "${prefix}lib" ] ; then
# make sure the old "lib" ABI location does not exist; we
# only symlinked the lib dir on systems where we moved it
# to "lib32" ...
case ${CHOST} in
*-gentoo-freebsd*) ;; # We want it the other way on fbsd.
i?86*|x86_64*|powerpc*|sparc*|s390*)
if [ -d "${prefix}lib32" ] ; then
rm -f "${prefix}lib32"/.keep
if ! rmdir "${prefix}lib32" 2>/dev/null ; then
ewarn "You need to merge ${prefix}lib32 into ${prefix}lib"
die "non-empty dir found where there should be none: ${prefix}lib32"
fi
fi
;;
esac
else
# nothing exists, so just set it up sanely
ewarn "Initializing ${prefix}lib as a dir"
mkdir -p "${prefix}" || die
rm -f "${prefix}lib" || die
ln -s ${def_libdir} "${prefix}lib" || die
fi
fi
done
}
pkg_preinst() {
# Bug #217848 - Since the remap_dns_vars() called by pkg_preinst() of
# the baselayout-1.x ebuild copies all the real configs from the user's
# /etc/conf.d into ${D}, it makes them all appear to be the default
# versions. In order to protect them from being unmerged after this
# upgrade, modify their timestamps.
touch "${ROOT}"/etc/conf.d/* 2>/dev/null
# This is written in src_install (so it's in CONTENTS), but punt all
# pending updates to avoid user having to do etc-update (and make the
# pkg_postinst logic simpler).
rm -f "${ROOT}"/etc/._cfg????_gentoo-release
# We need to install directories and maybe some dev nodes when building
# stages, but they cannot be in CONTENTS.
# Also, we cannot reference $S as binpkg will break so we do this.
multilib_layout
if use build ; then
emake -C "${D}/usr/share/${PN}" DESTDIR="${ROOT}" layout || die
fi
rm -f "${D}"/usr/share/${PN}/Makefile
}
src_install() {
emake \
OS=$(usex kernel_FreeBSD BSD Linux) \
DESTDIR="${D}" \
install || die
dodoc ChangeLog.svn
# need the makefile in pkg_preinst
insinto /usr/share/${PN}
doins Makefile || die
# handle multilib paths. do it here because we want this behavior
# regardless of the C library that you're using. we do explicitly
# list paths which the native ldconfig searches, but this isn't
# problematic as it doesn't change the resulting ld.so.cache or
# take longer to generate. similarly, listing both the native
# path and the symlinked path doesn't change the resulting cache.
local libdir ldpaths
for libdir in $(get_all_libdirs) ; do
ldpaths+=":/${libdir}:/usr/${libdir}:/usr/local/${libdir}"
done
echo "LDPATH='${ldpaths#:}'" >> "${D}"/etc/env.d/00basic
# rc-scripts version for testing of features that *should* be present
echo "Gentoo Base System release ${PV}" > "${D}"/etc/gentoo-release
if use !cros_host; then
# Don't install /etc/issue since it is handled by coreos-init
rm "${D}"/etc/issue
fi
}
pkg_postinst() {
local x
# We installed some files to /usr/share/baselayout instead of /etc to stop
# (1) overwriting the user's settings
# (2) screwing things up when attempting to merge files
# (3) accidentally packaging up personal files with quickpkg
# If they don't exist then we install them
for x in master.passwd passwd shadow group fstab ; do
[ -e "${ROOT}etc/${x}" ] && continue
[ -e "${ROOT}usr/share/baselayout/${x}" ] || continue
cp -p "${ROOT}usr/share/baselayout/${x}" "${ROOT}"etc
done
# Force shadow permissions to not be world-readable #260993
for x in shadow ; do
[ -e "${ROOT}etc/${x}" ] && chmod o-rwx "${ROOT}etc/${x}"
done
# Take care of the etc-update for the user
if [ -e "${ROOT}"/etc/._cfg0000_gentoo-release ] ; then
mv "${ROOT}"/etc/._cfg0000_gentoo-release "${ROOT}"/etc/gentoo-release
fi
# whine about users that lack passwords #193541
if [[ -e ${ROOT}/etc/shadow ]] ; then
local bad_users=$(sed -n '/^[^:]*::/s|^\([^:]*\)::.*|\1|p' "${ROOT}"/etc/shadow)
if [[ -n ${bad_users} ]] ; then
echo
ewarn "The following users lack passwords!"
ewarn ${bad_users}
fi
fi
# baselayout leaves behind a lot of .keep files, so let's clean them up
find "${ROOT}"/lib*/rcscripts/ -name .keep -exec rm -f {} + 2>/dev/null
find "${ROOT}"/lib*/rcscripts/ -depth -type d -exec rmdir {} + 2>/dev/null
# whine about users with invalid shells #215698
if [[ -e ${ROOT}/etc/passwd ]] ; then
local bad_shells=$(awk -F: 'system("test -e " $7) { print $1 " - " $7}' "${ROOT}"/etc/passwd | sort)
if [[ -n ${bad_shells} ]] ; then
echo
ewarn "The following users have non-existent shells!"
ewarn "${bad_shells}"
fi
fi
# http://bugs.gentoo.org/361349
if use kernel_linux; then
mkdir -p "${ROOT}"/run
if ! grep -qs "^tmpfs.*/run " "${ROOT}"/proc/mounts ; then
echo
ewarn "You should reboot the system now to get /run mounted with tmpfs!"
fi
fi
}


@ -1,255 +0,0 @@
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/sys-apps/baselayout/baselayout-2.2.ebuild,v 1.16 2013/07/27 22:56:21 williamh Exp $
inherit eutils multilib
DESCRIPTION="Filesystem baselayout and init scripts"
HOMEPAGE="http://www.gentoo.org/"
SRC_URI="mirror://gentoo/${P}.tar.bz2
http://dev.gentoo.org/~vapier/dist/${P}.tar.bz2"
LICENSE="GPL-2"
SLOT="0"
KEYWORDS="alpha amd64 arm hppa ia64 m68k ~mips ppc ppc64 s390 sh sparc x86 ~amd64-fbsd ~sparc-fbsd ~x86-fbsd"
IUSE="build cros_host kernel_linux symlink-usr"
RDEPEND="cros_host? ( !coreos-base/coreos-init )"
pkg_setup() {
multilib_layout
}
# Create our multilib dirs - the Makefile has no knowledge of this
multilib_warn() {
local syms=$1 dirs=$2 def_libdir=$3
[ -z "${syms}${dirs}" ] && return
ewarn "Your system profile has SYMLINK_LIB=${SYMLINK_LIB}, so that means"
if [ -z "${syms}" ] ; then
ewarn "you need to have these paths as symlinks to ${def_libdir}:"
ewarn "$1"
fi
}
multilib_layout() {
local libdir libdirs=$(get_all_libdirs) def_libdir=$(get_abi_LIBDIR $DEFAULT_ABI)
: ${libdirs:=lib} # it isn't that we don't trust multilib.eclass...
[ -z "${def_libdir}" ] && die "your DEFAULT_ABI=$DEFAULT_ABI appears to be invalid"
# figure out which paths should be symlinks and which should be directories
local dirs syms exp d
for libdir in ${libdirs} ; do
exp=( {,usr/,usr/local/}${libdir} )
for d in "${exp[@]/#/${ROOT}}" ; do
# most things should be dirs
if [ "${SYMLINK_LIB}" = "yes" ] && [ "${libdir}" = "lib" ] ; then
[ ! -h "${d}" ] && [ -e "${d}" ] && dirs+=" ${d}"
else
[ -h "${d}" ] && syms+=" ${d}"
fi
done
done
if [ -n "${syms}${dirs}" ] ; then
ewarn "Your system profile has SYMLINK_LIB=${SYMLINK_LIB:-no}, so that means you need to"
ewarn "have these paths configured as follows:"
[ -n "${dirs}" ] && ewarn "symlinks to '${def_libdir}':${dirs}"
[ -n "${syms}" ] && ewarn "directories:${syms}"
ewarn "The ebuild will attempt to fix these, but only for trivial conversions."
ewarn "If things fail, you will need to manually create/move the directories."
echo
fi
if use symlink-usr ; then
for libdir in ${libdirs} ; do
if use symlink-usr && [ "${libdir}" != "lib" ] ; then
ln -s usr/${libdir} ${ROOT}/${libdir}
fi
done
fi
# setup symlinks and dirs where we expect them to be; do not migrate
# data ... just fall over in that case.
local prefix
for prefix in "${ROOT}"{,usr/,usr/local/} ; do
if [ "${SYMLINK_LIB}" = yes ] ; then
# we need to make sure "lib" points to the native libdir
if [ -h "${prefix}lib" ] ; then
# it's already a symlink! assume it's pointing to right place ...
continue
elif [ -d "${prefix}lib" ] ; then
# "lib" is a dir, so need to convert to a symlink
ewarn "Converting ${prefix}lib from a dir to a symlink"
rm -f "${prefix}lib"/.keep
if rmdir "${prefix}lib" 2>/dev/null ; then
ln -s ${def_libdir} "${prefix}lib" || die
else
die "non-empty dir found where we needed a symlink: ${prefix}lib"
fi
else
# nothing exists, so just set it up sanely
ewarn "Initializing ${prefix}lib as a symlink"
mkdir -p "${prefix}" || die
rm -f "${prefix}lib" || die
ln -s ${def_libdir} "${prefix}lib" || die
mkdir -p "${prefix}${def_libdir}" #423571
fi
else
# we need to make sure "lib" is a dir
if [ -h "${prefix}lib" ] ; then
# "lib" is a symlink, so need to convert to a dir
ewarn "Converting ${prefix}lib from a symlink to a dir"
rm -f "${prefix}lib" || die
if [ -d "${prefix}lib32" ] ; then
ewarn "Migrating ${prefix}lib32 to ${prefix}lib"
mv "${prefix}lib32" "${prefix}lib" || die
else
mkdir -p "${prefix}lib" || die
fi
elif [ -d "${prefix}lib" ] ; then
# make sure the old "lib" ABI location does not exist; we
# only symlinked the lib dir on systems where we moved it
# to "lib32" ...
case ${CHOST} in
*-gentoo-freebsd*) ;; # We want it the other way on fbsd.
i?86*|x86_64*|powerpc*|sparc*|s390*)
if [ -d "${prefix}lib32" ] ; then
rm -f "${prefix}lib32"/.keep
if ! rmdir "${prefix}lib32" 2>/dev/null ; then
ewarn "You need to merge ${prefix}lib32 into ${prefix}lib"
die "non-empty dir found where there should be none: ${prefix}lib32"
fi
fi
;;
esac
else
# nothing exists, so just set it up sanely
ewarn "Initializing ${prefix}lib as a dir"
mkdir -p "${prefix}" || die
rm -f "${prefix}lib" || die
ln -s ${def_libdir} "${prefix}lib" || die
fi
fi
done
}
pkg_preinst() {
# Bug #217848 - Since the remap_dns_vars() called by pkg_preinst() of
# the baselayout-1.x ebuild copies all the real configs from the user's
# /etc/conf.d into ${D}, it makes them all appear to be the default
# versions. In order to protect them from being unmerged after this
# upgrade, modify their timestamps.
touch "${ROOT}"/etc/conf.d/* 2>/dev/null
# This is written in src_install (so it's in CONTENTS), but punt all
# pending updates to avoid user having to do etc-update (and make the
# pkg_postinst logic simpler).
rm -f "${ROOT}"/etc/._cfg????_gentoo-release
# We need to install directories and maybe some dev nodes when building
# stages, but they cannot be in CONTENTS.
# Also, we cannot reference $S as binpkg will break so we do this.
multilib_layout
if use symlink-usr ; then
for bindir in bin sbin ; do
mkdir -p "${ROOT}"/usr/${bindir}
ln -s usr/${bindir} "${ROOT}"/${bindir}
done
fi
if use build ; then
emake -C "${D}/usr/share/${PN}" DESTDIR="${ROOT}" layout || die
fi
rm -f "${D}"/usr/share/${PN}/Makefile
}
src_install() {
emake \
OS=$(usex kernel_FreeBSD BSD Linux) \
DESTDIR="${D}" \
install || die
dodoc ChangeLog.svn
# need the makefile in pkg_preinst
insinto /usr/share/${PN}
doins Makefile || die
# handle multilib paths. do it here because we want this behavior
# regardless of the C library that you're using. we do explicitly
# list paths which the native ldconfig searches, but this isn't
# problematic as it doesn't change the resulting ld.so.cache or
# take longer to generate. similarly, listing both the native
# path and the symlinked path doesn't change the resulting cache.
local libdir ldpaths
for libdir in $(get_all_libdirs) ; do
ldpaths+=":/${libdir}:/usr/${libdir}:/usr/local/${libdir}"
done
echo "LDPATH='${ldpaths#:}'" >> "${D}"/etc/env.d/00basic
# rc-scripts version for testing of features that *should* be present
echo "Gentoo Base System release ${PV}" > "${D}"/etc/gentoo-release
if use !cros_host; then
# Don't install /etc/issue since it is handled by coreos-init
rm "${D}"/etc/issue
fi
}
pkg_postinst() {
local x
# We installed some files to /usr/share/baselayout instead of /etc to stop
# (1) overwriting the user's settings
# (2) screwing things up when attempting to merge files
# (3) accidentally packaging up personal files with quickpkg
# If they don't exist then we install them
for x in master.passwd passwd shadow group fstab ; do
[ -e "${ROOT}etc/${x}" ] && continue
[ -e "${ROOT}usr/share/baselayout/${x}" ] || continue
cp -p "${ROOT}usr/share/baselayout/${x}" "${ROOT}"etc
done
# Force shadow permissions to not be world-readable #260993
for x in shadow ; do
[ -e "${ROOT}etc/${x}" ] && chmod o-rwx "${ROOT}etc/${x}"
done
# Take care of the etc-update for the user
if [ -e "${ROOT}"/etc/._cfg0000_gentoo-release ] ; then
mv "${ROOT}"/etc/._cfg0000_gentoo-release "${ROOT}"/etc/gentoo-release
fi
# whine about users that lack passwords #193541
if [[ -e ${ROOT}/etc/shadow ]] ; then
local bad_users=$(sed -n '/^[^:]*::/s|^\([^:]*\)::.*|\1|p' "${ROOT}"/etc/shadow)
if [[ -n ${bad_users} ]] ; then
echo
ewarn "The following users lack passwords!"
ewarn ${bad_users}
fi
fi
# baselayout leaves behind a lot of .keep files, so let's clean them up
find "${ROOT}"/lib*/rcscripts/ -name .keep -exec rm -f {} + 2>/dev/null
find "${ROOT}"/lib*/rcscripts/ -depth -type d -exec rmdir {} + 2>/dev/null
# whine about users with invalid shells #215698
if [[ -e ${ROOT}/etc/passwd ]] ; then
local bad_shells=$(awk -F: 'system("test -e " $7) { print $1 " - " $7}' "${ROOT}"/etc/passwd | sort)
if [[ -n ${bad_shells} ]] ; then
echo
ewarn "The following users have non-existent shells!"
ewarn "${bad_shells}"
fi
fi
# http://bugs.gentoo.org/361349
if use kernel_linux; then
mkdir -p "${ROOT}"/run
if ! grep -qs "^tmpfs.*/run " "${ROOT}"/proc/mounts ; then
echo
ewarn "You should reboot the system now to get /run mounted with tmpfs!"
fi
fi
}


@ -0,0 +1 @@
baselayout-9999.ebuild


@ -0,0 +1,168 @@
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
EAPI="5"
CROS_WORKON_PROJECT="coreos/baselayout"
CROS_WORKON_LOCALNAME="baselayout"
CROS_WORKON_REPO="git://github.com"
if [[ "${PV}" == 9999 ]]; then
KEYWORDS="~amd64 ~arm ~x86"
else
CROS_WORKON_COMMIT="b357b8e3d87851ef821353624e0d872ee1f229da"
KEYWORDS="amd64 arm x86"
fi
inherit cros-workon cros-tmpfiles eutils multilib
DESCRIPTION="Filesystem baselayout for CoreOS"
HOMEPAGE="http://www.coreos.com/"
SRC_URI=""
LICENSE="GPL-2"
SLOT="0"
IUSE="cros_host symlink-usr"
# This version of baselayout replaces coreos-base
DEPEND="!coreos-base/coreos-base
!<sys-libs/glibc-2.17-r1
!<=sys-libs/nss-usrfiles-2.18.1_pre"
# Make sure coreos-init is not installed in the SDK
RDEPEND="${DEPEND}
sys-apps/efunctions
cros_host? ( !coreos-base/coreos-init )"
declare -A LIB_SYMS # list of /lib->lib64 symlinks
declare -A USR_SYMS # list of /foo->usr/foo symlinks
declare -a BASE_DIRS # list of absolute paths that should be directories
# Check that a pre-existing symlink is correct
check_sym() {
local path="$1" value="$2"
local real_path=$(readlink -f "${ROOT}${path}")
local real_value=$(readlink -f "${ROOT}${path%/*}/${value}")
if [[ -e "${read_path}" && "${read_path}" != "${read_value}" ]]; then
die "${path} is not a symlink to ${value}"
fi
}
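# For example, on a lib64 profile this dies if ${ROOT}/lib exists but does
# not resolve to lib64 (hypothetical invocation):
#   check_sym /lib lib64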
pkg_setup() {
local libdirs=$(get_all_libdirs) def_libdir=$(get_abi_LIBDIR $DEFAULT_ABI)
if [[ -z "${libdirs}" || -z "${def_libdir}" ]]; then
die "your DEFAULT_ABI=$DEFAULT_ABI appears to be invalid"
fi
# figure out which paths should be symlinks and which should be directories
local d
for d in bin sbin ${libdirs} ; do
if [[ "${SYMLINK_LIB}" == "yes" && "${d}" == "lib" ]] ; then
if use symlink-usr; then
USR_SYMS["/lib"]="usr/${def_libdir}"
else
LIB_SYMS["/lib"]="${def_libdir}"
fi
LIB_SYMS["/usr/lib"]="${def_libdir}"
LIB_SYMS["/usr/local/lib"]="${def_libdir}"
elif use symlink-usr; then
USR_SYMS["/$d"]="usr/$d"
BASE_DIRS+=( "/usr/$d" "/usr/local/$d" )
else
BASE_DIRS+=( "/$d" "/usr/$d" "/usr/local/$d" )
fi
done
# make sure any pre-existing symlinks map to the expected locations.
local sym
for sym in "${!LIB_SYMS[@]}" ; do
check_sym "${sym}" "${LIB_SYMS[$sym]}"
done
if use symlink-usr; then
for sym in "${!USR_SYMS[@]}" ; do
check_sym "${sym}" "${USR_SYMS[$sym]}"
done
fi
}
src_install() {
# lib symlinks must be in place before make install
dodir "${BASE_DIRS[@]}"
local sym
for sym in "${!LIB_SYMS[@]}" ; do
dosym "${LIB_SYMS[$sym]}" "${sym}"
done
if use symlink-usr; then
for sym in "${!USR_SYMS[@]}" ; do
dosym "${USR_SYMS[$sym]}" "${sym}"
done
fi
emake DESTDIR="${D}" install
# generate a tmpfiles.d config to cover our /usr symlinks
if use symlink-usr; then
local tmpfiles=${D}/usr/lib/tmpfiles.d/baselayout-usr.conf
echo -n > ${tmpfiles} || die
for sym in "${!USR_SYMS[@]}" ; do
echo "L ${sym} - - - - ${USR_SYMS[$sym]}" >> ${tmpfiles}
done
fi
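# The generated baselayout-usr.conf then carries entries such as
# (illustrative, assuming a lib64 libdir):
#   L /bin - - - - usr/bin
#   L /lib - - - - usr/lib64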
if ! use cros_host; then
# Docker parses /etc/group directly :(
local docker_grp=$(grep "^docker:" "${D}"/usr/share/baselayout/group)
echo "f /etc/group - - - - ${docker_grp}" > \
"${D}"/usr/lib/tmpfiles.d/baselayout-docker.conf || die
fi
# Fill in all other paths defined in tmpfiles configs
tmpfiles_create
# handle multilib paths. do it here because we want this behavior
# regardless of the C library that you're using. we do explicitly
# list paths which the native ldconfig searches, but this isn't
# problematic as it doesn't change the resulting ld.so.cache or
# take longer to generate. similarly, listing both the native
# path and the symlinked path doesn't change the resulting cache.
local libdir ldpaths
for libdir in $(get_all_libdirs) ; do
ldpaths+=":/${libdir}:/usr/${libdir}:/usr/local/${libdir}"
done
echo "LDPATH='${ldpaths#:}'" >> "${D}"/etc/env.d/00basic || die
if ! use symlink-usr ; then
# modprobe uses /lib instead of /usr/lib
mv "${D}"/usr/lib/modprobe.d "${D}"/lib/modprobe.d || die
# move resolv.conf to a writable location
dosym /run/resolv.conf /etc/resolv.conf
# core is UID:GID 1000:1000 in old images
sed -i -e 's/^core:x:500:500:/core:x:1000:1000:/' \
"${D}"/usr/share/baselayout/passwd || die
sed -i -e 's/^core:x:500:/core:x:1000:/' \
"${D}"/usr/share/baselayout/group || die
# make sure the home dir ownership is correct
fowners -R 1000:1000 /home/core || die
else
fowners -R 500:500 /home/core || die
fi
if use cros_host; then
# Provided by vim in the SDK
rm -r "${D}"/etc/vim || die
else
# Don't install /etc/issue since it is handled by coreos-init right now
rm "${D}"/etc/issue || die
sed -i -e '/\/etc\/issue/d' \
"${D}"/usr/lib/tmpfiles.d/baselayout-etc.conf || die
# Set custom password for core user
if [[ -r "${SHARED_USER_PASSWD_FILE}" ]]; then
echo "core:$(<${SHARED_USER_PASSWD_FILE}):15887:0:::::" \
> "${D}"/etc/shadow || die
chmod 640 "${D}"/etc/shadow || die
fi
fi
}


@ -1,19 +0,0 @@
# /etc/nsswitch.conf:
passwd: files usrfiles
shadow: files usrfiles
group: files usrfiles
hosts: files usrfiles dns
networks: files usrfiles dns
services: files usrfiles
protocols: files usrfiles
rpc: files usrfiles
ethers: files
netmasks: files
netgroup: files
bootparams: files
automount: files
aliases: files


@ -1,70 +0,0 @@
#ident "@(#)rpc 1.11 95/07/14 SMI" /* SVr4.0 1.2 */
#
# rpc
#
portmapper 100000 portmap sunrpc rpcbind
rstatd 100001 rstat rup perfmeter rstat_svc
rusersd 100002 rusers
nfs 100003 nfsprog
ypserv 100004 ypprog
mountd 100005 mount showmount
ypbind 100007
walld 100008 rwall shutdown
yppasswdd 100009 yppasswd
etherstatd 100010 etherstat
rquotad 100011 rquotaprog quota rquota
sprayd 100012 spray
3270_mapper 100013
rje_mapper 100014
selection_svc 100015 selnsvc
database_svc 100016
rexd 100017 rex
alis 100018
sched 100019
llockmgr 100020
nlockmgr 100021
x25.inr 100022
statmon 100023
status 100024
bootparam 100026
ypupdated 100028 ypupdate
keyserv 100029 keyserver
sunlink_mapper 100033
tfsd 100037
nsed 100038
nsemntd 100039
showfhd 100043 showfh
ioadmd 100055 rpc.ioadmd
NETlicense 100062
sunisamd 100065
debug_svc 100066 dbsrv
ypxfrd 100069 rpc.ypxfrd
bugtraqd 100071
kerbd 100078
event 100101 na.event # SunNet Manager
logger 100102 na.logger # SunNet Manager
sync 100104 na.sync
hostperf 100107 na.hostperf
activity 100109 na.activity # SunNet Manager
hostmem 100112 na.hostmem
sample 100113 na.sample
x25 100114 na.x25
ping 100115 na.ping
rpcnfs 100116 na.rpcnfs
hostif 100117 na.hostif
etherif 100118 na.etherif
iproutes 100120 na.iproutes
layers 100121 na.layers
snmp 100122 na.snmp snmp-cmc snmp-synoptics snmp-unisys snmp-utk
traffic 100123 na.traffic
nfs_acl 100227
sadmind 100232
nisd 100300 rpc.nisd
nispasswd 100303 rpc.nispasswdd
ufsd 100233 ufsd
fedfs_admin 100418
pcnfsd 150001 pcnfs
amd 300019 amq
sgi_fam 391002 fam
bwnfsd 545580417
fypxfrd 600100069 freebsd-ypxfrd


@ -1 +0,0 @@
L /tmp/nsswitch.conf - - - - /usr/share/nss/nsswitch.conf