Add docker based backend to the cloud scenario (#9751) (#10626)

* Add docker based backend

* new line

* Add validation

* Add cloud_docker_vault_cluster

* Unify cloud scenario outputs

* Use min_vault_version consistently across both modules

* random network name for docker

* Add local build for docker

* Use environment instead of backend

* make use of existing modules for docker and k8s

* connect the peers

* formatting

* copyright

* Remove old duplicated code

* use enos local exec

* get version locally

* Don't use local time

* adjust bin path for docker

* use root dockerfile

* get dockerfile to work

* Build docker image from correct binary location

* Fix it... maybe

* Add docker admin token

* whitespace

* formatting and comment cleanup

* formatting

* undo

* Apply suggestion from @ryancragun

* Move build to make

* Default to local

* Revert k8s changes

* Add admin token

* Clean map

* whitespace

* whitespace

* Pull out k8s changes and vault_cluster_raft

* Some cleaning changes

* whitespace

* Naming cleanup

---------

Co-authored-by: Luis (LT) Carbonell <lt.carbonell@hashicorp.com>
Co-authored-by: Ryan Cragun <me@ryan.ec>
This commit is contained in:
Vault Automation 2025-11-06 13:59:40 -05:00 committed by GitHub
parent 01e4c042d0
commit 40a70edc03
12 changed files with 898 additions and 186 deletions

View File

@@ -366,6 +366,10 @@ ci-build-ui:
ci-bundle:
@$(CURDIR)/scripts/ci-helper.sh bundle
.PHONY: ci-copy-binary
ci-copy-binary:
@$(CURDIR)/scripts/ci-helper.sh copy-binary
.PHONY: ci-get-artifact-basename
ci-get-artifact-basename:
@$(CURDIR)/scripts/ci-helper.sh artifact-basename

View File

@@ -56,6 +56,18 @@ module "create_vpc" {
common_tags = var.tags
}
module "cloud_docker_vault_cluster" {
source = "./modules/cloud_docker_vault_cluster"
}
module "docker_namespace_token" {
source = "./modules/docker_namespace_token"
}
module "docker_network" {
source = "./modules/docker_network"
}
module "choose_follower_host" {
source = "./modules/choose_follower_host"
}

View File

@@ -27,3 +27,6 @@ provider "enos" "ubuntu" {
provider "hcp" "default" {
}
provider "docker" "default" {
}

View File

@@ -24,6 +24,10 @@ terraform "default" {
source = "hashicorp/aws"
}
docker = {
source = "kreuzwerker/docker"
}
enos = {
source = "registry.terraform.io/hashicorp-forge/enos"
version = ">= 0.4.0"

View File

@@ -1,6 +1,28 @@
# Copyright IBM Corp. 2016, 2025
# SPDX-License-Identifier: BUSL-1.1
# Vault Local Build Module
#
# This module builds Vault binaries locally and produces artifacts for different use cases.
# The module enforces that at least one output artifact is created to prevent silent no-op execution.
#
# Supported workflows:
# 1. ZIP Bundle Only: artifact_path specified, docker_bin_path null
# - Builds binary to dist/
# - Creates zip bundle at artifact_path
#
# 2. Docker Only: docker_bin_path specified, artifact_path null
# - Builds binary to dist/
# - Copies binary to docker_bin_path for Docker image creation
# - No zip bundle created
#
# 3. Both ZIP and Docker: Both artifact_path and docker_bin_path specified
# - Builds binary to dist/
# - Creates zip bundle at artifact_path
# - Copies binary to docker_bin_path for Docker image creation
#
# The validation ensures at least one of artifact_path or docker_bin_path is specified.
terraform {
required_providers {
enos = {
@@ -10,7 +32,9 @@ terraform {
}
variable "artifact_path" {
description = "Where to create the zip bundle of the Vault build"
type = string
description = "Where to create the zip bundle of the Vault build. If null, no zip bundle will be created."
default = null
}
variable "build_tags" {
@@ -36,6 +60,17 @@ variable "goos" {
default = "linux"
}
variable "docker_bin_path" {
type = string
description = "Path to copy the built binary for Docker image creation. When specified, the binary is copied to this location for subsequent Docker image builds. If null, no Docker-specific binary copy is performed."
default = null
validation {
condition = var.artifact_path != null || var.docker_bin_path != null
error_message = "At least one of 'artifact_path' (for zip bundle) or 'docker_bin_path' (for Docker builds) must be specified. The module must produce at least one output artifact."
}
}
variable "artifactory_host" { default = null }
variable "artifactory_repo" { default = null }
variable "artifactory_token" { default = null }
@@ -58,11 +93,12 @@ resource "enos_local_exec" "build" {
BASE_VERSION = module.local_metadata.version_base
BIN_PATH = abspath("${path.module}/../../../dist")
BUILD_UI = tostring(var.build_ui)
BUNDLE_PATH = abspath(var.artifact_path)
BUNDLE_PATH = var.artifact_path != null ? abspath(var.artifact_path) : ""
GO_TAGS = join(" ", var.build_tags)
GOARCH = var.goarch
GOOS = var.goos
PRERELEASE_VERSION = module.local_metadata.version_pre
VERSION_METADATA = module.local_metadata.version_meta
CUSTOM_BIN_PATH = var.docker_bin_path != null ? abspath("${path.module}/../../../${var.docker_bin_path}") : ""
}
}

View File

@@ -2,6 +2,18 @@
# Copyright IBM Corp. 2016, 2025
# SPDX-License-Identifier: BUSL-1.1
# This script builds Vault binaries and optionally packages them.
#
# Two distinct workflows are supported:
# 1. Standard build: Builds to dist/ and creates zip bundle from dist/
# 2. Target path build: Builds to dist/, copies to TARGET_BIN_PATH, skips bundling
# (bundling is skipped to avoid confusion when the binary exists in multiple locations)
#
# Environment variables:
# - BUILD_UI: Set to "true" to build UI components
# - TARGET_BIN_PATH: If set, copies built binary to this location instead of bundling
# - BUNDLE_PATH: If set (and TARGET_BIN_PATH is not), creates zip bundle at this path
set -eux -o pipefail
# Install yarn so we can build the UI
@@ -20,5 +32,12 @@ make ci-build
popd > /dev/null
echo "--> Bundling $BIN_PATH/* to $BUNDLE_PATH"
zip -r -j "$BUNDLE_PATH" "$BIN_PATH/"
if [ -n "$TARGET_BIN_PATH" ]; then
echo "--> Target binary path specified, copying binary and skipping bundle"
make -C "$root_dir" ci-copy-binary
elif [ -n "$BUNDLE_PATH" ]; then
echo "--> Creating zip bundle from dist/"
make -C "$root_dir" ci-bundle
else
echo "--> No post-build packaging requested (neither TARGET_BIN_PATH nor BUNDLE_PATH specified)"
fi
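For reference, the packaging decision above might be exercised by hand roughly as follows. This is a sketch only: the script location and output paths are hypothetical, and the remaining build variables that the enos build module normally exports (GOOS, GOARCH, GO_TAGS, BIN_PATH, and so on) are omitted.

# Docker workflow: copy the built binary for the image build and skip bundling.
BUILD_UI=false BUNDLE_PATH="" TARGET_BIN_PATH=/tmp/vault-docker-bin ./build.sh
# Zip workflow: no target path, so a zip bundle is created at BUNDLE_PATH.
BUILD_UI=false TARGET_BIN_PATH="" BUNDLE_PATH=/tmp/vault.zip ./build.sh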

View File

@@ -0,0 +1,397 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
terraform {
required_providers {
docker = {
source = "kreuzwerker/docker"
version = "~> 3.0"
}
enos = {
source = "registry.terraform.io/hashicorp-forge/enos"
}
}
}
variable "min_vault_version" {
type = string
description = "The minimum Vault version to deploy (e.g., 1.15.0 or v1.15.0+ent)"
}
variable "vault_edition" {
type = string
description = "The edition of Vault to deploy (ent, ce, ent.fips1403)"
default = "ent"
validation {
condition = contains(["ent", "ce", "ent.fips1403"], var.vault_edition)
error_message = "vault_edition must be one of: ent, ce, ent.fips1403"
}
}
variable "vault_license" {
type = string
description = "The Vault Enterprise license"
default = null
sensitive = true
}
variable "cluster_name" {
type = string
description = "The name of the Vault cluster"
default = "vault"
}
variable "container_count" {
type = number
description = "Number of Vault containers to create"
default = 3
}
variable "vault_port" {
type = number
description = "The port Vault listens on"
default = 8200
}
variable "use_local_build" {
type = bool
description = "If true, build a local Docker image from the current branch instead of pulling from Docker Hub"
default = false
}
# HCP-specific variables (ignored but accepted for compatibility)
variable "network_name" {
type = string
description = "Ignored - for HCP compatibility only"
default = ""
}
variable "tier" {
type = string
description = "Ignored - for HCP compatibility only"
default = ""
}
# Generate a random suffix for the network name to avoid conflicts
resource "random_string" "network_suffix" {
length = 8
lower = true
upper = false
numeric = true
special = false
}
# Create Docker network
resource "docker_network" "cluster" {
name = "${var.cluster_name}-network-${random_string.network_suffix.result}"
}
locals {
# Parse min_vault_version to extract the version number
# e.g., "v1.15.0+ent" -> "1.15.0" or "v1.15.0+ent-2cf0b2f" -> "1.15.0"
vault_version = trimprefix(split("+", var.min_vault_version)[0], "v")
image_map = {
"ent" = "hashicorp/vault-enterprise"
"ce" = "hashicorp/vault"
"ent.fips1403" = "hashicorp/vault-enterprise-fips"
}
target_map = {
"ent" = "ubi"
"ce" = "ubi"
"ent.fips1403" = "ubi-fips"
}
image = local.image_map[var.vault_edition]
tag_suffix = var.vault_edition == "ce" ? "" : "-ent"
image_tag = "${local.vault_version}${local.tag_suffix}"
local_tag = "vault-local-${var.vault_edition}:${local.vault_version}"
dockerfile = "Dockerfile"
target = local.target_map[var.vault_edition]
}
# Pull image from Docker Hub (when not using local build)
resource "docker_image" "vault_remote" {
count = var.use_local_build ? 0 : 1
name = "${local.image}:${local.image_tag}"
}
# Build image from local Dockerfile (when using local build)
resource "docker_image" "vault_local" {
count = var.use_local_build ? 1 : 0
name = local.local_tag
keep_locally = true
build {
context = "${path.module}/../../.."
dockerfile = local.dockerfile
target = local.target
tag = [local.local_tag]
pull_parent = true
build_args = {
BIN_NAME = "vault"
TARGETOS = "linux"
TARGETARCH = "amd64"
NAME = "vault"
PRODUCT_VERSION = local.vault_version
PRODUCT_REVISION = "local"
LICENSE_SOURCE = "LICENSE"
LICENSE_DEST = "/usr/share/doc/vault"
}
}
}
locals {
# Generate Vault configuration for each node
vault_config_template = <<-EOF
ui = true
listener "tcp" {
address = "0.0.0.0:${var.vault_port}"
cluster_address = "0.0.0.0:8201"
tls_disable = true
}
storage "raft" {
path = "/vault/data"
node_id = "node%s"
}
disable_mlock = true
EOF
}
# Using tmpfs for Raft data (in-memory, no persistence needed for testing)
resource "docker_container" "vault" {
count = var.container_count
name = "${var.cluster_name}-${count.index}"
image = var.use_local_build ? docker_image.vault_local[0].name : docker_image.vault_remote[0].image_id
networks_advanced {
name = docker_network.cluster.name
}
ports {
internal = var.vault_port
external = var.vault_port + count.index
}
tmpfs = {
"/vault/data" = "rw,noexec,nosuid,size=100m"
}
upload {
content = format(local.vault_config_template, count.index)
file = "/vault/config/vault.hcl"
}
user = "root"
env = concat(
[
"VAULT_API_ADDR=http://${var.cluster_name}-${count.index}:${var.vault_port}",
"VAULT_CLUSTER_ADDR=http://${var.cluster_name}-${count.index}:8201",
"SKIP_SETCAP=true",
"SKIP_CHOWN=true",
],
var.vault_license != null ? ["VAULT_LICENSE=${var.vault_license}"] : []
)
capabilities {
add = ["IPC_LOCK"]
}
command = ["vault", "server", "-config=/vault/config/vault.hcl"]
restart = "no"
}
locals {
instance_indexes = [for idx in range(var.container_count) : tostring(idx)]
leader_idx = 0
followers_idx = range(1, var.container_count)
vault_address = "http://127.0.0.1:${var.vault_port}"
leader_api_addr = "http://${var.cluster_name}-${local.leader_idx}:${var.vault_port}"
}
# Initialize Vault on the leader
resource "enos_local_exec" "init_leader" {
inline = [
<<-EOT
# Wait for Vault to be ready (output to stderr to keep stdout clean)
for i in 1 2 3 4 5 6 7 8 9 10; do
if docker exec -e VAULT_ADDR=http://127.0.0.1:${var.vault_port} ${docker_container.vault[local.leader_idx].name} vault status 2>&1 | grep -q "Initialized.*false"; then
break
fi
echo "Waiting for Vault to start (attempt $i/10)..." >&2
sleep 2
done
# Initialize Vault and output JSON to stdout
docker exec -e VAULT_ADDR=http://127.0.0.1:${var.vault_port} ${docker_container.vault[local.leader_idx].name} vault operator init \
-key-shares=1 \
-key-threshold=1 \
-format=json
EOT
]
depends_on = [docker_container.vault]
}
locals {
init_data = jsondecode(enos_local_exec.init_leader.stdout)
unseal_key = local.init_data.unseal_keys_b64[0]
root_token = local.init_data.root_token
}
# Unseal the leader
resource "enos_local_exec" "unseal_leader" {
inline = [
"docker exec -e VAULT_ADDR=http://127.0.0.1:${var.vault_port} ${docker_container.vault[local.leader_idx].name} vault operator unseal ${local.unseal_key}"
]
depends_on = [enos_local_exec.init_leader]
}
# Join followers to Raft cluster and unseal them
resource "enos_local_exec" "join_followers" {
count = length(local.followers_idx)
inline = [
<<-EOT
# Wait for Vault to be ready
for i in 1 2 3 4 5; do
docker exec -e VAULT_ADDR=http://127.0.0.1:${var.vault_port} ${docker_container.vault[local.followers_idx[count.index]].name} vault status > /dev/null 2>&1 && break || sleep 5
done
# Join the Raft cluster
docker exec -e VAULT_ADDR=http://127.0.0.1:${var.vault_port} ${docker_container.vault[local.followers_idx[count.index]].name} \
vault operator raft join ${local.leader_api_addr}
# Unseal the follower
docker exec -e VAULT_ADDR=http://127.0.0.1:${var.vault_port} ${docker_container.vault[local.followers_idx[count.index]].name} \
vault operator unseal ${local.unseal_key}
EOT
]
depends_on = [enos_local_exec.unseal_leader]
}
# Outputs that match HCP module interface
output "cloud_provider" {
value = "docker"
description = "The cloud provider (docker for local)"
}
output "cluster_id" {
value = var.cluster_name
description = "The cluster identifier"
}
output "created_at" {
value = timestamp()
description = "Timestamp of cluster creation"
}
output "id" {
value = var.cluster_name
description = "The cluster identifier"
}
output "namespace" {
value = "root"
description = "The Vault namespace"
}
output "organization_id" {
value = "docker-local"
description = "The organization identifier"
}
output "region" {
value = "local"
description = "The region or location"
}
output "self_link" {
value = ""
description = "Self link to the cluster"
}
output "state" {
value = "RUNNING"
description = "The state of the cluster"
}
output "vault_private_endpoint_url" {
value = ""
description = "Private endpoint URL (not applicable for Docker)"
}
output "vault_proxy_endpoint_url" {
value = ""
description = "Proxy endpoint URL (not applicable for Docker)"
}
output "vault_public_endpoint_url" {
value = "http://localhost:${var.vault_port}"
description = "Public endpoint URL"
}
output "vault_version" {
value = local.vault_version
description = "The version of Vault deployed"
}
# Docker-specific outputs
output "container_names" {
value = docker_container.vault[*].name
description = "The names of the Vault containers"
}
output "container_ids" {
value = docker_container.vault[*].id
description = "The IDs of the Vault containers"
}
output "vault_addresses" {
value = [
for i in range(var.container_count) :
"http://localhost:${var.vault_port + i}"
]
description = "The addresses of the Vault containers"
}
output "primary_address" {
value = "http://localhost:${var.vault_port}"
description = "The address of the primary Vault container"
}
output "network_id" {
value = docker_network.cluster.id
description = "The ID of the created Docker network"
}
output "network_name" {
value = docker_network.cluster.name
description = "The name of the created Docker network"
}
output "image_name" {
value = var.use_local_build ? (length(docker_image.vault_local) > 0 ? docker_image.vault_local[0].name : "none") : (length(docker_image.vault_remote) > 0 ? docker_image.vault_remote[0].name : "none")
description = "The Docker image being used"
}
output "is_local_build" {
value = var.use_local_build
description = "Whether this is using a local build"
}
output "vault_root_token" {
value = local.root_token
sensitive = true
description = "The root token for the Vault cluster"
}
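The init, unseal, and join resources above boil down to the following manual flow against the containers. A sketch only, assuming the module defaults: leader container vault-0, follower vault-1, and port 8200.

# Initialize the leader with a single key share and capture the unseal key.
docker exec -e VAULT_ADDR=http://127.0.0.1:8200 vault-0 \
  vault operator init -key-shares=1 -key-threshold=1 -format=json > init.json
UNSEAL_KEY=$(jq -r '.unseal_keys_b64[0]' init.json)
# Unseal the leader, then join each follower to Raft and unseal it with the same key.
docker exec -e VAULT_ADDR=http://127.0.0.1:8200 vault-0 vault operator unseal "$UNSEAL_KEY"
docker exec -e VAULT_ADDR=http://127.0.0.1:8200 vault-1 vault operator raft join http://vault-0:8200
docker exec -e VAULT_ADDR=http://127.0.0.1:8200 vault-1 vault operator unseal "$UNSEAL_KEY"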

View File

@@ -0,0 +1,125 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
variable "vault_root_token" {
description = "The root token from the Docker Vault cluster"
type = string
sensitive = true
default = null
}
variable "vault_address" {
description = "The address of the Vault cluster"
type = string
default = null
}
# HCP compatibility variables (accepted but ignored)
variable "cluster_id" {
description = "Ignored - for HCP compatibility only"
type = string
default = null
}
terraform {
required_providers {
enos = {
source = "registry.terraform.io/hashicorp-forge/enos"
}
}
}
# We need the container name to exec into
variable "container_name" {
description = "The name of the Docker container running Vault (Docker only)"
type = string
default = null
}
variable "namespace_name" {
description = "The name of the namespace to create and generate the token in"
type = string
default = "admin"
}
# Create namespace using the root token (only when all required vars are present)
resource "enos_local_exec" "docker_create_namespace" {
count = var.vault_address != null && var.vault_root_token != null && var.container_name != null ? 1 : 0
inline = [
<<-EOT
docker exec -e VAULT_ADDR=${var.vault_address} -e VAULT_TOKEN=${var.vault_root_token} \
${var.container_name} vault namespace create ${var.namespace_name}
EOT
]
}
# Create policy in the namespace
resource "enos_local_exec" "docker_create_policy" {
count = var.vault_address != null && var.vault_root_token != null && var.container_name != null ? 1 : 0
inline = [
<<-EOT
# Write policy to a temp file in the container
docker exec ${var.container_name} sh -c 'cat > /tmp/${var.namespace_name}-policy.hcl << EOF
path "*" {
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
}
EOF'
# Apply the policy in the namespace
docker exec -e VAULT_ADDR=${var.vault_address} -e VAULT_TOKEN=${var.vault_root_token} -e VAULT_NAMESPACE=${var.namespace_name} \
${var.container_name} vault policy write ${var.namespace_name}-policy /tmp/${var.namespace_name}-policy.hcl
EOT
]
depends_on = [enos_local_exec.docker_create_namespace]
}
# Create token in the namespace
resource "enos_local_exec" "docker_create_token" {
count = var.vault_address != null && var.vault_root_token != null && var.container_name != null ? 1 : 0
inline = [
<<-EOT
docker exec -e VAULT_ADDR=${var.vault_address} -e VAULT_TOKEN=${var.vault_root_token} -e VAULT_NAMESPACE=${var.namespace_name} \
${var.container_name} vault token create \
-policy=${var.namespace_name}-policy \
-ttl=24h \
-renewable=true \
-metadata="purpose=${var.namespace_name}-token" \
-metadata="created_by=docker_namespace_token_module" \
-format=json | jq -r '.auth.client_token'
EOT
]
depends_on = [enos_local_exec.docker_create_policy]
}
locals {
# For Docker: use the created namespace token, for HCP: use root token (fallback)
namespace_token = length(enos_local_exec.docker_create_token) > 0 ? trimspace(enos_local_exec.docker_create_token[0].stdout) : var.vault_root_token
}
output "created_at" {
value = timestamp()
}
output "id" {
value = "docker-${var.namespace_name}-token"
}
output "token" {
value = local.namespace_token
sensitive = true
}
output "namespace" {
value = length(enos_local_exec.docker_create_token) > 0 ? var.namespace_name : "root"
description = "The namespace where the token is valid"
}
output "policy" {
value = length(enos_local_exec.docker_create_token) > 0 ? "${var.namespace_name}-policy" : "root"
description = "The policy assigned to the token"
}
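A token created by this module is scoped to the namespace rather than root. A quick sketch of consuming the module's token and namespace outputs against the Docker cluster's public endpoint; the token value is hypothetical.

export VAULT_ADDR=http://localhost:8200
export VAULT_NAMESPACE=admin
export VAULT_TOKEN=hvs.EXAMPLE   # "token" output of this module
# The token should carry the admin-policy policy created above.
vault token lookup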

View File

@@ -0,0 +1,31 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
terraform {
required_providers {
docker = {
source = "kreuzwerker/docker"
version = "~> 3.0"
}
}
}
variable "network_name" {
type = string
description = "The name of the Docker network to create"
default = "vault_cluster"
}
resource "docker_network" "cluster" {
name = var.network_name
}
output "network_id" {
value = docker_network.cluster.id
description = "The ID of the created Docker network"
}
output "network_name" {
value = docker_network.cluster.name
description = "The name of the created Docker network"
}

View File

@@ -6,6 +6,20 @@ variable "cluster_id" {
type = string
}
# Docker compatibility variables (accepted but ignored)
variable "vault_root_token" {
description = "Ignored - for Docker compatibility only"
type = string
default = null
sensitive = true
}
variable "vault_address" {
description = "Ignored - for Docker compatibility only"
type = string
default = null
}
resource "hcp_vault_cluster_admin_token" "token" {
cluster_id = var.cluster_id
}

View File

@@ -52,6 +52,44 @@ variable "upgrade_type" {
default = "MANUAL"
}
# Docker-specific variables (ignored but accepted for compatibility)
variable "vault_edition" {
type = string
description = "Ignored - for Docker compatibility only"
default = ""
}
variable "vault_license" {
type = string
description = "Ignored - for Docker compatibility only"
default = ""
sensitive = true
}
variable "network_name" {
type = string
description = "Ignored - for Docker compatibility only"
default = ""
}
variable "cluster_name" {
type = string
description = "Ignored - for Docker compatibility only"
default = ""
}
variable "use_local_build" {
type = bool
description = "Ignored - for Docker compatibility only"
default = false
}
variable "local_build_path" {
type = string
description = "Ignored - for Docker compatibility only"
default = ""
}
data "enos_environment" "localhost" {}
resource "random_string" "id" {

View File

@@ -13,261 +13,290 @@ export GIT_PAGER=cat
# Get the build date from the latest commit since it can be used across all
# builds
function build_date() {
# It's tricky to do an RFC3339 format in a cross platform way, so we hardcode UTC
: "${DATE_FORMAT:="%Y-%m-%dT%H:%M:%SZ"}"
git show --no-show-signature -s --format=%cd --date=format:"$DATE_FORMAT" HEAD
}
# Get the revision, which is the latest commit SHA
function build_revision() {
git rev-parse HEAD
}
# Determine our repository by looking at our origin URL
function repo() {
basename -s .git "$(git config --get remote.origin.url)"
}
# Determine the artifact basename based on metadata
function artifact_basename() {
: "${PKG_NAME:="vault"}"
: "${GOOS:=$(go env GOOS)}"
: "${GOARCH:=$(go env GOARCH)}"
: "${VERSION_METADATA:="ce"}"
: "${PKG_NAME:="vault"}"
: "${GOOS:=$(go env GOOS)}"
: "${GOARCH:=$(go env GOARCH)}"
: "${VERSION_METADATA:="ce"}"
: "${VERSION:=""}"
if [ -z "$VERSION" ]; then
echo "You must specify the VERSION variable for this command" >&2
exit 1
fi
: "${VERSION:=""}"
if [ -z "$VERSION" ]; then
echo "You must specify the VERSION variable for this command" >&2
exit 1
fi
local version
version="$VERSION"
if [ "$VERSION_METADATA" != "ce" ]; then
version="${VERSION}+${VERSION_METADATA}"
fi
local version
version="$VERSION"
if [ "$VERSION_METADATA" != "ce" ]; then
version="${VERSION}+${VERSION_METADATA}"
fi
echo "${PKG_NAME}_${version}_${GOOS}_${GOARCH}"
echo "${PKG_NAME}_${version}_${GOOS}_${GOARCH}"
}
# Copy binaries from BIN_PATH to TARGET_BIN_PATH
function copy_binary() {
: "${BIN_PATH:="dist/"}"
if [ -z "${TARGET_BIN_PATH:-}" ]; then
echo "TARGET_BIN_PATH not specified, skipping binary copy" >&2
return 0
fi
echo "--> Copying binary from $BIN_PATH to $TARGET_BIN_PATH"
mkdir -p "$TARGET_BIN_PATH"
if [ -d "$BIN_PATH" ]; then
cp -r "$BIN_PATH"/* "$TARGET_BIN_PATH/"
else
echo "Warning: Source directory $BIN_PATH does not exist" >&2
return 1
fi
}
# Bundle the dist directory into a zip
# Note: This always bundles from BIN_PATH (default dist/), regardless of TARGET_BIN_PATH
function bundle() {
: "${BUNDLE_PATH:=$(repo_root)/vault.zip}"
echo "--> Bundling dist/* to $BUNDLE_PATH..."
zip -r -j "$BUNDLE_PATH" dist/
: "${BUNDLE_PATH:=$(repo_root)/vault.zip}"
: "${BIN_PATH:="dist/"}"
if [ ! -d "$BIN_PATH" ] || [ -z "$(ls -A "$BIN_PATH" 2>/dev/null)" ]; then
echo "Warning: $BIN_PATH is empty or does not exist, bundle will be empty" >&2
fi
echo "--> Bundling $BIN_PATH/* to $BUNDLE_PATH..."
zip -r -j "$BUNDLE_PATH" "$BIN_PATH/"
}
# Determine the root directory of the repository
function repo_root() {
git rev-parse --show-toplevel
}
# Build the UI
function build_ui() {
local repo_root
repo_root=$(repo_root)
pushd "$repo_root"
mkdir -p http/web_ui
popd
pushd "$repo_root/ui"
yarn install
npm rebuild node-sass
yarn run build
popd
}
# Build Vault
function build() {
local revision
local base_version
local build_date
local ldflags
local msg
# Get or set our basic build metadata
revision=$(build_revision)
build_date=$(build_date)
base_version=$(version_base)
version=$(version)
: "${BIN_PATH:="dist/"}" # if not run by actions-go-build (enos local) then set this explicitly
: "${GO_TAGS:=""}"
: "${REMOVE_SYMBOLS:=""}"
# Generate code but make sure we don't slurp in cross compilation env vars
(
unset GOOS
unset GOARCH
unset CC
unset CC_FOR_TARGET
go generate ./...
)
# Build our ldflags
msg="--> Building Vault v$version revision $revision, built $build_date..."
# Keep the symbol and dwarf information by default
if [ -n "$REMOVE_SYMBOLS" ]; then
ldflags="-s -w "
else
ldflags=""
fi
# If you read what happens in the "version" package you will see that the
# "version.Version" symbol is automatically set from the embedded VERSION
# file. So why are we setting it again with linker flags?
#
# Well, some third-party security scanners like Trivy attempt to determine a
# Go binary's "version" by reading the embedded debug build info. The main
# module "version" reported there has little to do with what we consider
# Vault's version and is instead what the Go module system considers the
# vault module's "pseudo-version"[0].
#
# What Go determines as the pseudo-version can be pretty complicated. If you
# tag a commit with a semver-ish tag and push it before you build the binary,
# the "pseudo-version" will be the tag value. But what if you build the binary
# before a commit has an associated tag, like we do? Well, it depends. If you
# build a Go binary with "-buildvcs" enabled, the "pseudo-version" reported
# here looks something like: "<prior release tag>-<timestamp>-<sha>+dirty".
# If Go cannot resolve a prior tag you'll get "v0.0.0" in place of
# "<prior release tag>". If you disable "-buildvcs" you'll get "devel".
#
# As we can see, there's quite a lot of variance in this system, and a module's
# "version" is an unreliable way to reason about a piece of software's "version".
# But that doesn't stop tools from using it and reporting CVEs based on it!
#
# That's right. If you publish a binary with the "+dirty" style pseudo-version,
# and the prior tag that it resolves to is associated with a CVE, your binary will
# be flagged for the same CVE even if it has nothing to do with the prior tag.
# If you disable "buildvcs" (we do) these tools cannot determine a "version"
# (because it's always "devel"). When this happens these scanners also fail
# because they can't determine a version. Cool.
#
# So that brings us back to our original query: what's going on with the
# ldflags. To work around this problem, Trivy *reads arbitrary ldflags in
# the binary build info* to determine the "version"![1] when the main module
# does not report a version. And it is because of that, dear reader, that we
# inject our version again via linker flags, to please tooling that relies on
# the unreliable.
#
# [0]: https://go.dev/doc/modules/version-numbers#pseudo-version-number
# [1]: https://trivy.dev/v0.62/docs/coverage/language/golang/#main-module
ldflags="${ldflags} -X github.com/hashicorp/vault/version.GitCommit=$revision -X github.com/hashicorp/vault/version.BuildDate=$build_date -X github.com/hashicorp/vault/version.Version=$base_version"
if [[ ${VERSION_METADATA+x} ]]; then
msg="${msg}, metadata ${VERSION_METADATA}"
ldflags="${ldflags} -X github.com/hashicorp/vault/version.VersionMetadata=$VERSION_METADATA"
fi
# Build vault
echo "$msg"
pushd "$(repo_root)"
mkdir -p dist
mkdir -p out
set -x
go env
go build -v -buildvcs=false -tags "$GO_TAGS" -ldflags "$ldflags" -o dist/
set +x
popd
}
# ENT: Prepare legal requirements for packaging
function prepare_ent_legal() {
: "${PKG_NAME:="vault"}"
: "${PKG_NAME:="vault"}"
if [ -z "${LICENSE_DIR:-}" ]; then
echo "You must set LICENSE_DIR; example: export LICENSE_DIR=.release/ibm-pao/license/default" 1>&2
return 1
fi
if [ -z "${LICENSE_DIR:-}" ]; then
echo "You must set LICENSE_DIR; example: export LICENSE_DIR=.release/ibm-pao/license/default" 1>&2
return 1
fi
pushd "$(repo_root)"
mkdir -p dist
cp -R "$LICENSE_DIR" dist/
mkdir -p ".release/linux/package/usr/share/doc/$PKG_NAME"
cp -R "$LICENSE_DIR" ".release/linux/package/usr/share/doc/$PKG_NAME/"
popd
pushd "$(repo_root)"
mkdir -p dist
cp -R "$LICENSE_DIR" dist/
mkdir -p ".release/linux/package/usr/share/doc/$PKG_NAME"
cp -R "$LICENSE_DIR" ".release/linux/package/usr/share/doc/$PKG_NAME/"
popd
}
# CE: Prepare legal requirements for packaging
function prepare_ce_legal() {
: "${PKG_NAME:="vault"}"
: "${PKG_NAME:="vault"}"
pushd "$(repo_root)"
pushd "$(repo_root)"
mkdir -p dist
cp LICENSE dist/LICENSE.txt
mkdir -p dist
cp LICENSE dist/LICENSE.txt
mkdir -p ".release/linux/package/usr/share/doc/$PKG_NAME"
cp LICENSE ".release/linux/package/usr/share/doc/$PKG_NAME/LICENSE.txt"
mkdir -p ".release/linux/package/usr/share/doc/$PKG_NAME"
cp LICENSE ".release/linux/package/usr/share/doc/$PKG_NAME/LICENSE.txt"
popd
popd
}
# version returns the $VAULT_VERSION env variable or reads the VERSION file.
function version() {
if [[ -n "${VAULT_VERSION+x}" ]]; then
echo "${VAULT_VERSION}"
return 0
fi
cat "$(readlink -f "$(dirname "$0")/../version/VERSION")"
}
# Base version converts a vault version string into the base version, which omits
# any prerelease or edition metadata.
function version_base() {
local ver
ver=$(version)
echo "${ver%%-*}"
}
# Package version converts a vault version string into a compatible representation for system
# packages.
function version_package() {
awk '{ gsub("-","~",$1); print $1 }' <<<"$(version)"
}
# Run the CI Helper
function main() {
case $1 in
artifact-basename)
artifact_basename
;;
build)
build
;;
build-ui)
build_ui
;;
bundle)
bundle
;;
copy-binary)
copy_binary
;;
date)
build_date
;;
prepare-ent-legal)
prepare_ent_legal
;;
prepare-ce-legal)
prepare_ce_legal
;;
revision)
build_revision
;;
version)
version
;;
version-base)
version_base
;;
version-package)
version_package
;;
*)
echo "unknown sub-command" >&2
exit 1
;;
esac
}
main "$@"